Dataset columns (type and observed range):

repo: string, lengths 7 to 55
path: string, lengths 4 to 223
url: string, lengths 87 to 315
code: string, lengths 75 to 104k
code_tokens: list
docstring: string, lengths 1 to 46.9k
docstring_tokens: list
language: string class, 1 distinct value
partition: string class, 3 distinct values
avg_line_len: float64, 7.91 to 980

Each record below lists these ten fields in this order, one field per line.
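Before the records themselves, here is a minimal sketch of how rows with this schema could be consumed, assuming the dump is also available as JSON Lines with exactly the field names listed above; the file name and the JSONL format are assumptions of this sketch, not something the listing states.

import json
from statistics import mean

def iter_records(path):
    """Yield one record dict per line from an assumed JSON Lines dump of this dataset."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            if line.strip():
                yield json.loads(line)

def summarize(path):
    """Group records by the partition field and report a simple size statistic."""
    by_partition = {}
    for record in iter_records(path):
        by_partition.setdefault(record["partition"], []).append(record)
    for partition, records in sorted(by_partition.items()):
        doc_tokens = [len(record["docstring_tokens"]) for record in records]
        print(f"{partition}: {len(records)} records, "
              f"mean docstring length {mean(doc_tokens):.1f} tokens")

if __name__ == "__main__":
    summarize("code_docstring_pairs.jsonl")  # hypothetical file name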
coumbole/mailscanner
mailscanner/reader.py
https://github.com/coumbole/mailscanner/blob/ead19ac8c7dee27e507c1593032863232c13f636/mailscanner/reader.py#L60-L72
def get_subject(self, msg): """Extracts the subject line from an EmailMessage object.""" text, encoding = decode_header(msg['subject'])[-1] try: text = text.decode(encoding) # If it's already decoded, ignore error except AttributeError: pass return text
[ "def", "get_subject", "(", "self", ",", "msg", ")", ":", "text", ",", "encoding", "=", "decode_header", "(", "msg", "[", "'subject'", "]", ")", "[", "-", "1", "]", "try", ":", "text", "=", "text", ".", "decode", "(", "encoding", ")", "# If it's already decoded, ignore error", "except", "AttributeError", ":", "pass", "return", "text" ]
Extracts the subject line from an EmailMessage object.
[ "Extracts", "the", "subject", "line", "from", "an", "EmailMessage", "object", "." ]
python
train
24.384615
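The record above closes with avg_line_len = 24.384615. A minimal sketch of how that derived column might be reproduced, assuming it is simply the mean character count per line of the original multi-line function source (the listing itself does not define the column):

def avg_line_len(code: str) -> float:
    """Assumed definition of the avg_line_len column: mean characters per source line."""
    lines = code.splitlines() or [""]
    return sum(len(line) for line in lines) / len(lines)

# The get_subject record above spans 13 source lines (its URL marks L60-L72),
# so the listed 24.384615 would correspond to roughly 317 characters of source
# text under this assumed definition.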
timkpaine/pyEX
pyEX/marketdata/http.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/marketdata/http.py#L525-L556
def tradingStatus(symbol=None, token='', version=''): '''The Trading status message is used to indicate the current trading status of a security. For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons. For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable. IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities. In the spin, IEX will send out a Trading status message with “T” (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session. If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System. After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is: Halted Paused* Released into an Order Acceptance Period* Released for trading *The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt. https://iexcloud.io/docs/api/#deep-trading-status Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result ''' _raiseIfNotStr(symbol) if symbol: return _getJson('deep/trading-status?symbols=' + symbol, token, version) return _getJson('deep/trading-status', token, version)
[ "def", "tradingStatus", "(", "symbol", "=", "None", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "_raiseIfNotStr", "(", "symbol", ")", "if", "symbol", ":", "return", "_getJson", "(", "'deep/trading-status?symbols='", "+", "symbol", ",", "token", ",", "version", ")", "return", "_getJson", "(", "'deep/trading-status'", ",", "token", ",", "version", ")" ]
The Trading status message is used to indicate the current trading status of a security. For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons. For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable. IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities. In the spin, IEX will send out a Trading status message with “T” (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session. If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System. After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is: Halted Paused* Released into an Order Acceptance Period* Released for trading *The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt. https://iexcloud.io/docs/api/#deep-trading-status Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result
[ "The", "Trading", "status", "message", "is", "used", "to", "indicate", "the", "current", "trading", "status", "of", "a", "security", ".", "For", "IEX", "-", "listed", "securities", "IEX", "acts", "as", "the", "primary", "market", "and", "has", "the", "authority", "to", "institute", "a", "trading", "halt", "or", "trading", "pause", "in", "a", "security", "due", "to", "news", "dissemination", "or", "regulatory", "reasons", ".", "For", "non", "-", "IEX", "-", "listed", "securities", "IEX", "abides", "by", "any", "regulatory", "trading", "halts", "and", "trading", "pauses", "instituted", "by", "the", "primary", "or", "listing", "market", "as", "applicable", "." ]
python
valid
56.21875
msmbuilder/osprey
osprey/plugins/plugin_pylearn2.py
https://github.com/msmbuilder/osprey/blob/ea09da24e45820e1300e24a52fefa6c849f7a986/osprey/plugins/plugin_pylearn2.py#L31-L41
def _get_param_names(self): """ Get mappable parameters from YAML. """ template = Template(self.yaml_string) names = ['yaml_string'] # always include the template for match in re.finditer(template.pattern, template.template): name = match.group('named') or match.group('braced') assert name is not None names.append(name) return names
[ "def", "_get_param_names", "(", "self", ")", ":", "template", "=", "Template", "(", "self", ".", "yaml_string", ")", "names", "=", "[", "'yaml_string'", "]", "# always include the template", "for", "match", "in", "re", ".", "finditer", "(", "template", ".", "pattern", ",", "template", ".", "template", ")", ":", "name", "=", "match", ".", "group", "(", "'named'", ")", "or", "match", ".", "group", "(", "'braced'", ")", "assert", "name", "is", "not", "None", "names", ".", "append", "(", "name", ")", "return", "names" ]
Get mappable parameters from YAML.
[ "Get", "mappable", "parameters", "from", "YAML", "." ]
python
valid
37.909091
openstack/networking-cisco
networking_cisco/plugins/cisco/db/device_manager/hosting_device_manager_db.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/device_manager/hosting_device_manager_db.py#L312-L374
def acquire_hosting_device_slots(self, context, hosting_device, resource, resource_type, resource_service, num, exclusive=False): """Assign <num> slots in <hosting_device> to logical <resource>. If exclusive is True the hosting device is bound to the resource's tenant. Otherwise it is not bound to any tenant. Returns True if allocation was granted, False otherwise. """ bound = hosting_device['tenant_bound'] if ((bound is not None and bound != resource['tenant_id']) or (exclusive and not self._exclusively_used(context, hosting_device, resource['tenant_id']))): LOG.debug( 'Rejecting allocation of %(num)d slots in tenant %(bound)s ' 'hosting device %(device)s to logical resource %(r_id)s due ' 'to exclusive use conflict.', {'num': num, 'bound': 'unbound' if bound is None else bound + ' bound', 'device': hosting_device['id'], 'r_id': resource['id']}) return False with context.session.begin(subtransactions=True): res_info = {'resource': resource, 'type': resource_type, 'service': resource_service} slot_info, query = self._get_or_create_slot_allocation( context, hosting_device, res_info) if slot_info is None: LOG.debug('Rejecting allocation of %(num)d slots in hosting ' 'device %(device)s to logical resource %(r_id)s', {'num': num, 'device': hosting_device['id'], 'r_id': resource['id']}) return False new_allocation = num + slot_info.num_allocated if hosting_device['template']['slot_capacity'] < new_allocation: LOG.debug('Rejecting allocation of %(num)d slots in ' 'hosting device %(device)s to logical resource ' '%(r_id)s due to insufficent slot availability.', {'num': num, 'device': hosting_device['id'], 'r_id': resource['id']}) self._dispatch_pool_maintenance_job(hosting_device['template']) return False # handle any changes to exclusive usage by tenant if exclusive and bound is None: self._update_hosting_device_exclusivity( context, hosting_device, resource['tenant_id']) bound = resource['tenant_id'] elif not exclusive and bound is not None: self._update_hosting_device_exclusivity(context, hosting_device, None) bound = None slot_info.num_allocated = new_allocation context.session.add(slot_info) self._dispatch_pool_maintenance_job(hosting_device['template']) # report success LOG.info('Allocated %(num)d additional slots in tenant %(bound)s' 'bound hosting device %(hd_id)s. In total %(total)d ' 'slots are now allocated in that hosting device for ' 'logical resource %(r_id)s.', {'num': num, 'bound': 'un-' if bound is None else bound + ' ', 'total': new_allocation, 'hd_id': hosting_device['id'], 'r_id': resource['id']}) return True
[ "def", "acquire_hosting_device_slots", "(", "self", ",", "context", ",", "hosting_device", ",", "resource", ",", "resource_type", ",", "resource_service", ",", "num", ",", "exclusive", "=", "False", ")", ":", "bound", "=", "hosting_device", "[", "'tenant_bound'", "]", "if", "(", "(", "bound", "is", "not", "None", "and", "bound", "!=", "resource", "[", "'tenant_id'", "]", ")", "or", "(", "exclusive", "and", "not", "self", ".", "_exclusively_used", "(", "context", ",", "hosting_device", ",", "resource", "[", "'tenant_id'", "]", ")", ")", ")", ":", "LOG", ".", "debug", "(", "'Rejecting allocation of %(num)d slots in tenant %(bound)s '", "'hosting device %(device)s to logical resource %(r_id)s due '", "'to exclusive use conflict.'", ",", "{", "'num'", ":", "num", ",", "'bound'", ":", "'unbound'", "if", "bound", "is", "None", "else", "bound", "+", "' bound'", ",", "'device'", ":", "hosting_device", "[", "'id'", "]", ",", "'r_id'", ":", "resource", "[", "'id'", "]", "}", ")", "return", "False", "with", "context", ".", "session", ".", "begin", "(", "subtransactions", "=", "True", ")", ":", "res_info", "=", "{", "'resource'", ":", "resource", ",", "'type'", ":", "resource_type", ",", "'service'", ":", "resource_service", "}", "slot_info", ",", "query", "=", "self", ".", "_get_or_create_slot_allocation", "(", "context", ",", "hosting_device", ",", "res_info", ")", "if", "slot_info", "is", "None", ":", "LOG", ".", "debug", "(", "'Rejecting allocation of %(num)d slots in hosting '", "'device %(device)s to logical resource %(r_id)s'", ",", "{", "'num'", ":", "num", ",", "'device'", ":", "hosting_device", "[", "'id'", "]", ",", "'r_id'", ":", "resource", "[", "'id'", "]", "}", ")", "return", "False", "new_allocation", "=", "num", "+", "slot_info", ".", "num_allocated", "if", "hosting_device", "[", "'template'", "]", "[", "'slot_capacity'", "]", "<", "new_allocation", ":", "LOG", ".", "debug", "(", "'Rejecting allocation of %(num)d slots in '", "'hosting device %(device)s to logical resource '", "'%(r_id)s due to insufficent slot availability.'", ",", "{", "'num'", ":", "num", ",", "'device'", ":", "hosting_device", "[", "'id'", "]", ",", "'r_id'", ":", "resource", "[", "'id'", "]", "}", ")", "self", ".", "_dispatch_pool_maintenance_job", "(", "hosting_device", "[", "'template'", "]", ")", "return", "False", "# handle any changes to exclusive usage by tenant", "if", "exclusive", "and", "bound", "is", "None", ":", "self", ".", "_update_hosting_device_exclusivity", "(", "context", ",", "hosting_device", ",", "resource", "[", "'tenant_id'", "]", ")", "bound", "=", "resource", "[", "'tenant_id'", "]", "elif", "not", "exclusive", "and", "bound", "is", "not", "None", ":", "self", ".", "_update_hosting_device_exclusivity", "(", "context", ",", "hosting_device", ",", "None", ")", "bound", "=", "None", "slot_info", ".", "num_allocated", "=", "new_allocation", "context", ".", "session", ".", "add", "(", "slot_info", ")", "self", ".", "_dispatch_pool_maintenance_job", "(", "hosting_device", "[", "'template'", "]", ")", "# report success", "LOG", ".", "info", "(", "'Allocated %(num)d additional slots in tenant %(bound)s'", "'bound hosting device %(hd_id)s. In total %(total)d '", "'slots are now allocated in that hosting device for '", "'logical resource %(r_id)s.'", ",", "{", "'num'", ":", "num", ",", "'bound'", ":", "'un-'", "if", "bound", "is", "None", "else", "bound", "+", "' '", ",", "'total'", ":", "new_allocation", ",", "'hd_id'", ":", "hosting_device", "[", "'id'", "]", ",", "'r_id'", ":", "resource", "[", "'id'", "]", "}", ")", "return", "True" ]
Assign <num> slots in <hosting_device> to logical <resource>. If exclusive is True the hosting device is bound to the resource's tenant. Otherwise it is not bound to any tenant. Returns True if allocation was granted, False otherwise.
[ "Assign", "<num", ">", "slots", "in", "<hosting_device", ">", "to", "logical", "<resource", ">", "." ]
python
train
56.079365
JasonKessler/scattertext
scattertext/termcompaction/AssociationCompactor.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/termcompaction/AssociationCompactor.py#L85-L98
def compact(self, term_doc_matrix): ''' Parameters ---------- term_doc_matrix : TermDocMatrix Term document matrix object to compact Returns ------- TermDocMatrix ''' rank_df = self.scorer.get_rank_df(term_doc_matrix) return self._prune_higher_ranked_terms(term_doc_matrix, rank_df, self.rank)
[ "def", "compact", "(", "self", ",", "term_doc_matrix", ")", ":", "rank_df", "=", "self", ".", "scorer", ".", "get_rank_df", "(", "term_doc_matrix", ")", "return", "self", ".", "_prune_higher_ranked_terms", "(", "term_doc_matrix", ",", "rank_df", ",", "self", ".", "rank", ")" ]
Parameters ---------- term_doc_matrix : TermDocMatrix Term document matrix object to compact Returns ------- TermDocMatrix
[ "Parameters", "----------", "term_doc_matrix", ":", "TermDocMatrix", "Term", "document", "matrix", "object", "to", "compact", "Returns", "-------", "TermDocMatrix" ]
python
train
26.714286
PGower/PyCanvas
pycanvas/apis/assignments.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L865-L890
def batch_retrieve_overrides_in_course(self, course_id, assignment_overrides_id, assignment_overrides_assignment_id): """ Batch retrieve overrides in a course. Returns a list of specified overrides in this course, providing they target sections/groups/students visible to the current user. Returns null elements in the list for requests that were not found. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - assignment_overrides[id] """Ids of overrides to retrieve""" params["assignment_overrides[id]"] = assignment_overrides_id # REQUIRED - assignment_overrides[assignment_id] """Ids of assignments for each override""" params["assignment_overrides[assignment_id]"] = assignment_overrides_assignment_id self.logger.debug("GET /api/v1/courses/{course_id}/assignments/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/overrides".format(**path), data=data, params=params, all_pages=True)
[ "def", "batch_retrieve_overrides_in_course", "(", "self", ",", "course_id", ",", "assignment_overrides_id", ",", "assignment_overrides_assignment_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", "course_id", "# REQUIRED - assignment_overrides[id]\r", "\"\"\"Ids of overrides to retrieve\"\"\"", "params", "[", "\"assignment_overrides[id]\"", "]", "=", "assignment_overrides_id", "# REQUIRED - assignment_overrides[assignment_id]\r", "\"\"\"Ids of assignments for each override\"\"\"", "params", "[", "\"assignment_overrides[assignment_id]\"", "]", "=", "assignment_overrides_assignment_id", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/courses/{course_id}/assignments/overrides with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/courses/{course_id}/assignments/overrides\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "all_pages", "=", "True", ")" ]
Batch retrieve overrides in a course. Returns a list of specified overrides in this course, providing they target sections/groups/students visible to the current user. Returns null elements in the list for requests that were not found.
[ "Batch", "retrieve", "overrides", "in", "a", "course", ".", "Returns", "a", "list", "of", "specified", "overrides", "in", "this", "course", "providing", "they", "target", "sections", "/", "groups", "/", "students", "visible", "to", "the", "current", "user", ".", "Returns", "null", "elements", "in", "the", "list", "for", "requests", "that", "were", "not", "found", "." ]
python
train
47.961538
Microsoft/LightGBM
python-package/lightgbm/basic.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L1232-L1245
def _set_predictor(self, predictor): """Set predictor for continued training. It is not recommended for user to call this function. Please use init_model argument in engine.train() or engine.cv() instead. """ if predictor is self._predictor: return self if self.data is not None: self._predictor = predictor return self._free_handle() else: raise LightGBMError("Cannot set predictor after freed raw data, " "set free_raw_data=False when construct Dataset to avoid this.")
[ "def", "_set_predictor", "(", "self", ",", "predictor", ")", ":", "if", "predictor", "is", "self", ".", "_predictor", ":", "return", "self", "if", "self", ".", "data", "is", "not", "None", ":", "self", ".", "_predictor", "=", "predictor", "return", "self", ".", "_free_handle", "(", ")", "else", ":", "raise", "LightGBMError", "(", "\"Cannot set predictor after freed raw data, \"", "\"set free_raw_data=False when construct Dataset to avoid this.\"", ")" ]
Set predictor for continued training. It is not recommended for user to call this function. Please use init_model argument in engine.train() or engine.cv() instead.
[ "Set", "predictor", "for", "continued", "training", "." ]
python
train
42.5
a1ezzz/wasp-general
wasp_general/types/binarray.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/types/binarray.py#L178-L194
def split(self, bits_count): """ Split array into smaller parts. Each small array is fixed-length WBinArray (length of that array is bits_count). :param bits_count: array length :return: list of WBinArray """ result = [] array = WBinArray(self.__value, self.__size) if (len(array) % bits_count) > 0: array.resize(len(array) + (bits_count - (len(array) % bits_count))) while len(array): result.append(WBinArray(array[:bits_count], bits_count)) array = array[bits_count:] return result
[ "def", "split", "(", "self", ",", "bits_count", ")", ":", "result", "=", "[", "]", "array", "=", "WBinArray", "(", "self", ".", "__value", ",", "self", ".", "__size", ")", "if", "(", "len", "(", "array", ")", "%", "bits_count", ")", ">", "0", ":", "array", ".", "resize", "(", "len", "(", "array", ")", "+", "(", "bits_count", "-", "(", "len", "(", "array", ")", "%", "bits_count", ")", ")", ")", "while", "len", "(", "array", ")", ":", "result", ".", "append", "(", "WBinArray", "(", "array", "[", ":", "bits_count", "]", ",", "bits_count", ")", ")", "array", "=", "array", "[", "bits_count", ":", "]", "return", "result" ]
Split array into smaller parts. Each small array is fixed-length WBinArray (length of that array is bits_count). :param bits_count: array length :return: list of WBinArray
[ "Split", "array", "into", "smaller", "parts", ".", "Each", "small", "array", "is", "fixed", "-", "length", "WBinArray", "(", "length", "of", "that", "array", "is", "bits_count", ")", "." ]
python
train
29.352941
kdeldycke/maildir-deduplicate
maildir_deduplicate/deduplicate.py
https://github.com/kdeldycke/maildir-deduplicate/blob/f1c6ff25b80c6c1a4dc2dc7a65b34d808b0b7733/maildir_deduplicate/deduplicate.py#L122-L157
def check_differences(self): """ In-depth check of mail differences. Compare all mails of the duplicate set with each other, both in size and content. Raise an error if we're not within the limits imposed by the threshold setting. """ logger.info("Check that mail differences are within the limits.") if self.conf.size_threshold < 0: logger.info("Skip checking for size differences.") if self.conf.content_threshold < 0: logger.info("Skip checking for content differences.") if self.conf.size_threshold < 0 and self.conf.content_threshold < 0: return # Compute differences of mail against one another. for mail_a, mail_b in combinations(self.pool, 2): # Compare mails on size. if self.conf.size_threshold > -1: size_difference = abs(mail_a.size - mail_b.size) logger.debug("{} and {} differs by {} bytes in size.".format( mail_a, mail_b, size_difference)) if size_difference > self.conf.size_threshold: raise SizeDiffAboveThreshold # Compare mails on content. if self.conf.content_threshold > -1: content_difference = self.diff(mail_a, mail_b) logger.debug( "{} and {} differs by {} bytes in content.".format( mail_a, mail_b, content_difference)) if content_difference > self.conf.content_threshold: if self.conf.show_diff: logger.info(self.pretty_diff(mail_a, mail_b)) raise ContentDiffAboveThreshold
[ "def", "check_differences", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Check that mail differences are within the limits.\"", ")", "if", "self", ".", "conf", ".", "size_threshold", "<", "0", ":", "logger", ".", "info", "(", "\"Skip checking for size differences.\"", ")", "if", "self", ".", "conf", ".", "content_threshold", "<", "0", ":", "logger", ".", "info", "(", "\"Skip checking for content differences.\"", ")", "if", "self", ".", "conf", ".", "size_threshold", "<", "0", "and", "self", ".", "conf", ".", "content_threshold", "<", "0", ":", "return", "# Compute differences of mail against one another.", "for", "mail_a", ",", "mail_b", "in", "combinations", "(", "self", ".", "pool", ",", "2", ")", ":", "# Compare mails on size.", "if", "self", ".", "conf", ".", "size_threshold", ">", "-", "1", ":", "size_difference", "=", "abs", "(", "mail_a", ".", "size", "-", "mail_b", ".", "size", ")", "logger", ".", "debug", "(", "\"{} and {} differs by {} bytes in size.\"", ".", "format", "(", "mail_a", ",", "mail_b", ",", "size_difference", ")", ")", "if", "size_difference", ">", "self", ".", "conf", ".", "size_threshold", ":", "raise", "SizeDiffAboveThreshold", "# Compare mails on content.", "if", "self", ".", "conf", ".", "content_threshold", ">", "-", "1", ":", "content_difference", "=", "self", ".", "diff", "(", "mail_a", ",", "mail_b", ")", "logger", ".", "debug", "(", "\"{} and {} differs by {} bytes in content.\"", ".", "format", "(", "mail_a", ",", "mail_b", ",", "content_difference", ")", ")", "if", "content_difference", ">", "self", ".", "conf", ".", "content_threshold", ":", "if", "self", ".", "conf", ".", "show_diff", ":", "logger", ".", "info", "(", "self", ".", "pretty_diff", "(", "mail_a", ",", "mail_b", ")", ")", "raise", "ContentDiffAboveThreshold" ]
In-depth check of mail differences. Compare all mails of the duplicate set with each other, both in size and content. Raise an error if we're not within the limits imposed by the threshold setting.
[ "In", "-", "depth", "check", "of", "mail", "differences", "." ]
python
train
46.833333
spacetelescope/synphot_refactor
synphot/spectrum.py
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/spectrum.py#L495-L521
def avgwave(self, wavelengths=None): """Calculate the :ref:`average wavelength <synphot-formula-avgwv>`. Parameters ---------- wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None` Wavelength values for sampling. If not a Quantity, assumed to be in Angstrom. If `None`, `waveset` is used. Returns ------- avg_wave : `~astropy.units.quantity.Quantity` Average wavelength. """ x = self._validate_wavelengths(wavelengths).value y = self(x).value num = np.trapz(y * x, x=x) den = np.trapz(y, x=x) if den == 0: # pragma: no cover avg_wave = 0.0 else: avg_wave = abs(num / den) return avg_wave * self._internal_wave_unit
[ "def", "avgwave", "(", "self", ",", "wavelengths", "=", "None", ")", ":", "x", "=", "self", ".", "_validate_wavelengths", "(", "wavelengths", ")", ".", "value", "y", "=", "self", "(", "x", ")", ".", "value", "num", "=", "np", ".", "trapz", "(", "y", "*", "x", ",", "x", "=", "x", ")", "den", "=", "np", ".", "trapz", "(", "y", ",", "x", "=", "x", ")", "if", "den", "==", "0", ":", "# pragma: no cover", "avg_wave", "=", "0.0", "else", ":", "avg_wave", "=", "abs", "(", "num", "/", "den", ")", "return", "avg_wave", "*", "self", ".", "_internal_wave_unit" ]
Calculate the :ref:`average wavelength <synphot-formula-avgwv>`. Parameters ---------- wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None` Wavelength values for sampling. If not a Quantity, assumed to be in Angstrom. If `None`, `waveset` is used. Returns ------- avg_wave : `~astropy.units.quantity.Quantity` Average wavelength.
[ "Calculate", "the", ":", "ref", ":", "average", "wavelength", "<synphot", "-", "formula", "-", "avgwv", ">", "." ]
python
train
29.777778
sirfoga/pyhal
hal/ml/correlation.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/ml/correlation.py#L42-L50
def show_correlation_matrix(self, correlation_matrix): """Shows the given correlation matrix as image :param correlation_matrix: Correlation matrix of features """ cr_plot.create_correlation_matrix_plot( correlation_matrix, self.title, self.headers_to_test ) pyplot.show()
[ "def", "show_correlation_matrix", "(", "self", ",", "correlation_matrix", ")", ":", "cr_plot", ".", "create_correlation_matrix_plot", "(", "correlation_matrix", ",", "self", ".", "title", ",", "self", ".", "headers_to_test", ")", "pyplot", ".", "show", "(", ")" ]
Shows the given correlation matrix as image :param correlation_matrix: Correlation matrix of features
[ "Shows", "the", "given", "correlation", "matrix", "as", "image" ]
python
train
36.111111
textX/textX
textx/model.py
https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/model.py#L49-L66
def get_parent_of_type(typ, obj): """ Finds first object up the parent chain of the given type. If no parent of the given type exists None is returned. Args: typ(str or python class): The type of the model object we are looking for. obj (model object): Python model object which is the start of the search process. """ if type(typ) is not text: typ = typ.__name__ while hasattr(obj, 'parent'): obj = obj.parent if obj.__class__.__name__ == typ: return obj
[ "def", "get_parent_of_type", "(", "typ", ",", "obj", ")", ":", "if", "type", "(", "typ", ")", "is", "not", "text", ":", "typ", "=", "typ", ".", "__name__", "while", "hasattr", "(", "obj", ",", "'parent'", ")", ":", "obj", "=", "obj", ".", "parent", "if", "obj", ".", "__class__", ".", "__name__", "==", "typ", ":", "return", "obj" ]
Finds first object up the parent chain of the given type. If no parent of the given type exists None is returned. Args: typ(str or python class): The type of the model object we are looking for. obj (model object): Python model object which is the start of the search process.
[ "Finds", "first", "object", "up", "the", "parent", "chain", "of", "the", "given", "type", ".", "If", "no", "parent", "of", "the", "given", "type", "exists", "None", "is", "returned", "." ]
python
train
30.222222
Captricity/captools
captools/api/client.py
https://github.com/Captricity/captools/blob/e7dc069ff5ede95d4956c7a0a4614d0e53e5a955/captools/api/client.py#L258-L269
def launch_job(self, job_id): """ Convenience method for launching a job. We use POST for actions outside of HTTP verbs (job launch in this case). """ assert self.api_version.lower() in ['0.01a', '0.1'], \ 'This method is only supported in BETA (0.01) and ALPHA (0.01a) versions' try: self.create_job(job_id, {'submit_job_action': True}) except ValueError: pass return self.read_job(job_id)
[ "def", "launch_job", "(", "self", ",", "job_id", ")", ":", "assert", "self", ".", "api_version", ".", "lower", "(", ")", "in", "[", "'0.01a'", ",", "'0.1'", "]", ",", "'This method is only supported in BETA (0.01) and ALPHA (0.01a) versions'", "try", ":", "self", ".", "create_job", "(", "job_id", ",", "{", "'submit_job_action'", ":", "True", "}", ")", "except", "ValueError", ":", "pass", "return", "self", ".", "read_job", "(", "job_id", ")" ]
Convenience method for launching a job. We use POST for actions outside of HTTP verbs (job launch in this case).
[ "Convenience", "method", "for", "launching", "a", "job", ".", "We", "use", "POST", "for", "actions", "outside", "of", "HTTP", "verbs", "(", "job", "launch", "in", "this", "case", ")", "." ]
python
train
40
haikuginger/beekeeper
beekeeper/api.py
https://github.com/haikuginger/beekeeper/blob/b647d3add0b407ec5dc3a2a39c4f6dac31243b18/beekeeper/api.py#L208-L219
def printed_out(self, name): """ Create a string representation of the action """ opt = self.variables().optional_namestring() req = self.variables().required_namestring() out = '' out += '| |\n' out += '| |---{}({}{})\n'.format(name, req, opt) if self.description: out += '| | {}\n'.format(self.description) return out
[ "def", "printed_out", "(", "self", ",", "name", ")", ":", "opt", "=", "self", ".", "variables", "(", ")", ".", "optional_namestring", "(", ")", "req", "=", "self", ".", "variables", "(", ")", ".", "required_namestring", "(", ")", "out", "=", "''", "out", "+=", "'| |\\n'", "out", "+=", "'| |---{}({}{})\\n'", ".", "format", "(", "name", ",", "req", ",", "opt", ")", "if", "self", ".", "description", ":", "out", "+=", "'| | {}\\n'", ".", "format", "(", "self", ".", "description", ")", "return", "out" ]
Create a string representation of the action
[ "Create", "a", "string", "representation", "of", "the", "action" ]
python
train
34.333333
bitcraze/crazyflie-lib-python
cflib/crazyflie/localization.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/localization.py#L113-L122
def send_short_lpp_packet(self, dest_id, data): """ Send ultra-wide-band LPP packet to dest_id """ pk = CRTPPacket() pk.port = CRTPPort.LOCALIZATION pk.channel = self.GENERIC_CH pk.data = struct.pack('<BB', self.LPS_SHORT_LPP_PACKET, dest_id) + data self._cf.send_packet(pk)
[ "def", "send_short_lpp_packet", "(", "self", ",", "dest_id", ",", "data", ")", ":", "pk", "=", "CRTPPacket", "(", ")", "pk", ".", "port", "=", "CRTPPort", ".", "LOCALIZATION", "pk", ".", "channel", "=", "self", ".", "GENERIC_CH", "pk", ".", "data", "=", "struct", ".", "pack", "(", "'<BB'", ",", "self", ".", "LPS_SHORT_LPP_PACKET", ",", "dest_id", ")", "+", "data", "self", ".", "_cf", ".", "send_packet", "(", "pk", ")" ]
Send ultra-wide-band LPP packet to dest_id
[ "Send", "ultra", "-", "wide", "-", "band", "LPP", "packet", "to", "dest_id" ]
python
train
33
openstack/networking-cisco
networking_cisco/apps/saf/server/dfa_server.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L809-L849
def network_delete_event(self, network_info): """Process network delete event.""" net_id = network_info['network_id'] if net_id not in self.network: LOG.error('network_delete_event: net_id %s does not exist.', net_id) return segid = self.network[net_id].get('segmentation_id') tenant_id = self.network[net_id].get('tenant_id') tenant_name = self.get_project_name(tenant_id) net = utils.Dict2Obj(self.network[net_id]) if not tenant_name: LOG.error('Project %(tenant_id)s does not exist.', {'tenant_id': tenant_id}) self.update_network_db(net.id, constants.DELETE_FAIL) return try: self.dcnm_client.delete_network(tenant_name, net) # Put back the segmentation id into the pool. self.seg_drvr.release_segmentation_id(segid) # Remove entry from database and cache. self.delete_network_db(net_id) del self.network[net_id] snets = [k for k in self.subnet if ( self.subnet[k].get('network_id') == net_id)] [self.subnet.pop(s) for s in snets] except dexc.DfaClientRequestFailed: LOG.error('Failed to create network %(net)s.', {'net': net.name}) self.update_network_db(net_id, constants.DELETE_FAIL) # deleting all related VMs instances = self.get_vms() instances_related = [k for k in instances if k.network_id == net_id] for vm in instances_related: LOG.debug("deleting vm %s because network is deleted", vm.name) self.delete_vm_function(vm.port_id, vm) self.network_del_notif(tenant_id, tenant_name, net_id)
[ "def", "network_delete_event", "(", "self", ",", "network_info", ")", ":", "net_id", "=", "network_info", "[", "'network_id'", "]", "if", "net_id", "not", "in", "self", ".", "network", ":", "LOG", ".", "error", "(", "'network_delete_event: net_id %s does not exist.'", ",", "net_id", ")", "return", "segid", "=", "self", ".", "network", "[", "net_id", "]", ".", "get", "(", "'segmentation_id'", ")", "tenant_id", "=", "self", ".", "network", "[", "net_id", "]", ".", "get", "(", "'tenant_id'", ")", "tenant_name", "=", "self", ".", "get_project_name", "(", "tenant_id", ")", "net", "=", "utils", ".", "Dict2Obj", "(", "self", ".", "network", "[", "net_id", "]", ")", "if", "not", "tenant_name", ":", "LOG", ".", "error", "(", "'Project %(tenant_id)s does not exist.'", ",", "{", "'tenant_id'", ":", "tenant_id", "}", ")", "self", ".", "update_network_db", "(", "net", ".", "id", ",", "constants", ".", "DELETE_FAIL", ")", "return", "try", ":", "self", ".", "dcnm_client", ".", "delete_network", "(", "tenant_name", ",", "net", ")", "# Put back the segmentation id into the pool.", "self", ".", "seg_drvr", ".", "release_segmentation_id", "(", "segid", ")", "# Remove entry from database and cache.", "self", ".", "delete_network_db", "(", "net_id", ")", "del", "self", ".", "network", "[", "net_id", "]", "snets", "=", "[", "k", "for", "k", "in", "self", ".", "subnet", "if", "(", "self", ".", "subnet", "[", "k", "]", ".", "get", "(", "'network_id'", ")", "==", "net_id", ")", "]", "[", "self", ".", "subnet", ".", "pop", "(", "s", ")", "for", "s", "in", "snets", "]", "except", "dexc", ".", "DfaClientRequestFailed", ":", "LOG", ".", "error", "(", "'Failed to create network %(net)s.'", ",", "{", "'net'", ":", "net", ".", "name", "}", ")", "self", ".", "update_network_db", "(", "net_id", ",", "constants", ".", "DELETE_FAIL", ")", "# deleting all related VMs", "instances", "=", "self", ".", "get_vms", "(", ")", "instances_related", "=", "[", "k", "for", "k", "in", "instances", "if", "k", ".", "network_id", "==", "net_id", "]", "for", "vm", "in", "instances_related", ":", "LOG", ".", "debug", "(", "\"deleting vm %s because network is deleted\"", ",", "vm", ".", "name", ")", "self", ".", "delete_vm_function", "(", "vm", ".", "port_id", ",", "vm", ")", "self", ".", "network_del_notif", "(", "tenant_id", ",", "tenant_name", ",", "net_id", ")" ]
Process network delete event.
[ "Process", "network", "delete", "event", "." ]
python
train
43.195122
rossdylan/sham
sham/storage/pools.py
https://github.com/rossdylan/sham/blob/d938ae3da43814c3c45ae95b6116bd87282c8691/sham/storage/pools.py#L21-L54
def create_backed_vol(self, name, backer, _format='qcow2'): """ TODO(rdelinger) think about changing _format This is a pretty specialized function. It takes an existing volume, and creates a new volume that is backed by the existing volume Sadly there is no easy way to do this in libvirt, the best way I've found is to just create some xml and use the createXML function """ vol_xml = ElementTree.Element('volume') vol_name = ElementTree.SubElement(vol_xml, 'name') name = '{0}.{1}'.format(name, _format) vol_name.text = name target = ElementTree.SubElement(vol_xml, 'target') target_format = ElementTree.SubElement(target, 'format') target_format.set('type', _format) vol_cap = ElementTree.SubElement(vol_xml, 'capacity') vol_cap.set('unit', 'bytes') # @TODO(rdelinger) this should be dynamic vol_cap.text = backer.capacity backing_store = ElementTree.SubElement(vol_xml, 'backingStore') bs_path = ElementTree.SubElement(backing_store, 'path') bs_path.text = backer.path bs_format = ElementTree.SubElement(backing_store, 'format') bs_format.set('type', backer.format) XMLString = ElementTree.tostring(vol_xml) self.virsp.createXML(XMLString, 0) return self.find_volume(name)
[ "def", "create_backed_vol", "(", "self", ",", "name", ",", "backer", ",", "_format", "=", "'qcow2'", ")", ":", "vol_xml", "=", "ElementTree", ".", "Element", "(", "'volume'", ")", "vol_name", "=", "ElementTree", ".", "SubElement", "(", "vol_xml", ",", "'name'", ")", "name", "=", "'{0}.{1}'", ".", "format", "(", "name", ",", "_format", ")", "vol_name", ".", "text", "=", "name", "target", "=", "ElementTree", ".", "SubElement", "(", "vol_xml", ",", "'target'", ")", "target_format", "=", "ElementTree", ".", "SubElement", "(", "target", ",", "'format'", ")", "target_format", ".", "set", "(", "'type'", ",", "_format", ")", "vol_cap", "=", "ElementTree", ".", "SubElement", "(", "vol_xml", ",", "'capacity'", ")", "vol_cap", ".", "set", "(", "'unit'", ",", "'bytes'", ")", "# @TODO(rdelinger) this should be dynamic", "vol_cap", ".", "text", "=", "backer", ".", "capacity", "backing_store", "=", "ElementTree", ".", "SubElement", "(", "vol_xml", ",", "'backingStore'", ")", "bs_path", "=", "ElementTree", ".", "SubElement", "(", "backing_store", ",", "'path'", ")", "bs_path", ".", "text", "=", "backer", ".", "path", "bs_format", "=", "ElementTree", ".", "SubElement", "(", "backing_store", ",", "'format'", ")", "bs_format", ".", "set", "(", "'type'", ",", "backer", ".", "format", ")", "XMLString", "=", "ElementTree", ".", "tostring", "(", "vol_xml", ")", "self", ".", "virsp", ".", "createXML", "(", "XMLString", ",", "0", ")", "return", "self", ".", "find_volume", "(", "name", ")" ]
TODO(rdelinger) think about changing _format This is a pretty specialized function. It takes an existing volume, and creates a new volume that is backed by the existing volume Sadly there is no easy way to do this in libvirt, the best way I've found is to just create some xml and use the createXML function
[ "TODO", "(", "rdelinger", ")", "think", "about", "changing", "_format", "This", "is", "a", "pretty", "specialized", "function", ".", "It", "takes", "an", "existing", "volume", "and", "creates", "a", "new", "volume", "that", "is", "backed", "by", "the", "existing", "volume", "Sadly", "there", "is", "no", "easy", "way", "to", "do", "this", "in", "libvirt", "the", "best", "way", "I", "ve", "found", "is", "to", "just", "create", "some", "xml", "and", "use", "the", "createXML", "function" ]
python
train
40.176471
ellmetha/django-machina
machina/apps/forum_conversation/views.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/views.py#L97-L116
def get_context_data(self, **kwargs): """ Returns the context data to provide to the template. """ context = super().get_context_data(**kwargs) # Insert the considered topic and the associated forum into the context topic = self.get_topic() context['topic'] = topic context['forum'] = topic.forum # Handles the case when a poll is associated to the topic try: if hasattr(topic, 'poll') and topic.poll.options.exists(): context['poll'] = topic.poll context['poll_form'] = self.poll_form_class(poll=topic.poll) context['view_results_action'] = self.request.GET.get('view_results', None) context['change_vote_action'] = self.request.GET.get('change_vote', None) except ObjectDoesNotExist: # pragma: no cover pass return context
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "# Insert the considered topic and the associated forum into the context", "topic", "=", "self", ".", "get_topic", "(", ")", "context", "[", "'topic'", "]", "=", "topic", "context", "[", "'forum'", "]", "=", "topic", ".", "forum", "# Handles the case when a poll is associated to the topic", "try", ":", "if", "hasattr", "(", "topic", ",", "'poll'", ")", "and", "topic", ".", "poll", ".", "options", ".", "exists", "(", ")", ":", "context", "[", "'poll'", "]", "=", "topic", ".", "poll", "context", "[", "'poll_form'", "]", "=", "self", ".", "poll_form_class", "(", "poll", "=", "topic", ".", "poll", ")", "context", "[", "'view_results_action'", "]", "=", "self", ".", "request", ".", "GET", ".", "get", "(", "'view_results'", ",", "None", ")", "context", "[", "'change_vote_action'", "]", "=", "self", ".", "request", ".", "GET", ".", "get", "(", "'change_vote'", ",", "None", ")", "except", "ObjectDoesNotExist", ":", "# pragma: no cover", "pass", "return", "context" ]
Returns the context data to provide to the template.
[ "Returns", "the", "context", "data", "to", "provide", "to", "the", "template", "." ]
python
train
43.85
bokeh/bokeh
bokeh/util/paths.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/paths.py#L44-L50
def serverdir(): """ Get the location of the server subpackage """ path = join(ROOT_DIR, 'server') path = normpath(path) if sys.platform == 'cygwin': path = realpath(path) return path
[ "def", "serverdir", "(", ")", ":", "path", "=", "join", "(", "ROOT_DIR", ",", "'server'", ")", "path", "=", "normpath", "(", "path", ")", "if", "sys", ".", "platform", "==", "'cygwin'", ":", "path", "=", "realpath", "(", "path", ")", "return", "path" ]
Get the location of the server subpackage
[ "Get", "the", "location", "of", "the", "server", "subpackage" ]
python
train
28.714286
neuropsychology/NeuroKit.py
neurokit/signal/events.py
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/signal/events.py#L19-L65
def binarize_signal(signal, treshold="auto", cut="higher"): """ Binarize a channel based on a continuous channel. Parameters ---------- signal = array or list The signal channel. treshold = float The treshold value by which to select the events. If "auto", takes the value between the max and the min. cut = str "higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower". Returns ---------- list binary_signal Example ---------- >>> import neurokit as nk >>> binary_signal = nk.binarize_signal(signal, treshold=4) Authors ---------- - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ Dependencies ---------- None """ if treshold == "auto": treshold = (np.max(np.array(signal)) - np.min(np.array(signal)))/2 signal = list(signal) binary_signal = [] for i in range(len(signal)): if cut == "higher": if signal[i] > treshold: binary_signal.append(1) else: binary_signal.append(0) else: if signal[i] < treshold: binary_signal.append(1) else: binary_signal.append(0) return(binary_signal)
[ "def", "binarize_signal", "(", "signal", ",", "treshold", "=", "\"auto\"", ",", "cut", "=", "\"higher\"", ")", ":", "if", "treshold", "==", "\"auto\"", ":", "treshold", "=", "(", "np", ".", "max", "(", "np", ".", "array", "(", "signal", ")", ")", "-", "np", ".", "min", "(", "np", ".", "array", "(", "signal", ")", ")", ")", "/", "2", "signal", "=", "list", "(", "signal", ")", "binary_signal", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "signal", ")", ")", ":", "if", "cut", "==", "\"higher\"", ":", "if", "signal", "[", "i", "]", ">", "treshold", ":", "binary_signal", ".", "append", "(", "1", ")", "else", ":", "binary_signal", ".", "append", "(", "0", ")", "else", ":", "if", "signal", "[", "i", "]", "<", "treshold", ":", "binary_signal", ".", "append", "(", "1", ")", "else", ":", "binary_signal", ".", "append", "(", "0", ")", "return", "(", "binary_signal", ")" ]
Binarize a channel based on a continuous channel. Parameters ---------- signal = array or list The signal channel. treshold = float The treshold value by which to select the events. If "auto", takes the value between the max and the min. cut = str "higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower". Returns ---------- list binary_signal Example ---------- >>> import neurokit as nk >>> binary_signal = nk.binarize_signal(signal, treshold=4) Authors ---------- - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ Dependencies ---------- None
[ "Binarize", "a", "channel", "based", "on", "a", "continuous", "channel", "." ]
python
train
30.744681
nikdon/pyEntropy
pyentrp/entropy.py
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L212-L278
def permutation_entropy(time_series, order=3, delay=1, normalize=False): """Permutation Entropy. Parameters ---------- time_series : list or np.array Time series order : int Order of permutation entropy delay : int Time delay normalize : bool If True, divide by log2(factorial(m)) to normalize the entropy between 0 and 1. Otherwise, return the permutation entropy in bit. Returns ------- pe : float Permutation Entropy References ---------- .. [1] Massimiliano Zanin et al. Permutation Entropy and Its Main Biomedical and Econophysics Applications: A Review. http://www.mdpi.com/1099-4300/14/8/1553/pdf .. [2] Christoph Bandt and Bernd Pompe. Permutation entropy — a natural complexity measure for time series. http://stubber.math-inf.uni-greifswald.de/pub/full/prep/2001/11.pdf Notes ----- Last updated (Oct 2018) by Raphael Vallat ([email protected]): - Major speed improvements - Use of base 2 instead of base e - Added normalization Examples -------- 1. Permutation entropy with order 2 >>> x = [4, 7, 9, 10, 6, 11, 3] >>> # Return a value between 0 and log2(factorial(order)) >>> print(permutation_entropy(x, order=2)) 0.918 2. Normalized permutation entropy with order 3 >>> x = [4, 7, 9, 10, 6, 11, 3] >>> # Return a value comprised between 0 and 1. >>> print(permutation_entropy(x, order=3, normalize=True)) 0.589 """ x = np.array(time_series) hashmult = np.power(order, np.arange(order)) # Embed x and sort the order of permutations sorted_idx = _embed(x, order=order, delay=delay).argsort(kind='quicksort') # Associate unique integer to each permutations hashval = (np.multiply(sorted_idx, hashmult)).sum(1) # Return the counts _, c = np.unique(hashval, return_counts=True) # Use np.true_divide for Python 2 compatibility p = np.true_divide(c, c.sum()) pe = -np.multiply(p, np.log2(p)).sum() if normalize: pe /= np.log2(factorial(order)) return pe
[ "def", "permutation_entropy", "(", "time_series", ",", "order", "=", "3", ",", "delay", "=", "1", ",", "normalize", "=", "False", ")", ":", "x", "=", "np", ".", "array", "(", "time_series", ")", "hashmult", "=", "np", ".", "power", "(", "order", ",", "np", ".", "arange", "(", "order", ")", ")", "# Embed x and sort the order of permutations", "sorted_idx", "=", "_embed", "(", "x", ",", "order", "=", "order", ",", "delay", "=", "delay", ")", ".", "argsort", "(", "kind", "=", "'quicksort'", ")", "# Associate unique integer to each permutations", "hashval", "=", "(", "np", ".", "multiply", "(", "sorted_idx", ",", "hashmult", ")", ")", ".", "sum", "(", "1", ")", "# Return the counts", "_", ",", "c", "=", "np", ".", "unique", "(", "hashval", ",", "return_counts", "=", "True", ")", "# Use np.true_divide for Python 2 compatibility", "p", "=", "np", ".", "true_divide", "(", "c", ",", "c", ".", "sum", "(", ")", ")", "pe", "=", "-", "np", ".", "multiply", "(", "p", ",", "np", ".", "log2", "(", "p", ")", ")", ".", "sum", "(", ")", "if", "normalize", ":", "pe", "/=", "np", ".", "log2", "(", "factorial", "(", "order", ")", ")", "return", "pe" ]
Permutation Entropy. Parameters ---------- time_series : list or np.array Time series order : int Order of permutation entropy delay : int Time delay normalize : bool If True, divide by log2(factorial(m)) to normalize the entropy between 0 and 1. Otherwise, return the permutation entropy in bit. Returns ------- pe : float Permutation Entropy References ---------- .. [1] Massimiliano Zanin et al. Permutation Entropy and Its Main Biomedical and Econophysics Applications: A Review. http://www.mdpi.com/1099-4300/14/8/1553/pdf .. [2] Christoph Bandt and Bernd Pompe. Permutation entropy — a natural complexity measure for time series. http://stubber.math-inf.uni-greifswald.de/pub/full/prep/2001/11.pdf Notes ----- Last updated (Oct 2018) by Raphael Vallat ([email protected]): - Major speed improvements - Use of base 2 instead of base e - Added normalization Examples -------- 1. Permutation entropy with order 2 >>> x = [4, 7, 9, 10, 6, 11, 3] >>> # Return a value between 0 and log2(factorial(order)) >>> print(permutation_entropy(x, order=2)) 0.918 2. Normalized permutation entropy with order 3 >>> x = [4, 7, 9, 10, 6, 11, 3] >>> # Return a value comprised between 0 and 1. >>> print(permutation_entropy(x, order=3, normalize=True)) 0.589
[ "Permutation", "Entropy", "." ]
python
train
31.641791
tensorflow/probability
tensorflow_probability/python/internal/nest_util.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/nest_util.py#L76-L79
def expand_as_args(args): """Returns `True` if `args` should be expanded as `*args`.""" return (isinstance(args, collections.Sequence) and not _is_namedtuple(args) and not _force_leaf(args))
[ "def", "expand_as_args", "(", "args", ")", ":", "return", "(", "isinstance", "(", "args", ",", "collections", ".", "Sequence", ")", "and", "not", "_is_namedtuple", "(", "args", ")", "and", "not", "_force_leaf", "(", "args", ")", ")" ]
Returns `True` if `args` should be expanded as `*args`.
[ "Returns", "True", "if", "args", "should", "be", "expanded", "as", "*", "args", "." ]
python
test
50.25
bugra/angel-list
angel/angel.py
https://github.com/bugra/angel-list/blob/75ac453e873727675ba18e1f45b5bc0cfda26fd7/angel/angel.py#L375-L381
def get_startup(self, id_): """ Get startup based on id """ return _get_request(_STARTUP.format(c_api=_C_API_BEGINNING, api=_API_VERSION, id_=id_, at=self.access_token))
[ "def", "get_startup", "(", "self", ",", "id_", ")", ":", "return", "_get_request", "(", "_STARTUP", ".", "format", "(", "c_api", "=", "_C_API_BEGINNING", ",", "api", "=", "_API_VERSION", ",", "id_", "=", "id_", ",", "at", "=", "self", ".", "access_token", ")", ")" ]
Get startup based on id
[ "Get", "startup", "based", "on", "id" ]
python
train
42.142857
gwastro/pycbc-glue
pycbc_glue/pipeline.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L2626-L2635
def set_ifo(self,ifo): """ Set the ifo name to analyze. If the channel name for the job is defined, then the name of the ifo is prepended to the channel name obtained from the job configuration file and passed with a --channel-name option. @param ifo: two letter ifo code (e.g. L1, H1 or H2). """ self.__ifo = ifo if self.job().channel(): self.add_var_opt('channel-name', ifo + ':' + self.job().channel())
[ "def", "set_ifo", "(", "self", ",", "ifo", ")", ":", "self", ".", "__ifo", "=", "ifo", "if", "self", ".", "job", "(", ")", ".", "channel", "(", ")", ":", "self", ".", "add_var_opt", "(", "'channel-name'", ",", "ifo", "+", "':'", "+", "self", ".", "job", "(", ")", ".", "channel", "(", ")", ")" ]
Set the ifo name to analyze. If the channel name for the job is defined, then the name of the ifo is prepended to the channel name obtained from the job configuration file and passed with a --channel-name option. @param ifo: two letter ifo code (e.g. L1, H1 or H2).
[ "Set", "the", "ifo", "name", "to", "analyze", ".", "If", "the", "channel", "name", "for", "the", "job", "is", "defined", "then", "the", "name", "of", "the", "ifo", "is", "prepended", "to", "the", "channel", "name", "obtained", "from", "the", "job", "configuration", "file", "and", "passed", "with", "a", "--", "channel", "-", "name", "option", "." ]
python
train
43.4
ask/redish
redish/client.py
https://github.com/ask/redish/blob/4845f8d5e12fd953ecad624b4e1e89f79a082a3e/redish/client.py#L135-L142
def rename(self, old_name, new_name): """Rename key to a new name.""" try: self.api.rename(mkey(old_name), mkey(new_name)) except ResponseError, exc: if "no such key" in exc.args: raise KeyError(old_name) raise
[ "def", "rename", "(", "self", ",", "old_name", ",", "new_name", ")", ":", "try", ":", "self", ".", "api", ".", "rename", "(", "mkey", "(", "old_name", ")", ",", "mkey", "(", "new_name", ")", ")", "except", "ResponseError", ",", "exc", ":", "if", "\"no such key\"", "in", "exc", ".", "args", ":", "raise", "KeyError", "(", "old_name", ")", "raise" ]
Rename key to a new name.
[ "Rename", "key", "to", "a", "new", "name", "." ]
python
train
34.875
tanghaibao/goatools
goatools/cli/wr_sections.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/cli/wr_sections.py#L66-L93
def cli(self, prt=sys.stdout): """Command-line interface for go_draw script.""" kws = self.objdoc.get_docargs(prt=None) godag = get_godag(kws['obo'], prt=None, loading_bar=False, optional_attrs=['relationship']) usrgos = GetGOs(godag, max_gos=200).get_usrgos(kws.get('GO_FILE'), prt) tcntobj = self._get_tcntobj(usrgos, godag, **kws) # Gets TermCounts or None self.gosubdag = GoSubDag(usrgos, godag, relationships=True, tcntobj=tcntobj, prt=None) grprdflt = GrouperDflts(self.gosubdag, kws['slims']) ver_list = [godag.version, grprdflt.ver_goslims] prt.write("{VER}\n".format(VER="\n".join(ver_list))) sections = self._read_sections(kws['ifile']) # print("SECSECSEC", sections) hdrobj = HdrgosSections(self.gosubdag, grprdflt.hdrgos_dflt, sections) grprobj = Grouper("init", usrgos, hdrobj, self.gosubdag) # Write sections objsecwr = WrSectionsTxt(grprobj, ver_list) if not os.path.exists(kws['ifile']): objsecwr.wr_txt_section_hdrgos(kws['ifile']) objsecwr.wr_txt_section_hdrgos(kws['ofile']) objsecpy = WrSectionsPy(grprobj, ver_list) if 'py' in kws: objsecpy.wr_py_sections(kws['py'], sections, doc=godag.version) # Write user GO IDs in sections sortobj = Sorter(grprobj) objgowr = WrXlsxSortedGos("init", sortobj, ver_list) objgowr.wr_txt_gos(kws['txt'], sortby=objsecpy.fncsortnt) #objwr.wr_txt_section_hdrgos(kws['ofile'], sortby=objwr.fncsortnt) self._prt_cnt_usrgos(usrgos, sys.stdout)
[ "def", "cli", "(", "self", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "kws", "=", "self", ".", "objdoc", ".", "get_docargs", "(", "prt", "=", "None", ")", "godag", "=", "get_godag", "(", "kws", "[", "'obo'", "]", ",", "prt", "=", "None", ",", "loading_bar", "=", "False", ",", "optional_attrs", "=", "[", "'relationship'", "]", ")", "usrgos", "=", "GetGOs", "(", "godag", ",", "max_gos", "=", "200", ")", ".", "get_usrgos", "(", "kws", ".", "get", "(", "'GO_FILE'", ")", ",", "prt", ")", "tcntobj", "=", "self", ".", "_get_tcntobj", "(", "usrgos", ",", "godag", ",", "*", "*", "kws", ")", "# Gets TermCounts or None", "self", ".", "gosubdag", "=", "GoSubDag", "(", "usrgos", ",", "godag", ",", "relationships", "=", "True", ",", "tcntobj", "=", "tcntobj", ",", "prt", "=", "None", ")", "grprdflt", "=", "GrouperDflts", "(", "self", ".", "gosubdag", ",", "kws", "[", "'slims'", "]", ")", "ver_list", "=", "[", "godag", ".", "version", ",", "grprdflt", ".", "ver_goslims", "]", "prt", ".", "write", "(", "\"{VER}\\n\"", ".", "format", "(", "VER", "=", "\"\\n\"", ".", "join", "(", "ver_list", ")", ")", ")", "sections", "=", "self", ".", "_read_sections", "(", "kws", "[", "'ifile'", "]", ")", "# print(\"SECSECSEC\", sections)", "hdrobj", "=", "HdrgosSections", "(", "self", ".", "gosubdag", ",", "grprdflt", ".", "hdrgos_dflt", ",", "sections", ")", "grprobj", "=", "Grouper", "(", "\"init\"", ",", "usrgos", ",", "hdrobj", ",", "self", ".", "gosubdag", ")", "# Write sections", "objsecwr", "=", "WrSectionsTxt", "(", "grprobj", ",", "ver_list", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "kws", "[", "'ifile'", "]", ")", ":", "objsecwr", ".", "wr_txt_section_hdrgos", "(", "kws", "[", "'ifile'", "]", ")", "objsecwr", ".", "wr_txt_section_hdrgos", "(", "kws", "[", "'ofile'", "]", ")", "objsecpy", "=", "WrSectionsPy", "(", "grprobj", ",", "ver_list", ")", "if", "'py'", "in", "kws", ":", "objsecpy", ".", "wr_py_sections", "(", "kws", "[", "'py'", "]", ",", "sections", ",", "doc", "=", "godag", ".", "version", ")", "# Write user GO IDs in sections", "sortobj", "=", "Sorter", "(", "grprobj", ")", "objgowr", "=", "WrXlsxSortedGos", "(", "\"init\"", ",", "sortobj", ",", "ver_list", ")", "objgowr", ".", "wr_txt_gos", "(", "kws", "[", "'txt'", "]", ",", "sortby", "=", "objsecpy", ".", "fncsortnt", ")", "#objwr.wr_txt_section_hdrgos(kws['ofile'], sortby=objwr.fncsortnt)", "self", ".", "_prt_cnt_usrgos", "(", "usrgos", ",", "sys", ".", "stdout", ")" ]
Command-line interface for go_draw script.
[ "Command", "-", "line", "interface", "for", "go_draw", "script", "." ]
python
train
56.821429
CloudGenix/sdk-python
cloudgenix/interactive.py
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/interactive.py#L706-L734
def quick_str_input(prompt, default_value): """ Function to display a quick question for text input. **Parameters:** - **prompt:** Text / question to display - **default_value:** Default value for no entry **Returns:** text_type() or default_value. """ valid = False str_val = default_value while not valid: input_val = raw_input(prompt + "[{0}]: ".format(default_value)) if input_val == "": str_val = default_value valid = True else: try: str_val = text_type(input_val) valid = True except ValueError: print("ERROR: must be text.") valid = False return str_val
[ "def", "quick_str_input", "(", "prompt", ",", "default_value", ")", ":", "valid", "=", "False", "str_val", "=", "default_value", "while", "not", "valid", ":", "input_val", "=", "raw_input", "(", "prompt", "+", "\"[{0}]: \"", ".", "format", "(", "default_value", ")", ")", "if", "input_val", "==", "\"\"", ":", "str_val", "=", "default_value", "valid", "=", "True", "else", ":", "try", ":", "str_val", "=", "text_type", "(", "input_val", ")", "valid", "=", "True", "except", "ValueError", ":", "print", "(", "\"ERROR: must be text.\"", ")", "valid", "=", "False", "return", "str_val" ]
Function to display a quick question for text input. **Parameters:** - **prompt:** Text / question to display - **default_value:** Default value for no entry **Returns:** text_type() or default_value.
[ "Function", "to", "display", "a", "quick", "question", "for", "text", "input", "." ]
python
train
27.965517
coursera-dl/coursera-dl
coursera/coursera_dl.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/coursera_dl.py#L217-L283
def main(): """ Main entry point for execution as a program (instead of as a module). """ args = parse_args() logging.info('coursera_dl version %s', __version__) completed_classes = [] classes_with_errors = [] mkdir_p(PATH_CACHE, 0o700) if args.clear_cache: shutil.rmtree(PATH_CACHE) if args.list_courses: logging.info('Listing enrolled courses') list_courses(args) return session = get_session() login(session, args.username, args.password) if args.specialization: args.class_names = expand_specializations(session, args.class_names) for class_index, class_name in enumerate(args.class_names): try: logging.info('Downloading class: %s (%d / %d)', class_name, class_index + 1, len(args.class_names)) error_occurred, completed = download_class( session, args, class_name) if completed: completed_classes.append(class_name) if error_occurred: classes_with_errors.append(class_name) except requests.exceptions.HTTPError as e: logging.error('HTTPError %s', e) if is_debug_run(): logging.exception('HTTPError %s', e) except requests.exceptions.SSLError as e: logging.error('SSLError %s', e) print_ssl_error_message(e) if is_debug_run(): raise except ClassNotFound as e: logging.error('Could not find class: %s', e) except AuthenticationFailed as e: logging.error('Could not authenticate: %s', e) if class_index + 1 != len(args.class_names): logging.info('Sleeping for %d seconds before downloading next course. ' 'You can change this with --download-delay option.', args.download_delay) time.sleep(args.download_delay) if completed_classes: logging.info('-' * 80) logging.info( "Classes which appear completed: " + " ".join(completed_classes)) if classes_with_errors: logging.info('-' * 80) logging.info('The following classes had errors during the syllabus' ' parsing stage. You may want to review error messages and' ' courses (sometimes enrolling to the course or switching' ' session helps):') for class_name in classes_with_errors: logging.info('%s (https://www.coursera.org/learn/%s)', class_name, class_name)
[ "def", "main", "(", ")", ":", "args", "=", "parse_args", "(", ")", "logging", ".", "info", "(", "'coursera_dl version %s'", ",", "__version__", ")", "completed_classes", "=", "[", "]", "classes_with_errors", "=", "[", "]", "mkdir_p", "(", "PATH_CACHE", ",", "0o700", ")", "if", "args", ".", "clear_cache", ":", "shutil", ".", "rmtree", "(", "PATH_CACHE", ")", "if", "args", ".", "list_courses", ":", "logging", ".", "info", "(", "'Listing enrolled courses'", ")", "list_courses", "(", "args", ")", "return", "session", "=", "get_session", "(", ")", "login", "(", "session", ",", "args", ".", "username", ",", "args", ".", "password", ")", "if", "args", ".", "specialization", ":", "args", ".", "class_names", "=", "expand_specializations", "(", "session", ",", "args", ".", "class_names", ")", "for", "class_index", ",", "class_name", "in", "enumerate", "(", "args", ".", "class_names", ")", ":", "try", ":", "logging", ".", "info", "(", "'Downloading class: %s (%d / %d)'", ",", "class_name", ",", "class_index", "+", "1", ",", "len", "(", "args", ".", "class_names", ")", ")", "error_occurred", ",", "completed", "=", "download_class", "(", "session", ",", "args", ",", "class_name", ")", "if", "completed", ":", "completed_classes", ".", "append", "(", "class_name", ")", "if", "error_occurred", ":", "classes_with_errors", ".", "append", "(", "class_name", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "logging", ".", "error", "(", "'HTTPError %s'", ",", "e", ")", "if", "is_debug_run", "(", ")", ":", "logging", ".", "exception", "(", "'HTTPError %s'", ",", "e", ")", "except", "requests", ".", "exceptions", ".", "SSLError", "as", "e", ":", "logging", ".", "error", "(", "'SSLError %s'", ",", "e", ")", "print_ssl_error_message", "(", "e", ")", "if", "is_debug_run", "(", ")", ":", "raise", "except", "ClassNotFound", "as", "e", ":", "logging", ".", "error", "(", "'Could not find class: %s'", ",", "e", ")", "except", "AuthenticationFailed", "as", "e", ":", "logging", ".", "error", "(", "'Could not authenticate: %s'", ",", "e", ")", "if", "class_index", "+", "1", "!=", "len", "(", "args", ".", "class_names", ")", ":", "logging", ".", "info", "(", "'Sleeping for %d seconds before downloading next course. '", "'You can change this with --download-delay option.'", ",", "args", ".", "download_delay", ")", "time", ".", "sleep", "(", "args", ".", "download_delay", ")", "if", "completed_classes", ":", "logging", ".", "info", "(", "'-'", "*", "80", ")", "logging", ".", "info", "(", "\"Classes which appear completed: \"", "+", "\" \"", ".", "join", "(", "completed_classes", ")", ")", "if", "classes_with_errors", ":", "logging", ".", "info", "(", "'-'", "*", "80", ")", "logging", ".", "info", "(", "'The following classes had errors during the syllabus'", "' parsing stage. You may want to review error messages and'", "' courses (sometimes enrolling to the course or switching'", "' session helps):'", ")", "for", "class_name", "in", "classes_with_errors", ":", "logging", ".", "info", "(", "'%s (https://www.coursera.org/learn/%s)'", ",", "class_name", ",", "class_name", ")" ]
Main entry point for execution as a program (instead of as a module).
[ "Main", "entry", "point", "for", "execution", "as", "a", "program", "(", "instead", "of", "as", "a", "module", ")", "." ]
python
train
38.298507
lk-geimfari/mimesis
mimesis/builtins/nl.py
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/builtins/nl.py#L23-L49
def bsn(self) -> str: """Generate a random, but valid ``Burgerservicenummer``. :returns: Random BSN. :Example: 255159705 """ def _is_valid_bsn(number: str) -> bool: total = 0 multiplier = 9 for char in number: multiplier = -multiplier if multiplier == 1 else multiplier total += int(char) * multiplier multiplier -= 1 result = total % 11 == 0 return result a, b = (100000000, 999999999) sample = str(self.random.randint(a, b)) while not _is_valid_bsn(sample): sample = str(self.random.randint(a, b)) return sample
[ "def", "bsn", "(", "self", ")", "->", "str", ":", "def", "_is_valid_bsn", "(", "number", ":", "str", ")", "->", "bool", ":", "total", "=", "0", "multiplier", "=", "9", "for", "char", "in", "number", ":", "multiplier", "=", "-", "multiplier", "if", "multiplier", "==", "1", "else", "multiplier", "total", "+=", "int", "(", "char", ")", "*", "multiplier", "multiplier", "-=", "1", "result", "=", "total", "%", "11", "==", "0", "return", "result", "a", ",", "b", "=", "(", "100000000", ",", "999999999", ")", "sample", "=", "str", "(", "self", ".", "random", ".", "randint", "(", "a", ",", "b", ")", ")", "while", "not", "_is_valid_bsn", "(", "sample", ")", ":", "sample", "=", "str", "(", "self", ".", "random", ".", "randint", "(", "a", ",", "b", ")", ")", "return", "sample" ]
Generate a random, but valid ``Burgerservicenummer``. :returns: Random BSN. :Example: 255159705
[ "Generate", "a", "random", "but", "valid", "Burgerservicenummer", "." ]
python
train
25.851852
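A quick standalone check of the 11-test that the `bsn` record above relies on, using the docstring's example value (plain Python, no mimesis import needed):

weights = [9, 8, 7, 6, 5, 4, 3, 2, -1]          # same weights the generator applies
number = "255159705"                             # example BSN from the docstring
total = sum(int(digit) * w for digit, w in zip(number, weights))
assert total % 11 == 0                           # 176 % 11 == 0, so the example value is valid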
ConsenSys/mythril-classic
mythril/ethereum/interface/leveldb/client.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/ethereum/interface/leveldb/client.py#L273-L284
def eth_getBlockByNumber(self, number): """Get block body by block number. :param number: :return: """ block_hash = self.reader._get_block_hash(number) block_number = _format_block_number(number) body_key = body_prefix + block_number + block_hash block_data = self.db.get(body_key) body = rlp.decode(block_data, sedes=Block) return body
[ "def", "eth_getBlockByNumber", "(", "self", ",", "number", ")", ":", "block_hash", "=", "self", ".", "reader", ".", "_get_block_hash", "(", "number", ")", "block_number", "=", "_format_block_number", "(", "number", ")", "body_key", "=", "body_prefix", "+", "block_number", "+", "block_hash", "block_data", "=", "self", ".", "db", ".", "get", "(", "body_key", ")", "body", "=", "rlp", ".", "decode", "(", "block_data", ",", "sedes", "=", "Block", ")", "return", "body" ]
Get block body by block number. :param number: :return:
[ "Get", "block", "body", "by", "block", "number", "." ]
python
train
33.833333
sprockets/sprockets.clients.cassandra
sprockets/clients/cassandra/__init__.py
https://github.com/sprockets/sprockets.clients.cassandra/blob/c0a3ffe550ceb89b23a59959a0645d29d257e624/sprockets/clients/cassandra/__init__.py#L108-L119
def execute(self, query, *args, **kwargs): """Asynchronously execute the specified CQL query. The execute command also takes optional parameters and trace keyword arguments. See cassandra-python documentation for definition of those parameters. """ tornado_future = Future() cassandra_future = self._session.execute_async(query, *args, **kwargs) self._ioloop.add_callback( self._callback, cassandra_future, tornado_future) return tornado_future
[ "def", "execute", "(", "self", ",", "query", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "tornado_future", "=", "Future", "(", ")", "cassandra_future", "=", "self", ".", "_session", ".", "execute_async", "(", "query", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_ioloop", ".", "add_callback", "(", "self", ".", "_callback", ",", "cassandra_future", ",", "tornado_future", ")", "return", "tornado_future" ]
Asynchronously execute the specified CQL query. The execute command also takes optional parameters and trace keyword arguments. See cassandra-python documentation for definition of those parameters.
[ "Asynchronously", "execute", "the", "specified", "CQL", "query", "." ]
python
train
43.166667
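A minimal usage sketch for the Cassandra `execute` wrapper above; the variable name `connection` and the CQL text are illustrative, and the call is assumed to run inside a Tornado coroutine so the returned future can be yielded:

from tornado import gen

@gen.coroutine
def fetch_version(connection):
    # `connection` is an instance of the wrapper class shown above
    rows = yield connection.execute("SELECT release_version FROM system.local")
    raise gen.Return(rows)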
materialsproject/pymatgen
pymatgen/analysis/elasticity/elastic.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/elasticity/elastic.py#L684-L711
def thermal_expansion_coeff(self, structure, temperature, mode="debye"): """ Gets thermal expansion coefficient from third-order constants. Args: temperature (float): Temperature in kelvin, if not specified will return non-cv-normalized value structure (Structure): Structure to be used in directional heat capacity determination, only necessary if temperature is specified mode (string): mode for finding average heat-capacity, current supported modes are 'debye' and 'dulong-petit' """ soec = ElasticTensor(self[0]) v0 = (structure.volume * 1e-30 / structure.num_sites) if mode == "debye": td = soec.debye_temperature(structure) t_ratio = temperature / td integrand = lambda x: (x**4 * np.exp(x)) / (np.exp(x) - 1)**2 cv = 9 * 8.314 * t_ratio**3 * quad(integrand, 0, t_ratio**-1)[0] elif mode == "dulong-petit": cv = 3 * 8.314 else: raise ValueError("Mode must be debye or dulong-petit") tgt = self.get_tgt(temperature, structure) alpha = np.einsum('ijkl,ij', soec.compliance_tensor, tgt) alpha *= cv / (1e9 * v0 * 6.022e23) return SquareTensor(alpha)
[ "def", "thermal_expansion_coeff", "(", "self", ",", "structure", ",", "temperature", ",", "mode", "=", "\"debye\"", ")", ":", "soec", "=", "ElasticTensor", "(", "self", "[", "0", "]", ")", "v0", "=", "(", "structure", ".", "volume", "*", "1e-30", "/", "structure", ".", "num_sites", ")", "if", "mode", "==", "\"debye\"", ":", "td", "=", "soec", ".", "debye_temperature", "(", "structure", ")", "t_ratio", "=", "temperature", "/", "td", "integrand", "=", "lambda", "x", ":", "(", "x", "**", "4", "*", "np", ".", "exp", "(", "x", ")", ")", "/", "(", "np", ".", "exp", "(", "x", ")", "-", "1", ")", "**", "2", "cv", "=", "9", "*", "8.314", "*", "t_ratio", "**", "3", "*", "quad", "(", "integrand", ",", "0", ",", "t_ratio", "**", "-", "1", ")", "[", "0", "]", "elif", "mode", "==", "\"dulong-petit\"", ":", "cv", "=", "3", "*", "8.314", "else", ":", "raise", "ValueError", "(", "\"Mode must be debye or dulong-petit\"", ")", "tgt", "=", "self", ".", "get_tgt", "(", "temperature", ",", "structure", ")", "alpha", "=", "np", ".", "einsum", "(", "'ijkl,ij'", ",", "soec", ".", "compliance_tensor", ",", "tgt", ")", "alpha", "*=", "cv", "/", "(", "1e9", "*", "v0", "*", "6.022e23", ")", "return", "SquareTensor", "(", "alpha", ")" ]
Gets thermal expansion coefficient from third-order constants. Args: temperature (float): Temperature in kelvin, if not specified will return non-cv-normalized value structure (Structure): Structure to be used in directional heat capacity determination, only necessary if temperature is specified mode (string): mode for finding average heat-capacity, current supported modes are 'debye' and 'dulong-petit'
[ "Gets", "thermal", "expansion", "coefficient", "from", "third", "-", "order", "constants", "." ]
python
train
46.535714
pschmitt/python-opsview
opsview/opsview.py
https://github.com/pschmitt/python-opsview/blob/720acc06c491db32d18c79d20f04cae16e57a7fb/opsview/opsview.py#L86-L91
def api_version(self, verbose=False): ''' Get information about the API http://docs.opsview.com/doku.php?id=opsview4.6:restapi#api_version_information ''' return self.__auth_req_get(self.rest_url, verbose=verbose)
[ "def", "api_version", "(", "self", ",", "verbose", "=", "False", ")", ":", "return", "self", ".", "__auth_req_get", "(", "self", ".", "rest_url", ",", "verbose", "=", "verbose", ")" ]
Get information about the API http://docs.opsview.com/doku.php?id=opsview4.6:restapi#api_version_information
[ "Get", "information", "about", "the", "API", "http", ":", "//", "docs", ".", "opsview", ".", "com", "/", "doku", ".", "php?id", "=", "opsview4", ".", "6", ":", "restapi#api_version_information" ]
python
train
41.333333
mardix/Mocha
mocha/contrib/auth/decorators.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/auth/decorators.py#L186-L200
def accepts_contributor_roles(func): """ Decorator that accepts only contributor roles :param func: :return: """ if inspect.isclass(func): apply_function_to_members(func, accepts_contributor_roles) return func else: @functools.wraps(func) def decorator(*args, **kwargs): return accepts_roles(*ROLES_CONTRIBUTOR)(func)(*args, **kwargs) return decorator
[ "def", "accepts_contributor_roles", "(", "func", ")", ":", "if", "inspect", ".", "isclass", "(", "func", ")", ":", "apply_function_to_members", "(", "func", ",", "accepts_contributor_roles", ")", "return", "func", "else", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "accepts_roles", "(", "*", "ROLES_CONTRIBUTOR", ")", "(", "func", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "decorator" ]
Decorator that accepts only contributor roles :param func: :return:
[ "Decorator", "that", "accepts", "only", "contributor", "roles", ":", "param", "func", ":", ":", "return", ":" ]
python
train
27.866667
eerimoq/bincopy
bincopy.py
https://github.com/eerimoq/bincopy/blob/5e02cd001c3e9b54729425db6bffad5f03e1beac/bincopy.py#L738-L752
def add(self, data, overwrite=False): """Add given data string by guessing its format. The format must be Motorola S-Records, Intel HEX or TI-TXT. Set `overwrite` to ``True`` to allow already added data to be overwritten. """ if is_srec(data): self.add_srec(data, overwrite) elif is_ihex(data): self.add_ihex(data, overwrite) elif is_ti_txt(data): self.add_ti_txt(data, overwrite) else: raise UnsupportedFileFormatError()
[ "def", "add", "(", "self", ",", "data", ",", "overwrite", "=", "False", ")", ":", "if", "is_srec", "(", "data", ")", ":", "self", ".", "add_srec", "(", "data", ",", "overwrite", ")", "elif", "is_ihex", "(", "data", ")", ":", "self", ".", "add_ihex", "(", "data", ",", "overwrite", ")", "elif", "is_ti_txt", "(", "data", ")", ":", "self", ".", "add_ti_txt", "(", "data", ",", "overwrite", ")", "else", ":", "raise", "UnsupportedFileFormatError", "(", ")" ]
Add given data string by guessing its format. The format must be Motorola S-Records, Intel HEX or TI-TXT. Set `overwrite` to ``True`` to allow already added data to be overwritten.
[ "Add", "given", "data", "string", "by", "guessing", "its", "format", ".", "The", "format", "must", "be", "Motorola", "S", "-", "Records", "Intel", "HEX", "or", "TI", "-", "TXT", ".", "Set", "overwrite", "to", "True", "to", "allow", "already", "added", "data", "to", "be", "overwritten", "." ]
python
train
34.733333
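A small usage sketch for the format-guessing `add` method above, assuming it lives on bincopy's `BinFile` class (the two Intel HEX records encode one data byte at address 0 plus the end-of-file record):

import bincopy

binfile = bincopy.BinFile()
binfile.add(":0100000001FE\n:00000001FF\n")    # detected as Intel HEX and parsed
print(binfile.as_srec())                       # re-emit the same data as Motorola S-records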
ArduPilot/MAVProxy
MAVProxy/modules/lib/wxhorizon_ui.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/wxhorizon_ui.py#L154-L160
def calcFontScaling(self): '''Calculates the current font size and left position for the current window.''' self.ypx = self.figure.get_size_inches()[1]*self.figure.dpi self.xpx = self.figure.get_size_inches()[0]*self.figure.dpi self.fontSize = self.vertSize*(self.ypx/2.0) self.leftPos = self.axes.get_xlim()[0] self.rightPos = self.axes.get_xlim()[1]
[ "def", "calcFontScaling", "(", "self", ")", ":", "self", ".", "ypx", "=", "self", ".", "figure", ".", "get_size_inches", "(", ")", "[", "1", "]", "*", "self", ".", "figure", ".", "dpi", "self", ".", "xpx", "=", "self", ".", "figure", ".", "get_size_inches", "(", ")", "[", "0", "]", "*", "self", ".", "figure", ".", "dpi", "self", ".", "fontSize", "=", "self", ".", "vertSize", "*", "(", "self", ".", "ypx", "/", "2.0", ")", "self", ".", "leftPos", "=", "self", ".", "axes", ".", "get_xlim", "(", ")", "[", "0", "]", "self", ".", "rightPos", "=", "self", ".", "axes", ".", "get_xlim", "(", ")", "[", "1", "]" ]
Calculates the current font size and left position for the current window.
[ "Calculates", "the", "current", "font", "size", "and", "left", "position", "for", "the", "current", "window", "." ]
python
train
56.142857
adrn/gala
gala/potential/frame/builtin/transformations.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/frame/builtin/transformations.py#L31-L49
def z_angle_rotate(xy, theta): """ Rotated the input vector or set of vectors `xy` by the angle `theta`. Parameters ---------- xy : array_like The vector or array of vectors to transform. Must have shape """ xy = np.array(xy).T theta = np.array(theta).T out = np.zeros_like(xy) out[...,0] = np.cos(theta)*xy[...,0] - np.sin(theta)*xy[...,1] out[...,1] = np.sin(theta)*xy[...,0] + np.cos(theta)*xy[...,1] return out.T
[ "def", "z_angle_rotate", "(", "xy", ",", "theta", ")", ":", "xy", "=", "np", ".", "array", "(", "xy", ")", ".", "T", "theta", "=", "np", ".", "array", "(", "theta", ")", ".", "T", "out", "=", "np", ".", "zeros_like", "(", "xy", ")", "out", "[", "...", ",", "0", "]", "=", "np", ".", "cos", "(", "theta", ")", "*", "xy", "[", "...", ",", "0", "]", "-", "np", ".", "sin", "(", "theta", ")", "*", "xy", "[", "...", ",", "1", "]", "out", "[", "...", ",", "1", "]", "=", "np", ".", "sin", "(", "theta", ")", "*", "xy", "[", "...", ",", "0", "]", "+", "np", ".", "cos", "(", "theta", ")", "*", "xy", "[", "...", ",", "1", "]", "return", "out", ".", "T" ]
Rotated the input vector or set of vectors `xy` by the angle `theta`. Parameters ---------- xy : array_like The vector or array of vectors to transform. Must have shape
[ "Rotated", "the", "input", "vector", "or", "set", "of", "vectors", "xy", "by", "the", "angle", "theta", "." ]
python
train
24.157895
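The rotation applied by `z_angle_rotate` above is the usual 2-D rotation about the z axis; a standalone numpy check (quarter-turn of the unit x vector):

import numpy as np

xy = np.array([1.0, 0.0])
theta = np.pi / 2
out = np.array([np.cos(theta) * xy[0] - np.sin(theta) * xy[1],
                np.sin(theta) * xy[0] + np.cos(theta) * xy[1]])
# out is approximately [0.0, 1.0]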
exosite-labs/pyonep
pyonep/portals/__init__.py
https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/portals/__init__.py#L91-L111
def get_portal_by_name(self, portal_name): """ Set active portal according to the name passed in 'portal_name'. Returns dictionary of device 'serial_number: rid' """ portals = self.get_portals_list() for p in portals: # print("Checking {!r}".format(p)) if portal_name == p[1]: # print("Found Portal!") self.set_portal_name( p[1] ) self.set_portal_id( p[0] ) self.set_portal_cik( p[2][1]['info']['key'] ) # print("Active Portal Details:\nName: {0}\nId: {1}\nCIK: {2}".format( # self.portal_name(), # self.portal_id(), # self.portal_cik())) return p return None
[ "def", "get_portal_by_name", "(", "self", ",", "portal_name", ")", ":", "portals", "=", "self", ".", "get_portals_list", "(", ")", "for", "p", "in", "portals", ":", "# print(\"Checking {!r}\".format(p))", "if", "portal_name", "==", "p", "[", "1", "]", ":", "# print(\"Found Portal!\")", "self", ".", "set_portal_name", "(", "p", "[", "1", "]", ")", "self", ".", "set_portal_id", "(", "p", "[", "0", "]", ")", "self", ".", "set_portal_cik", "(", "p", "[", "2", "]", "[", "1", "]", "[", "'info'", "]", "[", "'key'", "]", ")", "# print(\"Active Portal Details:\\nName: {0}\\nId: {1}\\nCIK: {2}\".format(", "# self.portal_name(),", "# self.portal_id(),", "# self.portal_cik()))", "return", "p", "return", "None" ]
Set active portal according to the name passed in 'portal_name'. Returns dictionary of device 'serial_number: rid'
[ "Set", "active", "portal", "according", "to", "the", "name", "passed", "in", "portal_name", "." ]
python
train
41.095238
ejeschke/ginga
ginga/opengl/Camera.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/opengl/Camera.py#L155-L175
def pan_delta(self, dx_px, dy_px): """ This causes the scene to appear to translate right and up (i.e., what really happens is the camera is translated left and down). This is also called "panning" in some software packages. Passing in negative delta values causes the opposite motion. """ direction = self.target - self.position distance_from_target = direction.length() direction = direction.normalized() speed_per_radius = self.get_translation_speed(distance_from_target) px_per_unit = self.vport_radius_px / speed_per_radius right = direction ^ self.up translation = (right * (-dx_px / px_per_unit) + self.up * (-dy_px / px_per_unit)) self.position = self.position + translation self.target = self.target + translation
[ "def", "pan_delta", "(", "self", ",", "dx_px", ",", "dy_px", ")", ":", "direction", "=", "self", ".", "target", "-", "self", ".", "position", "distance_from_target", "=", "direction", ".", "length", "(", ")", "direction", "=", "direction", ".", "normalized", "(", ")", "speed_per_radius", "=", "self", ".", "get_translation_speed", "(", "distance_from_target", ")", "px_per_unit", "=", "self", ".", "vport_radius_px", "/", "speed_per_radius", "right", "=", "direction", "^", "self", ".", "up", "translation", "=", "(", "right", "*", "(", "-", "dx_px", "/", "px_per_unit", ")", "+", "self", ".", "up", "*", "(", "-", "dy_px", "/", "px_per_unit", ")", ")", "self", ".", "position", "=", "self", ".", "position", "+", "translation", "self", ".", "target", "=", "self", ".", "target", "+", "translation" ]
This causes the scene to appear to translate right and up (i.e., what really happens is the camera is translated left and down). This is also called "panning" in some software packages. Passing in negative delta values causes the opposite motion.
[ "This", "causes", "the", "scene", "to", "appear", "to", "translate", "right", "and", "up", "(", "i", ".", "e", ".", "what", "really", "happens", "is", "the", "camera", "is", "translated", "left", "and", "down", ")", ".", "This", "is", "also", "called", "panning", "in", "some", "software", "packages", ".", "Passing", "in", "negative", "delta", "values", "causes", "the", "opposite", "motion", "." ]
python
train
40.428571
ansible/ansible-container
container/core.py
https://github.com/ansible/ansible-container/blob/d031c1a6133d5482a5d054fcbdbecafb923f8b4b/container/core.py#L371-L390
def hostcmd_push(base_path, project_name, engine_name, vars_files=None, config_file=None, **kwargs): """ Push images to a registry. Requires authenticating with the registry prior to starting the push. If your engine's config file does not already contain an authorization for the registry, pass username and/or password. If you exclude password, you will be prompted. """ assert_initialized(base_path, config_file) config = get_config(base_path, vars_files=vars_files, engine_name=engine_name, project_name=project_name, config_file=config_file) engine_obj = load_engine(['LOGIN', 'PUSH'], engine_name, config.project_name, config['services'], **kwargs) logger.debug('PROJECT NAME', project_name=config.project_name) push_images(base_path, config.image_namespace, engine_obj, config, save_conductor=config.save_conductor, **kwargs)
[ "def", "hostcmd_push", "(", "base_path", ",", "project_name", ",", "engine_name", ",", "vars_files", "=", "None", ",", "config_file", "=", "None", ",", "*", "*", "kwargs", ")", ":", "assert_initialized", "(", "base_path", ",", "config_file", ")", "config", "=", "get_config", "(", "base_path", ",", "vars_files", "=", "vars_files", ",", "engine_name", "=", "engine_name", ",", "project_name", "=", "project_name", ",", "config_file", "=", "config_file", ")", "engine_obj", "=", "load_engine", "(", "[", "'LOGIN'", ",", "'PUSH'", "]", ",", "engine_name", ",", "config", ".", "project_name", ",", "config", "[", "'services'", "]", ",", "*", "*", "kwargs", ")", "logger", ".", "debug", "(", "'PROJECT NAME'", ",", "project_name", "=", "config", ".", "project_name", ")", "push_images", "(", "base_path", ",", "config", ".", "image_namespace", ",", "engine_obj", ",", "config", ",", "save_conductor", "=", "config", ".", "save_conductor", ",", "*", "*", "kwargs", ")" ]
Push images to a registry. Requires authenticating with the registry prior to starting the push. If your engine's config file does not already contain an authorization for the registry, pass username and/or password. If you exclude password, you will be prompted.
[ "Push", "images", "to", "a", "registry", ".", "Requires", "authenticating", "with", "the", "registry", "prior", "to", "starting", "the", "push", ".", "If", "your", "engine", "s", "config", "file", "does", "not", "already", "contain", "an", "authorization", "for", "the", "registry", "pass", "username", "and", "/", "or", "password", ".", "If", "you", "exclude", "password", "you", "will", "be", "prompted", "." ]
python
train
50.8
marrow/mailer
marrow/mailer/message.py
https://github.com/marrow/mailer/blob/3995ef98a3f7feb75f1aeb652e6afe40a5c94def/marrow/mailer/message.py#L209-L252
def mime(self): """Produce the final MIME message.""" author = self.author sender = self.sender if not author: raise ValueError("You must specify an author.") if not self.subject: raise ValueError("You must specify a subject.") if len(self.recipients) == 0: raise ValueError("You must specify at least one recipient.") if not self.plain: raise ValueError("You must provide plain text content.") # DISCUSS: Take the first author, or raise this error? # if len(author) > 1 and len(sender) == 0: # raise ValueError('If there are multiple authors of message, you must specify a sender!') # if len(sender) > 1: # raise ValueError('You must not specify more than one sender!') if not self._dirty and self._processed: return self._mime self._processed = False plain = MIMEText(self._callable(self.plain), 'plain', self.encoding) rich = None if self.rich: rich = MIMEText(self._callable(self.rich), 'html', self.encoding) message = self._mime_document(plain, rich) headers = self._build_header_list(author, sender) self._add_headers_to_message(message, headers) self._mime = message self._processed = True self._dirty = False return message
[ "def", "mime", "(", "self", ")", ":", "author", "=", "self", ".", "author", "sender", "=", "self", ".", "sender", "if", "not", "author", ":", "raise", "ValueError", "(", "\"You must specify an author.\"", ")", "if", "not", "self", ".", "subject", ":", "raise", "ValueError", "(", "\"You must specify a subject.\"", ")", "if", "len", "(", "self", ".", "recipients", ")", "==", "0", ":", "raise", "ValueError", "(", "\"You must specify at least one recipient.\"", ")", "if", "not", "self", ".", "plain", ":", "raise", "ValueError", "(", "\"You must provide plain text content.\"", ")", "# DISCUSS: Take the first author, or raise this error?", "# if len(author) > 1 and len(sender) == 0:", "#\t raise ValueError('If there are multiple authors of message, you must specify a sender!')", "# if len(sender) > 1:", "#\t raise ValueError('You must not specify more than one sender!')", "if", "not", "self", ".", "_dirty", "and", "self", ".", "_processed", ":", "return", "self", ".", "_mime", "self", ".", "_processed", "=", "False", "plain", "=", "MIMEText", "(", "self", ".", "_callable", "(", "self", ".", "plain", ")", ",", "'plain'", ",", "self", ".", "encoding", ")", "rich", "=", "None", "if", "self", ".", "rich", ":", "rich", "=", "MIMEText", "(", "self", ".", "_callable", "(", "self", ".", "rich", ")", ",", "'html'", ",", "self", ".", "encoding", ")", "message", "=", "self", ".", "_mime_document", "(", "plain", ",", "rich", ")", "headers", "=", "self", ".", "_build_header_list", "(", "author", ",", "sender", ")", "self", ".", "_add_headers_to_message", "(", "message", ",", "headers", ")", "self", ".", "_mime", "=", "message", "self", ".", "_processed", "=", "True", "self", ".", "_dirty", "=", "False", "return", "message" ]
Produce the final MIME message.
[ "Produce", "the", "final", "MIME", "message", "." ]
python
train
26.636364
cni/MRS
MRS/utils.py
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/utils.py#L479-L490
def two_lorentzian(freq, freq0_1, freq0_2, area1, area2, hwhm1, hwhm2, phase1, phase2, offset, drift): """ A two-Lorentzian model. This is simply the sum of two lorentzian functions in some part of the spectrum. Each individual Lorentzian has its own peak frequency, area, hwhm and phase, but they share common offset and drift parameters. """ return (lorentzian(freq, freq0_1, area1, hwhm1, phase1, offset, drift) + lorentzian(freq, freq0_2, area2, hwhm2, phase2, offset, drift))
[ "def", "two_lorentzian", "(", "freq", ",", "freq0_1", ",", "freq0_2", ",", "area1", ",", "area2", ",", "hwhm1", ",", "hwhm2", ",", "phase1", ",", "phase2", ",", "offset", ",", "drift", ")", ":", "return", "(", "lorentzian", "(", "freq", ",", "freq0_1", ",", "area1", ",", "hwhm1", ",", "phase1", ",", "offset", ",", "drift", ")", "+", "lorentzian", "(", "freq", ",", "freq0_2", ",", "area2", ",", "hwhm2", ",", "phase2", ",", "offset", ",", "drift", ")", ")" ]
A two-Lorentzian model. This is simply the sum of two lorentzian functions in some part of the spectrum. Each individual Lorentzian has its own peak frequency, area, hwhm and phase, but they share common offset and drift parameters.
[ "A", "two", "-", "Lorentzian", "model", "." ]
python
train
43.75
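An illustrative call to `two_lorentzian` above on a synthetic frequency axis; the numeric values are placeholders and the import path is assumed from the file location (MRS/utils.py):

import numpy as np
# from MRS.utils import two_lorentzian   # assumed import, matching the file shown above

freq = np.linspace(2.8, 3.2, 500)        # e.g. a ppm axis around two nearby peaks
spectrum = two_lorentzian(freq,
                          freq0_1=2.95, freq0_2=3.05,   # separate peak frequencies
                          area1=1.0, area2=0.7,
                          hwhm1=0.01, hwhm2=0.015,
                          phase1=0.0, phase2=0.0,
                          offset=0.0, drift=0.0)         # shared baseline terms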
pytroll/satpy
satpy/multiscene.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/multiscene.py#L258-L273
def blend(self, blend_function=stack): """Blend the datasets into one scene. .. note:: Blending is not currently optimized for generator-based MultiScene. """ new_scn = Scene() common_datasets = self.shared_dataset_ids for ds_id in common_datasets: datasets = [scn[ds_id] for scn in self.scenes if ds_id in scn] new_scn[ds_id] = blend_function(datasets) return new_scn
[ "def", "blend", "(", "self", ",", "blend_function", "=", "stack", ")", ":", "new_scn", "=", "Scene", "(", ")", "common_datasets", "=", "self", ".", "shared_dataset_ids", "for", "ds_id", "in", "common_datasets", ":", "datasets", "=", "[", "scn", "[", "ds_id", "]", "for", "scn", "in", "self", ".", "scenes", "if", "ds_id", "in", "scn", "]", "new_scn", "[", "ds_id", "]", "=", "blend_function", "(", "datasets", ")", "return", "new_scn" ]
Blend the datasets into one scene. .. note:: Blending is not currently optimized for generator-based MultiScene.
[ "Blend", "the", "datasets", "into", "one", "scene", "." ]
python
train
28.8125
Erotemic/ubelt
ubelt/util_import.py
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_import.py#L430-L479
def normalize_modpath(modpath, hide_init=True, hide_main=False): """ Normalizes __init__ and __main__ paths. Notes: Adds __init__ if reasonable, but only removes __main__ by default Args: hide_init (bool): if True, always return package modules as __init__.py files otherwise always return the dpath. hide_init (bool): if True, always strip away main files otherwise ignore __main__.py. CommandLine: xdoctest -m xdoctest.static_analysis normalize_modpath Example: >>> import xdoctest.static_analysis as static >>> modpath = static.__file__ >>> assert static.normalize_modpath(modpath) == modpath.replace('.pyc', '.py') >>> dpath = dirname(modpath) >>> res0 = static.normalize_modpath(dpath, hide_init=0, hide_main=0) >>> res1 = static.normalize_modpath(dpath, hide_init=0, hide_main=1) >>> res2 = static.normalize_modpath(dpath, hide_init=1, hide_main=0) >>> res3 = static.normalize_modpath(dpath, hide_init=1, hide_main=1) >>> assert res0.endswith('__init__.py') >>> assert res1.endswith('__init__.py') >>> assert not res2.endswith('.py') >>> assert not res3.endswith('.py') """ if six.PY2: if modpath.endswith('.pyc'): modpath = modpath[:-1] if hide_init: if basename(modpath) == '__init__.py': modpath = dirname(modpath) hide_main = True else: # add in init, if reasonable modpath_with_init = join(modpath, '__init__.py') if exists(modpath_with_init): modpath = modpath_with_init if hide_main: # We can remove main, but dont add it if basename(modpath) == '__main__.py': # corner case where main might just be a module name not in a pkg parallel_init = join(dirname(modpath), '__init__.py') if exists(parallel_init): modpath = dirname(modpath) return modpath
[ "def", "normalize_modpath", "(", "modpath", ",", "hide_init", "=", "True", ",", "hide_main", "=", "False", ")", ":", "if", "six", ".", "PY2", ":", "if", "modpath", ".", "endswith", "(", "'.pyc'", ")", ":", "modpath", "=", "modpath", "[", ":", "-", "1", "]", "if", "hide_init", ":", "if", "basename", "(", "modpath", ")", "==", "'__init__.py'", ":", "modpath", "=", "dirname", "(", "modpath", ")", "hide_main", "=", "True", "else", ":", "# add in init, if reasonable", "modpath_with_init", "=", "join", "(", "modpath", ",", "'__init__.py'", ")", "if", "exists", "(", "modpath_with_init", ")", ":", "modpath", "=", "modpath_with_init", "if", "hide_main", ":", "# We can remove main, but dont add it", "if", "basename", "(", "modpath", ")", "==", "'__main__.py'", ":", "# corner case where main might just be a module name not in a pkg", "parallel_init", "=", "join", "(", "dirname", "(", "modpath", ")", ",", "'__init__.py'", ")", "if", "exists", "(", "parallel_init", ")", ":", "modpath", "=", "dirname", "(", "modpath", ")", "return", "modpath" ]
Normalizes __init__ and __main__ paths. Notes: Adds __init__ if reasonable, but only removes __main__ by default Args: hide_init (bool): if True, always return package modules as __init__.py files otherwise always return the dpath. hide_init (bool): if True, always strip away main files otherwise ignore __main__.py. CommandLine: xdoctest -m xdoctest.static_analysis normalize_modpath Example: >>> import xdoctest.static_analysis as static >>> modpath = static.__file__ >>> assert static.normalize_modpath(modpath) == modpath.replace('.pyc', '.py') >>> dpath = dirname(modpath) >>> res0 = static.normalize_modpath(dpath, hide_init=0, hide_main=0) >>> res1 = static.normalize_modpath(dpath, hide_init=0, hide_main=1) >>> res2 = static.normalize_modpath(dpath, hide_init=1, hide_main=0) >>> res3 = static.normalize_modpath(dpath, hide_init=1, hide_main=1) >>> assert res0.endswith('__init__.py') >>> assert res1.endswith('__init__.py') >>> assert not res2.endswith('.py') >>> assert not res3.endswith('.py')
[ "Normalizes", "__init__", "and", "__main__", "paths", "." ]
python
valid
39.4
earwig/mwparserfromhell
mwparserfromhell/wikicode.py
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/wikicode.py#L70-L74
def _slice_replace(code, index, old, new): """Replace the string *old* with *new* across *index* in *code*.""" nodes = [str(node) for node in code.get(index)] substring = "".join(nodes).replace(old, new) code.nodes[index] = parse_anything(substring).nodes
[ "def", "_slice_replace", "(", "code", ",", "index", ",", "old", ",", "new", ")", ":", "nodes", "=", "[", "str", "(", "node", ")", "for", "node", "in", "code", ".", "get", "(", "index", ")", "]", "substring", "=", "\"\"", ".", "join", "(", "nodes", ")", ".", "replace", "(", "old", ",", "new", ")", "code", ".", "nodes", "[", "index", "]", "=", "parse_anything", "(", "substring", ")", ".", "nodes" ]
Replace the string *old* with *new* across *index* in *code*.
[ "Replace", "the", "string", "*", "old", "*", "with", "*", "new", "*", "across", "*", "index", "*", "in", "*", "code", "*", "." ]
python
train
56.6
APSL/transmanager
transmanager/manager.py
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L306-L318
def get_languages_from_item(ct_item, item): """ Get the languages configured for the current item :param ct_item: :param item: :return: """ try: item_lan = TransItemLanguage.objects.filter(content_type__pk=ct_item.id, object_id=item.id).get() languages = [lang.code for lang in item_lan.languages.all()] return languages except TransItemLanguage.DoesNotExist: return []
[ "def", "get_languages_from_item", "(", "ct_item", ",", "item", ")", ":", "try", ":", "item_lan", "=", "TransItemLanguage", ".", "objects", ".", "filter", "(", "content_type__pk", "=", "ct_item", ".", "id", ",", "object_id", "=", "item", ".", "id", ")", ".", "get", "(", ")", "languages", "=", "[", "lang", ".", "code", "for", "lang", "in", "item_lan", ".", "languages", ".", "all", "(", ")", "]", "return", "languages", "except", "TransItemLanguage", ".", "DoesNotExist", ":", "return", "[", "]" ]
Get the languages configured for the current item :param ct_item: :param item: :return:
[ "Get", "the", "languages", "configured", "for", "the", "current", "item", ":", "param", "ct_item", ":", ":", "param", "item", ":", ":", "return", ":" ]
python
train
36.076923
zblz/naima
naima/plot.py
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/plot.py#L741-L782
def plot_blob( sampler, blobidx=0, label=None, last_step=False, figure=None, **kwargs ): """ Plot a metadata blob as a fit to spectral data or value distribution Additional ``kwargs`` are passed to `plot_fit`. Parameters ---------- sampler : `emcee.EnsembleSampler` Sampler with a stored chain. blobidx : int, optional Metadata blob index to plot. label : str, optional Label for the value distribution. Labels for the fit plot can be passed as ``xlabel`` and ``ylabel`` and will be passed to `plot_fit`. Returns ------- figure : `matplotlib.pyplot.Figure` `matplotlib` figure instance containing the plot. """ modelx, model = _process_blob(sampler, blobidx, last_step) if label is None: label = "Model output {0}".format(blobidx) if modelx is None: # Blob is scalar, plot distribution f = plot_distribution(model, label, figure=figure) else: f = plot_fit( sampler, modelidx=blobidx, last_step=last_step, label=label, figure=figure, **kwargs ) return f
[ "def", "plot_blob", "(", "sampler", ",", "blobidx", "=", "0", ",", "label", "=", "None", ",", "last_step", "=", "False", ",", "figure", "=", "None", ",", "*", "*", "kwargs", ")", ":", "modelx", ",", "model", "=", "_process_blob", "(", "sampler", ",", "blobidx", ",", "last_step", ")", "if", "label", "is", "None", ":", "label", "=", "\"Model output {0}\"", ".", "format", "(", "blobidx", ")", "if", "modelx", "is", "None", ":", "# Blob is scalar, plot distribution", "f", "=", "plot_distribution", "(", "model", ",", "label", ",", "figure", "=", "figure", ")", "else", ":", "f", "=", "plot_fit", "(", "sampler", ",", "modelidx", "=", "blobidx", ",", "last_step", "=", "last_step", ",", "label", "=", "label", ",", "figure", "=", "figure", ",", "*", "*", "kwargs", ")", "return", "f" ]
Plot a metadata blob as a fit to spectral data or value distribution Additional ``kwargs`` are passed to `plot_fit`. Parameters ---------- sampler : `emcee.EnsembleSampler` Sampler with a stored chain. blobidx : int, optional Metadata blob index to plot. label : str, optional Label for the value distribution. Labels for the fit plot can be passed as ``xlabel`` and ``ylabel`` and will be passed to `plot_fit`. Returns ------- figure : `matplotlib.pyplot.Figure` `matplotlib` figure instance containing the plot.
[ "Plot", "a", "metadata", "blob", "as", "a", "fit", "to", "spectral", "data", "or", "value", "distribution" ]
python
train
27.166667
python-visualization/folium
folium/features.py
https://github.com/python-visualization/folium/blob/8595240517135d1637ca4cf7cc624045f1d911b3/folium/features.py#L725-L736
def style_data(self): """Applies self.style_function to each feature of self.data.""" def recursive_get(data, keys): if len(keys): return recursive_get(data.get(keys[0]), keys[1:]) else: return data geometries = recursive_get(self.data, self.object_path.split('.'))['geometries'] # noqa for feature in geometries: feature.setdefault('properties', {}).setdefault('style', {}).update(self.style_function(feature))
[ "def", "style_data", "(", "self", ")", ":", "def", "recursive_get", "(", "data", ",", "keys", ")", ":", "if", "len", "(", "keys", ")", ":", "return", "recursive_get", "(", "data", ".", "get", "(", "keys", "[", "0", "]", ")", ",", "keys", "[", "1", ":", "]", ")", "else", ":", "return", "data", "geometries", "=", "recursive_get", "(", "self", ".", "data", ",", "self", ".", "object_path", ".", "split", "(", "'.'", ")", ")", "[", "'geometries'", "]", "# noqa", "for", "feature", "in", "geometries", ":", "feature", ".", "setdefault", "(", "'properties'", ",", "{", "}", ")", ".", "setdefault", "(", "'style'", ",", "{", "}", ")", ".", "update", "(", "self", ".", "style_function", "(", "feature", ")", ")" ]
Applies self.style_function to each feature of self.data.
[ "Applies", "self", ".", "style_function", "to", "each", "feature", "of", "self", ".", "data", "." ]
python
train
41.916667
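The `recursive_get` helper inside `style_data` above is easy to exercise on its own; a small sketch with a made-up nested mapping:

def recursive_get(data, keys):
    # Descend one key per call until the key list is exhausted
    if len(keys):
        return recursive_get(data.get(keys[0]), keys[1:])
    else:
        return data

topo = {'objects': {'counties': {'geometries': [{'type': 'Polygon'}]}}}
recursive_get(topo, 'objects.counties'.split('.'))   # -> {'geometries': [{'type': 'Polygon'}]}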
bharadwajyarlagadda/bingmaps
bingmaps/urls/elevations_build_urls.py
https://github.com/bharadwajyarlagadda/bingmaps/blob/6bb3cdadfb121aaff96704509cedff2710a62b6d/bingmaps/urls/elevations_build_urls.py#L285-L309
def build_query_string(self, data): """This method occurs after dumping the data into the class. Args: data (dict): dictionary of all the query values Returns: data (dict): ordered dict of all the values """ query = [] keys_to_be_removed = [] for key, value in data.items(): if key not in ['version', 'restApi', 'resourcePath']: if not key == 'method': if key == 'points': value = ','.join(str(val) for val in value) keys_to_be_removed.append(key) query.append('{0}={1}'.format(key, value)) keys_to_be_removed.append(key) keys_to_be_removed.append(key) querystring = '&'.join(query) data['query'] = '{0}?{1}'.format(data['method'], querystring) for k in list(set(keys_to_be_removed)): del data[k] return data
[ "def", "build_query_string", "(", "self", ",", "data", ")", ":", "query", "=", "[", "]", "keys_to_be_removed", "=", "[", "]", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "key", "not", "in", "[", "'version'", ",", "'restApi'", ",", "'resourcePath'", "]", ":", "if", "not", "key", "==", "'method'", ":", "if", "key", "==", "'points'", ":", "value", "=", "','", ".", "join", "(", "str", "(", "val", ")", "for", "val", "in", "value", ")", "keys_to_be_removed", ".", "append", "(", "key", ")", "query", ".", "append", "(", "'{0}={1}'", ".", "format", "(", "key", ",", "value", ")", ")", "keys_to_be_removed", ".", "append", "(", "key", ")", "keys_to_be_removed", ".", "append", "(", "key", ")", "querystring", "=", "'&'", ".", "join", "(", "query", ")", "data", "[", "'query'", "]", "=", "'{0}?{1}'", ".", "format", "(", "data", "[", "'method'", "]", ",", "querystring", ")", "for", "k", "in", "list", "(", "set", "(", "keys_to_be_removed", ")", ")", ":", "del", "data", "[", "k", "]", "return", "data" ]
This method occurs after dumping the data into the class. Args: data (dict): dictionary of all the query values Returns: data (dict): ordered dict of all the values
[ "This", "method", "occurs", "after", "dumping", "the", "data", "into", "the", "class", "." ]
python
train
38.48
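A worked example of what `build_query_string` above does to its input; the parameter names and values are hypothetical, only the transformation follows the code:

data = {'method': 'Polygon', 'points': [15.5, 10.5, 20.5, 25.5],
        'heights': 'sealevel', 'key': 'abc',
        'version': 'v1', 'restApi': 'Elevation', 'resourcePath': ''}
# After build_query_string(data): the list-valued 'points' is comma-joined, every key
# except 'version', 'restApi' and 'resourcePath' is deleted, and a 'query' entry is added:
# {'version': 'v1', 'restApi': 'Elevation', 'resourcePath': '',
#  'query': 'Polygon?points=15.5,10.5,20.5,25.5&heights=sealevel&key=abc'}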
the01/python-paps
paps/person.py
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/person.py#L103-L118
def from_bits(self, bits): """ Set this person from bits (ignores the id) :param bits: Bits representing a person :type bits: bytearray :rtype: Person :raises ValueError: Bits has an unexpected length """ # TODO include id if len(bits) != Person.BITS_PER_PERSON: raise ValueError(u"Person requires exactly {} bits".format( Person.BITS_PER_PERSON )) self.sitting = bool(bits[0]) return self
[ "def", "from_bits", "(", "self", ",", "bits", ")", ":", "# TODO include id", "if", "len", "(", "bits", ")", "!=", "Person", ".", "BITS_PER_PERSON", ":", "raise", "ValueError", "(", "u\"Person requires exactly {} bits\"", ".", "format", "(", "Person", ".", "BITS_PER_PERSON", ")", ")", "self", ".", "sitting", "=", "bool", "(", "bits", "[", "0", "]", ")", "return", "self" ]
Set this person from bits (ignores the id) :param bits: Bits representing a person :type bits: bytearray :rtype: Person :raises ValueError: Bits has an unexpected length
[ "Set", "this", "person", "from", "bits", "(", "ignores", "the", "id", ")" ]
python
train
31.4375
apple/turicreate
src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py#L35-L402
def create(style_dataset, content_dataset, style_feature=None, content_feature=None, max_iterations=None, model='resnet-16', verbose=True, batch_size = 6, **kwargs): """ Create a :class:`StyleTransfer` model. Parameters ---------- style_dataset: SFrame Input style images. The columns named by the ``style_feature`` parameters will be extracted for training the model. content_dataset : SFrame Input content images. The columns named by the ``content_feature`` parameters will be extracted for training the model. style_feature: string Name of the column containing the input images in style SFrame. 'None' (the default) indicates the only image column in the style SFrame should be used as the feature. content_feature: string Name of the column containing the input images in content SFrame. 'None' (the default) indicates the only image column in the content SFrame should be used as the feature. max_iterations : int The number of training iterations. If 'None' (the default), then it will be automatically determined based on the amount of data you provide. model : string optional Style transfer model to use: - "resnet-16" : Fast and small-sized residual network that uses VGG-16 as reference network during training. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve training throughput. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : StyleTransfer A trained :class:`StyleTransfer` model. See Also -------- StyleTransfer Examples -------- .. sourcecode:: python # Create datasets >>> content_dataset = turicreate.image_analysis.load_images('content_images/') >>> style_dataset = turicreate.image_analysis.load_images('style_images/') # Train a style transfer model >>> model = turicreate.style_transfer.create(content_dataset, style_dataset) # Stylize an image on all styles >>> stylized_images = model.stylize(data) # Visualize the stylized images >>> stylized_images.explore() """ if len(style_dataset) == 0: raise _ToolkitError("style_dataset SFrame cannot be empty") if len(content_dataset) == 0: raise _ToolkitError("content_dataset SFrame cannot be empty") if(batch_size < 1): raise _ToolkitError("'batch_size' must be greater than or equal to 1") from ._sframe_loader import SFrameSTIter as _SFrameSTIter import mxnet as _mx from .._mxnet import _mxnet_utils if style_feature is None: style_feature = _tkutl._find_only_image_column(style_dataset) if content_feature is None: content_feature = _tkutl._find_only_image_column(content_dataset) if verbose: print("Using '{}' in style_dataset as feature column and using " "'{}' in content_dataset as feature column".format(style_feature, content_feature)) _raise_error_if_not_training_sframe(style_dataset, style_feature) _raise_error_if_not_training_sframe(content_dataset, content_feature) params = { 'batch_size': batch_size, 'vgg16_content_loss_layer': 2, # conv3_3 layer 'lr': 0.001, 'content_loss_mult': 1.0, 'style_loss_mult': [1e-4, 1e-4, 1e-4, 1e-4], # conv 1-4 layers 'finetune_all_params': True, 'pretrained_weights': False, 'print_loss_breakdown': False, 'input_shape': (256, 256), 'training_content_loader_type': 'stretch', 'use_augmentation': False, 'sequential_image_processing': False, # Only used if use_augmentaion is True 'aug_resize': 0, 'aug_min_object_covered': 0, 'aug_rand_crop': 0.9, 'aug_rand_pad': 0.9, 'aug_rand_gray': 0.0, 'aug_aspect_ratio': 1.25, 'aug_hue': 0.05, 'aug_brightness': 0.05, 'aug_saturation': 
0.05, 'aug_contrast': 0.05, 'aug_horizontal_flip': True, 'aug_area_range': (.05, 1.5), 'aug_pca_noise': 0.0, 'aug_max_attempts': 20, 'aug_inter_method': 2, } if '_advanced_parameters' in kwargs: # Make sure no additional parameters are provided new_keys = set(kwargs['_advanced_parameters'].keys()) set_keys = set(params.keys()) unsupported = new_keys - set_keys if unsupported: raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported)) params.update(kwargs['_advanced_parameters']) _content_loss_mult = params['content_loss_mult'] _style_loss_mult = params['style_loss_mult'] num_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=params['batch_size']) batch_size_each = params['batch_size'] // max(num_gpus, 1) batch_size = max(num_gpus, 1) * batch_size_each input_shape = params['input_shape'] iterations = 0 if max_iterations is None: max_iterations = len(style_dataset) * 10000 if verbose: print('Setting max_iterations to be {}'.format(max_iterations)) # data loader if params['use_augmentation']: content_loader_type = '%s-with-augmentation' % params['training_content_loader_type'] else: content_loader_type = params['training_content_loader_type'] content_images_loader = _SFrameSTIter(content_dataset, batch_size, shuffle=True, feature_column=content_feature, input_shape=input_shape, loader_type=content_loader_type, aug_params=params, sequential=params['sequential_image_processing']) ctx = _mxnet_utils.get_mxnet_context(max_devices=params['batch_size']) num_styles = len(style_dataset) # TRANSFORMER MODEL from ._model import Transformer as _Transformer transformer_model_path = _pre_trained_models.STYLE_TRANSFER_BASE_MODELS[model]().get_model_path() transformer = _Transformer(num_styles, batch_size_each) transformer.collect_params().initialize(ctx=ctx) if params['pretrained_weights']: transformer.load_params(transformer_model_path, ctx, allow_missing=True) # For some reason, the transformer fails to hybridize for training, so we # avoid this until resolved # transformer.hybridize() # VGG MODEL from ._model import Vgg16 as _Vgg16 vgg_model_path = _pre_trained_models.STYLE_TRANSFER_BASE_MODELS['Vgg16']().get_model_path() vgg_model = _Vgg16() vgg_model.collect_params().initialize(ctx=ctx) vgg_model.load_params(vgg_model_path, ctx=ctx, ignore_extra=True) vgg_model.hybridize() # TRAINER from mxnet import gluon as _gluon from ._model import gram_matrix as _gram_matrix if params['finetune_all_params']: trainable_params = transformer.collect_params() else: trainable_params = transformer.collect_params('.*gamma|.*beta') trainer = _gluon.Trainer(trainable_params, 'adam', {'learning_rate': params['lr']}) mse_loss = _gluon.loss.L2Loss() start_time = _time.time() smoothed_loss = None last_time = 0 cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=params['batch_size']) num_mxnet_gpus = len(cuda_gpus) if verbose: # Estimate memory usage (based on experiments) cuda_mem_req = 260 + batch_size_each * 880 + num_styles * 1.4 _tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=False, cuda_mem_req=cuda_mem_req, has_mps_impl=False) # # Pre-compute gram matrices for style images # if verbose: print('Analyzing visual features of the style images') style_images_loader = _SFrameSTIter(style_dataset, batch_size, shuffle=False, num_epochs=1, feature_column=style_feature, input_shape=input_shape, loader_type='stretch', sequential=params['sequential_image_processing']) num_layers = len(params['style_loss_mult']) gram_chunks = [[] for _ in range(num_layers)] for s_batch in style_images_loader: 
s_data = _gluon.utils.split_and_load(s_batch.data[0], ctx_list=ctx, batch_axis=0) for s in s_data: vgg16_s = _vgg16_data_prep(s) ret = vgg_model(vgg16_s) grams = [_gram_matrix(x) for x in ret] for i, gram in enumerate(grams): if gram.context != _mx.cpu(0): gram = gram.as_in_context(_mx.cpu(0)) gram_chunks[i].append(gram) del style_images_loader grams = [ # The concatenated styles may be padded, so we slice overflow _mx.nd.concat(*chunks, dim=0)[:num_styles] for chunks in gram_chunks ] # A context->grams look-up table, where all the gram matrices have been # distributed ctx_grams = {} if ctx[0] == _mx.cpu(0): ctx_grams[_mx.cpu(0)] = grams else: for ctx0 in ctx: ctx_grams[ctx0] = [gram.as_in_context(ctx0) for gram in grams] # # Training loop # vgg_content_loss_layer = params['vgg16_content_loss_layer'] rs = _np.random.RandomState(1234) while iterations < max_iterations: content_images_loader.reset() for c_batch in content_images_loader: c_data = _gluon.utils.split_and_load(c_batch.data[0], ctx_list=ctx, batch_axis=0) Ls = [] curr_content_loss = [] curr_style_loss = [] with _mx.autograd.record(): for c in c_data: # Randomize styles to train indices = _mx.nd.array(rs.randint(num_styles, size=batch_size_each), dtype=_np.int64, ctx=c.context) # Generate pastiche p = transformer(c, indices) # mean subtraction vgg16_p = _vgg16_data_prep(p) vgg16_c = _vgg16_data_prep(c) # vgg forward p_vgg_outputs = vgg_model(vgg16_p) c_vgg_outputs = vgg_model(vgg16_c) c_content_layer = c_vgg_outputs[vgg_content_loss_layer] p_content_layer = p_vgg_outputs[vgg_content_loss_layer] # Calculate Loss # Style Loss between style image and stylized image # Ls = sum of L2 norm of gram matrix of vgg16's conv layers style_losses = [] for gram, p_vgg_output, style_loss_mult in zip(ctx_grams[c.context], p_vgg_outputs, _style_loss_mult): gram_s_vgg = gram[indices] gram_p_vgg = _gram_matrix(p_vgg_output) style_losses.append(style_loss_mult * mse_loss(gram_s_vgg, gram_p_vgg)) style_loss = _mx.nd.add_n(*style_losses) # Content Loss between content image and stylized image # Lc = L2 norm at a single layer in vgg16 content_loss = _content_loss_mult * mse_loss(c_content_layer, p_content_layer) curr_content_loss.append(content_loss) curr_style_loss.append(style_loss) # Divide loss by large number to get into a more legible # range total_loss = (content_loss + style_loss) / 10000.0 Ls.append(total_loss) for L in Ls: L.backward() cur_loss = _np.mean([L.asnumpy()[0] for L in Ls]) if smoothed_loss is None: smoothed_loss = cur_loss else: smoothed_loss = 0.9 * smoothed_loss + 0.1 * cur_loss iterations += 1 trainer.step(batch_size) if verbose and iterations == 1: # Print progress table header column_names = ['Iteration', 'Loss', 'Elapsed Time'] num_columns = len(column_names) column_width = max(map(lambda x: len(x), column_names)) + 2 hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+' print(hr) print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1)) print(hr) cur_time = _time.time() if verbose and (cur_time > last_time + 10 or iterations == max_iterations): # Print progress table row elapsed_time = cur_time - start_time print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format( cur_iter = iterations, loss = smoothed_loss, time = elapsed_time , width = column_width-1)) if params['print_loss_breakdown']: print_content_loss = _np.mean([L.asnumpy()[0] for L in curr_content_loss]) print_style_loss = _np.mean([L.asnumpy()[0] for L in curr_style_loss]) print('Total Loss: {:6.3f} | 
Content Loss: {:6.3f} | Style Loss: {:6.3f}'.format(cur_loss, print_content_loss, print_style_loss)) last_time = cur_time if iterations == max_iterations: print(hr) break training_time = _time.time() - start_time style_sa = style_dataset[style_feature] idx_column = _tc.SArray(range(0, style_sa.shape[0])) style_sframe = _tc.SFrame({"style": idx_column, style_feature: style_sa}) # Save the model state state = { '_model': transformer, '_training_time_as_string': _seconds_as_string(training_time), 'batch_size': batch_size, 'num_styles': num_styles, 'model': model, 'input_image_shape': input_shape, 'styles': style_sframe, 'num_content_images': len(content_dataset), 'training_time': training_time, 'max_iterations': max_iterations, 'training_iterations': iterations, 'training_epochs': content_images_loader.cur_epoch, 'style_feature': style_feature, 'content_feature': content_feature, "_index_column": "style", 'training_loss': smoothed_loss, } return StyleTransfer(state)
[ "def", "create", "(", "style_dataset", ",", "content_dataset", ",", "style_feature", "=", "None", ",", "content_feature", "=", "None", ",", "max_iterations", "=", "None", ",", "model", "=", "'resnet-16'", ",", "verbose", "=", "True", ",", "batch_size", "=", "6", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "style_dataset", ")", "==", "0", ":", "raise", "_ToolkitError", "(", "\"style_dataset SFrame cannot be empty\"", ")", "if", "len", "(", "content_dataset", ")", "==", "0", ":", "raise", "_ToolkitError", "(", "\"content_dataset SFrame cannot be empty\"", ")", "if", "(", "batch_size", "<", "1", ")", ":", "raise", "_ToolkitError", "(", "\"'batch_size' must be greater than or equal to 1\"", ")", "from", ".", "_sframe_loader", "import", "SFrameSTIter", "as", "_SFrameSTIter", "import", "mxnet", "as", "_mx", "from", ".", ".", "_mxnet", "import", "_mxnet_utils", "if", "style_feature", "is", "None", ":", "style_feature", "=", "_tkutl", ".", "_find_only_image_column", "(", "style_dataset", ")", "if", "content_feature", "is", "None", ":", "content_feature", "=", "_tkutl", ".", "_find_only_image_column", "(", "content_dataset", ")", "if", "verbose", ":", "print", "(", "\"Using '{}' in style_dataset as feature column and using \"", "\"'{}' in content_dataset as feature column\"", ".", "format", "(", "style_feature", ",", "content_feature", ")", ")", "_raise_error_if_not_training_sframe", "(", "style_dataset", ",", "style_feature", ")", "_raise_error_if_not_training_sframe", "(", "content_dataset", ",", "content_feature", ")", "params", "=", "{", "'batch_size'", ":", "batch_size", ",", "'vgg16_content_loss_layer'", ":", "2", ",", "# conv3_3 layer", "'lr'", ":", "0.001", ",", "'content_loss_mult'", ":", "1.0", ",", "'style_loss_mult'", ":", "[", "1e-4", ",", "1e-4", ",", "1e-4", ",", "1e-4", "]", ",", "# conv 1-4 layers", "'finetune_all_params'", ":", "True", ",", "'pretrained_weights'", ":", "False", ",", "'print_loss_breakdown'", ":", "False", ",", "'input_shape'", ":", "(", "256", ",", "256", ")", ",", "'training_content_loader_type'", ":", "'stretch'", ",", "'use_augmentation'", ":", "False", ",", "'sequential_image_processing'", ":", "False", ",", "# Only used if use_augmentaion is True", "'aug_resize'", ":", "0", ",", "'aug_min_object_covered'", ":", "0", ",", "'aug_rand_crop'", ":", "0.9", ",", "'aug_rand_pad'", ":", "0.9", ",", "'aug_rand_gray'", ":", "0.0", ",", "'aug_aspect_ratio'", ":", "1.25", ",", "'aug_hue'", ":", "0.05", ",", "'aug_brightness'", ":", "0.05", ",", "'aug_saturation'", ":", "0.05", ",", "'aug_contrast'", ":", "0.05", ",", "'aug_horizontal_flip'", ":", "True", ",", "'aug_area_range'", ":", "(", ".05", ",", "1.5", ")", ",", "'aug_pca_noise'", ":", "0.0", ",", "'aug_max_attempts'", ":", "20", ",", "'aug_inter_method'", ":", "2", ",", "}", "if", "'_advanced_parameters'", "in", "kwargs", ":", "# Make sure no additional parameters are provided", "new_keys", "=", "set", "(", "kwargs", "[", "'_advanced_parameters'", "]", ".", "keys", "(", ")", ")", "set_keys", "=", "set", "(", "params", ".", "keys", "(", ")", ")", "unsupported", "=", "new_keys", "-", "set_keys", "if", "unsupported", ":", "raise", "_ToolkitError", "(", "'Unknown advanced parameters: {}'", ".", "format", "(", "unsupported", ")", ")", "params", ".", "update", "(", "kwargs", "[", "'_advanced_parameters'", "]", ")", "_content_loss_mult", "=", "params", "[", "'content_loss_mult'", "]", "_style_loss_mult", "=", "params", "[", "'style_loss_mult'", "]", "num_gpus", "=", "_mxnet_utils", ".", "get_num_gpus_in_use", 
"(", "max_devices", "=", "params", "[", "'batch_size'", "]", ")", "batch_size_each", "=", "params", "[", "'batch_size'", "]", "//", "max", "(", "num_gpus", ",", "1", ")", "batch_size", "=", "max", "(", "num_gpus", ",", "1", ")", "*", "batch_size_each", "input_shape", "=", "params", "[", "'input_shape'", "]", "iterations", "=", "0", "if", "max_iterations", "is", "None", ":", "max_iterations", "=", "len", "(", "style_dataset", ")", "*", "10000", "if", "verbose", ":", "print", "(", "'Setting max_iterations to be {}'", ".", "format", "(", "max_iterations", ")", ")", "# data loader", "if", "params", "[", "'use_augmentation'", "]", ":", "content_loader_type", "=", "'%s-with-augmentation'", "%", "params", "[", "'training_content_loader_type'", "]", "else", ":", "content_loader_type", "=", "params", "[", "'training_content_loader_type'", "]", "content_images_loader", "=", "_SFrameSTIter", "(", "content_dataset", ",", "batch_size", ",", "shuffle", "=", "True", ",", "feature_column", "=", "content_feature", ",", "input_shape", "=", "input_shape", ",", "loader_type", "=", "content_loader_type", ",", "aug_params", "=", "params", ",", "sequential", "=", "params", "[", "'sequential_image_processing'", "]", ")", "ctx", "=", "_mxnet_utils", ".", "get_mxnet_context", "(", "max_devices", "=", "params", "[", "'batch_size'", "]", ")", "num_styles", "=", "len", "(", "style_dataset", ")", "# TRANSFORMER MODEL", "from", ".", "_model", "import", "Transformer", "as", "_Transformer", "transformer_model_path", "=", "_pre_trained_models", ".", "STYLE_TRANSFER_BASE_MODELS", "[", "model", "]", "(", ")", ".", "get_model_path", "(", ")", "transformer", "=", "_Transformer", "(", "num_styles", ",", "batch_size_each", ")", "transformer", ".", "collect_params", "(", ")", ".", "initialize", "(", "ctx", "=", "ctx", ")", "if", "params", "[", "'pretrained_weights'", "]", ":", "transformer", ".", "load_params", "(", "transformer_model_path", ",", "ctx", ",", "allow_missing", "=", "True", ")", "# For some reason, the transformer fails to hybridize for training, so we", "# avoid this until resolved", "# transformer.hybridize()", "# VGG MODEL", "from", ".", "_model", "import", "Vgg16", "as", "_Vgg16", "vgg_model_path", "=", "_pre_trained_models", ".", "STYLE_TRANSFER_BASE_MODELS", "[", "'Vgg16'", "]", "(", ")", ".", "get_model_path", "(", ")", "vgg_model", "=", "_Vgg16", "(", ")", "vgg_model", ".", "collect_params", "(", ")", ".", "initialize", "(", "ctx", "=", "ctx", ")", "vgg_model", ".", "load_params", "(", "vgg_model_path", ",", "ctx", "=", "ctx", ",", "ignore_extra", "=", "True", ")", "vgg_model", ".", "hybridize", "(", ")", "# TRAINER", "from", "mxnet", "import", "gluon", "as", "_gluon", "from", ".", "_model", "import", "gram_matrix", "as", "_gram_matrix", "if", "params", "[", "'finetune_all_params'", "]", ":", "trainable_params", "=", "transformer", ".", "collect_params", "(", ")", "else", ":", "trainable_params", "=", "transformer", ".", "collect_params", "(", "'.*gamma|.*beta'", ")", "trainer", "=", "_gluon", ".", "Trainer", "(", "trainable_params", ",", "'adam'", ",", "{", "'learning_rate'", ":", "params", "[", "'lr'", "]", "}", ")", "mse_loss", "=", "_gluon", ".", "loss", ".", "L2Loss", "(", ")", "start_time", "=", "_time", ".", "time", "(", ")", "smoothed_loss", "=", "None", "last_time", "=", "0", "cuda_gpus", "=", "_mxnet_utils", ".", "get_gpus_in_use", "(", "max_devices", "=", "params", "[", "'batch_size'", "]", ")", "num_mxnet_gpus", "=", "len", "(", "cuda_gpus", ")", "if", "verbose", ":", "# Estimate memory usage 
(based on experiments)", "cuda_mem_req", "=", "260", "+", "batch_size_each", "*", "880", "+", "num_styles", "*", "1.4", "_tkutl", ".", "_print_neural_compute_device", "(", "cuda_gpus", "=", "cuda_gpus", ",", "use_mps", "=", "False", ",", "cuda_mem_req", "=", "cuda_mem_req", ",", "has_mps_impl", "=", "False", ")", "#", "# Pre-compute gram matrices for style images", "#", "if", "verbose", ":", "print", "(", "'Analyzing visual features of the style images'", ")", "style_images_loader", "=", "_SFrameSTIter", "(", "style_dataset", ",", "batch_size", ",", "shuffle", "=", "False", ",", "num_epochs", "=", "1", ",", "feature_column", "=", "style_feature", ",", "input_shape", "=", "input_shape", ",", "loader_type", "=", "'stretch'", ",", "sequential", "=", "params", "[", "'sequential_image_processing'", "]", ")", "num_layers", "=", "len", "(", "params", "[", "'style_loss_mult'", "]", ")", "gram_chunks", "=", "[", "[", "]", "for", "_", "in", "range", "(", "num_layers", ")", "]", "for", "s_batch", "in", "style_images_loader", ":", "s_data", "=", "_gluon", ".", "utils", ".", "split_and_load", "(", "s_batch", ".", "data", "[", "0", "]", ",", "ctx_list", "=", "ctx", ",", "batch_axis", "=", "0", ")", "for", "s", "in", "s_data", ":", "vgg16_s", "=", "_vgg16_data_prep", "(", "s", ")", "ret", "=", "vgg_model", "(", "vgg16_s", ")", "grams", "=", "[", "_gram_matrix", "(", "x", ")", "for", "x", "in", "ret", "]", "for", "i", ",", "gram", "in", "enumerate", "(", "grams", ")", ":", "if", "gram", ".", "context", "!=", "_mx", ".", "cpu", "(", "0", ")", ":", "gram", "=", "gram", ".", "as_in_context", "(", "_mx", ".", "cpu", "(", "0", ")", ")", "gram_chunks", "[", "i", "]", ".", "append", "(", "gram", ")", "del", "style_images_loader", "grams", "=", "[", "# The concatenated styles may be padded, so we slice overflow", "_mx", ".", "nd", ".", "concat", "(", "*", "chunks", ",", "dim", "=", "0", ")", "[", ":", "num_styles", "]", "for", "chunks", "in", "gram_chunks", "]", "# A context->grams look-up table, where all the gram matrices have been", "# distributed", "ctx_grams", "=", "{", "}", "if", "ctx", "[", "0", "]", "==", "_mx", ".", "cpu", "(", "0", ")", ":", "ctx_grams", "[", "_mx", ".", "cpu", "(", "0", ")", "]", "=", "grams", "else", ":", "for", "ctx0", "in", "ctx", ":", "ctx_grams", "[", "ctx0", "]", "=", "[", "gram", ".", "as_in_context", "(", "ctx0", ")", "for", "gram", "in", "grams", "]", "#", "# Training loop", "#", "vgg_content_loss_layer", "=", "params", "[", "'vgg16_content_loss_layer'", "]", "rs", "=", "_np", ".", "random", ".", "RandomState", "(", "1234", ")", "while", "iterations", "<", "max_iterations", ":", "content_images_loader", ".", "reset", "(", ")", "for", "c_batch", "in", "content_images_loader", ":", "c_data", "=", "_gluon", ".", "utils", ".", "split_and_load", "(", "c_batch", ".", "data", "[", "0", "]", ",", "ctx_list", "=", "ctx", ",", "batch_axis", "=", "0", ")", "Ls", "=", "[", "]", "curr_content_loss", "=", "[", "]", "curr_style_loss", "=", "[", "]", "with", "_mx", ".", "autograd", ".", "record", "(", ")", ":", "for", "c", "in", "c_data", ":", "# Randomize styles to train", "indices", "=", "_mx", ".", "nd", ".", "array", "(", "rs", ".", "randint", "(", "num_styles", ",", "size", "=", "batch_size_each", ")", ",", "dtype", "=", "_np", ".", "int64", ",", "ctx", "=", "c", ".", "context", ")", "# Generate pastiche", "p", "=", "transformer", "(", "c", ",", "indices", ")", "# mean subtraction", "vgg16_p", "=", "_vgg16_data_prep", "(", "p", ")", "vgg16_c", "=", "_vgg16_data_prep", "(", "c", ")", "# 
vgg forward", "p_vgg_outputs", "=", "vgg_model", "(", "vgg16_p", ")", "c_vgg_outputs", "=", "vgg_model", "(", "vgg16_c", ")", "c_content_layer", "=", "c_vgg_outputs", "[", "vgg_content_loss_layer", "]", "p_content_layer", "=", "p_vgg_outputs", "[", "vgg_content_loss_layer", "]", "# Calculate Loss", "# Style Loss between style image and stylized image", "# Ls = sum of L2 norm of gram matrix of vgg16's conv layers", "style_losses", "=", "[", "]", "for", "gram", ",", "p_vgg_output", ",", "style_loss_mult", "in", "zip", "(", "ctx_grams", "[", "c", ".", "context", "]", ",", "p_vgg_outputs", ",", "_style_loss_mult", ")", ":", "gram_s_vgg", "=", "gram", "[", "indices", "]", "gram_p_vgg", "=", "_gram_matrix", "(", "p_vgg_output", ")", "style_losses", ".", "append", "(", "style_loss_mult", "*", "mse_loss", "(", "gram_s_vgg", ",", "gram_p_vgg", ")", ")", "style_loss", "=", "_mx", ".", "nd", ".", "add_n", "(", "*", "style_losses", ")", "# Content Loss between content image and stylized image", "# Lc = L2 norm at a single layer in vgg16", "content_loss", "=", "_content_loss_mult", "*", "mse_loss", "(", "c_content_layer", ",", "p_content_layer", ")", "curr_content_loss", ".", "append", "(", "content_loss", ")", "curr_style_loss", ".", "append", "(", "style_loss", ")", "# Divide loss by large number to get into a more legible", "# range", "total_loss", "=", "(", "content_loss", "+", "style_loss", ")", "/", "10000.0", "Ls", ".", "append", "(", "total_loss", ")", "for", "L", "in", "Ls", ":", "L", ".", "backward", "(", ")", "cur_loss", "=", "_np", ".", "mean", "(", "[", "L", ".", "asnumpy", "(", ")", "[", "0", "]", "for", "L", "in", "Ls", "]", ")", "if", "smoothed_loss", "is", "None", ":", "smoothed_loss", "=", "cur_loss", "else", ":", "smoothed_loss", "=", "0.9", "*", "smoothed_loss", "+", "0.1", "*", "cur_loss", "iterations", "+=", "1", "trainer", ".", "step", "(", "batch_size", ")", "if", "verbose", "and", "iterations", "==", "1", ":", "# Print progress table header", "column_names", "=", "[", "'Iteration'", ",", "'Loss'", ",", "'Elapsed Time'", "]", "num_columns", "=", "len", "(", "column_names", ")", "column_width", "=", "max", "(", "map", "(", "lambda", "x", ":", "len", "(", "x", ")", ",", "column_names", ")", ")", "+", "2", "hr", "=", "'+'", "+", "'+'", ".", "join", "(", "[", "'-'", "*", "column_width", "]", "*", "num_columns", ")", "+", "'+'", "print", "(", "hr", ")", "print", "(", "(", "'| {:<{width}}'", "*", "num_columns", "+", "'|'", ")", ".", "format", "(", "*", "column_names", ",", "width", "=", "column_width", "-", "1", ")", ")", "print", "(", "hr", ")", "cur_time", "=", "_time", ".", "time", "(", ")", "if", "verbose", "and", "(", "cur_time", ">", "last_time", "+", "10", "or", "iterations", "==", "max_iterations", ")", ":", "# Print progress table row", "elapsed_time", "=", "cur_time", "-", "start_time", "print", "(", "\"| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|\"", ".", "format", "(", "cur_iter", "=", "iterations", ",", "loss", "=", "smoothed_loss", ",", "time", "=", "elapsed_time", ",", "width", "=", "column_width", "-", "1", ")", ")", "if", "params", "[", "'print_loss_breakdown'", "]", ":", "print_content_loss", "=", "_np", ".", "mean", "(", "[", "L", ".", "asnumpy", "(", ")", "[", "0", "]", "for", "L", "in", "curr_content_loss", "]", ")", "print_style_loss", "=", "_np", ".", "mean", "(", "[", "L", ".", "asnumpy", "(", ")", "[", "0", "]", "for", "L", "in", "curr_style_loss", "]", ")", "print", "(", "'Total Loss: {:6.3f} | Content Loss: {:6.3f} | Style Loss: {:6.3f}'", 
".", "format", "(", "cur_loss", ",", "print_content_loss", ",", "print_style_loss", ")", ")", "last_time", "=", "cur_time", "if", "iterations", "==", "max_iterations", ":", "print", "(", "hr", ")", "break", "training_time", "=", "_time", ".", "time", "(", ")", "-", "start_time", "style_sa", "=", "style_dataset", "[", "style_feature", "]", "idx_column", "=", "_tc", ".", "SArray", "(", "range", "(", "0", ",", "style_sa", ".", "shape", "[", "0", "]", ")", ")", "style_sframe", "=", "_tc", ".", "SFrame", "(", "{", "\"style\"", ":", "idx_column", ",", "style_feature", ":", "style_sa", "}", ")", "# Save the model state", "state", "=", "{", "'_model'", ":", "transformer", ",", "'_training_time_as_string'", ":", "_seconds_as_string", "(", "training_time", ")", ",", "'batch_size'", ":", "batch_size", ",", "'num_styles'", ":", "num_styles", ",", "'model'", ":", "model", ",", "'input_image_shape'", ":", "input_shape", ",", "'styles'", ":", "style_sframe", ",", "'num_content_images'", ":", "len", "(", "content_dataset", ")", ",", "'training_time'", ":", "training_time", ",", "'max_iterations'", ":", "max_iterations", ",", "'training_iterations'", ":", "iterations", ",", "'training_epochs'", ":", "content_images_loader", ".", "cur_epoch", ",", "'style_feature'", ":", "style_feature", ",", "'content_feature'", ":", "content_feature", ",", "\"_index_column\"", ":", "\"style\"", ",", "'training_loss'", ":", "smoothed_loss", ",", "}", "return", "StyleTransfer", "(", "state", ")" ]
Create a :class:`StyleTransfer` model. Parameters ---------- style_dataset: SFrame Input style images. The columns named by the ``style_feature`` parameters will be extracted for training the model. content_dataset : SFrame Input content images. The columns named by the ``content_feature`` parameters will be extracted for training the model. style_feature: string Name of the column containing the input images in style SFrame. 'None' (the default) indicates the only image column in the style SFrame should be used as the feature. content_feature: string Name of the column containing the input images in content SFrame. 'None' (the default) indicates the only image column in the content SFrame should be used as the feature. max_iterations : int The number of training iterations. If 'None' (the default), then it will be automatically determined based on the amount of data you provide. model : string optional Style transfer model to use: - "resnet-16" : Fast and small-sized residual network that uses VGG-16 as reference network during training. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve training throughput. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : StyleTransfer A trained :class:`StyleTransfer` model. See Also -------- StyleTransfer Examples -------- .. sourcecode:: python # Create datasets >>> content_dataset = turicreate.image_analysis.load_images('content_images/') >>> style_dataset = turicreate.image_analysis.load_images('style_images/') # Train a style transfer model >>> model = turicreate.style_transfer.create(content_dataset, style_dataset) # Stylize an image on all styles >>> stylized_images = model.stylize(data) # Visualize the stylized images >>> stylized_images.explore()
[ "Create", "a", ":", "class", ":", "StyleTransfer", "model", "." ]
python
train
39.059783
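A short usage sketch of the kwargs handling in the code above: besides the documented arguments, the function accepts an undocumented `_advanced_parameters` dict whose keys must be a subset of the internal `params` defaults, otherwise a ToolkitError('Unknown advanced parameters: ...') is raised. The dataset paths below are hypothetical.

import turicreate as tc

# Hypothetical image folders; any SFrame with a single image column works here.
style = tc.image_analysis.load_images('style_images/')
content = tc.image_analysis.load_images('content_images/')

# 'print_loss_breakdown' and 'lr' are keys of the internal params dict above.
model = tc.style_transfer.create(
    style_dataset=style, content_dataset=content,
    max_iterations=500,
    _advanced_parameters={'print_loss_breakdown': True, 'lr': 0.0005})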
pyca/pyopenssl
src/OpenSSL/SSL.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/SSL.py#L931-L943
def use_certificate(self, cert): """ Load a certificate from a X509 object :param cert: The X509 object :return: None """ if not isinstance(cert, X509): raise TypeError("cert must be an X509 instance") use_result = _lib.SSL_CTX_use_certificate(self._context, cert._x509) if not use_result: _raise_current_error()
[ "def", "use_certificate", "(", "self", ",", "cert", ")", ":", "if", "not", "isinstance", "(", "cert", ",", "X509", ")", ":", "raise", "TypeError", "(", "\"cert must be an X509 instance\"", ")", "use_result", "=", "_lib", ".", "SSL_CTX_use_certificate", "(", "self", ".", "_context", ",", "cert", ".", "_x509", ")", "if", "not", "use_result", ":", "_raise_current_error", "(", ")" ]
Load a certificate from a X509 object :param cert: The X509 object :return: None
[ "Load", "a", "certificate", "from", "a", "X509", "object" ]
python
test
30
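A minimal usage sketch for the method above; the certificate and key file names are assumptions.

from OpenSSL import SSL, crypto

ctx = SSL.Context(SSL.TLSv1_2_METHOD)
with open('server.crt', 'rb') as f:            # hypothetical PEM file
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
ctx.use_certificate(cert)                      # must be an X509 instance, else TypeError
ctx.use_privatekey_file('server.key')          # the matching private key is still required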
peri-source/peri
peri/viz/plots.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/viz/plots.py#L1056-L1103
def circles(st, layer, axis, ax=None, talpha=1.0, cedge='white', cface='white'): """ Plots a set of circles corresponding to a slice through the platonic structure. Copied from twoslice_overlay with comments, standaloneness. Inputs ------ pos : array of particle positions; [N,3] rad : array of particle radii; [N] ax : plt.axis instance layer : Which layer of the slice to use. axis : The slice of the image, 0, 1, or 2. cedge : edge color cface : face color talpha : Alpha of the thing """ pos = st.obj_get_positions() rad = st.obj_get_radii() shape = st.ishape.shape.tolist() shape.pop(axis) #shape is now the shape of the image if ax is None: fig = plt.figure() axisbg = 'white' if cface == 'black' else 'black' sx, sy = ((1,shape[1]/float(shape[0])) if shape[0] > shape[1] else (shape[0]/float(shape[1]), 1)) ax = fig.add_axes((0,0, sx, sy), axisbg=axisbg) # get the index of the particles we want to include particles = np.arange(len(pos))[np.abs(pos[:,axis] - layer) < rad] # for each of these particles display the effective radius # in the proper place scale = 1.0 #np.max(shape).astype('float') for i in particles: p = pos[i].copy() r = 2*np.sqrt(rad[i]**2 - (p[axis] - layer)**2) #CIRCLE IS IN FIGURE COORDINATES!!! if axis==0: ix = 1; iy = 2 elif axis == 1: ix = 0; iy = 2 elif axis==2: ix = 0; iy = 1 c = Circle((p[ix]/scale, p[iy]/scale), radius=r/2/scale, fc=cface, ec=cedge, alpha=talpha) ax.add_patch(c) # plt.axis([0,1,0,1]) plt.axis('equal') #circles not ellipses return ax
[ "def", "circles", "(", "st", ",", "layer", ",", "axis", ",", "ax", "=", "None", ",", "talpha", "=", "1.0", ",", "cedge", "=", "'white'", ",", "cface", "=", "'white'", ")", ":", "pos", "=", "st", ".", "obj_get_positions", "(", ")", "rad", "=", "st", ".", "obj_get_radii", "(", ")", "shape", "=", "st", ".", "ishape", ".", "shape", ".", "tolist", "(", ")", "shape", ".", "pop", "(", "axis", ")", "#shape is now the shape of the image", "if", "ax", "is", "None", ":", "fig", "=", "plt", ".", "figure", "(", ")", "axisbg", "=", "'white'", "if", "cface", "==", "'black'", "else", "'black'", "sx", ",", "sy", "=", "(", "(", "1", ",", "shape", "[", "1", "]", "/", "float", "(", "shape", "[", "0", "]", ")", ")", "if", "shape", "[", "0", "]", ">", "shape", "[", "1", "]", "else", "(", "shape", "[", "0", "]", "/", "float", "(", "shape", "[", "1", "]", ")", ",", "1", ")", ")", "ax", "=", "fig", ".", "add_axes", "(", "(", "0", ",", "0", ",", "sx", ",", "sy", ")", ",", "axisbg", "=", "axisbg", ")", "# get the index of the particles we want to include", "particles", "=", "np", ".", "arange", "(", "len", "(", "pos", ")", ")", "[", "np", ".", "abs", "(", "pos", "[", ":", ",", "axis", "]", "-", "layer", ")", "<", "rad", "]", "# for each of these particles display the effective radius", "# in the proper place", "scale", "=", "1.0", "#np.max(shape).astype('float')", "for", "i", "in", "particles", ":", "p", "=", "pos", "[", "i", "]", ".", "copy", "(", ")", "r", "=", "2", "*", "np", ".", "sqrt", "(", "rad", "[", "i", "]", "**", "2", "-", "(", "p", "[", "axis", "]", "-", "layer", ")", "**", "2", ")", "#CIRCLE IS IN FIGURE COORDINATES!!!", "if", "axis", "==", "0", ":", "ix", "=", "1", "iy", "=", "2", "elif", "axis", "==", "1", ":", "ix", "=", "0", "iy", "=", "2", "elif", "axis", "==", "2", ":", "ix", "=", "0", "iy", "=", "1", "c", "=", "Circle", "(", "(", "p", "[", "ix", "]", "/", "scale", ",", "p", "[", "iy", "]", "/", "scale", ")", ",", "radius", "=", "r", "/", "2", "/", "scale", ",", "fc", "=", "cface", ",", "ec", "=", "cedge", ",", "alpha", "=", "talpha", ")", "ax", ".", "add_patch", "(", "c", ")", "# plt.axis([0,1,0,1])", "plt", ".", "axis", "(", "'equal'", ")", "#circles not ellipses", "return", "ax" ]
Plots a set of circles corresponding to a slice through the platonic
    structure. Copied from twoslice_overlay with comments, standaloneness.

    Inputs
    ------
    st : peri state object; particle positions and radii are read from it
    layer : Which layer of the slice to use.
    axis : The slice of the image, 0, 1, or 2.
    ax : plt.axis instance; a new figure is created if None
    talpha : Alpha (opacity) of the drawn circles
    cedge : edge color
    cface : face color
[ "Plots", "a", "set", "of", "circles", "corresponding", "to", "a", "slice", "through", "the", "platonic", "structure", ".", "Copied", "from", "twoslice_overlay", "with", "comments", "standaloneness", "." ]
python
valid
36.5625
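A hedged usage sketch; `st` is assumed to be an already-fitted peri state (for example one produced by peri.runner), which is not constructed here.

import matplotlib.pyplot as plt
from peri.viz.plots import circles

# st: an optimized peri state providing obj_get_positions() / obj_get_radii()
ax = circles(st, layer=16, axis=0, cedge='white', cface='none', talpha=0.8)
plt.show()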
inspirehep/inspire-query-parser
inspire_query_parser/visitors/elastic_search_visitor.py
https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L753-L794
def visit_exact_match_value(self, node, fieldnames=None): """Generates a term query (exact search in ElasticSearch).""" if not fieldnames: fieldnames = ['_all'] else: fieldnames = force_list(fieldnames) if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['exact-author'] == fieldnames[0]: return self._generate_exact_author_query(node.value) elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['type-code'] == fieldnames[0]: return self._generate_type_code_query(node.value) elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['journal'] == fieldnames: return self._generate_journal_nested_queries(node.value) bai_fieldnames = self._generate_fieldnames_if_bai_query( node.value, bai_field_variation=FieldVariations.raw, query_bai_field_if_dots_in_name=False ) if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['date'] == fieldnames: term_queries = [] for field in fieldnames: term_query = \ {'term': {field: _truncate_date_value_according_on_date_field(field, node.value).dumps()}} term_queries.append( generate_nested_query(ElasticSearchVisitor.DATE_NESTED_QUERY_PATH, term_query) if field in ElasticSearchVisitor.DATE_NESTED_FIELDS else term_query ) elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] in fieldnames: term_queries = [ generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, {'term': {field: node.value}}) for field in (bai_fieldnames or fieldnames) ] else: term_queries = [{'term': {field: node.value}} for field in (bai_fieldnames or fieldnames)] return wrap_queries_in_bool_clauses_if_more_than_one(term_queries, use_must_clause=False)
[ "def", "visit_exact_match_value", "(", "self", ",", "node", ",", "fieldnames", "=", "None", ")", ":", "if", "not", "fieldnames", ":", "fieldnames", "=", "[", "'_all'", "]", "else", ":", "fieldnames", "=", "force_list", "(", "fieldnames", ")", "if", "ElasticSearchVisitor", ".", "KEYWORD_TO_ES_FIELDNAME", "[", "'exact-author'", "]", "==", "fieldnames", "[", "0", "]", ":", "return", "self", ".", "_generate_exact_author_query", "(", "node", ".", "value", ")", "elif", "ElasticSearchVisitor", ".", "KEYWORD_TO_ES_FIELDNAME", "[", "'type-code'", "]", "==", "fieldnames", "[", "0", "]", ":", "return", "self", ".", "_generate_type_code_query", "(", "node", ".", "value", ")", "elif", "ElasticSearchVisitor", ".", "KEYWORD_TO_ES_FIELDNAME", "[", "'journal'", "]", "==", "fieldnames", ":", "return", "self", ".", "_generate_journal_nested_queries", "(", "node", ".", "value", ")", "bai_fieldnames", "=", "self", ".", "_generate_fieldnames_if_bai_query", "(", "node", ".", "value", ",", "bai_field_variation", "=", "FieldVariations", ".", "raw", ",", "query_bai_field_if_dots_in_name", "=", "False", ")", "if", "ElasticSearchVisitor", ".", "KEYWORD_TO_ES_FIELDNAME", "[", "'date'", "]", "==", "fieldnames", ":", "term_queries", "=", "[", "]", "for", "field", "in", "fieldnames", ":", "term_query", "=", "{", "'term'", ":", "{", "field", ":", "_truncate_date_value_according_on_date_field", "(", "field", ",", "node", ".", "value", ")", ".", "dumps", "(", ")", "}", "}", "term_queries", ".", "append", "(", "generate_nested_query", "(", "ElasticSearchVisitor", ".", "DATE_NESTED_QUERY_PATH", ",", "term_query", ")", "if", "field", "in", "ElasticSearchVisitor", ".", "DATE_NESTED_FIELDS", "else", "term_query", ")", "elif", "ElasticSearchVisitor", ".", "KEYWORD_TO_ES_FIELDNAME", "[", "'author'", "]", "in", "fieldnames", ":", "term_queries", "=", "[", "generate_nested_query", "(", "ElasticSearchVisitor", ".", "AUTHORS_NESTED_QUERY_PATH", ",", "{", "'term'", ":", "{", "field", ":", "node", ".", "value", "}", "}", ")", "for", "field", "in", "(", "bai_fieldnames", "or", "fieldnames", ")", "]", "else", ":", "term_queries", "=", "[", "{", "'term'", ":", "{", "field", ":", "node", ".", "value", "}", "}", "for", "field", "in", "(", "bai_fieldnames", "or", "fieldnames", ")", "]", "return", "wrap_queries_in_bool_clauses_if_more_than_one", "(", "term_queries", ",", "use_must_clause", "=", "False", ")" ]
Generates a term query (exact search in ElasticSearch).
[ "Generates", "a", "term", "query", "(", "exact", "search", "in", "ElasticSearch", ")", "." ]
python
train
46.333333
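An illustrative sketch (not part of the visitor itself) of the fall-through branch above, which emits one Elasticsearch term query per field name; the field name used here is a hypothetical example.

# Mirrors the final else-branch for an ordinary (non-author, non-date) field.
node_value = 'electroweak baryogenesis'        # example query value
fieldnames = ['abstracts.value']               # hypothetical ES field name
term_queries = [{'term': {field: node_value}} for field in fieldnames]
# -> [{'term': {'abstracts.value': 'electroweak baryogenesis'}}]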
faucamp/python-gsmmodem
tools/gsmtermlib/terminal.py
https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/tools/gsmtermlib/terminal.py#L352-L356
def _handleDelete(self): """ Handles "delete" characters """ if self.cursorPos < len(self.inputBuffer): self.inputBuffer = self.inputBuffer[0:self.cursorPos] + self.inputBuffer[self.cursorPos+1:] self._refreshInputPrompt(len(self.inputBuffer)+1)
[ "def", "_handleDelete", "(", "self", ")", ":", "if", "self", ".", "cursorPos", "<", "len", "(", "self", ".", "inputBuffer", ")", ":", "self", ".", "inputBuffer", "=", "self", ".", "inputBuffer", "[", "0", ":", "self", ".", "cursorPos", "]", "+", "self", ".", "inputBuffer", "[", "self", ".", "cursorPos", "+", "1", ":", "]", "self", ".", "_refreshInputPrompt", "(", "len", "(", "self", ".", "inputBuffer", ")", "+", "1", ")" ]
Handles "delete" characters
[ "Handles", "delete", "characters" ]
python
train
61
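A standalone sketch of the buffer edit performed above: the character under the cursor is deleted by rejoining the two slices around it.

inputBuffer, cursorPos = 'ATDT12345', 3
if cursorPos < len(inputBuffer):
    inputBuffer = inputBuffer[0:cursorPos] + inputBuffer[cursorPos + 1:]
print(inputBuffer)  # ATD12345 -- the character at index 3 is removed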
deepmipt/DeepPavlov
deeppavlov/utils/alexa/ssl_tools.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/ssl_tools.py#L29-L58
def verify_sc_url(url: str) -> bool: """Verify signature certificate URL against Amazon Alexa requirements. Each call of Agent passes incoming utterances batch through skills filter, agent skills, skills processor. Batch of dialog IDs can be provided, in other case utterances indexes in incoming batch are used as dialog IDs. Args: url: Signature certificate URL from SignatureCertChainUrl HTTP header. Returns: result: True if verification was successful, False if not. """ parsed = urlsplit(url) scheme: str = parsed.scheme netloc: str = parsed.netloc path: str = parsed.path try: port = parsed.port except ValueError: port = None result = (scheme.lower() == 'https' and netloc.lower().split(':')[0] == 's3.amazonaws.com' and path.startswith('/echo.api/') and (port == 443 or port is None)) return result
[ "def", "verify_sc_url", "(", "url", ":", "str", ")", "->", "bool", ":", "parsed", "=", "urlsplit", "(", "url", ")", "scheme", ":", "str", "=", "parsed", ".", "scheme", "netloc", ":", "str", "=", "parsed", ".", "netloc", "path", ":", "str", "=", "parsed", ".", "path", "try", ":", "port", "=", "parsed", ".", "port", "except", "ValueError", ":", "port", "=", "None", "result", "=", "(", "scheme", ".", "lower", "(", ")", "==", "'https'", "and", "netloc", ".", "lower", "(", ")", ".", "split", "(", "':'", ")", "[", "0", "]", "==", "'s3.amazonaws.com'", "and", "path", ".", "startswith", "(", "'/echo.api/'", ")", "and", "(", "port", "==", "443", "or", "port", "is", "None", ")", ")", "return", "result" ]
Verify signature certificate URL against Amazon Alexa requirements.

    Args:
        url: Signature certificate URL from SignatureCertChainUrl HTTP header.

    Returns:
        result: True if verification was successful, False if not.
[ "Verify", "signature", "certificate", "URL", "against", "Amazon", "Alexa", "requirements", "." ]
python
test
30.633333
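Illustrative checks against the rules implemented above (https scheme, s3.amazonaws.com host, /echo.api/ path prefix, port 443 or no port); the certificate file names are placeholders.

verify_sc_url('https://s3.amazonaws.com/echo.api/echo-api-cert-6-ats.pem')      # True
verify_sc_url('https://s3.amazonaws.com:443/echo.api/echo-api-cert-6-ats.pem')  # True (explicit 443)
verify_sc_url('http://s3.amazonaws.com/echo.api/echo-api-cert.pem')             # False (scheme)
verify_sc_url('https://s3.amazonaws.com/not-echo.api/cert.pem')                 # False (path)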
EmbodiedCognition/py-c3d
c3d.py
https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L1023-L1094
def write(self, handle): '''Write metadata and point + analog frames to a file handle. Parameters ---------- handle : file Write metadata and C3D motion frames to the given file handle. The writer does not close the handle. ''' if not self._frames: return def add(name, desc, bpe, format, bytes, *dimensions): group.add_param(name, desc=desc, bytes_per_element=bpe, bytes=struct.pack(format, bytes), dimensions=list(dimensions)) def add_str(name, desc, bytes, *dimensions): group.add_param(name, desc=desc, bytes_per_element=-1, bytes=bytes.encode('utf-8'), dimensions=list(dimensions)) def add_empty_array(name, desc, bpe): group.add_param(name, desc=desc, bytes_per_element=bpe, dimensions=[0]) points, analog = self._frames[0] ppf = len(points) # POINT group group = self.add_group(1, 'POINT', 'POINT group') add('USED', 'Number of 3d markers', 2, '<H', ppf) add('FRAMES', 'frame count', 2, '<H', min(65535, len(self._frames))) add('DATA_START', 'data block number', 2, '<H', 0) add('SCALE', '3d scale factor', 4, '<f', self._point_scale) add('RATE', '3d data capture rate', 4, '<f', self._point_rate) add_str('X_SCREEN', 'X_SCREEN parameter', '+X', 2) add_str('Y_SCREEN', 'Y_SCREEN parameter', '+Y', 2) add_str('UNITS', '3d data units', self._point_units, len(self._point_units)) add_str('LABELS', 'labels', ''.join('M%03d ' % i for i in range(ppf)), 5, ppf) add_str('DESCRIPTIONS', 'descriptions', ' ' * 16 * ppf, 16, ppf) # ANALOG group group = self.add_group(2, 'ANALOG', 'ANALOG group') add('USED', 'analog channel count', 2, '<H', analog.shape[0]) add('RATE', 'analog samples per 3d frame', 4, '<f', analog.shape[1]) add('GEN_SCALE', 'analog general scale factor', 4, '<f', self._gen_scale) add_empty_array('SCALE', 'analog channel scale factors', 4) add_empty_array('OFFSET', 'analog channel offsets', 2) # TRIAL group group = self.add_group(3, 'TRIAL', 'TRIAL group') add('ACTUAL_START_FIELD', 'actual start frame', 2, '<I', 1, 2) add('ACTUAL_END_FIELD', 'actual end frame', 2, '<I', len(self._frames), 2) # sync parameter information to header. blocks = self.parameter_blocks() self.get('POINT:DATA_START').bytes = struct.pack('<H', 2 + blocks) self.header.data_block = 2 + blocks self.header.frame_rate = self._point_rate self.header.last_frame = min(len(self._frames), 65535) self.header.point_count = ppf self.header.analog_count = np.prod(analog.shape) self.header.analog_per_frame = analog.shape[0] self.header.scale_factor = self._point_scale self._write_metadata(handle) self._write_frames(handle)
[ "def", "write", "(", "self", ",", "handle", ")", ":", "if", "not", "self", ".", "_frames", ":", "return", "def", "add", "(", "name", ",", "desc", ",", "bpe", ",", "format", ",", "bytes", ",", "*", "dimensions", ")", ":", "group", ".", "add_param", "(", "name", ",", "desc", "=", "desc", ",", "bytes_per_element", "=", "bpe", ",", "bytes", "=", "struct", ".", "pack", "(", "format", ",", "bytes", ")", ",", "dimensions", "=", "list", "(", "dimensions", ")", ")", "def", "add_str", "(", "name", ",", "desc", ",", "bytes", ",", "*", "dimensions", ")", ":", "group", ".", "add_param", "(", "name", ",", "desc", "=", "desc", ",", "bytes_per_element", "=", "-", "1", ",", "bytes", "=", "bytes", ".", "encode", "(", "'utf-8'", ")", ",", "dimensions", "=", "list", "(", "dimensions", ")", ")", "def", "add_empty_array", "(", "name", ",", "desc", ",", "bpe", ")", ":", "group", ".", "add_param", "(", "name", ",", "desc", "=", "desc", ",", "bytes_per_element", "=", "bpe", ",", "dimensions", "=", "[", "0", "]", ")", "points", ",", "analog", "=", "self", ".", "_frames", "[", "0", "]", "ppf", "=", "len", "(", "points", ")", "# POINT group", "group", "=", "self", ".", "add_group", "(", "1", ",", "'POINT'", ",", "'POINT group'", ")", "add", "(", "'USED'", ",", "'Number of 3d markers'", ",", "2", ",", "'<H'", ",", "ppf", ")", "add", "(", "'FRAMES'", ",", "'frame count'", ",", "2", ",", "'<H'", ",", "min", "(", "65535", ",", "len", "(", "self", ".", "_frames", ")", ")", ")", "add", "(", "'DATA_START'", ",", "'data block number'", ",", "2", ",", "'<H'", ",", "0", ")", "add", "(", "'SCALE'", ",", "'3d scale factor'", ",", "4", ",", "'<f'", ",", "self", ".", "_point_scale", ")", "add", "(", "'RATE'", ",", "'3d data capture rate'", ",", "4", ",", "'<f'", ",", "self", ".", "_point_rate", ")", "add_str", "(", "'X_SCREEN'", ",", "'X_SCREEN parameter'", ",", "'+X'", ",", "2", ")", "add_str", "(", "'Y_SCREEN'", ",", "'Y_SCREEN parameter'", ",", "'+Y'", ",", "2", ")", "add_str", "(", "'UNITS'", ",", "'3d data units'", ",", "self", ".", "_point_units", ",", "len", "(", "self", ".", "_point_units", ")", ")", "add_str", "(", "'LABELS'", ",", "'labels'", ",", "''", ".", "join", "(", "'M%03d '", "%", "i", "for", "i", "in", "range", "(", "ppf", ")", ")", ",", "5", ",", "ppf", ")", "add_str", "(", "'DESCRIPTIONS'", ",", "'descriptions'", ",", "' '", "*", "16", "*", "ppf", ",", "16", ",", "ppf", ")", "# ANALOG group", "group", "=", "self", ".", "add_group", "(", "2", ",", "'ANALOG'", ",", "'ANALOG group'", ")", "add", "(", "'USED'", ",", "'analog channel count'", ",", "2", ",", "'<H'", ",", "analog", ".", "shape", "[", "0", "]", ")", "add", "(", "'RATE'", ",", "'analog samples per 3d frame'", ",", "4", ",", "'<f'", ",", "analog", ".", "shape", "[", "1", "]", ")", "add", "(", "'GEN_SCALE'", ",", "'analog general scale factor'", ",", "4", ",", "'<f'", ",", "self", ".", "_gen_scale", ")", "add_empty_array", "(", "'SCALE'", ",", "'analog channel scale factors'", ",", "4", ")", "add_empty_array", "(", "'OFFSET'", ",", "'analog channel offsets'", ",", "2", ")", "# TRIAL group", "group", "=", "self", ".", "add_group", "(", "3", ",", "'TRIAL'", ",", "'TRIAL group'", ")", "add", "(", "'ACTUAL_START_FIELD'", ",", "'actual start frame'", ",", "2", ",", "'<I'", ",", "1", ",", "2", ")", "add", "(", "'ACTUAL_END_FIELD'", ",", "'actual end frame'", ",", "2", ",", "'<I'", ",", "len", "(", "self", ".", "_frames", ")", ",", "2", ")", "# sync parameter information to header.", "blocks", "=", "self", ".", "parameter_blocks", "(", ")", "self", ".", "get", 
"(", "'POINT:DATA_START'", ")", ".", "bytes", "=", "struct", ".", "pack", "(", "'<H'", ",", "2", "+", "blocks", ")", "self", ".", "header", ".", "data_block", "=", "2", "+", "blocks", "self", ".", "header", ".", "frame_rate", "=", "self", ".", "_point_rate", "self", ".", "header", ".", "last_frame", "=", "min", "(", "len", "(", "self", ".", "_frames", ")", ",", "65535", ")", "self", ".", "header", ".", "point_count", "=", "ppf", "self", ".", "header", ".", "analog_count", "=", "np", ".", "prod", "(", "analog", ".", "shape", ")", "self", ".", "header", ".", "analog_per_frame", "=", "analog", ".", "shape", "[", "0", "]", "self", ".", "header", ".", "scale_factor", "=", "self", ".", "_point_scale", "self", ".", "_write_metadata", "(", "handle", ")", "self", ".", "_write_frames", "(", "handle", ")" ]
Write metadata and point + analog frames to a file handle. Parameters ---------- handle : file Write metadata and C3D motion frames to the given file handle. The writer does not close the handle.
[ "Write", "metadata", "and", "point", "+", "analog", "frames", "to", "a", "file", "handle", "." ]
python
train
43.333333
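A minimal writer sketch, assuming the Writer/add_frames API of this module; the exact point and analog array shapes are assumptions inferred from the code above (one row per marker, analog indexed as [channels, samples]).

import numpy as np
import c3d  # this module

writer = c3d.Writer(point_rate=100.0)
for _ in range(100):
    points = np.random.randn(4, 5).astype(np.float32)    # 4 markers: x, y, z, err, cam
    analog = np.empty((0, 0), dtype=np.float32)          # no analog channels
    writer.add_frames([(points, analog)])
with open('random.c3d', 'wb') as handle:
    writer.write(handle)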
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L4006-L4096
def sort_values(self, by=None, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also ndarray.np.sort for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. Returns ------- sorted_obj : DataFrame or None DataFrame with sorted values if inplace=False, None otherwise. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }) >>> df col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 3 NaN 8 4 4 D 7 2 5 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1 3 NaN 8 4 Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 3 NaN 8 4 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1 """ raise NotImplementedError("sort_values has not been implemented " "on Panel or Panel4D objects.")
[ "def", "sort_values", "(", "self", ",", "by", "=", "None", ",", "axis", "=", "0", ",", "ascending", "=", "True", ",", "inplace", "=", "False", ",", "kind", "=", "'quicksort'", ",", "na_position", "=", "'last'", ")", ":", "raise", "NotImplementedError", "(", "\"sort_values has not been implemented \"", "\"on Panel or Panel4D objects.\"", ")" ]
Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also ndarray.np.sort for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. Returns ------- sorted_obj : DataFrame or None DataFrame with sorted values if inplace=False, None otherwise. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }) >>> df col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 3 NaN 8 4 4 D 7 2 5 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1 3 NaN 8 4 Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 3 NaN 8 4 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1
[ "Sort", "by", "the", "values", "along", "either", "axis", "." ]
python
train
30.252747
mottosso/be
be/vendor/requests/sessions.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/sessions.py#L538-L615
def send(self, request, **kwargs): """Send a given PreparedRequest.""" # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. kwargs.setdefault('stream', self.stream) kwargs.setdefault('verify', self.verify) kwargs.setdefault('cert', self.cert) kwargs.setdefault('proxies', self.proxies) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. if not isinstance(request, PreparedRequest): raise ValueError('You can only send PreparedRequests.') checked_urls = set() while request.url in self.redirect_cache: checked_urls.add(request.url) new_url = self.redirect_cache.get(request.url) if new_url in checked_urls: break request.url = new_url # Set up variables needed for resolve_redirects and dispatching of hooks allow_redirects = kwargs.pop('allow_redirects', True) stream = kwargs.get('stream') timeout = kwargs.get('timeout') verify = kwargs.get('verify') cert = kwargs.get('cert') proxies = kwargs.get('proxies') hooks = request.hooks # Get the appropriate adapter to use adapter = self.get_adapter(url=request.url) # Start time (approximately) of the request start = datetime.utcnow() # Send the request r = adapter.send(request, **kwargs) # Total elapsed time of the request (approximately) r.elapsed = datetime.utcnow() - start # Response manipulation hooks r = dispatch_hook('response', hooks, r, **kwargs) # Persist cookies if r.history: # If the hooks create history then we want those cookies too for resp in r.history: extract_cookies_to_jar(self.cookies, resp.request, resp.raw) extract_cookies_to_jar(self.cookies, request, r.raw) # Redirect resolving generator. gen = self.resolve_redirects(r, request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies) # Resolve redirects if allowed. history = [resp for resp in gen] if allow_redirects else [] # Shuffle things around if there's history. if history: # Insert the first (original) request at the start history.insert(0, r) # Get the last request made r = history.pop() r.history = history if not stream: r.content return r
[ "def", "send", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "# Set defaults that the hooks can utilize to ensure they always have", "# the correct parameters to reproduce the previous request.", "kwargs", ".", "setdefault", "(", "'stream'", ",", "self", ".", "stream", ")", "kwargs", ".", "setdefault", "(", "'verify'", ",", "self", ".", "verify", ")", "kwargs", ".", "setdefault", "(", "'cert'", ",", "self", ".", "cert", ")", "kwargs", ".", "setdefault", "(", "'proxies'", ",", "self", ".", "proxies", ")", "# It's possible that users might accidentally send a Request object.", "# Guard against that specific failure case.", "if", "not", "isinstance", "(", "request", ",", "PreparedRequest", ")", ":", "raise", "ValueError", "(", "'You can only send PreparedRequests.'", ")", "checked_urls", "=", "set", "(", ")", "while", "request", ".", "url", "in", "self", ".", "redirect_cache", ":", "checked_urls", ".", "add", "(", "request", ".", "url", ")", "new_url", "=", "self", ".", "redirect_cache", ".", "get", "(", "request", ".", "url", ")", "if", "new_url", "in", "checked_urls", ":", "break", "request", ".", "url", "=", "new_url", "# Set up variables needed for resolve_redirects and dispatching of hooks", "allow_redirects", "=", "kwargs", ".", "pop", "(", "'allow_redirects'", ",", "True", ")", "stream", "=", "kwargs", ".", "get", "(", "'stream'", ")", "timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ")", "verify", "=", "kwargs", ".", "get", "(", "'verify'", ")", "cert", "=", "kwargs", ".", "get", "(", "'cert'", ")", "proxies", "=", "kwargs", ".", "get", "(", "'proxies'", ")", "hooks", "=", "request", ".", "hooks", "# Get the appropriate adapter to use", "adapter", "=", "self", ".", "get_adapter", "(", "url", "=", "request", ".", "url", ")", "# Start time (approximately) of the request", "start", "=", "datetime", ".", "utcnow", "(", ")", "# Send the request", "r", "=", "adapter", ".", "send", "(", "request", ",", "*", "*", "kwargs", ")", "# Total elapsed time of the request (approximately)", "r", ".", "elapsed", "=", "datetime", ".", "utcnow", "(", ")", "-", "start", "# Response manipulation hooks", "r", "=", "dispatch_hook", "(", "'response'", ",", "hooks", ",", "r", ",", "*", "*", "kwargs", ")", "# Persist cookies", "if", "r", ".", "history", ":", "# If the hooks create history then we want those cookies too", "for", "resp", "in", "r", ".", "history", ":", "extract_cookies_to_jar", "(", "self", ".", "cookies", ",", "resp", ".", "request", ",", "resp", ".", "raw", ")", "extract_cookies_to_jar", "(", "self", ".", "cookies", ",", "request", ",", "r", ".", "raw", ")", "# Redirect resolving generator.", "gen", "=", "self", ".", "resolve_redirects", "(", "r", ",", "request", ",", "stream", "=", "stream", ",", "timeout", "=", "timeout", ",", "verify", "=", "verify", ",", "cert", "=", "cert", ",", "proxies", "=", "proxies", ")", "# Resolve redirects if allowed.", "history", "=", "[", "resp", "for", "resp", "in", "gen", "]", "if", "allow_redirects", "else", "[", "]", "# Shuffle things around if there's history.", "if", "history", ":", "# Insert the first (original) request at the start", "history", ".", "insert", "(", "0", ",", "r", ")", "# Get the last request made", "r", "=", "history", ".", "pop", "(", ")", "r", ".", "history", "=", "history", "if", "not", "stream", ":", "r", ".", "content", "return", "r" ]
Send a given PreparedRequest.
[ "Send", "a", "given", "PreparedRequest", "." ]
python
train
34.089744
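A hedged sketch of the prepared-request flow this method expects; the URL is a placeholder.

import requests

session = requests.Session()
req = requests.Request('GET', 'https://example.com/some/redirecting/url')
prepped = session.prepare_request(req)          # send() only accepts PreparedRequests
resp = session.send(prepped, timeout=10, allow_redirects=True)
print(resp.status_code, len(resp.history))      # final response plus any redirect history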
jaraco/jaraco.itertools
jaraco/itertools.py
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L379-L401
def grouper_nofill_str(n, iterable): """ Take a sequence and break it up into chunks of the specified size. The last chunk may be smaller than size. This works very similar to grouper_nofill, except it works with strings as well. >>> tuple(grouper_nofill_str(3, 'foobarbaz')) ('foo', 'bar', 'baz') You can still use it on non-strings too if you like. >>> tuple(grouper_nofill_str(42, [])) () >>> tuple(grouper_nofill_str(3, list(range(10)))) ([0, 1, 2], [3, 4, 5], [6, 7, 8], [9]) """ res = more_itertools.chunked(iterable, n) if isinstance(iterable, six.string_types): res = (''.join(item) for item in res) return res
[ "def", "grouper_nofill_str", "(", "n", ",", "iterable", ")", ":", "res", "=", "more_itertools", ".", "chunked", "(", "iterable", ",", "n", ")", "if", "isinstance", "(", "iterable", ",", "six", ".", "string_types", ")", ":", "res", "=", "(", "''", ".", "join", "(", "item", ")", "for", "item", "in", "res", ")", "return", "res" ]
Take a sequence and break it up into chunks of the specified size. The last chunk may be smaller than size. This works very similar to grouper_nofill, except it works with strings as well. >>> tuple(grouper_nofill_str(3, 'foobarbaz')) ('foo', 'bar', 'baz') You can still use it on non-strings too if you like. >>> tuple(grouper_nofill_str(42, [])) () >>> tuple(grouper_nofill_str(3, list(range(10)))) ([0, 1, 2], [3, 4, 5], [6, 7, 8], [9])
[ "Take", "a", "sequence", "and", "break", "it", "up", "into", "chunks", "of", "the", "specified", "size", ".", "The", "last", "chunk", "may", "be", "smaller", "than", "size", "." ]
python
test
26.913043
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/distro_lib/ip_forwarding_utils.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/distro_lib/ip_forwarding_utils.py#L262-L282
def GetForwardedIps(self, interface, interface_ip=None): """Retrieve the list of configured forwarded IP addresses. Args: interface: string, the output device to query. interface_ip: string, current interface ip address. Returns: list, the IP address strings. """ try: ips = netifaces.ifaddresses(interface) ips = ips[netifaces.AF_INET] except (ValueError, IndexError): return [] forwarded_ips = [] for ip in ips: if ip['addr'] != interface_ip: full_addr = '%s/%d' % (ip['addr'], netaddr.IPAddress(ip['netmask']).netmask_bits()) forwarded_ips.append(full_addr) return self.ParseForwardedIps(forwarded_ips)
[ "def", "GetForwardedIps", "(", "self", ",", "interface", ",", "interface_ip", "=", "None", ")", ":", "try", ":", "ips", "=", "netifaces", ".", "ifaddresses", "(", "interface", ")", "ips", "=", "ips", "[", "netifaces", ".", "AF_INET", "]", "except", "(", "ValueError", ",", "IndexError", ")", ":", "return", "[", "]", "forwarded_ips", "=", "[", "]", "for", "ip", "in", "ips", ":", "if", "ip", "[", "'addr'", "]", "!=", "interface_ip", ":", "full_addr", "=", "'%s/%d'", "%", "(", "ip", "[", "'addr'", "]", ",", "netaddr", ".", "IPAddress", "(", "ip", "[", "'netmask'", "]", ")", ".", "netmask_bits", "(", ")", ")", "forwarded_ips", ".", "append", "(", "full_addr", ")", "return", "self", ".", "ParseForwardedIps", "(", "forwarded_ips", ")" ]
Retrieve the list of configured forwarded IP addresses. Args: interface: string, the output device to query. interface_ip: string, current interface ip address. Returns: list, the IP address strings.
[ "Retrieve", "the", "list", "of", "configured", "forwarded", "IP", "addresses", "." ]
python
train
32.428571
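An illustrative standalone sketch of the lookup performed above; the interface name and primary address are assumptions.

import netaddr
import netifaces

interface, interface_ip = 'eth0', '10.128.0.2'
addrs = netifaces.ifaddresses(interface).get(netifaces.AF_INET, [])
forwarded = ['%s/%d' % (a['addr'], netaddr.IPAddress(a['netmask']).netmask_bits())
             for a in addrs if a['addr'] != interface_ip]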
CodeReclaimers/neat-python
examples/xor/evolve-feedforward-parallel.py
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/examples/xor/evolve-feedforward-parallel.py#L32-L50
def eval_genome(genome, config): """ This function will be run in parallel by ParallelEvaluator. It takes two arguments (a single genome and the genome class configuration data) and should return one float (that genome's fitness). Note that this function needs to be in module scope for multiprocessing.Pool (which is what ParallelEvaluator uses) to find it. Because of this, make sure you check for __main__ before executing any code (as we do here in the last few lines in the file), otherwise you'll have made a fork bomb instead of a neuroevolution demo. :) """ net = neat.nn.FeedForwardNetwork.create(genome, config) error = 4.0 for xi, xo in zip(xor_inputs, xor_outputs): output = net.activate(xi) error -= (output[0] - xo[0]) ** 2 return error
[ "def", "eval_genome", "(", "genome", ",", "config", ")", ":", "net", "=", "neat", ".", "nn", ".", "FeedForwardNetwork", ".", "create", "(", "genome", ",", "config", ")", "error", "=", "4.0", "for", "xi", ",", "xo", "in", "zip", "(", "xor_inputs", ",", "xor_outputs", ")", ":", "output", "=", "net", ".", "activate", "(", "xi", ")", "error", "-=", "(", "output", "[", "0", "]", "-", "xo", "[", "0", "]", ")", "**", "2", "return", "error" ]
This function will be run in parallel by ParallelEvaluator. It takes two arguments (a single genome and the genome class configuration data) and should return one float (that genome's fitness). Note that this function needs to be in module scope for multiprocessing.Pool (which is what ParallelEvaluator uses) to find it. Because of this, make sure you check for __main__ before executing any code (as we do here in the last few lines in the file), otherwise you'll have made a fork bomb instead of a neuroevolution demo. :)
[ "This", "function", "will", "be", "run", "in", "parallel", "by", "ParallelEvaluator", ".", "It", "takes", "two", "arguments", "(", "a", "single", "genome", "and", "the", "genome", "class", "configuration", "data", ")", "and", "should", "return", "one", "float", "(", "that", "genome", "s", "fitness", ")", "." ]
python
train
42.526316
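A sketch of how eval_genome plugs into the ParallelEvaluator mentioned in the docstring; the config file name is an assumption taken from the usual example layout.

import multiprocessing
import neat

config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'config-feedforward')                    # assumed config path
pop = neat.Population(config)
pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)
winner = pop.run(pe.evaluate, 300)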
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L422-L544
def quantize_model(sym, arg_params, aux_params, data_names=('data',), label_names=('softmax_label',), ctx=cpu(), excluded_sym_names=None, calib_mode='entropy', calib_data=None, num_calib_examples=None, calib_layer=None, quantized_dtype='int8', logger=logging): """User-level API for generating a quantized model from a FP32 model w/ or w/o calibration. The backend quantized operators are only enabled for Linux systems. Please do not run inference using the quantized models on Windows for now. The quantization implementation adopts the TensorFlow's approach: https://www.tensorflow.org/performance/quantization. The calibration implementation borrows the idea of Nvidia's 8-bit Inference with TensorRT: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf and adapts the method to MXNet. Parameters ---------- sym : str or Symbol Defines the structure of a neural network for FP32 data types. arg_params : dict Dictionary of name to `NDArray`. aux_params : dict Dictionary of name to `NDArray`. data_names : a list of strs Data names required for creating a Module object to run forward propagation on the calibration dataset. label_names : a list of strs Label names required for creating a Module object to run forward propagation on the calibration dataset. ctx : Context Defines the device that users want to run forward propagation on the calibration dataset for collecting layer output statistics. Currently, only supports single context. excluded_sym_names : list of strings A list of strings representing the names of the symbols that users want to excluding from being quantized. calib_mode : str If calib_mode='none', no calibration will be used and the thresholds for requantization after the corresponding layers will be calculated at runtime by calling min and max operators. The quantized models generated in this mode are normally 10-20% slower than those with calibrations during inference. If calib_mode='naive', the min and max values of the layer outputs from a calibration dataset will be directly taken as the thresholds for quantization. If calib_mode='entropy' (default mode), the thresholds for quantization will be derived such that the KL divergence between the distributions of FP32 layer outputs and quantized layer outputs is minimized based upon the calibration dataset. calib_data : DataIter A data iterator initialized by the calibration dataset. num_calib_examples : int or None The maximum number of examples that user would like to use for calibration. If not provided, the whole calibration dataset will be used. calib_layer : function Given a layer's output name in string, return True or False for deciding whether to calibrate this layer. If yes, the statistics of the layer's output will be collected; otherwise, no information of the layer's output will be collected. If not provided, all the layers' outputs that need requantization will be collected. quantized_dtype : str The quantized destination type for input data. Currently support 'int8' , 'uint8' and 'auto'. 'auto' means automatically select output type according to calibration result. Default value is 'int8'. logger : Object A logging object for printing information during the process of quantization. Returns ------- tuple A tuple of quantized symbol, quantized arg_params, and aux_params. 
------- """ if excluded_sym_names is None: excluded_sym_names = [] if not isinstance(excluded_sym_names, list): raise ValueError('excluded_sym_names must be a list of strings representing' ' the names of the symbols that will not be quantized,' ' while received type %s' % str(type(excluded_sym_names))) logger.info('Quantizing symbol') if quantized_dtype not in ('int8', 'uint8', 'auto'): raise ValueError('unknown quantized_dtype %s received,' ' expected `int8`, `uint8` or `auto`' % quantized_dtype) qsym = _quantize_symbol(sym, excluded_symbols=excluded_sym_names, offline_params=list(arg_params.keys()), quantized_dtype=quantized_dtype) th_dict = {} if calib_mode is not None and calib_mode != 'none': if not isinstance(ctx, Context): raise ValueError('currently only supports single ctx, while received %s' % str(ctx)) if calib_data is None: raise ValueError('calib_data must be provided when calib_mode=%s' % calib_mode) if not isinstance(calib_data, DataIter): raise ValueError('calib_data must be of DataIter type when calib_mode=%s,' ' while received type %s' % (calib_mode, str(type(calib_data)))) mod = Module(symbol=sym, data_names=data_names, label_names=label_names, context=ctx) if len(calib_data.provide_label) > 0: mod.bind(for_training=False, data_shapes=calib_data.provide_data, label_shapes=calib_data.provide_label) else: mod.bind(for_training=False, data_shapes=calib_data.provide_data) mod.set_params(arg_params, aux_params) if calib_mode == 'entropy': nd_dict, num_examples = _collect_layer_outputs(mod, calib_data, include_layer=calib_layer, max_num_examples=num_calib_examples, logger=logger) logger.info('Collected layer outputs from FP32 model using %d examples' % num_examples) logger.info('Calculating optimal thresholds for quantization') th_dict = _get_optimal_thresholds(nd_dict, quantized_dtype, logger=logger) elif calib_mode == 'naive': th_dict, num_examples = _collect_layer_output_min_max( mod, calib_data, include_layer=calib_layer, max_num_examples=num_calib_examples, logger=logger) logger.info('Collected layer output min/max values from FP32 model using %d examples' % num_examples) else: raise ValueError('unknown calibration mode %s received,' ' expected `none`, `naive`, or `entropy`' % calib_mode) logger.info('Calibrating quantized symbol') qsym = _calibrate_quantized_sym(qsym, th_dict) logger.info('Quantizing parameters') qarg_params = _quantize_params(qsym, arg_params, th_dict) return qsym, qarg_params, aux_params
[ "def", "quantize_model", "(", "sym", ",", "arg_params", ",", "aux_params", ",", "data_names", "=", "(", "'data'", ",", ")", ",", "label_names", "=", "(", "'softmax_label'", ",", ")", ",", "ctx", "=", "cpu", "(", ")", ",", "excluded_sym_names", "=", "None", ",", "calib_mode", "=", "'entropy'", ",", "calib_data", "=", "None", ",", "num_calib_examples", "=", "None", ",", "calib_layer", "=", "None", ",", "quantized_dtype", "=", "'int8'", ",", "logger", "=", "logging", ")", ":", "if", "excluded_sym_names", "is", "None", ":", "excluded_sym_names", "=", "[", "]", "if", "not", "isinstance", "(", "excluded_sym_names", ",", "list", ")", ":", "raise", "ValueError", "(", "'excluded_sym_names must be a list of strings representing'", "' the names of the symbols that will not be quantized,'", "' while received type %s'", "%", "str", "(", "type", "(", "excluded_sym_names", ")", ")", ")", "logger", ".", "info", "(", "'Quantizing symbol'", ")", "if", "quantized_dtype", "not", "in", "(", "'int8'", ",", "'uint8'", ",", "'auto'", ")", ":", "raise", "ValueError", "(", "'unknown quantized_dtype %s received,'", "' expected `int8`, `uint8` or `auto`'", "%", "quantized_dtype", ")", "qsym", "=", "_quantize_symbol", "(", "sym", ",", "excluded_symbols", "=", "excluded_sym_names", ",", "offline_params", "=", "list", "(", "arg_params", ".", "keys", "(", ")", ")", ",", "quantized_dtype", "=", "quantized_dtype", ")", "th_dict", "=", "{", "}", "if", "calib_mode", "is", "not", "None", "and", "calib_mode", "!=", "'none'", ":", "if", "not", "isinstance", "(", "ctx", ",", "Context", ")", ":", "raise", "ValueError", "(", "'currently only supports single ctx, while received %s'", "%", "str", "(", "ctx", ")", ")", "if", "calib_data", "is", "None", ":", "raise", "ValueError", "(", "'calib_data must be provided when calib_mode=%s'", "%", "calib_mode", ")", "if", "not", "isinstance", "(", "calib_data", ",", "DataIter", ")", ":", "raise", "ValueError", "(", "'calib_data must be of DataIter type when calib_mode=%s,'", "' while received type %s'", "%", "(", "calib_mode", ",", "str", "(", "type", "(", "calib_data", ")", ")", ")", ")", "mod", "=", "Module", "(", "symbol", "=", "sym", ",", "data_names", "=", "data_names", ",", "label_names", "=", "label_names", ",", "context", "=", "ctx", ")", "if", "len", "(", "calib_data", ".", "provide_label", ")", ">", "0", ":", "mod", ".", "bind", "(", "for_training", "=", "False", ",", "data_shapes", "=", "calib_data", ".", "provide_data", ",", "label_shapes", "=", "calib_data", ".", "provide_label", ")", "else", ":", "mod", ".", "bind", "(", "for_training", "=", "False", ",", "data_shapes", "=", "calib_data", ".", "provide_data", ")", "mod", ".", "set_params", "(", "arg_params", ",", "aux_params", ")", "if", "calib_mode", "==", "'entropy'", ":", "nd_dict", ",", "num_examples", "=", "_collect_layer_outputs", "(", "mod", ",", "calib_data", ",", "include_layer", "=", "calib_layer", ",", "max_num_examples", "=", "num_calib_examples", ",", "logger", "=", "logger", ")", "logger", ".", "info", "(", "'Collected layer outputs from FP32 model using %d examples'", "%", "num_examples", ")", "logger", ".", "info", "(", "'Calculating optimal thresholds for quantization'", ")", "th_dict", "=", "_get_optimal_thresholds", "(", "nd_dict", ",", "quantized_dtype", ",", "logger", "=", "logger", ")", "elif", "calib_mode", "==", "'naive'", ":", "th_dict", ",", "num_examples", "=", "_collect_layer_output_min_max", "(", "mod", ",", "calib_data", ",", "include_layer", "=", "calib_layer", ",", "max_num_examples", "=", 
"num_calib_examples", ",", "logger", "=", "logger", ")", "logger", ".", "info", "(", "'Collected layer output min/max values from FP32 model using %d examples'", "%", "num_examples", ")", "else", ":", "raise", "ValueError", "(", "'unknown calibration mode %s received,'", "' expected `none`, `naive`, or `entropy`'", "%", "calib_mode", ")", "logger", ".", "info", "(", "'Calibrating quantized symbol'", ")", "qsym", "=", "_calibrate_quantized_sym", "(", "qsym", ",", "th_dict", ")", "logger", ".", "info", "(", "'Quantizing parameters'", ")", "qarg_params", "=", "_quantize_params", "(", "qsym", ",", "arg_params", ",", "th_dict", ")", "return", "qsym", ",", "qarg_params", ",", "aux_params" ]
User-level API for generating a quantized model from a FP32 model w/ or w/o calibration. The backend quantized operators are only enabled for Linux systems. Please do not run inference using the quantized models on Windows for now. The quantization implementation adopts the TensorFlow's approach: https://www.tensorflow.org/performance/quantization. The calibration implementation borrows the idea of Nvidia's 8-bit Inference with TensorRT: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf and adapts the method to MXNet. Parameters ---------- sym : str or Symbol Defines the structure of a neural network for FP32 data types. arg_params : dict Dictionary of name to `NDArray`. aux_params : dict Dictionary of name to `NDArray`. data_names : a list of strs Data names required for creating a Module object to run forward propagation on the calibration dataset. label_names : a list of strs Label names required for creating a Module object to run forward propagation on the calibration dataset. ctx : Context Defines the device that users want to run forward propagation on the calibration dataset for collecting layer output statistics. Currently, only supports single context. excluded_sym_names : list of strings A list of strings representing the names of the symbols that users want to excluding from being quantized. calib_mode : str If calib_mode='none', no calibration will be used and the thresholds for requantization after the corresponding layers will be calculated at runtime by calling min and max operators. The quantized models generated in this mode are normally 10-20% slower than those with calibrations during inference. If calib_mode='naive', the min and max values of the layer outputs from a calibration dataset will be directly taken as the thresholds for quantization. If calib_mode='entropy' (default mode), the thresholds for quantization will be derived such that the KL divergence between the distributions of FP32 layer outputs and quantized layer outputs is minimized based upon the calibration dataset. calib_data : DataIter A data iterator initialized by the calibration dataset. num_calib_examples : int or None The maximum number of examples that user would like to use for calibration. If not provided, the whole calibration dataset will be used. calib_layer : function Given a layer's output name in string, return True or False for deciding whether to calibrate this layer. If yes, the statistics of the layer's output will be collected; otherwise, no information of the layer's output will be collected. If not provided, all the layers' outputs that need requantization will be collected. quantized_dtype : str The quantized destination type for input data. Currently support 'int8' , 'uint8' and 'auto'. 'auto' means automatically select output type according to calibration result. Default value is 'int8'. logger : Object A logging object for printing information during the process of quantization. Returns ------- tuple A tuple of quantized symbol, quantized arg_params, and aux_params. -------
[ "User", "-", "level", "API", "for", "generating", "a", "quantized", "model", "from", "a", "FP32", "model", "w", "/", "or", "w", "/", "o", "calibration", ".", "The", "backend", "quantized", "operators", "are", "only", "enabled", "for", "Linux", "systems", ".", "Please", "do", "not", "run", "inference", "using", "the", "quantized", "models", "on", "Windows", "for", "now", ".", "The", "quantization", "implementation", "adopts", "the", "TensorFlow", "s", "approach", ":", "https", ":", "//", "www", ".", "tensorflow", ".", "org", "/", "performance", "/", "quantization", ".", "The", "calibration", "implementation", "borrows", "the", "idea", "of", "Nvidia", "s", "8", "-", "bit", "Inference", "with", "TensorRT", ":", "http", ":", "//", "on", "-", "demand", ".", "gputechconf", ".", "com", "/", "gtc", "/", "2017", "/", "presentation", "/", "s7310", "-", "8", "-", "bit", "-", "inference", "-", "with", "-", "tensorrt", ".", "pdf", "and", "adapts", "the", "method", "to", "MXNet", "." ]
python
train
55.98374
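A minimal usage sketch for the quantize_model API in the record above. The checkpoint prefix, calibration record file, and shapes are hypothetical placeholders; only the quantize_model signature is taken from the source.

# Hedged example: load a hypothetical FP32 checkpoint and quantize it with naive calibration.
import logging
import mxnet as mx
from mxnet.contrib.quantization import quantize_model

sym, arg_params, aux_params = mx.model.load_checkpoint('resnet-50', 0)      # hypothetical prefix/epoch
calib_iter = mx.io.ImageRecordIter(path_imgrec='calib.rec',                  # hypothetical calibration data
                                   data_shape=(3, 224, 224),
                                   batch_size=32, label_width=1)
qsym, qarg_params, aux_params = quantize_model(
    sym, arg_params, aux_params,
    ctx=mx.cpu(),
    excluded_sym_names=[],
    calib_mode='naive',
    calib_data=calib_iter,
    num_calib_examples=320,
    quantized_dtype='int8',
    logger=logging)
mx.model.save_checkpoint('resnet-50-quantized', 0, qsym, qarg_params, aux_params)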
idlesign/django-dev
django_dev/dev.py
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L162-L176
def make_venv(self, dj_version):
    """Creates a virtual environment for a given Django version.

    :param str dj_version:
    :rtype: str
    :return: path to created virtual env

    """
    venv_path = self._get_venv_path(dj_version)
    self.logger.info('Creating virtual environment for Django %s ...' % dj_version)
    try:
        create_venv(venv_path, **VENV_CREATE_KWARGS)
    except ValueError:
        self.logger.warning('Virtual environment directory already exists. Skipped.')
    self.venv_install('django==%s' % dj_version, venv_path)
    return venv_path
[ "def", "make_venv", "(", "self", ",", "dj_version", ")", ":", "venv_path", "=", "self", ".", "_get_venv_path", "(", "dj_version", ")", "self", ".", "logger", ".", "info", "(", "'Creating virtual environment for Django %s ...'", "%", "dj_version", ")", "try", ":", "create_venv", "(", "venv_path", ",", "*", "*", "VENV_CREATE_KWARGS", ")", "except", "ValueError", ":", "self", ".", "logger", ".", "warning", "(", "'Virtual environment directory already exists. Skipped.'", ")", "self", ".", "venv_install", "(", "'django==%s'", "%", "dj_version", ",", "venv_path", ")", "return", "venv_path" ]
Creates a virtual environment for a given Django version. :param str dj_version: :rtype: str :return: path to created virtual env
[ "Creates", "a", "virtual", "environment", "for", "a", "given", "Django", "version", "." ]
python
train
40.8
RedHatInsights/insights-core
insights/contrib/importlib.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/importlib.py#L6-L17
def _resolve_name(name, package, level):
    """Return the absolute name of the module to be imported."""
    if not hasattr(package, 'rindex'):
        raise ValueError("'package' not set to a string")
    dot = len(package)
    for x in xrange(level, 1, -1):
        try:
            dot = package.rindex('.', 0, dot)
        except ValueError:
            raise ValueError("attempted relative import beyond top-level "
                             "package")
    return "%s.%s" % (package[:dot], name)
[ "def", "_resolve_name", "(", "name", ",", "package", ",", "level", ")", ":", "if", "not", "hasattr", "(", "package", ",", "'rindex'", ")", ":", "raise", "ValueError", "(", "\"'package' not set to a string\"", ")", "dot", "=", "len", "(", "package", ")", "for", "x", "in", "xrange", "(", "level", ",", "1", ",", "-", "1", ")", ":", "try", ":", "dot", "=", "package", ".", "rindex", "(", "'.'", ",", "0", ",", "dot", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"attempted relative import beyond top-level \"", "\"package\"", ")", "return", "\"%s.%s\"", "%", "(", "package", "[", ":", "dot", "]", ",", "name", ")" ]
Return the absolute name of the module to be imported.
[ "Return", "the", "absolute", "name", "of", "the", "module", "to", "be", "imported", "." ]
python
train
41.166667
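A small worked example of the relative-import resolution above; the package and module names are invented to show how level strips trailing package components.

# _resolve_name walks (level - 1) dots up from `package` before appending `name`.
# Illustrative calls only (Python 2 era code, hence xrange in the source):
_resolve_name('siblingmod', 'pkg.sub', 2)      # -> 'pkg.siblingmod'
_resolve_name('util', 'pkg.sub.inner', 3)      # -> 'pkg.util'
_resolve_name('util', 'pkg', 2)                # raises ValueError: beyond top-level package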
todbot/blink1-python
blink1/blink1.py
https://github.com/todbot/blink1-python/blob/7a5183becd9662f88da3c29afd3447403f4ef82f/blink1/blink1.py#L274-L282
def readPattern(self):
    """Read the entire color pattern
    :return List of pattern line tuples
    """
    if ( self.dev == None ): return ''
    pattern=[]
    for i in range(0,16):  # FIXME: adjustable for diff blink(1) models
        pattern.append( self.readPatternLine(i) )
    return pattern
[ "def", "readPattern", "(", "self", ")", ":", "if", "(", "self", ".", "dev", "==", "None", ")", ":", "return", "''", "pattern", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "16", ")", ":", "# FIXME: adjustable for diff blink(1) models", "pattern", ".", "append", "(", "self", ".", "readPatternLine", "(", "i", ")", ")", "return", "pattern" ]
Read the entire color pattern :return List of pattern line tuples
[ "Read", "the", "entire", "color", "pattern", ":", "return", "List", "of", "pattern", "line", "tuples" ]
python
train
36.444444
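One possible way to call readPattern from user code, assuming the Blink1 class exposed by this module and an attached device; the import path and the shape of each pattern-line tuple are assumptions, not taken from this record.

from blink1.blink1 import Blink1   # import path assumed from this repo's layout

b1 = Blink1()                      # opens the first attached blink(1), if any
pattern = b1.readPattern()         # one tuple per pattern line (color and timing fields)
for line in pattern:
    print(line)
b1.close()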
abseil/abseil-py
absl/flags/_validators.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_validators.py#L387-L421
def mark_flags_as_mutual_exclusive(flag_names, required=False, flag_values=_flagvalues.FLAGS): """Ensures that only one flag among flag_names is not None. Important note: This validator checks if flag values are None, and it does not distinguish between default and explicit values. Therefore, this validator does not make sense when applied to flags with default values other than None, including other false values (e.g. False, 0, '', []). That includes multi flags with a default value of [] instead of None. Args: flag_names: [str], names of the flags. required: bool. If true, exactly one of the flags must have a value other than None. Otherwise, at most one of the flags can have a value other than None, and it is valid for all of the flags to be None. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined. """ for flag_name in flag_names: if flag_values[flag_name].default is not None: warnings.warn( 'Flag --{} has a non-None default value. That does not make sense ' 'with mark_flags_as_mutual_exclusive, which checks whether the ' 'listed flags have a value other than None.'.format(flag_name)) def validate_mutual_exclusion(flags_dict): flag_count = sum(1 for val in flags_dict.values() if val is not None) if flag_count == 1 or (not required and flag_count == 0): return True raise _exceptions.ValidationError( '{} one of ({}) must have a value other than None.'.format( 'Exactly' if required else 'At most', ', '.join(flag_names))) register_multi_flags_validator( flag_names, validate_mutual_exclusion, flag_values=flag_values)
[ "def", "mark_flags_as_mutual_exclusive", "(", "flag_names", ",", "required", "=", "False", ",", "flag_values", "=", "_flagvalues", ".", "FLAGS", ")", ":", "for", "flag_name", "in", "flag_names", ":", "if", "flag_values", "[", "flag_name", "]", ".", "default", "is", "not", "None", ":", "warnings", ".", "warn", "(", "'Flag --{} has a non-None default value. That does not make sense '", "'with mark_flags_as_mutual_exclusive, which checks whether the '", "'listed flags have a value other than None.'", ".", "format", "(", "flag_name", ")", ")", "def", "validate_mutual_exclusion", "(", "flags_dict", ")", ":", "flag_count", "=", "sum", "(", "1", "for", "val", "in", "flags_dict", ".", "values", "(", ")", "if", "val", "is", "not", "None", ")", "if", "flag_count", "==", "1", "or", "(", "not", "required", "and", "flag_count", "==", "0", ")", ":", "return", "True", "raise", "_exceptions", ".", "ValidationError", "(", "'{} one of ({}) must have a value other than None.'", ".", "format", "(", "'Exactly'", "if", "required", "else", "'At most'", ",", "', '", ".", "join", "(", "flag_names", ")", ")", ")", "register_multi_flags_validator", "(", "flag_names", ",", "validate_mutual_exclusion", ",", "flag_values", "=", "flag_values", ")" ]
Ensures that only one flag among flag_names is not None. Important note: This validator checks if flag values are None, and it does not distinguish between default and explicit values. Therefore, this validator does not make sense when applied to flags with default values other than None, including other false values (e.g. False, 0, '', []). That includes multi flags with a default value of [] instead of None. Args: flag_names: [str], names of the flags. required: bool. If true, exactly one of the flags must have a value other than None. Otherwise, at most one of the flags can have a value other than None, and it is valid for all of the flags to be None. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined.
[ "Ensures", "that", "only", "one", "flag", "among", "flag_names", "is", "not", "None", "." ]
python
train
49.257143
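A short sketch of how this validator is typically wired up through the public absl.flags API; the flag names are invented for illustration.

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string('config_file', None, 'Path to a config file.')
flags.DEFINE_string('config_json', None, 'Inline JSON config.')

# At most one of the two may be set; pass required=True to demand exactly one.
flags.mark_flags_as_mutual_exclusive(['config_file', 'config_json'])

def main(argv):
    del argv
    print(FLAGS.config_file, FLAGS.config_json)

if __name__ == '__main__':
    app.run(main)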
reincubate/ricloud
ricloud/asmaster_api.py
https://github.com/reincubate/ricloud/blob/e46bce4529fbdca34a4190c18c7219e937e2b697/ricloud/asmaster_api.py#L59-L66
def _set_allowed_services_and_actions(self, services):
    """Expect services to be a list of service dictionaries, each with `name` and `actions` keys."""
    for service in services:
        self.services[service['name']] = {}

        for action in service['actions']:
            name = action.pop('name')
            self.services[service['name']][name] = action
[ "def", "_set_allowed_services_and_actions", "(", "self", ",", "services", ")", ":", "for", "service", "in", "services", ":", "self", ".", "services", "[", "service", "[", "'name'", "]", "]", "=", "{", "}", "for", "action", "in", "service", "[", "'actions'", "]", ":", "name", "=", "action", ".", "pop", "(", "'name'", ")", "self", ".", "services", "[", "service", "[", "'name'", "]", "]", "[", "name", "]", "=", "action" ]
Expect services to be a list of service dictionaries, each with `name` and `actions` keys.
[ "Expect", "services", "to", "be", "a", "list", "of", "service", "dictionaries", "each", "with", "name", "and", "actions", "keys", "." ]
python
train
48
readbeyond/aeneas
aeneas/textfile.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/textfile.py#L879-L896
def _read_parsed(self, lines):
    """
    Read text fragments from a parsed format text file.

    :param list lines: the lines of the parsed text file
    :param dict parameters: additional parameters for parsing
                            (e.g., class/id regex strings)
    """
    self.log(u"Parsing fragments from parsed text format")
    pairs = []
    for line in lines:
        pieces = line.split(gc.PARSED_TEXT_SEPARATOR)
        if len(pieces) == 2:
            identifier = pieces[0].strip()
            text = pieces[1].strip()
            if len(identifier) > 0:
                pairs.append((identifier, [text]))
    self._create_text_fragments(pairs)
[ "def", "_read_parsed", "(", "self", ",", "lines", ")", ":", "self", ".", "log", "(", "u\"Parsing fragments from parsed text format\"", ")", "pairs", "=", "[", "]", "for", "line", "in", "lines", ":", "pieces", "=", "line", ".", "split", "(", "gc", ".", "PARSED_TEXT_SEPARATOR", ")", "if", "len", "(", "pieces", ")", "==", "2", ":", "identifier", "=", "pieces", "[", "0", "]", ".", "strip", "(", ")", "text", "=", "pieces", "[", "1", "]", ".", "strip", "(", ")", "if", "len", "(", "identifier", ")", ">", "0", ":", "pairs", ".", "append", "(", "(", "identifier", ",", "[", "text", "]", ")", ")", "self", ".", "_create_text_fragments", "(", "pairs", ")" ]
Read text fragments from a parsed format text file. :param list lines: the lines of the parsed text file :param dict parameters: additional parameters for parsing (e.g., class/id regex strings)
[ "Read", "text", "fragments", "from", "a", "parsed", "format", "text", "file", "." ]
python
train
39.666667
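To make the parsed-format contract concrete, here is a rough sketch of the input this reader expects; the '|' separator is an assumption standing in for gc.PARSED_TEXT_SEPARATOR.

# Each line is "<identifier><separator><text>"; lines without exactly one separator are skipped.
lines = [
    u"f000001|One small step for man",
    u"f000002|A giant leap for mankind",
    u"not a parsed line",           # no separator -> ignored
]
# After _read_parsed(lines), the fragments are built from:
# [("f000001", ["One small step for man"]),
#  ("f000002", ["A giant leap for mankind"])]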
wesyoung/pyzyre
czmq/_czmq_ctypes.py
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L4588-L4594
def addmsg(self, msg_p):
    """
    Push encoded message as a new frame. Message takes ownership of
submessage, so the original is destroyed in this call. Returns 0 on
success, -1 on error.
    """
    return lib.zmsg_addmsg(self._as_parameter_, byref(zmsg_p.from_param(msg_p)))
[ "def", "addmsg", "(", "self", ",", "msg_p", ")", ":", "return", "lib", ".", "zmsg_addmsg", "(", "self", ".", "_as_parameter_", ",", "byref", "(", "zmsg_p", ".", "from_param", "(", "msg_p", ")", ")", ")" ]
Push encoded message as a new frame. Message takes ownership of submessage, so the original is destroyed in this call. Returns 0 on success, -1 on error.
[ "Push", "encoded", "message", "as", "a", "new", "frame", ".", "Message", "takes", "ownership", "of", "submessage", "so", "the", "original", "is", "destroyed", "in", "this", "call", ".", "Returns", "0", "on", "success", "-", "1", "on", "error", "." ]
python
train
41.285714
trailofbits/manticore
manticore/platforms/linux.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/linux.py#L2262-L2273
def check_timers(self):
    """ Awake process if timer has expired """
    if self._current is None:
        # Advance the clocks. Go to future!!
        advance = min([self.clocks] + [x for x in self.timers if x is not None]) + 1
        logger.debug(f"Advancing the clock from {self.clocks} to {advance}")
        self.clocks = advance
    for procid in range(len(self.timers)):
        if self.timers[procid] is not None:
            if self.clocks > self.timers[procid]:
                self.procs[procid].PC += self.procs[procid].instruction.size
                self.awake(procid)
[ "def", "check_timers", "(", "self", ")", ":", "if", "self", ".", "_current", "is", "None", ":", "# Advance the clocks. Go to future!!", "advance", "=", "min", "(", "[", "self", ".", "clocks", "]", "+", "[", "x", "for", "x", "in", "self", ".", "timers", "if", "x", "is", "not", "None", "]", ")", "+", "1", "logger", ".", "debug", "(", "f\"Advancing the clock from {self.clocks} to {advance}\"", ")", "self", ".", "clocks", "=", "advance", "for", "procid", "in", "range", "(", "len", "(", "self", ".", "timers", ")", ")", ":", "if", "self", ".", "timers", "[", "procid", "]", "is", "not", "None", ":", "if", "self", ".", "clocks", ">", "self", ".", "timers", "[", "procid", "]", ":", "self", ".", "procs", "[", "procid", "]", ".", "PC", "+=", "self", ".", "procs", "[", "procid", "]", ".", "instruction", ".", "size", "self", ".", "awake", "(", "procid", ")" ]
Awake process if timer has expired
[ "Awake", "process", "if", "timer", "has", "expired" ]
python
valid
51.583333
bbusenius/Diablo-Python
simple_math/simple_math.py
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/simple_math/simple_math.py#L190-L218
def triangle_area(point1, point2, point3): """ Uses Heron's formula to find the area of a triangle based on the coordinates of three points. Args: point1: list or tuple, the x y coordinate of point one. point2: list or tuple, the x y coordinate of point two. point3: list or tuple, the x y coordinate of point three. Returns: The area of a triangle as a floating point number. Requires: The math module, point_distance(). """ """Lengths of the three sides of the triangle""" a = point_distance(point1, point2) b = point_distance(point1, point3) c = point_distance(point2, point3) """Where s is the semiperimeter""" s = (a + b + c) / 2.0 """Return the area of the triangle (using Heron's formula)""" return math.sqrt(s * (s - a) * (s - b) * (s - c))
[ "def", "triangle_area", "(", "point1", ",", "point2", ",", "point3", ")", ":", "\"\"\"Lengths of the three sides of the triangle\"\"\"", "a", "=", "point_distance", "(", "point1", ",", "point2", ")", "b", "=", "point_distance", "(", "point1", ",", "point3", ")", "c", "=", "point_distance", "(", "point2", ",", "point3", ")", "\"\"\"Where s is the semiperimeter\"\"\"", "s", "=", "(", "a", "+", "b", "+", "c", ")", "/", "2.0", "\"\"\"Return the area of the triangle (using Heron's formula)\"\"\"", "return", "math", ".", "sqrt", "(", "s", "*", "(", "s", "-", "a", ")", "*", "(", "s", "-", "b", ")", "*", "(", "s", "-", "c", ")", ")" ]
Uses Heron's formula to find the area of a triangle based on the coordinates of three points. Args: point1: list or tuple, the x y coordinate of point one. point2: list or tuple, the x y coordinate of point two. point3: list or tuple, the x y coordinate of point three. Returns: The area of a triangle as a floating point number. Requires: The math module, point_distance().
[ "Uses", "Heron", "s", "formula", "to", "find", "the", "area", "of", "a", "triangle", "based", "on", "the", "coordinates", "of", "three", "points", "." ]
python
valid
28.551724
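A quick worked check of Heron's formula as used above, with a 3-4-5 right triangle; the import path is assumed from this record, and point_distance is assumed to be the plain Euclidean distance helper from the same module.

from simple_math.simple_math import triangle_area   # import path assumed

# Right triangle with legs 3 and 4 -> area should be 6.0
p1 = (0.0, 0.0)
p2 = (3.0, 0.0)
p3 = (0.0, 4.0)
# a = 3, b = 4, c = 5, s = 6, area = sqrt(6 * 3 * 2 * 1) = 6.0
print(triangle_area(p1, p2, p3))   # -> 6.0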
saltstack/salt
salt/modules/boto_cognitoidentity.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_cognitoidentity.py#L289-L299
def _get_role_arn(name, **conn_params):
    '''
    Helper function to turn a name into an arn string,
    returns None if not able to resolve
    '''
    if name.startswith('arn:aws:iam'):
        return name

    role = __salt__['boto_iam.describe_role'](name, **conn_params)
    rolearn = role.get('arn') if role else None

    return rolearn
[ "def", "_get_role_arn", "(", "name", ",", "*", "*", "conn_params", ")", ":", "if", "name", ".", "startswith", "(", "'arn:aws:iam'", ")", ":", "return", "name", "role", "=", "__salt__", "[", "'boto_iam.describe_role'", "]", "(", "name", ",", "*", "*", "conn_params", ")", "rolearn", "=", "role", ".", "get", "(", "'arn'", ")", "if", "role", "else", "None", "return", "rolearn" ]
Helper function to turn a name into an arn string, returns None if not able to resolve
[ "Helper", "function", "to", "turn", "a", "name", "into", "an", "arn", "string", "returns", "None", "if", "not", "able", "to", "resolve" ]
python
train
30.363636
ulfalizer/Kconfiglib
examples/print_config_tree.py
https://github.com/ulfalizer/Kconfiglib/blob/9fe13c03de16c341cd7ed40167216207b821ea50/examples/print_config_tree.py#L70-L101
def value_str(sc): """ Returns the value part ("[*]", "<M>", "(foo)" etc.) of a menu entry. sc: Symbol or Choice. """ if sc.type in (STRING, INT, HEX): return "({})".format(sc.str_value) # BOOL or TRISTATE # The choice mode is an upper bound on the visibility of choice symbols, so # we can check the choice symbols' own visibility to see if the choice is # in y mode if isinstance(sc, Symbol) and sc.choice and sc.visibility == 2: # For choices in y mode, print '-->' next to the selected symbol return "-->" if sc.choice.selection is sc else " " tri_val_str = (" ", "M", "*")[sc.tri_value] if len(sc.assignable) == 1: # Pinned to a single value return "-{}-".format(tri_val_str) if sc.type == BOOL: return "[{}]".format(tri_val_str) if sc.type == TRISTATE: if sc.assignable == (1, 2): # m and y available return "{" + tri_val_str + "}" # Gets a bit confusing with .format() return "<{}>".format(tri_val_str)
[ "def", "value_str", "(", "sc", ")", ":", "if", "sc", ".", "type", "in", "(", "STRING", ",", "INT", ",", "HEX", ")", ":", "return", "\"({})\"", ".", "format", "(", "sc", ".", "str_value", ")", "# BOOL or TRISTATE", "# The choice mode is an upper bound on the visibility of choice symbols, so", "# we can check the choice symbols' own visibility to see if the choice is", "# in y mode", "if", "isinstance", "(", "sc", ",", "Symbol", ")", "and", "sc", ".", "choice", "and", "sc", ".", "visibility", "==", "2", ":", "# For choices in y mode, print '-->' next to the selected symbol", "return", "\"-->\"", "if", "sc", ".", "choice", ".", "selection", "is", "sc", "else", "\" \"", "tri_val_str", "=", "(", "\" \"", ",", "\"M\"", ",", "\"*\"", ")", "[", "sc", ".", "tri_value", "]", "if", "len", "(", "sc", ".", "assignable", ")", "==", "1", ":", "# Pinned to a single value", "return", "\"-{}-\"", ".", "format", "(", "tri_val_str", ")", "if", "sc", ".", "type", "==", "BOOL", ":", "return", "\"[{}]\"", ".", "format", "(", "tri_val_str", ")", "if", "sc", ".", "type", "==", "TRISTATE", ":", "if", "sc", ".", "assignable", "==", "(", "1", ",", "2", ")", ":", "# m and y available", "return", "\"{\"", "+", "tri_val_str", "+", "\"}\"", "# Gets a bit confusing with .format()", "return", "\"<{}>\"", ".", "format", "(", "tri_val_str", ")" ]
Returns the value part ("[*]", "<M>", "(foo)" etc.) of a menu entry. sc: Symbol or Choice.
[ "Returns", "the", "value", "part", "(", "[", "*", "]", "<M", ">", "(", "foo", ")", "etc", ".", ")", "of", "a", "menu", "entry", "." ]
python
train
32.25
ianmiell/shutit
shutit_class.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L3067-L3128
def load_mod_from_file(self, fpath): """Loads modules from a .py file into ShutIt if there are no modules from this file already. We expect to have a callable 'module/0' which returns one or more module objects. If this doesn't exist we assume that the .py file works in the old style (automatically inserting the module into shutit_global) or it's not a shutit module. """ shutit_global.shutit_global_object.yield_to_draw() fpath = os.path.abspath(fpath) file_ext = os.path.splitext(os.path.split(fpath)[-1])[-1] if file_ext.lower() != '.py': return with open(fpath) as f: content = f.read().splitlines() ok = False for line in content: if line.strip() == 'from shutit_module import ShutItModule': ok = True break if not ok: self.log('Rejected file: ' + fpath,level=logging.DEBUG) return # Note that this attribute will only be set for 'new style' module loading, # this should be ok because 'old style' loading checks for duplicate # existing modules. # TODO: this is quadratic complexity existingmodules = [ m for m in self.shutit_modules if getattr(m, '__module_file', None) == fpath ] if existingmodules: self.log('Module already seen: ' + fpath,level=logging.DEBUG) return # Looks like it's ok to load this file self.log('Loading source for: ' + fpath,level=logging.DEBUG) # Add this directory to the python path iff not already there. directory = os.path.dirname(fpath) if directory not in sys.path: sys.path.append(os.path.dirname(fpath)) # TODO: use bytearray to encode? mod_name = base64.b32encode(fpath.encode()).decode().replace('=', '') pymod = imp.load_source(mod_name, fpath) # Got the python module, now time to pull the shutit module(s) out of it. targets = [ ('module', self.shutit_modules), ('conn_module', self.conn_modules) ] self.build['source'] = {} for attr, target in targets: modulefunc = getattr(pymod, attr, None) # Old style or not a shutit module, nothing else to do if not callable(modulefunc): return modules = modulefunc() if not isinstance(modules, list): modules = [modules] for module in modules: setattr(module, '__module_file', fpath) ShutItModule.register(module.__class__) target.add(module) self.build['source'][fpath] = open(fpath).read()
[ "def", "load_mod_from_file", "(", "self", ",", "fpath", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "fpath", "=", "os", ".", "path", ".", "abspath", "(", "fpath", ")", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "fpath", ")", "[", "-", "1", "]", ")", "[", "-", "1", "]", "if", "file_ext", ".", "lower", "(", ")", "!=", "'.py'", ":", "return", "with", "open", "(", "fpath", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", ".", "splitlines", "(", ")", "ok", "=", "False", "for", "line", "in", "content", ":", "if", "line", ".", "strip", "(", ")", "==", "'from shutit_module import ShutItModule'", ":", "ok", "=", "True", "break", "if", "not", "ok", ":", "self", ".", "log", "(", "'Rejected file: '", "+", "fpath", ",", "level", "=", "logging", ".", "DEBUG", ")", "return", "# Note that this attribute will only be set for 'new style' module loading, # this should be ok because 'old style' loading checks for duplicate # existing modules.", "# TODO: this is quadratic complexity", "existingmodules", "=", "[", "m", "for", "m", "in", "self", ".", "shutit_modules", "if", "getattr", "(", "m", ",", "'__module_file'", ",", "None", ")", "==", "fpath", "]", "if", "existingmodules", ":", "self", ".", "log", "(", "'Module already seen: '", "+", "fpath", ",", "level", "=", "logging", ".", "DEBUG", ")", "return", "# Looks like it's ok to load this file", "self", ".", "log", "(", "'Loading source for: '", "+", "fpath", ",", "level", "=", "logging", ".", "DEBUG", ")", "# Add this directory to the python path iff not already there.", "directory", "=", "os", ".", "path", ".", "dirname", "(", "fpath", ")", "if", "directory", "not", "in", "sys", ".", "path", ":", "sys", ".", "path", ".", "append", "(", "os", ".", "path", ".", "dirname", "(", "fpath", ")", ")", "# TODO: use bytearray to encode?", "mod_name", "=", "base64", ".", "b32encode", "(", "fpath", ".", "encode", "(", ")", ")", ".", "decode", "(", ")", ".", "replace", "(", "'='", ",", "''", ")", "pymod", "=", "imp", ".", "load_source", "(", "mod_name", ",", "fpath", ")", "# Got the python module, now time to pull the shutit module(s) out of it.", "targets", "=", "[", "(", "'module'", ",", "self", ".", "shutit_modules", ")", ",", "(", "'conn_module'", ",", "self", ".", "conn_modules", ")", "]", "self", ".", "build", "[", "'source'", "]", "=", "{", "}", "for", "attr", ",", "target", "in", "targets", ":", "modulefunc", "=", "getattr", "(", "pymod", ",", "attr", ",", "None", ")", "# Old style or not a shutit module, nothing else to do", "if", "not", "callable", "(", "modulefunc", ")", ":", "return", "modules", "=", "modulefunc", "(", ")", "if", "not", "isinstance", "(", "modules", ",", "list", ")", ":", "modules", "=", "[", "modules", "]", "for", "module", "in", "modules", ":", "setattr", "(", "module", ",", "'__module_file'", ",", "fpath", ")", "ShutItModule", ".", "register", "(", "module", ".", "__class__", ")", "target", ".", "add", "(", "module", ")", "self", ".", "build", "[", "'source'", "]", "[", "fpath", "]", "=", "open", "(", "fpath", ")", ".", "read", "(", ")" ]
Loads modules from a .py file into ShutIt if there are no modules from this file already. We expect to have a callable 'module/0' which returns one or more module objects. If this doesn't exist we assume that the .py file works in the old style (automatically inserting the module into shutit_global) or it's not a shutit module.
[ "Loads", "modules", "from", "a", ".", "py", "file", "into", "ShutIt", "if", "there", "are", "no", "modules", "from", "this", "file", "already", ".", "We", "expect", "to", "have", "a", "callable", "module", "/", "0", "which", "returns", "one", "or", "more", "module", "objects", ".", "If", "this", "doesn", "t", "exist", "we", "assume", "that", "the", ".", "py", "file", "works", "in", "the", "old", "style", "(", "automatically", "inserting", "the", "module", "into", "shutit_global", ")", "or", "it", "s", "not", "a", "shutit", "module", "." ]
python
train
36.645161
jeremylow/pyshk
pyshk/models.py
https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/models.py#L90-L111
def NewFromJSON(data): """ Create a new User instance from a JSON dict. Args: data (dict): JSON dictionary representing a user. Returns: A User instance. """ if data.get('shakes', None): shakes = [Shake.NewFromJSON(shk) for shk in data.get('shakes')] else: shakes = None return User( id=data.get('id', None), name=data.get('name', None), profile_image_url=data.get('profile_image_url', None), about=data.get('about', None), website=data.get('website', None), shakes=shakes)
[ "def", "NewFromJSON", "(", "data", ")", ":", "if", "data", ".", "get", "(", "'shakes'", ",", "None", ")", ":", "shakes", "=", "[", "Shake", ".", "NewFromJSON", "(", "shk", ")", "for", "shk", "in", "data", ".", "get", "(", "'shakes'", ")", "]", "else", ":", "shakes", "=", "None", "return", "User", "(", "id", "=", "data", ".", "get", "(", "'id'", ",", "None", ")", ",", "name", "=", "data", ".", "get", "(", "'name'", ",", "None", ")", ",", "profile_image_url", "=", "data", ".", "get", "(", "'profile_image_url'", ",", "None", ")", ",", "about", "=", "data", ".", "get", "(", "'about'", ",", "None", ")", ",", "website", "=", "data", ".", "get", "(", "'website'", ",", "None", ")", ",", "shakes", "=", "shakes", ")" ]
Create a new User instance from a JSON dict. Args: data (dict): JSON dictionary representing a user. Returns: A User instance.
[ "Create", "a", "new", "User", "instance", "from", "a", "JSON", "dict", "." ]
python
train
29.045455
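An illustrative call for the factory above; the field values and URL are invented, and nested shakes would be parsed with Shake.NewFromJSON when present.

from pyshk.models import User   # import path assumed from this record

payload = {
    "id": 42,
    "name": "example user",
    "profile_image_url": "https://mlkshk.example/avatar.png",   # hypothetical URL
    "about": "hello",
    "website": "https://example.com",
    # "shakes": [...]  # optional; each entry would be passed to Shake.NewFromJSON
}
user = User.NewFromJSON(payload)
print(user.name)   # -> "example user"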
apache/incubator-mxnet
python/mxnet/image/detection.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/image/detection.py#L702-L714
def _estimate_label_shape(self):
    """Helper function to estimate label shape"""
    max_count = 0
    self.reset()
    try:
        while True:
            label, _ = self.next_sample()
            label = self._parse_label(label)
            max_count = max(max_count, label.shape[0])
    except StopIteration:
        pass
    self.reset()
    return (max_count, label.shape[1])
[ "def", "_estimate_label_shape", "(", "self", ")", ":", "max_count", "=", "0", "self", ".", "reset", "(", ")", "try", ":", "while", "True", ":", "label", ",", "_", "=", "self", ".", "next_sample", "(", ")", "label", "=", "self", ".", "_parse_label", "(", "label", ")", "max_count", "=", "max", "(", "max_count", ",", "label", ".", "shape", "[", "0", "]", ")", "except", "StopIteration", ":", "pass", "self", ".", "reset", "(", ")", "return", "(", "max_count", ",", "label", ".", "shape", "[", "1", "]", ")" ]
Helper function to estimate label shape
[ "Helper", "function", "to", "estimate", "label", "shape" ]
python
train
32.230769
pytroll/trollimage
trollimage/xrimage.py
https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/xrimage.py#L885-L894
def stretch_weber_fechner(self, k, s0):
    """Stretch according to the Weber-Fechner law.

    p = k.ln(S/S0)
    p is perception, S is the stimulus, S0 is the stimulus threshold (the
    highest unpercieved stimulus), and k is the factor.

    """
    attrs = self.data.attrs
    self.data = k * xu.log(self.data / s0)
    self.data.attrs = attrs
[ "def", "stretch_weber_fechner", "(", "self", ",", "k", ",", "s0", ")", ":", "attrs", "=", "self", ".", "data", ".", "attrs", "self", ".", "data", "=", "k", "*", "xu", ".", "log", "(", "self", ".", "data", "/", "s0", ")", "self", ".", "data", ".", "attrs", "=", "attrs" ]
Stretch according to the Weber-Fechner law. p = k.ln(S/S0) p is perception, S is the stimulus, S0 is the stimulus threshold (the highest unpercieved stimulus), and k is the factor.
[ "Stretch", "according", "to", "the", "Weber", "-", "Fechner", "law", "." ]
python
train
37
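A hedged usage note for the stretch above: with k and s0 chosen for the data range, every pixel value S is mapped to k*ln(S/s0), so s0 acts as the zero point. The image construction here is schematic, not taken from this record.

import numpy as np
import xarray as xr
from trollimage.xrimage import XRImage

data = xr.DataArray(np.linspace(0.01, 1.0, 12).reshape(1, 3, 4),
                    dims=("bands", "y", "x"), coords={"bands": ["L"]})
img = XRImage(data)
img.stretch_weber_fechner(k=0.2, s0=0.01)   # pixels equal to s0 map to 0; larger values grow as k*ln(S/s0)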
alefnula/tea
tea/shell/__init__.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L381-L396
def read(path, encoding="utf-8"):
    """Read the content of the file.

    Args:
        path (str): Path to the file
        encoding (str): File encoding. Default: utf-8

    Returns:
        str: File content or empty string if there was an error
    """
    try:
        with io.open(path, encoding=encoding) as f:
            return f.read()
    except Exception as e:
        logger.error("read: %s failed. Error: %s", path, e)
        return ""
[ "def", "read", "(", "path", ",", "encoding", "=", "\"utf-8\"", ")", ":", "try", ":", "with", "io", ".", "open", "(", "path", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"read: %s failed. Error: %s\"", ",", "path", ",", "e", ")", "return", "\"\"" ]
Read the content of the file. Args: path (str): Path to the file encoding (str): File encoding. Default: utf-8 Returns: str: File content or empty string if there was an error
[ "Read", "the", "content", "of", "the", "file", "." ]
python
train
27.3125
theiviaxx/Frog
frog/send_file.py
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/send_file.py#L46-L63
def send_zipfile(request, fileList):
    """
    Create a ZIP file on disk and transmit it in chunks of 8KB,
    without loading the whole file into memory. A similar approach can
    be used for large dynamic PDF files.
    """
    temp = tempfile.TemporaryFile()
    archive = zipfile.ZipFile(temp, 'w', zipfile.ZIP_DEFLATED)
    for artist,files in fileList.iteritems():
        for f in files:
            archive.write(f[0], '%s/%s' % (artist, f[1]))
    archive.close()
    wrapper = FixedFileWrapper(temp)
    response = HttpResponse(wrapper, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename=FrogSources.zip'
    response['Content-Length'] = temp.tell()
    temp.seek(0)
    return response
[ "def", "send_zipfile", "(", "request", ",", "fileList", ")", ":", "temp", "=", "tempfile", ".", "TemporaryFile", "(", ")", "archive", "=", "zipfile", ".", "ZipFile", "(", "temp", ",", "'w'", ",", "zipfile", ".", "ZIP_DEFLATED", ")", "for", "artist", ",", "files", "in", "fileList", ".", "iteritems", "(", ")", ":", "for", "f", "in", "files", ":", "archive", ".", "write", "(", "f", "[", "0", "]", ",", "'%s/%s'", "%", "(", "artist", ",", "f", "[", "1", "]", ")", ")", "archive", ".", "close", "(", ")", "wrapper", "=", "FixedFileWrapper", "(", "temp", ")", "response", "=", "HttpResponse", "(", "wrapper", ",", "content_type", "=", "'application/zip'", ")", "response", "[", "'Content-Disposition'", "]", "=", "'attachment; filename=FrogSources.zip'", "response", "[", "'Content-Length'", "]", "=", "temp", ".", "tell", "(", ")", "temp", ".", "seek", "(", "0", ")", "return", "response" ]
Create a ZIP file on disk and transmit it in chunks of 8KB, without loading the whole file into memory. A similar approach can be used for large dynamic PDF files.
[ "Create", "a", "ZIP", "file", "on", "disk", "and", "transmit", "it", "in", "chunks", "of", "8KB", "without", "loading", "the", "whole", "file", "into", "memory", ".", "A", "similar", "approach", "can", "be", "used", "for", "large", "dynamic", "PDF", "files", "." ]
python
train
47.944444
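The fileList argument above is a mapping of artist name to (source_path, archive_name) pairs. A rough sketch of how a view might assemble it; the paths are hypothetical, and the sketch keeps to the same Python 2 / Django era as the original (hence iteritems in the source).

# Hypothetical structure consumed by send_zipfile(request, fileList):
fileList = {
    'some_artist': [
        ('/srv/frog/media/img_0001.jpg', 'img_0001.jpg'),
        ('/srv/frog/media/img_0002.jpg', 'img_0002.jpg'),
    ],
    'other_artist': [
        ('/srv/frog/media/clip_0001.mov', 'clip_0001.mov'),
    ],
}
# Inside the archive these become some_artist/img_0001.jpg, other_artist/clip_0001.mov, ...
# return send_zipfile(request, fileList)   # called from inside a Django view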
tjcsl/cslbot
cslbot/commands/morse.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/morse.py#L23-L35
def cmd(send, msg, args):
    """Converts text to morse code.

    Syntax: {command} [text]

    """
    if not msg:
        msg = gen_word()
    morse = gen_morse(msg)
    if len(morse) > 100:
        send("Your morse is too long. Have you considered Western Union?")
    else:
        send(morse)
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "if", "not", "msg", ":", "msg", "=", "gen_word", "(", ")", "morse", "=", "gen_morse", "(", "msg", ")", "if", "len", "(", "morse", ")", ">", "100", ":", "send", "(", "\"Your morse is too long. Have you considered Western Union?\"", ")", "else", ":", "send", "(", "morse", ")" ]
Converts text to morse code. Syntax: {command} [text]
[ "Converts", "text", "to", "morse", "code", "." ]
python
train
22
wummel/linkchecker
third_party/dnspython/dns/inet.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/inet.py#L92-L107
def is_multicast(text):
    """Is the textual-form network address a multicast address?

    @param text: the textual address
    @raises ValueError: the address family cannot be determined from the input.
    @rtype: bool
    """
    try:
        first = ord(dns.ipv4.inet_aton(text)[0])
        return (first >= 224 and first <= 239)
    except Exception:
        try:
            first = ord(dns.ipv6.inet_aton(text)[0])
            return (first == 255)
        except Exception:
            raise ValueError
[ "def", "is_multicast", "(", "text", ")", ":", "try", ":", "first", "=", "ord", "(", "dns", ".", "ipv4", ".", "inet_aton", "(", "text", ")", "[", "0", "]", ")", "return", "(", "first", ">=", "224", "and", "first", "<=", "239", ")", "except", "Exception", ":", "try", ":", "first", "=", "ord", "(", "dns", ".", "ipv6", ".", "inet_aton", "(", "text", ")", "[", "0", "]", ")", "return", "(", "first", "==", "255", ")", "except", "Exception", ":", "raise", "ValueError" ]
Is the textual-form network address a multicast address? @param text: the textual address @raises ValueError: the address family cannot be determined from the input. @rtype: bool
[ "Is", "the", "textual", "-", "form", "network", "address", "a", "multicast", "address?" ]
python
train
31.0625
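A few quick checks of the address classification above; the expected results follow from the 224-239 first-octet range for IPv4 and the ff00::/8 (first byte 255) test for IPv6 in the code.

import dns.inet

dns.inet.is_multicast('224.0.0.1')    # True  (first octet 224 falls in 224-239)
dns.inet.is_multicast('239.255.0.1')  # True
dns.inet.is_multicast('192.168.1.1')  # False (first octet 192)
dns.inet.is_multicast('ff02::1')      # True  (first byte 0xff == 255)
dns.inet.is_multicast('::1')          # False
dns.inet.is_multicast('not-an-ip')    # raises ValueError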
openid/python-openid
openid/extensions/ax.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/ax.py#L149-L179
def toTypeURIs(namespace_map, alias_list_s): """Given a namespace mapping and a string containing a comma-separated list of namespace aliases, return a list of type URIs that correspond to those aliases. @param namespace_map: The mapping from namespace URI to alias @type namespace_map: openid.message.NamespaceMap @param alias_list_s: The string containing the comma-separated list of aliases. May also be None for convenience. @type alias_list_s: str or NoneType @returns: The list of namespace URIs that corresponds to the supplied list of aliases. If the string was zero-length or None, an empty list will be returned. @raise KeyError: If an alias is present in the list of aliases but is not present in the namespace map. """ uris = [] if alias_list_s: for alias in alias_list_s.split(','): type_uri = namespace_map.getNamespaceURI(alias) if type_uri is None: raise KeyError( 'No type is defined for attribute name %r' % (alias,)) else: uris.append(type_uri) return uris
[ "def", "toTypeURIs", "(", "namespace_map", ",", "alias_list_s", ")", ":", "uris", "=", "[", "]", "if", "alias_list_s", ":", "for", "alias", "in", "alias_list_s", ".", "split", "(", "','", ")", ":", "type_uri", "=", "namespace_map", ".", "getNamespaceURI", "(", "alias", ")", "if", "type_uri", "is", "None", ":", "raise", "KeyError", "(", "'No type is defined for attribute name %r'", "%", "(", "alias", ",", ")", ")", "else", ":", "uris", ".", "append", "(", "type_uri", ")", "return", "uris" ]
Given a namespace mapping and a string containing a comma-separated list of namespace aliases, return a list of type URIs that correspond to those aliases. @param namespace_map: The mapping from namespace URI to alias @type namespace_map: openid.message.NamespaceMap @param alias_list_s: The string containing the comma-separated list of aliases. May also be None for convenience. @type alias_list_s: str or NoneType @returns: The list of namespace URIs that corresponds to the supplied list of aliases. If the string was zero-length or None, an empty list will be returned. @raise KeyError: If an alias is present in the list of aliases but is not present in the namespace map.
[ "Given", "a", "namespace", "mapping", "and", "a", "string", "containing", "a", "comma", "-", "separated", "list", "of", "namespace", "aliases", "return", "a", "list", "of", "type", "URIs", "that", "correspond", "to", "those", "aliases", "." ]
python
train
36.483871
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py#L301-L355
def __query_spec(self): """Get the spec to use for a query. """ operators = self.__modifiers.copy() if self.__ordering: operators["$orderby"] = self.__ordering if self.__explain: operators["$explain"] = True if self.__hint: operators["$hint"] = self.__hint if self.__comment: operators["$comment"] = self.__comment if self.__max_scan: operators["$maxScan"] = self.__max_scan if self.__max_time_ms is not None: operators["$maxTimeMS"] = self.__max_time_ms if self.__max: operators["$max"] = self.__max if self.__min: operators["$min"] = self.__min if self.__return_key: operators["$returnKey"] = self.__return_key if self.__show_record_id: # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. operators["$showDiskLoc"] = self.__show_record_id if self.__snapshot: operators["$snapshot"] = self.__snapshot if operators: # Make a shallow copy so we can cleanly rewind or clone. spec = self.__spec.copy() # White-listed commands must be wrapped in $query. if "$query" not in spec: # $query has to come first spec = SON([("$query", spec)]) if not isinstance(spec, SON): # Ensure the spec is SON. As order is important this will # ensure its set before merging in any extra operators. spec = SON(spec) spec.update(operators) return spec # Have to wrap with $query if "query" is the first key. # We can't just use $query anytime "query" is a key as # that breaks commands like count and find_and_modify. # Checking spec.keys()[0] covers the case that the spec # was passed as an instance of SON or OrderedDict. elif ("query" in self.__spec and (len(self.__spec) == 1 or next(iter(self.__spec)) == "query")): return SON({"$query": self.__spec}) return self.__spec
[ "def", "__query_spec", "(", "self", ")", ":", "operators", "=", "self", ".", "__modifiers", ".", "copy", "(", ")", "if", "self", ".", "__ordering", ":", "operators", "[", "\"$orderby\"", "]", "=", "self", ".", "__ordering", "if", "self", ".", "__explain", ":", "operators", "[", "\"$explain\"", "]", "=", "True", "if", "self", ".", "__hint", ":", "operators", "[", "\"$hint\"", "]", "=", "self", ".", "__hint", "if", "self", ".", "__comment", ":", "operators", "[", "\"$comment\"", "]", "=", "self", ".", "__comment", "if", "self", ".", "__max_scan", ":", "operators", "[", "\"$maxScan\"", "]", "=", "self", ".", "__max_scan", "if", "self", ".", "__max_time_ms", "is", "not", "None", ":", "operators", "[", "\"$maxTimeMS\"", "]", "=", "self", ".", "__max_time_ms", "if", "self", ".", "__max", ":", "operators", "[", "\"$max\"", "]", "=", "self", ".", "__max", "if", "self", ".", "__min", ":", "operators", "[", "\"$min\"", "]", "=", "self", ".", "__min", "if", "self", ".", "__return_key", ":", "operators", "[", "\"$returnKey\"", "]", "=", "self", ".", "__return_key", "if", "self", ".", "__show_record_id", ":", "# This is upgraded to showRecordId for MongoDB 3.2+ \"find\" command.", "operators", "[", "\"$showDiskLoc\"", "]", "=", "self", ".", "__show_record_id", "if", "self", ".", "__snapshot", ":", "operators", "[", "\"$snapshot\"", "]", "=", "self", ".", "__snapshot", "if", "operators", ":", "# Make a shallow copy so we can cleanly rewind or clone.", "spec", "=", "self", ".", "__spec", ".", "copy", "(", ")", "# White-listed commands must be wrapped in $query.", "if", "\"$query\"", "not", "in", "spec", ":", "# $query has to come first", "spec", "=", "SON", "(", "[", "(", "\"$query\"", ",", "spec", ")", "]", ")", "if", "not", "isinstance", "(", "spec", ",", "SON", ")", ":", "# Ensure the spec is SON. As order is important this will", "# ensure its set before merging in any extra operators.", "spec", "=", "SON", "(", "spec", ")", "spec", ".", "update", "(", "operators", ")", "return", "spec", "# Have to wrap with $query if \"query\" is the first key.", "# We can't just use $query anytime \"query\" is a key as", "# that breaks commands like count and find_and_modify.", "# Checking spec.keys()[0] covers the case that the spec", "# was passed as an instance of SON or OrderedDict.", "elif", "(", "\"query\"", "in", "self", ".", "__spec", "and", "(", "len", "(", "self", ".", "__spec", ")", "==", "1", "or", "next", "(", "iter", "(", "self", ".", "__spec", ")", ")", "==", "\"query\"", ")", ")", ":", "return", "SON", "(", "{", "\"$query\"", ":", "self", ".", "__spec", "}", ")", "return", "self", ".", "__spec" ]
Get the spec to use for a query.
[ "Get", "the", "spec", "to", "use", "for", "a", "query", "." ]
python
train
39
7sDream/zhihu-py3
zhihu/author.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/author.py#L356-L367
def followed_topic_num(self):
    """获取用户关注的话题数

    :return: 关注的话题数
    :rtype: int
    """
    if self.url is not None:
        tag = self.soup.find('div', class_='zm-profile-side-topics')
        if tag is not None:
            return int(re_get_number.match(
                tag.parent.strong.text).group(1))
    return 0
[ "def", "followed_topic_num", "(", "self", ")", ":", "if", "self", ".", "url", "is", "not", "None", ":", "tag", "=", "self", ".", "soup", ".", "find", "(", "'div'", ",", "class_", "=", "'zm-profile-side-topics'", ")", "if", "tag", "is", "not", "None", ":", "return", "int", "(", "re_get_number", ".", "match", "(", "tag", ".", "parent", ".", "strong", ".", "text", ")", ".", "group", "(", "1", ")", ")", "return", "0" ]
获取用户关注的话题数 :return: 关注的话题数 :rtype: int
[ "获取用户关注的话题数" ]
python
train
29.5
cloudant/python-cloudant
src/cloudant/_common_util.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_common_util.py#L283-L294
def response_to_json_dict(response, **kwargs):
    """
    Standard place to convert responses to JSON.

    :param response: requests response object
    :param **kwargs: arguments accepted by json.loads

    :returns: dict of JSON response
    """
    if response.encoding is None:
        response.encoding = 'utf-8'

    return json.loads(response.text, **kwargs)
[ "def", "response_to_json_dict", "(", "response", ",", "*", "*", "kwargs", ")", ":", "if", "response", ".", "encoding", "is", "None", ":", "response", ".", "encoding", "=", "'utf-8'", "return", "json", ".", "loads", "(", "response", ".", "text", ",", "*", "*", "kwargs", ")" ]
Standard place to convert responses to JSON. :param response: requests response object :param **kwargs: arguments accepted by json.loads :returns: dict of JSON response
[ "Standard", "place", "to", "convert", "responses", "to", "JSON", "." ]
python
train
29.583333
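A minimal sketch of calling the helper above on a requests response; the URL is a placeholder and the import path is inferred from this record.

import requests
from cloudant._common_util import response_to_json_dict   # import path assumed from this record

resp = requests.get('https://example.cloudant.test/_all_dbs')   # hypothetical endpoint
data = response_to_json_dict(resp)   # falls back to utf-8 when the server advertises no encoding
print(type(data))                    # dict (or list, depending on the JSON payload)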
ErikBjare/pyzenobase
examples/upload_lifelogger_spreadsheet/main.py
https://github.com/ErikBjare/pyzenobase/blob/eb0572c7441a350bf5578bc5287f3be53d32ea19/examples/upload_lifelogger_spreadsheet/main.py#L63-L87
def get_dates(raw_table) -> "list of dates": """ Goes through the first column of input table and returns the first sequence of dates it finds. """ dates = [] found_first = False for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]): if dstr: if len(dstr.split("/")) == 3: d = datetime.datetime.strptime(dstr, '%m/%d/%Y') elif len(dstr.split("-")) == 3: d = datetime.datetime.strptime(dstr, '%Y-%m-%d') else: # Not necessarily an error, could just be a non-date cell logging.debug("unknown date-format: {}".format(dstr)) continue dates.append(d) if not found_first: found_first = True logging.debug("Found first date: '{}' at i: {}".format(d.isoformat(), i)) elif found_first: logging.debug("Last date: {}".format(d)) break return dates
[ "def", "get_dates", "(", "raw_table", ")", "->", "\"list of dates\"", ":", "dates", "=", "[", "]", "found_first", "=", "False", "for", "i", ",", "dstr", "in", "enumerate", "(", "[", "raw_table", "[", "i", "]", "[", "0", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "raw_table", ")", ")", "]", ")", ":", "if", "dstr", ":", "if", "len", "(", "dstr", ".", "split", "(", "\"/\"", ")", ")", "==", "3", ":", "d", "=", "datetime", ".", "datetime", ".", "strptime", "(", "dstr", ",", "'%m/%d/%Y'", ")", "elif", "len", "(", "dstr", ".", "split", "(", "\"-\"", ")", ")", "==", "3", ":", "d", "=", "datetime", ".", "datetime", ".", "strptime", "(", "dstr", ",", "'%Y-%m-%d'", ")", "else", ":", "# Not necessarily an error, could just be a non-date cell", "logging", ".", "debug", "(", "\"unknown date-format: {}\"", ".", "format", "(", "dstr", ")", ")", "continue", "dates", ".", "append", "(", "d", ")", "if", "not", "found_first", ":", "found_first", "=", "True", "logging", ".", "debug", "(", "\"Found first date: '{}' at i: {}\"", ".", "format", "(", "d", ".", "isoformat", "(", ")", ",", "i", ")", ")", "elif", "found_first", ":", "logging", ".", "debug", "(", "\"Last date: {}\"", ".", "format", "(", "d", ")", ")", "break", "return", "dates" ]
Goes through the first column of input table and returns the first sequence of dates it finds.
[ "Goes", "through", "the", "first", "column", "of", "input", "table", "and", "returns", "the", "first", "sequence", "of", "dates", "it", "finds", "." ]
python
train
43.36
wummel/linkchecker
linkcheck/logger/html.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/html.py#L171-L177
def write_url (self, url_data): """Write url_data.base_url.""" self.writeln(u"<tr>") self.writeln(u'<td class="url">%s</td>' % self.part("url")) self.write(u'<td class="url">') self.write(u"`%s'" % cgi.escape(url_data.base_url)) self.writeln(u"</td></tr>")
[ "def", "write_url", "(", "self", ",", "url_data", ")", ":", "self", ".", "writeln", "(", "u\"<tr>\"", ")", "self", ".", "writeln", "(", "u'<td class=\"url\">%s</td>'", "%", "self", ".", "part", "(", "\"url\"", ")", ")", "self", ".", "write", "(", "u'<td class=\"url\">'", ")", "self", ".", "write", "(", "u\"`%s'\"", "%", "cgi", ".", "escape", "(", "url_data", ".", "base_url", ")", ")", "self", ".", "writeln", "(", "u\"</td></tr>\"", ")" ]
Write url_data.base_url.
[ "Write", "url_data", ".", "base_url", "." ]
python
train
42.571429
Nike-Inc/cerberus-python-client
cerberus/client.py
https://github.com/Nike-Inc/cerberus-python-client/blob/ef38356822e722fcb6a6ed4a1b38a5b493e753ae/cerberus/client.py#L74-L89
def _set_token(self): """Set the Cerberus token based on auth type""" try: self.token = os.environ['CERBERUS_TOKEN'] if self.verbose: print("Overriding Cerberus token with environment variable.", file=sys.stderr) logger.info("Overriding Cerberus token with environment variable.") return except: pass if self.username: ua = UserAuth(self.cerberus_url, self.username, self.password) self.token = ua.get_token() else: awsa = AWSAuth(self.cerberus_url, region=self.region, aws_session=self.aws_session, verbose=self.verbose) self.token = awsa.get_token()
[ "def", "_set_token", "(", "self", ")", ":", "try", ":", "self", ".", "token", "=", "os", ".", "environ", "[", "'CERBERUS_TOKEN'", "]", "if", "self", ".", "verbose", ":", "print", "(", "\"Overriding Cerberus token with environment variable.\"", ",", "file", "=", "sys", ".", "stderr", ")", "logger", ".", "info", "(", "\"Overriding Cerberus token with environment variable.\"", ")", "return", "except", ":", "pass", "if", "self", ".", "username", ":", "ua", "=", "UserAuth", "(", "self", ".", "cerberus_url", ",", "self", ".", "username", ",", "self", ".", "password", ")", "self", ".", "token", "=", "ua", ".", "get_token", "(", ")", "else", ":", "awsa", "=", "AWSAuth", "(", "self", ".", "cerberus_url", ",", "region", "=", "self", ".", "region", ",", "aws_session", "=", "self", ".", "aws_session", ",", "verbose", "=", "self", ".", "verbose", ")", "self", ".", "token", "=", "awsa", ".", "get_token", "(", ")" ]
Set the Cerberus token based on auth type
[ "Set", "the", "Cerberus", "token", "based", "on", "auth", "type" ]
python
train
43.75
johnnoone/aioconsul
aioconsul/client/session_endpoint.py
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/session_endpoint.py#L198-L237
async def renew(self, session, *, dc=None): """Renews a TTL-based session Parameters: session (ObjectID): Session ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: ObjectMeta: where value is session Raises: NotFound: session is absent The response looks like this:: { "LockDelay": datetime.timedelta(0, 15), "Checks": [ "serfHealth" ], "Node": "foobar", "ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "CreateIndex": 1086449 "Behavior": "release", "TTL": datetime.timedelta(0, 15) } .. note:: Consul MAY return a TTL value higher than the one specified during session creation. This indicates the server is under high load and is requesting clients renew less often. """ session_id = extract_attr(session, keys=["ID"]) response = await self._api.put("/v1/session/renew", session_id, params={"dc": dc}) try: result = response.body[0] except IndexError: meta = extract_meta(response.headers) raise NotFound("No session for %r" % session_id, meta=meta) return consul(result, meta=extract_meta(response.headers))
[ "async", "def", "renew", "(", "self", ",", "session", ",", "*", ",", "dc", "=", "None", ")", ":", "session_id", "=", "extract_attr", "(", "session", ",", "keys", "=", "[", "\"ID\"", "]", ")", "response", "=", "await", "self", ".", "_api", ".", "put", "(", "\"/v1/session/renew\"", ",", "session_id", ",", "params", "=", "{", "\"dc\"", ":", "dc", "}", ")", "try", ":", "result", "=", "response", ".", "body", "[", "0", "]", "except", "IndexError", ":", "meta", "=", "extract_meta", "(", "response", ".", "headers", ")", "raise", "NotFound", "(", "\"No session for %r\"", "%", "session_id", ",", "meta", "=", "meta", ")", "return", "consul", "(", "result", ",", "meta", "=", "extract_meta", "(", "response", ".", "headers", ")", ")" ]
Renews a TTL-based session Parameters: session (ObjectID): Session ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: ObjectMeta: where value is session Raises: NotFound: session is absent The response looks like this:: { "LockDelay": datetime.timedelta(0, 15), "Checks": [ "serfHealth" ], "Node": "foobar", "ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "CreateIndex": 1086449 "Behavior": "release", "TTL": datetime.timedelta(0, 15) } .. note:: Consul MAY return a TTL value higher than the one specified during session creation. This indicates the server is under high load and is requesting clients renew less often.
[ "Renews", "a", "TTL", "-", "based", "session" ]
python
train
37.25
vxgmichel/aiostream
aiostream/stream/aggregate.py
https://github.com/vxgmichel/aiostream/blob/43bdf04ab19108a3f1b5a472062e1392a26cbcf8/aiostream/stream/aggregate.py#L43-L52
def reduce(source, func, initializer=None): """Apply a function of two arguments cumulatively to the items of an asynchronous sequence, reducing the sequence to a single value. If ``initializer`` is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. """ acc = accumulate.raw(source, func, initializer) return select.item.raw(acc, -1)
[ "def", "reduce", "(", "source", ",", "func", ",", "initializer", "=", "None", ")", ":", "acc", "=", "accumulate", ".", "raw", "(", "source", ",", "func", ",", "initializer", ")", "return", "select", ".", "item", ".", "raw", "(", "acc", ",", "-", "1", ")" ]
Apply a function of two arguments cumulatively to the items of an asynchronous sequence, reducing the sequence to a single value. If ``initializer`` is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty.
[ "Apply", "a", "function", "of", "two", "arguments", "cumulatively", "to", "the", "items", "of", "an", "asynchronous", "sequence", "reducing", "the", "sequence", "to", "a", "single", "value", "." ]
python
train
43.3
matthewdeanmartin/jiggle_version
jiggle_version/file_makers.py
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/file_makers.py#L32-L45
def create_init(self, path): # type: (str) -> None """ Create a minimal __init__ file with enough boiler plate to not add to lint messages :param path: :return: """ source = """# coding=utf-8 \"\"\" Version \"\"\" __version__ = \"0.0.0\" """ with io.open(path, "w", encoding="utf-8") as outfile: outfile.write(source)
[ "def", "create_init", "(", "self", ",", "path", ")", ":", "# type: (str) -> None", "source", "=", "\"\"\"# coding=utf-8\n\\\"\\\"\\\"\nVersion\n\\\"\\\"\\\"\n__version__ = \\\"0.0.0\\\"\n\"\"\"", "with", "io", ".", "open", "(", "path", ",", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "source", ")" ]
Create a minimal __init__ file with enough boilerplate to not add to lint messages :param path: :return:
[ "Create", "a", "minimal", "__init__", "file", "with", "enough", "boiler", "plate", "to", "not", "add", "to", "lint", "messages", ":", "param", "path", ":", ":", "return", ":" ]
python
train
26.642857
Clinical-Genomics/scout
scout/adapter/mongo/panel.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/panel.py#L265-L296
def gene_to_panels(self, case_obj): """Fetch all gene panels and group them by gene Args: case_obj(scout.models.Case) Returns: gene_dict(dict): A dictionary with gene as keys and a set of panel names as value """ LOG.info("Building gene to panels") gene_dict = {} for panel_info in case_obj.get('panels', []): panel_name = panel_info['panel_name'] panel_version = panel_info['version'] panel_obj = self.gene_panel(panel_name, version=panel_version) if not panel_obj: ## Raise exception here??? LOG.warning("Panel: {0}, version {1} does not exist in database".format(panel_name, panel_version)) for gene in panel_obj['genes']: hgnc_id = gene['hgnc_id'] if hgnc_id not in gene_dict: gene_dict[hgnc_id] = set([panel_name]) continue gene_dict[hgnc_id].add(panel_name) LOG.info("Gene to panels done") return gene_dict
[ "def", "gene_to_panels", "(", "self", ",", "case_obj", ")", ":", "LOG", ".", "info", "(", "\"Building gene to panels\"", ")", "gene_dict", "=", "{", "}", "for", "panel_info", "in", "case_obj", ".", "get", "(", "'panels'", ",", "[", "]", ")", ":", "panel_name", "=", "panel_info", "[", "'panel_name'", "]", "panel_version", "=", "panel_info", "[", "'version'", "]", "panel_obj", "=", "self", ".", "gene_panel", "(", "panel_name", ",", "version", "=", "panel_version", ")", "if", "not", "panel_obj", ":", "## Raise exception here???", "LOG", ".", "warning", "(", "\"Panel: {0}, version {1} does not exist in database\"", ".", "format", "(", "panel_name", ",", "panel_version", ")", ")", "for", "gene", "in", "panel_obj", "[", "'genes'", "]", ":", "hgnc_id", "=", "gene", "[", "'hgnc_id'", "]", "if", "hgnc_id", "not", "in", "gene_dict", ":", "gene_dict", "[", "hgnc_id", "]", "=", "set", "(", "[", "panel_name", "]", ")", "continue", "gene_dict", "[", "hgnc_id", "]", ".", "add", "(", "panel_name", ")", "LOG", ".", "info", "(", "\"Gene to panels done\"", ")", "return", "gene_dict" ]
Fetch all gene panels and group them by gene Args: case_obj(scout.models.Case) Returns: gene_dict(dict): A dictionary with gene as keys and a set of panel names as value
[ "Fetch", "all", "gene", "panels", "and", "group", "them", "by", "gene" ]
python
test
34.78125
fabioz/PyDev.Debugger
third_party/pep8/pycodestyle.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L1846-L1849
def get_count(self, prefix=''): """Return the total count of errors and warnings.""" return sum([self.counters[key] for key in self.messages if key.startswith(prefix)])
[ "def", "get_count", "(", "self", ",", "prefix", "=", "''", ")", ":", "return", "sum", "(", "[", "self", ".", "counters", "[", "key", "]", "for", "key", "in", "self", ".", "messages", "if", "key", ".", "startswith", "(", "prefix", ")", "]", ")" ]
Return the total count of errors and warnings.
[ "Return", "the", "total", "count", "of", "errors", "and", "warnings", "." ]
python
train
50.25
dmbee/seglearn
seglearn/datasets.py
https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/datasets.py#L13-L46
def load_watch(): ''' Loads some of the 6-axis inertial sensor data from my smartwatch project. The sensor data was recorded as study subjects performed sets of 20 shoulder exercise repetitions while wearing a smartwatch. It is a multivariate time series. The study can be found here: https://arxiv.org/abs/1802.01489 Returns ------- data : dict data['X'] : list, length 140 | inertial sensor data, each element with shape [n_samples, 6] | sampled at 50 Hz data['y'] : array, length 140 target vector (exercise type) data['side'] : array, length 140 the extremity side, 1 = right, 0 = left data['subject'] : array, length 140 the subject (participant) number data['X_labels'] : str list, length 6 ordered labels for the sensor data variables data['y_labels'] :str list, length 7 ordered labels for the target (exercise type) Examples -------- >>> from seglearn.datasets import load_watch >>> data = load_watch() >>> print(data.keys()) ''' module_path = dirname(__file__) data = np.load(module_path + "/data/watch_dataset.npy").item() return data
[ "def", "load_watch", "(", ")", ":", "module_path", "=", "dirname", "(", "__file__", ")", "data", "=", "np", ".", "load", "(", "module_path", "+", "\"/data/watch_dataset.npy\"", ")", ".", "item", "(", ")", "return", "data" ]
Loads some of the 6-axis inertial sensor data from my smartwatch project. The sensor data was recorded as study subjects performed sets of 20 shoulder exercise repetitions while wearing a smartwatch. It is a multivariate time series. The study can be found here: https://arxiv.org/abs/1802.01489 Returns ------- data : dict data['X'] : list, length 140 | inertial sensor data, each element with shape [n_samples, 6] | sampled at 50 Hz data['y'] : array, length 140 target vector (exercise type) data['side'] : array, length 140 the extremity side, 1 = right, 0 = left data['subject'] : array, length 140 the subject (participant) number data['X_labels'] : str list, length 6 ordered labels for the sensor data variables data['y_labels'] :str list, length 7 ordered labels for the target (exercise type) Examples -------- >>> from seglearn.datasets import load_watch >>> data = load_watch() >>> print(data.keys())
[ "Loads", "some", "of", "the", "6", "-", "axis", "inertial", "sensor", "data", "from", "my", "smartwatch", "project", ".", "The", "sensor", "data", "was", "recorded", "as", "study", "subjects", "performed", "sets", "of", "20", "shoulder", "exercise", "repetitions", "while", "wearing", "a", "smartwatch", ".", "It", "is", "a", "multivariate", "time", "series", "." ]
python
train
35.735294
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle3.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle3.py#L371-L378
def add_filter(self, ftype, func): ''' Register a new output filter. Whenever bottle hits a handler output matching `ftype`, `func` is applyed to it. ''' if not isinstance(ftype, type): raise TypeError("Expected type object, got %s" % type(ftype)) self.castfilter = [(t, f) for (t, f) in self.castfilter if t != ftype] self.castfilter.append((ftype, func)) self.castfilter.sort()
[ "def", "add_filter", "(", "self", ",", "ftype", ",", "func", ")", ":", "if", "not", "isinstance", "(", "ftype", ",", "type", ")", ":", "raise", "TypeError", "(", "\"Expected type object, got %s\"", "%", "type", "(", "ftype", ")", ")", "self", ".", "castfilter", "=", "[", "(", "t", ",", "f", ")", "for", "(", "t", ",", "f", ")", "in", "self", ".", "castfilter", "if", "t", "!=", "ftype", "]", "self", ".", "castfilter", ".", "append", "(", "(", "ftype", ",", "func", ")", ")", "self", ".", "castfilter", ".", "sort", "(", ")" ]
Register a new output filter. Whenever bottle hits a handler output matching `ftype`, `func` is applied to it.
[ "Register", "a", "new", "output", "filter", ".", "Whenever", "bottle", "hits", "a", "handler", "output", "matching", "ftype", "func", "is", "applyed", "to", "it", "." ]
python
train
54.5
saltstack/salt
salt/modules/linux_lvm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/linux_lvm.py#L320-L348
def vgcreate(vgname, devices, **kwargs): ''' Create an LVM volume group CLI Examples: .. code-block:: bash salt mymachine lvm.vgcreate my_vg /dev/sdb1,/dev/sdb2 salt mymachine lvm.vgcreate my_vg /dev/sdb1 clustered=y ''' if not vgname or not devices: return 'Error: vgname and device(s) are both required' if isinstance(devices, six.string_types): devices = devices.split(',') cmd = ['vgcreate', vgname] for device in devices: cmd.append(device) valid = ('clustered', 'maxlogicalvolumes', 'maxphysicalvolumes', 'vgmetadatacopies', 'metadatacopies', 'physicalextentsize') for var in kwargs: if kwargs[var] and var in valid: cmd.append('--{0}'.format(var)) cmd.append(kwargs[var]) out = __salt__['cmd.run'](cmd, python_shell=False).splitlines() vgdata = vgdisplay(vgname) vgdata['Output from vgcreate'] = out[0].strip() return vgdata
[ "def", "vgcreate", "(", "vgname", ",", "devices", ",", "*", "*", "kwargs", ")", ":", "if", "not", "vgname", "or", "not", "devices", ":", "return", "'Error: vgname and device(s) are both required'", "if", "isinstance", "(", "devices", ",", "six", ".", "string_types", ")", ":", "devices", "=", "devices", ".", "split", "(", "','", ")", "cmd", "=", "[", "'vgcreate'", ",", "vgname", "]", "for", "device", "in", "devices", ":", "cmd", ".", "append", "(", "device", ")", "valid", "=", "(", "'clustered'", ",", "'maxlogicalvolumes'", ",", "'maxphysicalvolumes'", ",", "'vgmetadatacopies'", ",", "'metadatacopies'", ",", "'physicalextentsize'", ")", "for", "var", "in", "kwargs", ":", "if", "kwargs", "[", "var", "]", "and", "var", "in", "valid", ":", "cmd", ".", "append", "(", "'--{0}'", ".", "format", "(", "var", ")", ")", "cmd", ".", "append", "(", "kwargs", "[", "var", "]", ")", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", ".", "splitlines", "(", ")", "vgdata", "=", "vgdisplay", "(", "vgname", ")", "vgdata", "[", "'Output from vgcreate'", "]", "=", "out", "[", "0", "]", ".", "strip", "(", ")", "return", "vgdata" ]
Create an LVM volume group CLI Examples: .. code-block:: bash salt mymachine lvm.vgcreate my_vg /dev/sdb1,/dev/sdb2 salt mymachine lvm.vgcreate my_vg /dev/sdb1 clustered=y
[ "Create", "an", "LVM", "volume", "group" ]
python
train
32.827586
gc3-uzh-ch/elasticluster
elasticluster/providers/gce.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/gce.py#L586-L616
def stop_instance(self, instance_id): """Stops the instance gracefully. :param str instance_id: instance identifier :raises: `InstanceError` if instance can not be stopped """ if not instance_id: log.info("Instance to stop has no instance id") return gce = self._connect() try: request = gce.instances().delete(project=self._project_id, instance=instance_id, zone=self._zone) response = self._execute_request(request) self._check_response(response) except HttpError as e: # If the instance does not exist, we get a 404 if e.resp.status == 404: raise InstanceNotFoundError( "Instance `{instance_id}` was not found" .format(instance_id=instance_id)) else: raise InstanceError( "Could not stop instance `{instance_id}`: `{e}`" .format(instance_id=instance_id, e=e)) except CloudProviderError as e: raise InstanceError( "Could not stop instance `{instance_id}`: `{e}`" .format(instance_id=instance_id, e=e))
[ "def", "stop_instance", "(", "self", ",", "instance_id", ")", ":", "if", "not", "instance_id", ":", "log", ".", "info", "(", "\"Instance to stop has no instance id\"", ")", "return", "gce", "=", "self", ".", "_connect", "(", ")", "try", ":", "request", "=", "gce", ".", "instances", "(", ")", ".", "delete", "(", "project", "=", "self", ".", "_project_id", ",", "instance", "=", "instance_id", ",", "zone", "=", "self", ".", "_zone", ")", "response", "=", "self", ".", "_execute_request", "(", "request", ")", "self", ".", "_check_response", "(", "response", ")", "except", "HttpError", "as", "e", ":", "# If the instance does not exist, we get a 404", "if", "e", ".", "resp", ".", "status", "==", "404", ":", "raise", "InstanceNotFoundError", "(", "\"Instance `{instance_id}` was not found\"", ".", "format", "(", "instance_id", "=", "instance_id", ")", ")", "else", ":", "raise", "InstanceError", "(", "\"Could not stop instance `{instance_id}`: `{e}`\"", ".", "format", "(", "instance_id", "=", "instance_id", ",", "e", "=", "e", ")", ")", "except", "CloudProviderError", "as", "e", ":", "raise", "InstanceError", "(", "\"Could not stop instance `{instance_id}`: `{e}`\"", ".", "format", "(", "instance_id", "=", "instance_id", ",", "e", "=", "e", ")", ")" ]
Stops the instance gracefully. :param str instance_id: instance identifier :raises: `InstanceError` if instance can not be stopped
[ "Stops", "the", "instance", "gracefully", "." ]
python
train
40
bitprophet/ssh
ssh/transport.py
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L969-L1026
def connect(self, hostkey=None, username='', password=None, pkey=None): """ Negotiate an SSH2 session, and optionally verify the server's host key and authenticate using a password or private key. This is a shortcut for L{start_client}, L{get_remote_server_key}, and L{Transport.auth_password} or L{Transport.auth_publickey}. Use those methods if you want more control. You can use this method immediately after creating a Transport to negotiate encryption with a server. If it fails, an exception will be thrown. On success, the method will return cleanly, and an encrypted session exists. You may immediately call L{open_channel} or L{open_session} to get a L{Channel} object, which is used for data transfer. @note: If you fail to supply a password or private key, this method may succeed, but a subsequent L{open_channel} or L{open_session} call may fail because you haven't authenticated yet. @param hostkey: the host key expected from the server, or C{None} if you don't want to do host key verification. @type hostkey: L{PKey<pkey.PKey>} @param username: the username to authenticate as. @type username: str @param password: a password to use for authentication, if you want to use password authentication; otherwise C{None}. @type password: str @param pkey: a private key to use for authentication, if you want to use private key authentication; otherwise C{None}. @type pkey: L{PKey<pkey.PKey>} @raise SSHException: if the SSH2 negotiation fails, the host key supplied by the server is incorrect, or authentication fails. """ if hostkey is not None: self._preferred_keys = [ hostkey.get_name() ] self.start_client() # check host key if we were given one if (hostkey is not None): key = self.get_remote_server_key() if (key.get_name() != hostkey.get_name()) or (str(key) != str(hostkey)): self._log(DEBUG, 'Bad host key from server') self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(str(hostkey)))) self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(str(key)))) raise SSHException('Bad host key from server') self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name()) if (pkey is not None) or (password is not None): if password is not None: self._log(DEBUG, 'Attempting password auth...') self.auth_password(username, password) else: self._log(DEBUG, 'Attempting public-key auth...') self.auth_publickey(username, pkey) return
[ "def", "connect", "(", "self", ",", "hostkey", "=", "None", ",", "username", "=", "''", ",", "password", "=", "None", ",", "pkey", "=", "None", ")", ":", "if", "hostkey", "is", "not", "None", ":", "self", ".", "_preferred_keys", "=", "[", "hostkey", ".", "get_name", "(", ")", "]", "self", ".", "start_client", "(", ")", "# check host key if we were given one", "if", "(", "hostkey", "is", "not", "None", ")", ":", "key", "=", "self", ".", "get_remote_server_key", "(", ")", "if", "(", "key", ".", "get_name", "(", ")", "!=", "hostkey", ".", "get_name", "(", ")", ")", "or", "(", "str", "(", "key", ")", "!=", "str", "(", "hostkey", ")", ")", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Bad host key from server'", ")", "self", ".", "_log", "(", "DEBUG", ",", "'Expected: %s: %s'", "%", "(", "hostkey", ".", "get_name", "(", ")", ",", "repr", "(", "str", "(", "hostkey", ")", ")", ")", ")", "self", ".", "_log", "(", "DEBUG", ",", "'Got : %s: %s'", "%", "(", "key", ".", "get_name", "(", ")", ",", "repr", "(", "str", "(", "key", ")", ")", ")", ")", "raise", "SSHException", "(", "'Bad host key from server'", ")", "self", ".", "_log", "(", "DEBUG", ",", "'Host key verified (%s)'", "%", "hostkey", ".", "get_name", "(", ")", ")", "if", "(", "pkey", "is", "not", "None", ")", "or", "(", "password", "is", "not", "None", ")", ":", "if", "password", "is", "not", "None", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Attempting password auth...'", ")", "self", ".", "auth_password", "(", "username", ",", "password", ")", "else", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Attempting public-key auth...'", ")", "self", ".", "auth_publickey", "(", "username", ",", "pkey", ")", "return" ]
Negotiate an SSH2 session, and optionally verify the server's host key and authenticate using a password or private key. This is a shortcut for L{start_client}, L{get_remote_server_key}, and L{Transport.auth_password} or L{Transport.auth_publickey}. Use those methods if you want more control. You can use this method immediately after creating a Transport to negotiate encryption with a server. If it fails, an exception will be thrown. On success, the method will return cleanly, and an encrypted session exists. You may immediately call L{open_channel} or L{open_session} to get a L{Channel} object, which is used for data transfer. @note: If you fail to supply a password or private key, this method may succeed, but a subsequent L{open_channel} or L{open_session} call may fail because you haven't authenticated yet. @param hostkey: the host key expected from the server, or C{None} if you don't want to do host key verification. @type hostkey: L{PKey<pkey.PKey>} @param username: the username to authenticate as. @type username: str @param password: a password to use for authentication, if you want to use password authentication; otherwise C{None}. @type password: str @param pkey: a private key to use for authentication, if you want to use private key authentication; otherwise C{None}. @type pkey: L{PKey<pkey.PKey>} @raise SSHException: if the SSH2 negotiation fails, the host key supplied by the server is incorrect, or authentication fails.
[ "Negotiate", "an", "SSH2", "session", "and", "optionally", "verify", "the", "server", "s", "host", "key", "and", "authenticate", "using", "a", "password", "or", "private", "key", ".", "This", "is", "a", "shortcut", "for", "L", "{", "start_client", "}", "L", "{", "get_remote_server_key", "}", "and", "L", "{", "Transport", ".", "auth_password", "}", "or", "L", "{", "Transport", ".", "auth_publickey", "}", ".", "Use", "those", "methods", "if", "you", "want", "more", "control", "." ]
python
train
48.551724
mwgielen/jackal
jackal/scripts/services.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/services.py#L21-L34
def overview(): """ Function to create an overview of the services. Will print a list of ports found an the number of times the port was seen. """ search = Service.search() search = search.filter("term", state='open') search.aggs.bucket('port_count', 'terms', field='port', order={'_count': 'desc'}, size=100) \ .metric('unique_count', 'cardinality', field='address') response = search.execute() print_line("Port Count") print_line("---------------") for entry in response.aggregations.port_count.buckets: print_line("{0:<7} {1}".format(entry.key, entry.unique_count.value))
[ "def", "overview", "(", ")", ":", "search", "=", "Service", ".", "search", "(", ")", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "state", "=", "'open'", ")", "search", ".", "aggs", ".", "bucket", "(", "'port_count'", ",", "'terms'", ",", "field", "=", "'port'", ",", "order", "=", "{", "'_count'", ":", "'desc'", "}", ",", "size", "=", "100", ")", ".", "metric", "(", "'unique_count'", ",", "'cardinality'", ",", "field", "=", "'address'", ")", "response", "=", "search", ".", "execute", "(", ")", "print_line", "(", "\"Port Count\"", ")", "print_line", "(", "\"---------------\"", ")", "for", "entry", "in", "response", ".", "aggregations", ".", "port_count", ".", "buckets", ":", "print_line", "(", "\"{0:<7} {1}\"", ".", "format", "(", "entry", ".", "key", ",", "entry", ".", "unique_count", ".", "value", ")", ")" ]
Function to create an overview of the services. Will print a list of ports found and the number of times the port was seen.
[ "Function", "to", "create", "an", "overview", "of", "the", "services", ".", "Will", "print", "a", "list", "of", "ports", "found", "an", "the", "number", "of", "times", "the", "port", "was", "seen", "." ]
python
valid
45.285714