Dataset schema (column name: type, observed value range):

repo: string, lengths 7–54
path: string, lengths 4–192
url: string, lengths 87–284
code: string, lengths 78–104k
code_tokens: list
docstring: string, lengths 1–46.9k
docstring_tokens: list
language: string, 1 class
partition: string, 3 values
uw-it-aca/uw-restclients-sws
uw_sws/term.py
https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/term.py#L74-L88
def get_term_after(aterm):
    """
    Returns a uw_sws.models.Term object,
    for the term after the term given.
    """
    next_year = aterm.year
    if aterm.quarter == "autumn":
        next_quarter = QUARTER_SEQ[0]
    else:
        next_quarter = QUARTER_SEQ[QUARTER_SEQ.index(aterm.quarter) + 1]

    if next_quarter == "winter":
        next_year += 1

    return get_term_by_year_and_quarter(next_year, next_quarter)
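A minimal standalone sketch of the wrap-around logic above, assuming the module's QUARTER_SEQ ordering is ["winter", "spring", "summer", "autumn"] (an assumption; the real lookup goes through get_term_by_year_and_quarter):

QUARTER_SEQ = ["winter", "spring", "summer", "autumn"]  # assumed ordering

def next_year_quarter(year, quarter):
    if quarter == "autumn":
        next_quarter = QUARTER_SEQ[0]  # wrap around to winter...
    else:
        next_quarter = QUARTER_SEQ[QUARTER_SEQ.index(quarter) + 1]
    if next_quarter == "winter":
        year += 1  # ...which falls in the next calendar year
    return year, next_quarter

assert next_year_quarter(2023, "autumn") == (2024, "winter")
assert next_year_quarter(2023, "spring") == (2023, "summer")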
[ "def", "get_term_after", "(", "aterm", ")", ":", "next_year", "=", "aterm", ".", "year", "if", "aterm", ".", "quarter", "==", "\"autumn\"", ":", "next_quarter", "=", "QUARTER_SEQ", "[", "0", "]", "else", ":", "next_quarter", "=", "QUARTER_SEQ", "[", "QUARTER_SEQ", ".", "index", "(", "aterm", ".", "quarter", ")", "+", "1", "]", "if", "next_quarter", "==", "\"winter\"", ":", "next_year", "+=", "1", "return", "get_term_by_year_and_quarter", "(", "next_year", ",", "next_quarter", ")" ]
Returns a uw_sws.models.Term object, for the term after the term given.
[ "Returns", "a", "uw_sws", ".", "models", ".", "Term", "object", "for", "the", "term", "after", "the", "term", "given", "." ]
python
train
psd-tools/psd-tools
src/psd_tools/api/layers.py
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/layers.py#L635-L643
def smart_object(self):
    """
    Associated smart object.

    :return: :py:class:`~psd_tools.api.smart_object.SmartObject`.
    """
    if not hasattr(self, '_smart_object'):
        self._smart_object = SmartObject(self)
    return self._smart_object
[ "def", "smart_object", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_smart_object'", ")", ":", "self", ".", "_smart_object", "=", "SmartObject", "(", "self", ")", "return", "self", ".", "_smart_object" ]
Associated smart object. :return: :py:class:`~psd_tools.api.smart_object.SmartObject`.
[ "Associated", "smart", "object", "." ]
python
train
openstack/horizon
openstack_dashboard/api/neutron.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/neutron.py#L740-L769
def list_targets_by_instance(self, instance_id, target_list=None):
    """Returns a list of FloatingIpTarget objects of FIP association.

    :param instance_id: ID of target VM instance
    :param target_list: (optional) a list returned by list_targets().
        If specified, looking up is done against the specified list
        to save extra API calls to a back-end. Otherwise target list
        is retrieved from a back-end inside the method.
    """
    if target_list is not None:
        # We assume that target_list was returned by list_targets()
        # so we can assume checks for subnet reachability and IP version
        # have been done already. We skip all checks here.
        return [target for target in target_list
                if target['instance_id'] == instance_id]
    else:
        ports = self._target_ports_by_instance(instance_id)
        reachable_subnets = self._get_reachable_subnets(
            ports, fetch_router_ports=True)
        name = self._get_server_name(instance_id)
        targets = []
        for p in ports:
            for ip in p.fixed_ips:
                if ip['subnet_id'] not in reachable_subnets:
                    continue
                # Floating IPs can only target IPv4 addresses.
                if netaddr.IPAddress(ip['ip_address']).version != 4:
                    continue
                targets.append(FloatingIpTarget(p, ip['ip_address'], name))
        return targets
[ "def", "list_targets_by_instance", "(", "self", ",", "instance_id", ",", "target_list", "=", "None", ")", ":", "if", "target_list", "is", "not", "None", ":", "# We assume that target_list was returned by list_targets()", "# so we can assume checks for subnet reachability and IP version", "# have been done already. We skip all checks here.", "return", "[", "target", "for", "target", "in", "target_list", "if", "target", "[", "'instance_id'", "]", "==", "instance_id", "]", "else", ":", "ports", "=", "self", ".", "_target_ports_by_instance", "(", "instance_id", ")", "reachable_subnets", "=", "self", ".", "_get_reachable_subnets", "(", "ports", ",", "fetch_router_ports", "=", "True", ")", "name", "=", "self", ".", "_get_server_name", "(", "instance_id", ")", "targets", "=", "[", "]", "for", "p", "in", "ports", ":", "for", "ip", "in", "p", ".", "fixed_ips", ":", "if", "ip", "[", "'subnet_id'", "]", "not", "in", "reachable_subnets", ":", "continue", "# Floating IPs can only target IPv4 addresses.", "if", "netaddr", ".", "IPAddress", "(", "ip", "[", "'ip_address'", "]", ")", ".", "version", "!=", "4", ":", "continue", "targets", ".", "append", "(", "FloatingIpTarget", "(", "p", ",", "ip", "[", "'ip_address'", "]", ",", "name", ")", ")", "return", "targets" ]
Returns a list of FloatingIpTarget objects of FIP association. :param instance_id: ID of target VM instance :param target_list: (optional) a list returned by list_targets(). If specified, looking up is done against the specified list to save extra API calls to a back-end. Otherwise target list is retrieved from a back-end inside the method.
[ "Returns", "a", "list", "of", "FloatingIpTarget", "objects", "of", "FIP", "association", "." ]
python
train
PmagPy/PmagPy
pmagpy/builder2.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/builder2.py#L924-L954
def write_files(self):
    """
    write all data out into er_* and pmag_* files as appropriate
    """
    warnings = self.validate_data()
    print('-I- Writing all saved data to files')
    if self.measurements:
        self.write_measurements_file()
    for dtype in ['specimen', 'sample', 'site']:
        if self.data_lists[dtype][0]:
            do_pmag = dtype in self.incl_pmag_data
            self.write_magic_file(dtype, do_er=True, do_pmag=do_pmag)
            if not do_pmag:
                pmag_file = os.path.join(self.WD, 'pmag_' + dtype + 's.txt')
                if os.path.isfile(pmag_file):
                    os.remove(pmag_file)
    if self.locations:
        self.write_magic_file('location', do_er=True, do_pmag=False)
    self.write_age_file()
    if self.results:
        self.write_result_file()
    if warnings:
        print('-W- ' + str(warnings))
        return False, warnings
    return True, None
[ "def", "write_files", "(", "self", ")", ":", "warnings", "=", "self", ".", "validate_data", "(", ")", "print", "(", "'-I- Writing all saved data to files'", ")", "if", "self", ".", "measurements", ":", "self", ".", "write_measurements_file", "(", ")", "for", "dtype", "in", "[", "'specimen'", ",", "'sample'", ",", "'site'", "]", ":", "if", "self", ".", "data_lists", "[", "dtype", "]", "[", "0", "]", ":", "do_pmag", "=", "dtype", "in", "self", ".", "incl_pmag_data", "self", ".", "write_magic_file", "(", "dtype", ",", "do_er", "=", "True", ",", "do_pmag", "=", "do_pmag", ")", "if", "not", "do_pmag", ":", "pmag_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "WD", ",", "'pmag_'", "+", "dtype", "+", "'s.txt'", ")", "if", "os", ".", "path", ".", "isfile", "(", "pmag_file", ")", ":", "os", ".", "remove", "(", "pmag_file", ")", "if", "self", ".", "locations", ":", "self", ".", "write_magic_file", "(", "'location'", ",", "do_er", "=", "True", ",", "do_pmag", "=", "False", ")", "self", ".", "write_age_file", "(", ")", "if", "self", ".", "results", ":", "self", ".", "write_result_file", "(", ")", "if", "warnings", ":", "print", "(", "'-W- '", "+", "str", "(", "warnings", ")", ")", "return", "False", ",", "warnings", "return", "True", ",", "None" ]
write all data out into er_* and pmag_* files as appropriate
[ "write", "all", "data", "out", "into", "er_", "*", "and", "pmag_", "*", "files", "as", "appropriate" ]
python
train
pypa/pipenv
pipenv/vendor/requests/cookies.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/cookies.py#L348-L354
def update(self, other):
    """Updates this jar with cookies from another CookieJar or dict-like"""
    if isinstance(other, cookielib.CookieJar):
        for cookie in other:
            self.set_cookie(copy.copy(cookie))
    else:
        super(RequestsCookieJar, self).update(other)
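A short usage sketch: because the CookieJar branch copies each cookie via set_cookie, the two jars do not share cookie objects afterwards.

import requests

jar_a = requests.cookies.RequestsCookieJar()
jar_a.set('session', 'abc123', domain='example.com', path='/')

jar_b = requests.cookies.RequestsCookieJar()
jar_b.update(jar_a)  # each cookie is copied, not shared
assert jar_b.get('session', domain='example.com', path='/') == 'abc123'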
[ "def", "update", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "cookielib", ".", "CookieJar", ")", ":", "for", "cookie", "in", "other", ":", "self", ".", "set_cookie", "(", "copy", ".", "copy", "(", "cookie", ")", ")", "else", ":", "super", "(", "RequestsCookieJar", ",", "self", ")", ".", "update", "(", "other", ")" ]
Updates this jar with cookies from another CookieJar or dict-like
[ "Updates", "this", "jar", "with", "cookies", "from", "another", "CookieJar", "or", "dict", "-", "like" ]
python
train
elastic/elasticsearch-py
elasticsearch/client/xpack/ml.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/ml.py#L350-L369
def get_categories(self, job_id, category_id=None, body=None, params=None):
    """
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html>`_

    :arg job_id: The name of the job
    :arg category_id: The identifier of the category definition of interest
    :arg body: Category selection details if not provided in URI
    :arg from_: skips a number of categories
    :arg size: specifies a max number of categories to get
    """
    if job_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'job_id'.")
    return self.transport.perform_request(
        "GET",
        _make_path(
            "_ml", "anomaly_detectors", job_id, "results", "categories", category_id
        ),
        params=params,
        body=body,
    )
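A hedged usage sketch, calling the method exactly as defined above (the host, job name, and paging values are placeholders):

from elasticsearch import Elasticsearch

es = Elasticsearch(['http://localhost:9200'])  # placeholder endpoint
# `params` is forwarded to the query string by perform_request
resp = es.xpack.ml.get_categories(job_id='my-anomaly-job',
                                  params={'from': 0, 'size': 10})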
[ "def", "get_categories", "(", "self", ",", "job_id", ",", "category_id", "=", "None", ",", "body", "=", "None", ",", "params", "=", "None", ")", ":", "if", "job_id", "in", "SKIP_IN_PATH", ":", "raise", "ValueError", "(", "\"Empty value passed for a required argument 'job_id'.\"", ")", "return", "self", ".", "transport", ".", "perform_request", "(", "\"GET\"", ",", "_make_path", "(", "\"_ml\"", ",", "\"anomaly_detectors\"", ",", "job_id", ",", "\"results\"", ",", "\"categories\"", ",", "category_id", ")", ",", "params", "=", "params", ",", "body", "=", "body", ",", ")" ]
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html>`_ :arg job_id: The name of the job :arg category_id: The identifier of the category definition of interest :arg body: Category selection details if not provided in URI :arg from_: skips a number of categories :arg size: specifies a max number of categories to get
[ "<http", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "ml", "-", "get", "-", "category", ".", "html", ">", "_" ]
python
train
biosustain/optlang
optlang/scipy_interface.py
https://github.com/biosustain/optlang/blob/13673ac26f6b3ba37a2ef392489722c52e3c5ff1/optlang/scipy_interface.py#L154-L164
def remove_variable(self, name):
    """Remove a variable from the problem."""
    index = self._get_var_index(name)
    # Remove from matrix
    self._A = np.delete(self.A, index, 1)
    # Remove from bounds
    del self.bounds[name]
    # Remove from var list
    del self._variables[name]
    self._update_variable_indices()
    self._reset_solution()
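The np.delete(..., index, 1) call drops the variable's column from the constraint matrix; a tiny standalone demonstration of that axis choice:

import numpy as np

A = np.array([[1, 2, 3],
              [4, 5, 6]])
# axis=1 removes a column, matching "one variable owns one column of A"
assert np.delete(A, 1, 1).tolist() == [[1, 3], [4, 6]]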
[ "def", "remove_variable", "(", "self", ",", "name", ")", ":", "index", "=", "self", ".", "_get_var_index", "(", "name", ")", "# Remove from matrix", "self", ".", "_A", "=", "np", ".", "delete", "(", "self", ".", "A", ",", "index", ",", "1", ")", "# Remove from bounds", "del", "self", ".", "bounds", "[", "name", "]", "# Remove from var list", "del", "self", ".", "_variables", "[", "name", "]", "self", ".", "_update_variable_indices", "(", ")", "self", ".", "_reset_solution", "(", ")" ]
Remove a variable from the problem.
[ "Remove", "a", "variable", "from", "the", "problem", "." ]
python
train
SatelliteQE/nailgun
docs/create_organization_nailgun_v2.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/docs/create_organization_nailgun_v2.py#L8-L12
def main():
    """Create an organization, print out its attributes and delete it."""
    org = Organization(name='junk org').create()
    pprint(org.get_values())  # e.g. {'name': 'junk org', …}
    org.delete()
[ "def", "main", "(", ")", ":", "org", "=", "Organization", "(", "name", "=", "'junk org'", ")", ".", "create", "(", ")", "pprint", "(", "org", ".", "get_values", "(", ")", ")", "# e.g. {'name': 'junk org', …}", "org", ".", "delete", "(", ")" ]
Create an organization, print out its attributes and delete it.
[ "Create", "an", "organization", "print", "out", "its", "attributes", "and", "delete", "it", "." ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/subscribe/subscribe.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/subscribe/subscribe.py#L191-L206
def notify(self, data):
    """Notify subscribers that data was received"""
    triggered_channels = []
    for channel_name, items in data.items():
        for item in items or []:
            LOG.debug('notify received: %s', item)
            try:
                # some channels return strings rather than objects (e.g. de-registrations),
                # normalize them here
                item = {'value': item} if isinstance(item, six.string_types) else dict(item)
                # inject the channel name to the data (so channels can filter on it)
                item['channel'] = channel_name
                triggered_channels.extend(list(self._notify_single_item(item)))
            except Exception:  # noqa
                LOG.exception('Subscription notification failed')
    return triggered_channels
[ "def", "notify", "(", "self", ",", "data", ")", ":", "triggered_channels", "=", "[", "]", "for", "channel_name", ",", "items", "in", "data", ".", "items", "(", ")", ":", "for", "item", "in", "items", "or", "[", "]", ":", "LOG", ".", "debug", "(", "'notify received: %s'", ",", "item", ")", "try", ":", "# some channels return strings rather than objects (e.g. de-registrations),", "# normalize them here", "item", "=", "{", "'value'", ":", "item", "}", "if", "isinstance", "(", "item", ",", "six", ".", "string_types", ")", "else", "dict", "(", "item", ")", "# inject the channel name to the data (so channels can filter on it)", "item", "[", "'channel'", "]", "=", "channel_name", "triggered_channels", ".", "extend", "(", "list", "(", "self", ".", "_notify_single_item", "(", "item", ")", ")", ")", "except", "Exception", ":", "# noqa", "LOG", ".", "exception", "(", "'Subscription notification failed'", ")", "return", "triggered_channels" ]
Notify subscribers that data was received
[ "Notify", "subscribers", "that", "data", "was", "received" ]
python
train
cackharot/suds-py3
suds/builder.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/builder.py#L96-L101
def add_attributes(self, data, type):
    """ add required attributes """
    for attr, ancestry in type.attributes():
        name = '_%s' % attr.name
        value = attr.get_default()
        setattr(data, name, value)
[ "def", "add_attributes", "(", "self", ",", "data", ",", "type", ")", ":", "for", "attr", ",", "ancestry", "in", "type", ".", "attributes", "(", ")", ":", "name", "=", "'_%s'", "%", "attr", ".", "name", "value", "=", "attr", ".", "get_default", "(", ")", "setattr", "(", "data", ",", "name", ",", "value", ")" ]
add required attributes
[ "add", "required", "attributes" ]
python
train
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/parser.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/parser.py#L69-L74
def _get_byte_parser(self):
    """Create a ByteParser on demand."""
    if not self._byte_parser:
        self._byte_parser = \
            ByteParser(text=self.text, filename=self.filename)
    return self._byte_parser
[ "def", "_get_byte_parser", "(", "self", ")", ":", "if", "not", "self", ".", "_byte_parser", ":", "self", ".", "_byte_parser", "=", "ByteParser", "(", "text", "=", "self", ".", "text", ",", "filename", "=", "self", ".", "filename", ")", "return", "self", ".", "_byte_parser" ]
Create a ByteParser on demand.
[ "Create", "a", "ByteParser", "on", "demand", "." ]
python
test
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py#L484-L498
def firmware_download_input_protocol_type_sftp_protocol_sftp_port(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    firmware_download = ET.Element("firmware_download")
    config = firmware_download
    input = ET.SubElement(firmware_download, "input")
    protocol_type = ET.SubElement(input, "protocol-type")
    sftp_protocol = ET.SubElement(protocol_type, "sftp-protocol")
    sftp = ET.SubElement(sftp_protocol, "sftp")
    port = ET.SubElement(sftp, "port")
    port.text = kwargs.pop('port')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "firmware_download_input_protocol_type_sftp_protocol_sftp_port", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "firmware_download", "=", "ET", ".", "Element", "(", "\"firmware_download\"", ")", "config", "=", "firmware_download", "input", "=", "ET", ".", "SubElement", "(", "firmware_download", ",", "\"input\"", ")", "protocol_type", "=", "ET", ".", "SubElement", "(", "input", ",", "\"protocol-type\"", ")", "sftp_protocol", "=", "ET", ".", "SubElement", "(", "protocol_type", ",", "\"sftp-protocol\"", ")", "sftp", "=", "ET", ".", "SubElement", "(", "sftp_protocol", ",", "\"sftp\"", ")", "port", "=", "ET", ".", "SubElement", "(", "sftp", ",", "\"port\"", ")", "port", ".", "text", "=", "kwargs", ".", "pop", "(", "'port'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
amzn/ion-python
amazon/ion/writer.py
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/writer.py#L117-L131
def _drain(writer, ion_event):
    """Drain the writer of its pending write events.

    Args:
        writer (Coroutine): A writer co-routine.
        ion_event (amazon.ion.core.IonEvent): The first event to apply to the writer.

    Yields:
        DataEvent: Yields each pending data event.
    """
    result_event = _WRITE_EVENT_HAS_PENDING_EMPTY
    while result_event.type is WriteEventType.HAS_PENDING:
        result_event = writer.send(ion_event)
        ion_event = None
        yield result_event
[ "def", "_drain", "(", "writer", ",", "ion_event", ")", ":", "result_event", "=", "_WRITE_EVENT_HAS_PENDING_EMPTY", "while", "result_event", ".", "type", "is", "WriteEventType", ".", "HAS_PENDING", ":", "result_event", "=", "writer", ".", "send", "(", "ion_event", ")", "ion_event", "=", "None", "yield", "result_event" ]
Drain the writer of its pending write events. Args: writer (Coroutine): A writer co-routine. ion_event (amazon.ion.core.IonEvent): The first event to apply to the writer. Yields: DataEvent: Yields each pending data event.
[ "Drain", "the", "writer", "of", "its", "pending", "write", "events", "." ]
python
train
fastai/fastai
fastai/vision/gan.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L90-L104
def on_train_begin(self, **kwargs):
    "Create the optimizers for the generator and critic if necessary, initialize smootheners."
    if not getattr(self,'opt_gen',None):
        self.opt_gen = self.opt.new([nn.Sequential(*flatten_model(self.generator))])
    else: self.opt_gen.lr,self.opt_gen.wd = self.opt.lr,self.opt.wd
    if not getattr(self,'opt_critic',None):
        self.opt_critic = self.opt.new([nn.Sequential(*flatten_model(self.critic))])
    else: self.opt_critic.lr,self.opt_critic.wd = self.opt.lr,self.opt.wd
    self.gen_mode = self.gen_first
    self.switch(self.gen_mode)
    self.closses,self.glosses = [],[]
    self.smoothenerG,self.smoothenerC = SmoothenValue(self.beta),SmoothenValue(self.beta)
    #self.recorder.no_val=True
    self.recorder.add_metric_names(['gen_loss', 'disc_loss'])
    self.imgs,self.titles = [],[]
[ "def", "on_train_begin", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "not", "getattr", "(", "self", ",", "'opt_gen'", ",", "None", ")", ":", "self", ".", "opt_gen", "=", "self", ".", "opt", ".", "new", "(", "[", "nn", ".", "Sequential", "(", "*", "flatten_model", "(", "self", ".", "generator", ")", ")", "]", ")", "else", ":", "self", ".", "opt_gen", ".", "lr", ",", "self", ".", "opt_gen", ".", "wd", "=", "self", ".", "opt", ".", "lr", ",", "self", ".", "opt", ".", "wd", "if", "not", "getattr", "(", "self", ",", "'opt_critic'", ",", "None", ")", ":", "self", ".", "opt_critic", "=", "self", ".", "opt", ".", "new", "(", "[", "nn", ".", "Sequential", "(", "*", "flatten_model", "(", "self", ".", "critic", ")", ")", "]", ")", "else", ":", "self", ".", "opt_critic", ".", "lr", ",", "self", ".", "opt_critic", ".", "wd", "=", "self", ".", "opt", ".", "lr", ",", "self", ".", "opt", ".", "wd", "self", ".", "gen_mode", "=", "self", ".", "gen_first", "self", ".", "switch", "(", "self", ".", "gen_mode", ")", "self", ".", "closses", ",", "self", ".", "glosses", "=", "[", "]", ",", "[", "]", "self", ".", "smoothenerG", ",", "self", ".", "smoothenerC", "=", "SmoothenValue", "(", "self", ".", "beta", ")", ",", "SmoothenValue", "(", "self", ".", "beta", ")", "#self.recorder.no_val=True", "self", ".", "recorder", ".", "add_metric_names", "(", "[", "'gen_loss'", ",", "'disc_loss'", "]", ")", "self", ".", "imgs", ",", "self", ".", "titles", "=", "[", "]", ",", "[", "]" ]
Create the optimizers for the generator and critic if necessary, initialize smootheners.
[ "Create", "the", "optimizers", "for", "the", "generator", "and", "critic", "if", "necessary", "initialize", "smootheners", "." ]
python
train
getsentry/sentry-python
sentry_sdk/hub.py
https://github.com/getsentry/sentry-python/blob/a1d77722bdce0b94660ebf50b5c4a4645916d084/sentry_sdk/hub.py#L393-L410
def configure_scope(self, callback=None):  # noqa
    """Reconfigures the scope."""
    client, scope = self._stack[-1]
    if callback is not None:
        if client is not None:
            callback(scope)
        return None

    @contextmanager
    def inner():
        if client is not None:
            yield scope
        else:
            yield Scope()

    return inner()
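Both calling conventions this method supports, as a hedged sketch (the tag name is a placeholder):

import sentry_sdk

sentry_sdk.init()  # no DSN: events are discarded, but scopes still work

# Context-manager form
with sentry_sdk.configure_scope() as scope:
    scope.set_tag('flow', 'checkout')

# Callback form
sentry_sdk.configure_scope(lambda scope: scope.set_tag('flow', 'checkout'))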
[ "def", "configure_scope", "(", "self", ",", "callback", "=", "None", ")", ":", "# noqa", "client", ",", "scope", "=", "self", ".", "_stack", "[", "-", "1", "]", "if", "callback", "is", "not", "None", ":", "if", "client", "is", "not", "None", ":", "callback", "(", "scope", ")", "return", "None", "@", "contextmanager", "def", "inner", "(", ")", ":", "if", "client", "is", "not", "None", ":", "yield", "scope", "else", ":", "yield", "Scope", "(", ")", "return", "inner", "(", ")" ]
Reconfigures the scope.
[ "Reconfigures", "the", "scope", "." ]
python
train
seperman/deepdiff
deepdiff/diff.py
https://github.com/seperman/deepdiff/blob/a66879190fadc671632f154c1fcb82f5c3cef800/deepdiff/diff.py#L564-L591
def __diff_numbers(self, level):
    """Diff Numbers"""
    t1_type = "number" if self.ignore_numeric_type_changes else level.t1.__class__.__name__
    t2_type = "number" if self.ignore_numeric_type_changes else level.t2.__class__.__name__

    if self.significant_digits is None:
        if level.t1 != level.t2:
            self.__report_result('values_changed', level)
    else:
        # Bernhard10: I use string formatting for comparison, to be consistent with use cases where
        # data is read from files that were previously written from python and
        # to be consistent with on-screen representation of numbers.
        # Other options would be abs(t1-t2)<10**-self.significant_digits
        # or math.is_close (python3.5+)
        # Note that abs(3.25-3.251) = 0.0009999999999998899 < 0.001
        # Note also that "{:.3f}".format(1.1135) = 1.113, but "{:.3f}".format(1.11351) = 1.114
        # For Decimals, format seems to round 2.5 to 2 and 3.5 to 4 (to closest even number)
        t1_s = self.number_to_string(level.t1,
                                     significant_digits=self.significant_digits,
                                     number_format_notation=self.number_format_notation)
        t2_s = self.number_to_string(level.t2,
                                     significant_digits=self.significant_digits,
                                     number_format_notation=self.number_format_notation)

        t1_s = KEY_TO_VAL_STR.format(t1_type, t1_s)
        t2_s = KEY_TO_VAL_STR.format(t2_type, t2_s)
        if t1_s != t2_s:
            self.__report_result('values_changed', level)
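The claims in the comments can be checked directly; a minimal standalone illustration of comparison-by-formatting at three decimal places:

assert '{:.3f}'.format(3.25) != '{:.3f}'.format(3.251)  # '3.250' vs '3.251'
assert '{:.3f}'.format(1.1135) == '1.113'   # the float is stored slightly low
assert '{:.3f}'.format(1.11351) == '1.114'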
[ "def", "__diff_numbers", "(", "self", ",", "level", ")", ":", "t1_type", "=", "\"number\"", "if", "self", ".", "ignore_numeric_type_changes", "else", "level", ".", "t1", ".", "__class__", ".", "__name__", "t2_type", "=", "\"number\"", "if", "self", ".", "ignore_numeric_type_changes", "else", "level", ".", "t2", ".", "__class__", ".", "__name__", "if", "self", ".", "significant_digits", "is", "None", ":", "if", "level", ".", "t1", "!=", "level", ".", "t2", ":", "self", ".", "__report_result", "(", "'values_changed'", ",", "level", ")", "else", ":", "# Bernhard10: I use string formatting for comparison, to be consistent with usecases where", "# data is read from files that were previousely written from python and", "# to be consistent with on-screen representation of numbers.", "# Other options would be abs(t1-t2)<10**-self.significant_digits", "# or math.is_close (python3.5+)", "# Note that abs(3.25-3.251) = 0.0009999999999998899 < 0.001", "# Note also that \"{:.3f}\".format(1.1135) = 1.113, but \"{:.3f}\".format(1.11351) = 1.114", "# For Decimals, format seems to round 2.5 to 2 and 3.5 to 4 (to closest even number)", "t1_s", "=", "self", ".", "number_to_string", "(", "level", ".", "t1", ",", "significant_digits", "=", "self", ".", "significant_digits", ",", "number_format_notation", "=", "self", ".", "number_format_notation", ")", "t2_s", "=", "self", ".", "number_to_string", "(", "level", ".", "t2", ",", "significant_digits", "=", "self", ".", "significant_digits", ",", "number_format_notation", "=", "self", ".", "number_format_notation", ")", "t1_s", "=", "KEY_TO_VAL_STR", ".", "format", "(", "t1_type", ",", "t1_s", ")", "t2_s", "=", "KEY_TO_VAL_STR", ".", "format", "(", "t2_type", ",", "t2_s", ")", "if", "t1_s", "!=", "t2_s", ":", "self", ".", "__report_result", "(", "'values_changed'", ",", "level", ")" ]
Diff Numbers
[ "Diff", "Numbers" ]
python
train
pipermerriam/flex
flex/core.py
https://github.com/pipermerriam/flex/blob/233f8149fb851a6255753bcec948cb6fefb2723b/flex/core.py#L141-L168
def validate_api_call(schema, raw_request, raw_response):
    """
    Validate the request/response cycle of an api call against a swagger
    schema.  Request/Response objects from the `requests` and `urllib`
    library are supported.
    """
    request = normalize_request(raw_request)

    with ErrorDict() as errors:
        try:
            validate_request(
                request=request,
                schema=schema,
            )
        except ValidationError as err:
            errors['request'].add_error(err.messages or getattr(err, 'detail'))
            return

        response = normalize_response(raw_response, raw_request)

        try:
            validate_response(
                response=response,
                request_method=request.method,
                schema=schema
            )
        except ValidationError as err:
            errors['response'].add_error(err.messages or getattr(err, 'detail'))
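A hedged usage sketch with the requests library (the schema file and endpoint are placeholders):

import requests
from flex.core import load, validate_api_call

schema = load('swagger.yaml')  # placeholder schema document
response = requests.get('http://example.com/api/pets')  # placeholder endpoint
# A requests Response carries its PreparedRequest on `.request`
validate_api_call(schema, raw_request=response.request, raw_response=response)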
[ "def", "validate_api_call", "(", "schema", ",", "raw_request", ",", "raw_response", ")", ":", "request", "=", "normalize_request", "(", "raw_request", ")", "with", "ErrorDict", "(", ")", "as", "errors", ":", "try", ":", "validate_request", "(", "request", "=", "request", ",", "schema", "=", "schema", ",", ")", "except", "ValidationError", "as", "err", ":", "errors", "[", "'request'", "]", ".", "add_error", "(", "err", ".", "messages", "or", "getattr", "(", "err", ",", "'detail'", ")", ")", "return", "response", "=", "normalize_response", "(", "raw_response", ",", "raw_request", ")", "try", ":", "validate_response", "(", "response", "=", "response", ",", "request_method", "=", "request", ".", "method", ",", "schema", "=", "schema", ")", "except", "ValidationError", "as", "err", ":", "errors", "[", "'response'", "]", ".", "add_error", "(", "err", ".", "messages", "or", "getattr", "(", "err", ",", "'detail'", ")", ")" ]
Validate the request/response cycle of an api call against a swagger schema. Request/Response objects from the `requests` and `urllib` library are supported.
[ "Validate", "the", "request", "/", "response", "cycle", "of", "an", "api", "call", "against", "a", "swagger", "schema", ".", "Request", "/", "Response", "objects", "from", "the", "requests", "and", "urllib", "library", "are", "supported", "." ]
python
train
cgarciae/phi
phi/dsl.py
https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L640-L644
def Then0(self, f, *args, **kwargs):
    """
    `Then0(f, ...)` is equivalent to `ThenAt(0, f, ...)`. Checkout `phi.builder.Builder.ThenAt` for more information.
    """
    return self.ThenAt(0, f, *args, **kwargs)
[ "def", "Then0", "(", "self", ",", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "ThenAt", "(", "0", ",", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
`Then0(f, ...)` is equivalent to `ThenAt(0, f, ...)`. Checkout `phi.builder.Builder.ThenAt` for more information.
[ "Then0", "(", "f", "...", ")", "is", "equivalent", "to", "ThenAt", "(", "0", "f", "...", ")", ".", "Checkout", "phi", ".", "builder", ".", "Builder", ".", "ThenAt", "for", "more", "information", "." ]
python
train
EventTeam/beliefs
src/beliefs/cells/bools.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/bools.py#L44-L49
def is_entailed_by(self, other):
    """ If the other is as or more specific than self"""
    other = BoolCell.coerce(other)
    if self.value == U or other.value == self.value:
        return True
    return False
[ "def", "is_entailed_by", "(", "self", ",", "other", ")", ":", "other", "=", "BoolCell", ".", "coerce", "(", "other", ")", "if", "self", ".", "value", "==", "U", "or", "other", ".", "value", "==", "self", ".", "value", ":", "return", "True", "return", "False" ]
If the other is as or more specific than self
[ "If", "the", "other", "is", "as", "or", "more", "specific", "than", "self" ]
python
train
mrstephenneal/mysql-toolkit
mysql/toolkit/components/operations/compare.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/operations/compare.py#L110-L122
def _schema_getter(self, db):
    """Retrieve a dictionary representing a database's data schema."""
    # Change DB connection if needed
    if self.database != db:
        self.change_db(db)

    schema_dict = {tbl: self.get_schema(tbl) for tbl in self.tables}
    schema_lst = []
    for table, schema in schema_dict.items():
        for col in schema:
            col.insert(0, table)
            schema_lst.append(col)
    return schema_lst
[ "def", "_schema_getter", "(", "self", ",", "db", ")", ":", "# Change DB connection if needed", "if", "self", ".", "database", "!=", "db", ":", "self", ".", "change_db", "(", "db", ")", "schema_dict", "=", "{", "tbl", ":", "self", ".", "get_schema", "(", "tbl", ")", "for", "tbl", "in", "self", ".", "tables", "}", "schema_lst", "=", "[", "]", "for", "table", ",", "schema", "in", "schema_dict", ".", "items", "(", ")", ":", "for", "col", "in", "schema", ":", "col", ".", "insert", "(", "0", ",", "table", ")", "schema_lst", ".", "append", "(", "col", ")", "return", "schema_lst" ]
Retrieve a dictionary representing a database's data schema.
[ "Retrieve", "a", "dictionary", "representing", "a", "database", "s", "data", "schema", "." ]
python
train
Fizzadar/pyinfra
pyinfra/facts/util/files.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/facts/util/files.py#L57-L71
def _parse_mode(mode):
    '''
    Converts ls mode output (rwxrwxrwx) -> integer (755).
    '''
    result = ''
    # owner, group, world
    for group in [mode[0:3], mode[3:6], mode[6:9]]:
        if group in SYMBOL_TO_OCTAL_PERMISSIONS:
            result = '{0}{1}'.format(result, SYMBOL_TO_OCTAL_PERMISSIONS[group])
        else:
            result = '{0}0'.format(result)

    # Return as an integer
    return int(result)
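A self-contained sketch of the same conversion; the mapping below is an assumption for illustration (pyinfra defines its own SYMBOL_TO_OCTAL_PERMISSIONS table):

# Hypothetical subset of the symbol -> octal-digit table
SYMBOL_TO_OCTAL_PERMISSIONS = {
    'rwx': '7', 'rw-': '6', 'r-x': '5', 'r--': '4',
    '-wx': '3', '-w-': '2', '--x': '1',
}

def parse_mode(mode):
    # owner, group, world triplets; unknown triplets fall back to '0'
    digits = [SYMBOL_TO_OCTAL_PERMISSIONS.get(mode[i:i + 3], '0')
              for i in (0, 3, 6)]
    return int(''.join(digits))

assert parse_mode('rwxr-xr-x') == 755
assert parse_mode('rw-r--r--') == 644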
[ "def", "_parse_mode", "(", "mode", ")", ":", "result", "=", "''", "# owner, group, world", "for", "group", "in", "[", "mode", "[", "0", ":", "3", "]", ",", "mode", "[", "3", ":", "6", "]", ",", "mode", "[", "6", ":", "9", "]", "]", ":", "if", "group", "in", "SYMBOL_TO_OCTAL_PERMISSIONS", ":", "result", "=", "'{0}{1}'", ".", "format", "(", "result", ",", "SYMBOL_TO_OCTAL_PERMISSIONS", "[", "group", "]", ")", "else", ":", "result", "=", "'{0}0'", ".", "format", "(", "result", ")", "# Return as an integer", "return", "int", "(", "result", ")" ]
Converts ls mode output (rwxrwxrwx) -> integer (755).
[ "Converts", "ls", "mode", "output", "(", "rwxrwxrwx", ")", "-", ">", "integer", "(", "755", ")", "." ]
python
train
Erotemic/ubelt
ubelt/util_hash.py
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_hash.py#L181-L223
def _rectify_hasher(hasher):
    """
    Convert a string-based key into a hasher class

    Notes:
        In terms of speed on 64bit systems, sha1 is the fastest followed by
        md5 and sha512. The slowest algorithm is sha256. If xxhash is
        installed the fastest algorithm is xxh64.

    Example:
        >>> assert _rectify_hasher(NoParam) is DEFAULT_HASHER
        >>> assert _rectify_hasher('sha1') is hashlib.sha1
        >>> assert _rectify_hasher('sha256') is hashlib.sha256
        >>> assert _rectify_hasher('sha512') is hashlib.sha512
        >>> assert _rectify_hasher('md5') is hashlib.md5
        >>> assert _rectify_hasher(hashlib.sha1) is hashlib.sha1
        >>> assert _rectify_hasher(hashlib.sha1())().name == 'sha1'
        >>> import pytest
        >>> assert pytest.raises(KeyError, _rectify_hasher, '42')
        >>> #assert pytest.raises(TypeError, _rectify_hasher, object)
        >>> if xxhash:
        >>>     assert _rectify_hasher('xxh64') is xxhash.xxh64
        >>>     assert _rectify_hasher('xxh32') is xxhash.xxh32
    """
    if xxhash is not None:  # pragma: nobranch
        if hasher in {'xxh32', 'xx32', 'xxhash'}:
            return xxhash.xxh32
        if hasher in {'xxh64', 'xx64'}:
            return xxhash.xxh64
    if hasher is NoParam or hasher == 'default':
        hasher = DEFAULT_HASHER
    elif isinstance(hasher, six.string_types):
        if hasher not in hashlib.algorithms_available:
            raise KeyError('unknown hasher: {}'.format(hasher))
        else:
            hasher = getattr(hashlib, hasher)
    elif isinstance(hasher, HASH):
        # by default the result of this function is a class we will make an
        # instance of, if we already have an instance, wrap it in a callable
        # so the external syntax does not need to change.
        return lambda: hasher
    return hasher
[ "def", "_rectify_hasher", "(", "hasher", ")", ":", "if", "xxhash", "is", "not", "None", ":", "# pragma: nobranch", "if", "hasher", "in", "{", "'xxh32'", ",", "'xx32'", ",", "'xxhash'", "}", ":", "return", "xxhash", ".", "xxh32", "if", "hasher", "in", "{", "'xxh64'", ",", "'xx64'", "}", ":", "return", "xxhash", ".", "xxh64", "if", "hasher", "is", "NoParam", "or", "hasher", "==", "'default'", ":", "hasher", "=", "DEFAULT_HASHER", "elif", "isinstance", "(", "hasher", ",", "six", ".", "string_types", ")", ":", "if", "hasher", "not", "in", "hashlib", ".", "algorithms_available", ":", "raise", "KeyError", "(", "'unknown hasher: {}'", ".", "format", "(", "hasher", ")", ")", "else", ":", "hasher", "=", "getattr", "(", "hashlib", ",", "hasher", ")", "elif", "isinstance", "(", "hasher", ",", "HASH", ")", ":", "# by default the result of this function is a class we will make an", "# instance of, if we already have an instance, wrap it in a callable", "# so the external syntax does not need to change.", "return", "lambda", ":", "hasher", "return", "hasher" ]
Convert a string-based key into a hasher class Notes: In terms of speed on 64bit systems, sha1 is the fastest followed by md5 and sha512. The slowest algorithm is sha256. If xxhash is installed the fastest algorithm is xxh64. Example: >>> assert _rectify_hasher(NoParam) is DEFAULT_HASHER >>> assert _rectify_hasher('sha1') is hashlib.sha1 >>> assert _rectify_hasher('sha256') is hashlib.sha256 >>> assert _rectify_hasher('sha512') is hashlib.sha512 >>> assert _rectify_hasher('md5') is hashlib.md5 >>> assert _rectify_hasher(hashlib.sha1) is hashlib.sha1 >>> assert _rectify_hasher(hashlib.sha1())().name == 'sha1' >>> import pytest >>> assert pytest.raises(KeyError, _rectify_hasher, '42') >>> #assert pytest.raises(TypeError, _rectify_hasher, object) >>> if xxhash: >>> assert _rectify_hasher('xxh64') is xxhash.xxh64 >>> assert _rectify_hasher('xxh32') is xxhash.xxh32
[ "Convert", "a", "string", "-", "based", "key", "into", "a", "hasher", "class" ]
python
valid
aws/sagemaker-containers
src/sagemaker_containers/_mapping.py
https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_mapping.py#L144-L151
def properties(self):  # type: () -> list
    """
    Returns:
        (list[str]) List of public properties
    """
    _type = type(self)
    return [_property for _property in dir(_type) if self._is_property(_property)]
[ "def", "properties", "(", "self", ")", ":", "# type: () -> list", "_type", "=", "type", "(", "self", ")", "return", "[", "_property", "for", "_property", "in", "dir", "(", "_type", ")", "if", "self", ".", "_is_property", "(", "_property", ")", "]" ]
Returns: (list[str]) List of public properties
[ "Returns", ":", "(", "list", "[", "str", "]", ")", "List", "of", "public", "properties" ]
python
train
ababic/django-cogwheels
cogwheels/helpers/settings.py
https://github.com/ababic/django-cogwheels/blob/f185245f6ea1d6a2c23b94ff7304c1594049ca56/cogwheels/helpers/settings.py#L133-L155
def _load_defaults(self):
    """
    Called by ``__init__()`` to create a dictionary of the relevant values
    from the associated defaults module, and save it to the object's
    ``_defaults`` attribute to improve lookup performance. Only variables
    with upper-case names are included.

    :raises: ImportError

    It is assumed that the defaults module is defined in the same directory
    as ``settings.py`` where the settings helper class is defined. But, in
    cases where this differs, developers can specify an alternative import
    path using the ``defaults_path`` class attribute for their helper class.
    """
    self._defaults_module_path = self.defaults_path or \
        '.'.join(self.__module_path_split[:-1]) + ".defaults"
    module = self._do_import(self._defaults_module_path)
    self._defaults = {
        k: v for k, v in module.__dict__.items()
        if k.isupper()
    }
[ "def", "_load_defaults", "(", "self", ")", ":", "self", ".", "_defaults_module_path", "=", "self", ".", "defaults_path", "or", "'.'", ".", "join", "(", "self", ".", "__module_path_split", "[", ":", "-", "1", "]", ")", "+", "\".defaults\"", "module", "=", "self", ".", "_do_import", "(", "self", ".", "_defaults_module_path", ")", "self", ".", "_defaults", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "module", ".", "__dict__", ".", "items", "(", ")", "if", "k", ".", "isupper", "(", ")", "}" ]
Called by ``__init__()`` to create a dictionary of the relevant values from the associated defaults module, and save it to the object's ``_defaults`` attribute to improve lookup performance. Only variables with upper-case names are included. :raises: ImportError It is assumed that the defaults module is defined in the same directory as ``settings.py`` where the settings helper class is defined. But, in cases where this differs, developers can specify an alternative import path using the ``defaults_path`` class attribute for their helper class.
[ "Called", "by", "__init__", "()", "to", "create", "a", "dictionary", "of", "the", "relevant", "values", "from", "the", "associated", "defaults", "module", "and", "save", "it", "to", "the", "object", "s", "_defaults", "attribute", "to", "improve", "lookup", "performance", ".", "Only", "variables", "with", "upper", "-", "case", "names", "are", "included", "." ]
python
train
googledatalab/pydatalab
solutionbox/image_classification/mltoolbox/image/classification/_model.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_model.py#L348-L360
def format_metric_values(self, metric_values):
    """Formats metric values - used for logging purpose."""
    # Early in training, metric_values may actually be None.
    loss_str = 'N/A'
    accuracy_str = 'N/A'
    try:
        loss_str = 'loss: %.3f' % metric_values[0]
        accuracy_str = 'accuracy: %.3f' % metric_values[1]
    except (TypeError, IndexError):
        pass
    return '%s, %s' % (loss_str, accuracy_str)
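Because the two format strings are attempted in order, a partial metric list still yields a partial report; a standalone sketch of the same behaviour:

def format_metric_values(metric_values):
    loss_str, accuracy_str = 'N/A', 'N/A'
    try:
        loss_str = 'loss: %.3f' % metric_values[0]
        accuracy_str = 'accuracy: %.3f' % metric_values[1]
    except (TypeError, IndexError):
        pass
    return '%s, %s' % (loss_str, accuracy_str)

assert format_metric_values([0.532, 0.91]) == 'loss: 0.532, accuracy: 0.910'
assert format_metric_values(None) == 'N/A, N/A'            # early in training
assert format_metric_values([0.532]) == 'loss: 0.532, N/A'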
[ "def", "format_metric_values", "(", "self", ",", "metric_values", ")", ":", "# Early in training, metric_values may actually be None.", "loss_str", "=", "'N/A'", "accuracy_str", "=", "'N/A'", "try", ":", "loss_str", "=", "'loss: %.3f'", "%", "metric_values", "[", "0", "]", "accuracy_str", "=", "'accuracy: %.3f'", "%", "metric_values", "[", "1", "]", "except", "(", "TypeError", ",", "IndexError", ")", ":", "pass", "return", "'%s, %s'", "%", "(", "loss_str", ",", "accuracy_str", ")" ]
Formats metric values - used for logging purpose.
[ "Formats", "metric", "values", "-", "used", "for", "logging", "purpose", "." ]
python
train
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L45-L104
def _add_countriesdata(cls, iso3, country):
    # type: (str, hxl.Row) -> None
    """
    Set up countries data from data in form provided by UNStats and World Bank

    Args:
        iso3 (str): ISO3 code for country
        country (hxl.Row): Country information

    Returns:
        None
    """
    countryname = country.get('#country+name+preferred')
    cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
    iso2 = country.get('#country+code+v_iso2')
    if iso2:
        cls._countriesdata['iso2iso3'][iso2] = iso3
        # different types so keys won't clash
        cls._countriesdata['iso2iso3'][iso3] = iso2
    m49 = country.get('#country+code+num+v_m49')
    if m49:
        m49 = int(m49)
        cls._countriesdata['m49iso3'][m49] = iso3
        # different types so keys won't clash
        cls._countriesdata['m49iso3'][iso3] = m49
    cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
    regionname = country.get('#region+main+name+preferred')
    sub_regionname = country.get('#region+sub+name+preferred')
    intermediate_regionname = country.get('#region+intermediate+name+preferred')
    regionid = country.get('#region+main+code')
    if regionid:
        regionid = int(regionid)
    sub_regionid = country.get('#region+sub+code')
    if sub_regionid:
        sub_regionid = int(sub_regionid)
    intermediate_regionid = country.get('#region+intermediate+code')
    if intermediate_regionid:
        intermediate_regionid = int(intermediate_regionid)

    # region, subregion and intermediate region codes do not clash so only need one dict
    def add_country_to_set(colname, idval, iso3):
        value = cls._countriesdata[colname].get(idval)
        if value is None:
            value = set()
            cls._countriesdata['regioncodes2countries'][idval] = value
        value.add(iso3)

    if regionname:
        add_country_to_set('regioncodes2countries', regionid, iso3)
        cls._countriesdata['regioncodes2names'][regionid] = regionname
        cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
    if sub_regionname:
        add_country_to_set('regioncodes2countries', sub_regionid, iso3)
        cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
        cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
    if intermediate_regionname:
        add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
        cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
        cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
            intermediate_regionid
[ "def", "_add_countriesdata", "(", "cls", ",", "iso3", ",", "country", ")", ":", "# type: (str, hxl.Row) -> None", "countryname", "=", "country", ".", "get", "(", "'#country+name+preferred'", ")", "cls", ".", "_countriesdata", "[", "'countrynames2iso3'", "]", "[", "countryname", ".", "upper", "(", ")", "]", "=", "iso3", "iso2", "=", "country", ".", "get", "(", "'#country+code+v_iso2'", ")", "if", "iso2", ":", "cls", ".", "_countriesdata", "[", "'iso2iso3'", "]", "[", "iso2", "]", "=", "iso3", "# different types so keys won't clash", "cls", ".", "_countriesdata", "[", "'iso2iso3'", "]", "[", "iso3", "]", "=", "iso2", "m49", "=", "country", ".", "get", "(", "'#country+code+num+v_m49'", ")", "if", "m49", ":", "m49", "=", "int", "(", "m49", ")", "cls", ".", "_countriesdata", "[", "'m49iso3'", "]", "[", "m49", "]", "=", "iso3", "# different types so keys won't clash", "cls", ".", "_countriesdata", "[", "'m49iso3'", "]", "[", "iso3", "]", "=", "m49", "cls", ".", "_countriesdata", "[", "'aliases'", "]", "[", "iso3", "]", "=", "re", ".", "compile", "(", "country", ".", "get", "(", "'#country+regex'", ")", ",", "re", ".", "IGNORECASE", ")", "regionname", "=", "country", ".", "get", "(", "'#region+main+name+preferred'", ")", "sub_regionname", "=", "country", ".", "get", "(", "'#region+sub+name+preferred'", ")", "intermediate_regionname", "=", "country", ".", "get", "(", "'#region+intermediate+name+preferred'", ")", "regionid", "=", "country", ".", "get", "(", "'#region+main+code'", ")", "if", "regionid", ":", "regionid", "=", "int", "(", "regionid", ")", "sub_regionid", "=", "country", ".", "get", "(", "'#region+sub+code'", ")", "if", "sub_regionid", ":", "sub_regionid", "=", "int", "(", "sub_regionid", ")", "intermediate_regionid", "=", "country", ".", "get", "(", "'#region+intermediate+code'", ")", "if", "intermediate_regionid", ":", "intermediate_regionid", "=", "int", "(", "intermediate_regionid", ")", "# region, subregion and intermediate region codes do not clash so only need one dict", "def", "add_country_to_set", "(", "colname", ",", "idval", ",", "iso3", ")", ":", "value", "=", "cls", ".", "_countriesdata", "[", "colname", "]", ".", "get", "(", "idval", ")", "if", "value", "is", "None", ":", "value", "=", "set", "(", ")", "cls", ".", "_countriesdata", "[", "'regioncodes2countries'", "]", "[", "idval", "]", "=", "value", "value", ".", "add", "(", "iso3", ")", "if", "regionname", ":", "add_country_to_set", "(", "'regioncodes2countries'", ",", "regionid", ",", "iso3", ")", "cls", ".", "_countriesdata", "[", "'regioncodes2names'", "]", "[", "regionid", "]", "=", "regionname", "cls", ".", "_countriesdata", "[", "'regionnames2codes'", "]", "[", "regionname", ".", "upper", "(", ")", "]", "=", "regionid", "if", "sub_regionname", ":", "add_country_to_set", "(", "'regioncodes2countries'", ",", "sub_regionid", ",", "iso3", ")", "cls", ".", "_countriesdata", "[", "'regioncodes2names'", "]", "[", "sub_regionid", "]", "=", "sub_regionname", "cls", ".", "_countriesdata", "[", "'regionnames2codes'", "]", "[", "sub_regionname", ".", "upper", "(", ")", "]", "=", "sub_regionid", "if", "intermediate_regionname", ":", "add_country_to_set", "(", "'regioncodes2countries'", ",", "intermediate_regionid", ",", "iso3", ")", "cls", ".", "_countriesdata", "[", "'regioncodes2names'", "]", "[", "intermediate_regionid", "]", "=", "intermediate_regionname", "cls", ".", "_countriesdata", "[", "'regionnames2codes'", "]", "[", "intermediate_regionname", ".", "upper", "(", ")", "]", "=", "intermediate_regionid" ]
Set up countries data from data in form provided by UNStats and World Bank Args: iso3 (str): ISO3 code for country country (hxl.Row): Country information Returns: None
[ "Set", "up", "countries", "data", "from", "data", "in", "form", "provided", "by", "UNStats", "and", "World", "Bank" ]
python
train
saltstack/salt
salt/proxy/esxcluster.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/esxcluster.py#L281-L302
def find_credentials():
    '''
    Cycle through all the possible credentials and return the first one that
    works.
    '''
    # if the username and password were already found, don't go through the
    # connection process again
    if 'username' in DETAILS and 'password' in DETAILS:
        return DETAILS['username'], DETAILS['password']

    passwords = DETAILS['passwords']
    for password in passwords:
        DETAILS['password'] = password
        if not __salt__['vsphere.test_vcenter_connection']():
            # We are unable to authenticate
            continue
        # If we have data returned from above, we've successfully authenticated.
        return DETAILS['username'], password
    # We've reached the end of the list without successfully authenticating.
    raise salt.exceptions.VMwareConnectionError('Cannot complete login due to '
                                                'incorrect credentials.')
[ "def", "find_credentials", "(", ")", ":", "# if the username and password were already found don't fo though the", "# connection process again", "if", "'username'", "in", "DETAILS", "and", "'password'", "in", "DETAILS", ":", "return", "DETAILS", "[", "'username'", "]", ",", "DETAILS", "[", "'password'", "]", "passwords", "=", "DETAILS", "[", "'passwords'", "]", "for", "password", "in", "passwords", ":", "DETAILS", "[", "'password'", "]", "=", "password", "if", "not", "__salt__", "[", "'vsphere.test_vcenter_connection'", "]", "(", ")", ":", "# We are unable to authenticate", "continue", "# If we have data returned from above, we've successfully authenticated.", "return", "DETAILS", "[", "'username'", "]", ",", "password", "# We've reached the end of the list without successfully authenticating.", "raise", "salt", ".", "exceptions", ".", "VMwareConnectionError", "(", "'Cannot complete login due to '", "'incorrect credentials.'", ")" ]
Cycle through all the possible credentials and return the first one that works.
[ "Cycle", "through", "all", "the", "possible", "credentials", "and", "return", "the", "first", "one", "that", "works", "." ]
python
train
wonambi-python/wonambi
wonambi/ioeeg/wonambi.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/wonambi.py#L120-L168
def write_wonambi(data, filename, subj_id='', dtype='float64'):
    """Write file in simple Wonambi format.

    Parameters
    ----------
    data : instance of ChanTime
        data with only one trial
    filename : path to file
        file to export to (the extensions .won and .dat will be added)
    subj_id : str
        subject id
    dtype : str
        numpy dtype in which you want to save the data

    Notes
    -----
    Wonambi format creates two files, one .won with the dataset info as json
    file and one .dat with the memmap recordings.

    It will happily overwrite any existing file with the same name.

    Memory-mapped matrices are column-major, Fortran-style, to be compatible
    with Matlab.
    """
    filename = Path(filename)
    json_file = filename.with_suffix('.won')
    memmap_file = filename.with_suffix('.dat')

    start_time = data.start_time + timedelta(seconds=data.axis['time'][0][0])
    start_time_str = start_time.strftime('%Y-%m-%d %H:%M:%S.%f')

    dataset = {'subj_id': subj_id,
               'start_time': start_time_str,
               's_freq': data.s_freq,
               'chan_name': list(data.axis['chan'][0]),
               'n_samples': int(data.number_of('time')[0]),
               'dtype': dtype,
               }

    with json_file.open('w') as f:
        dump(dataset, f, sort_keys=True, indent=4)

    memshape = (len(dataset['chan_name']),
                dataset['n_samples'])
    mem = memmap(str(memmap_file), dtype, mode='w+', shape=memshape, order='F')
    mem[:, :] = data.data[0]
    mem.flush()
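A hedged sketch of reading the two files back (the field names mirror what write_wonambi stores; 'recording' is a placeholder base name):

import json
from numpy import memmap

with open('recording.won') as f:
    dataset = json.load(f)

memshape = (len(dataset['chan_name']), dataset['n_samples'])
# Read-only view onto the Fortran-ordered samples written above
data = memmap('recording.dat', dataset['dtype'], mode='r',
              shape=memshape, order='F')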
[ "def", "write_wonambi", "(", "data", ",", "filename", ",", "subj_id", "=", "''", ",", "dtype", "=", "'float64'", ")", ":", "filename", "=", "Path", "(", "filename", ")", "json_file", "=", "filename", ".", "with_suffix", "(", "'.won'", ")", "memmap_file", "=", "filename", ".", "with_suffix", "(", "'.dat'", ")", "start_time", "=", "data", ".", "start_time", "+", "timedelta", "(", "seconds", "=", "data", ".", "axis", "[", "'time'", "]", "[", "0", "]", "[", "0", "]", ")", "start_time_str", "=", "start_time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S.%f'", ")", "dataset", "=", "{", "'subj_id'", ":", "subj_id", ",", "'start_time'", ":", "start_time_str", ",", "'s_freq'", ":", "data", ".", "s_freq", ",", "'chan_name'", ":", "list", "(", "data", ".", "axis", "[", "'chan'", "]", "[", "0", "]", ")", ",", "'n_samples'", ":", "int", "(", "data", ".", "number_of", "(", "'time'", ")", "[", "0", "]", ")", ",", "'dtype'", ":", "dtype", ",", "}", "with", "json_file", ".", "open", "(", "'w'", ")", "as", "f", ":", "dump", "(", "dataset", ",", "f", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ")", "memshape", "=", "(", "len", "(", "dataset", "[", "'chan_name'", "]", ")", ",", "dataset", "[", "'n_samples'", "]", ")", "mem", "=", "memmap", "(", "str", "(", "memmap_file", ")", ",", "dtype", ",", "mode", "=", "'w+'", ",", "shape", "=", "memshape", ",", "order", "=", "'F'", ")", "mem", "[", ":", ",", ":", "]", "=", "data", ".", "data", "[", "0", "]", "mem", ".", "flush", "(", ")" ]
Write file in simple Wonambi format. Parameters ---------- data : instance of ChanTime data with only one trial filename : path to file file to export to (the extensions .won and .dat will be added) subj_id : str subject id dtype : str numpy dtype in which you want to save the data Notes ----- Wonambi format creates two files, one .won with the dataset info as json file and one .dat with the memmap recordings. It will happily overwrite any existing file with the same name. Memory-mapped matrices are column-major, Fortran-style, to be compatible with Matlab.
[ "Write", "file", "in", "simple", "Wonambi", "format", "." ]
python
train
gnosis/gnosis-py
gnosis/eth/ethereum_client.py
https://github.com/gnosis/gnosis-py/blob/2a9a5d75a375fc9813ac04df133e6910c82f9d49/gnosis/eth/ethereum_client.py#L518-L543
def send_eth_to(self, private_key: str, to: str, gas_price: int, value: int,
                gas: int = 22000, retry: bool = False, block_identifier=None,
                max_eth_to_send: int = 0) -> bytes:
    """
    Send ether using configured account
    :param to: to
    :param gas_price: gas_price
    :param value: value(wei)
    :param gas: gas, defaults to 22000
    :param retry: Retry if a problem is found
    :param block_identifier: None default, 'pending' not confirmed txs
    :return: tx_hash
    """

    assert check_checksum(to)

    if max_eth_to_send and value > self.w3.toWei(max_eth_to_send, 'ether'):
        raise EtherLimitExceeded('%d is bigger than %f' % (value, max_eth_to_send))

    tx = {
        'to': to,
        'value': value,
        'gas': gas,
        'gasPrice': gas_price,
    }

    return self.send_unsigned_transaction(tx, private_key=private_key, retry=retry,
                                          block_identifier=block_identifier)
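A hedged usage sketch (the key and client construction are placeholders; the `to` address is the standard EIP-55 checksum example):

# Assumes an already-constructed client, e.g. EthereumClient('http://localhost:8545')
tx_hash = ethereum_client.send_eth_to(
    private_key='0x...',  # placeholder sender key
    to='0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed',  # checksummed address
    gas_price=ethereum_client.w3.toWei(1, 'gwei'),
    value=ethereum_client.w3.toWei(0.01, 'ether'),
    max_eth_to_send=1,  # refuse to move more than 1 ETH in one call
)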
[ "def", "send_eth_to", "(", "self", ",", "private_key", ":", "str", ",", "to", ":", "str", ",", "gas_price", ":", "int", ",", "value", ":", "int", ",", "gas", ":", "int", "=", "22000", ",", "retry", ":", "bool", "=", "False", ",", "block_identifier", "=", "None", ",", "max_eth_to_send", ":", "int", "=", "0", ")", "->", "bytes", ":", "assert", "check_checksum", "(", "to", ")", "if", "max_eth_to_send", "and", "value", ">", "self", ".", "w3", ".", "toWei", "(", "max_eth_to_send", ",", "'ether'", ")", ":", "raise", "EtherLimitExceeded", "(", "'%d is bigger than %f'", "%", "(", "value", ",", "max_eth_to_send", ")", ")", "tx", "=", "{", "'to'", ":", "to", ",", "'value'", ":", "value", ",", "'gas'", ":", "gas", ",", "'gasPrice'", ":", "gas_price", ",", "}", "return", "self", ".", "send_unsigned_transaction", "(", "tx", ",", "private_key", "=", "private_key", ",", "retry", "=", "retry", ",", "block_identifier", "=", "block_identifier", ")" ]
Send ether using configured account :param to: to :param gas_price: gas_price :param value: value(wei) :param gas: gas, defaults to 22000 :param retry: Retry if a problem is found :param block_identifier: None default, 'pending' not confirmed txs :return: tx_hash
[ "Send", "ether", "using", "configured", "account", ":", "param", "to", ":", "to", ":", "param", "gas_price", ":", "gas_price", ":", "param", "value", ":", "value", "(", "wei", ")", ":", "param", "gas", ":", "gas", "defaults", "to", "22000", ":", "param", "retry", ":", "Retry", "if", "a", "problem", "is", "found", ":", "param", "block_identifier", ":", "None", "default", "pending", "not", "confirmed", "txs", ":", "return", ":", "tx_hash" ]
python
test
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/color/color_space.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/color/color_space.py#L25-L36
def _hex_to_rgba(hexs):
    """Convert hex to rgba, permitting alpha values in hex"""
    hexs = np.atleast_1d(np.array(hexs, '|U9'))
    out = np.ones((len(hexs), 4), np.float32)
    for hi, h in enumerate(hexs):
        assert isinstance(h, string_types)
        off = 1 if h[0] == '#' else 0
        assert len(h) in (6+off, 8+off)
        e = (len(h)-off) // 2
        out[hi, :e] = [int(h[i:i+2], 16) / 255.
                       for i in range(off, len(h), 2)]
    return out
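For example, an 8-digit value carries alpha in its last byte, while a 6-digit value keeps the default alpha of 1.0 (assuming the function and its numpy import are in scope):

import numpy as np

rgba = _hex_to_rgba('#ff000080')  # red at ~50% alpha
assert rgba.shape == (1, 4)
assert np.allclose(rgba[0], [1.0, 0.0, 0.0, 0x80 / 255.0], atol=1e-3)
assert _hex_to_rgba('#00ff00')[0, 3] == 1.0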
[ "def", "_hex_to_rgba", "(", "hexs", ")", ":", "hexs", "=", "np", ".", "atleast_1d", "(", "np", ".", "array", "(", "hexs", ",", "'|U9'", ")", ")", "out", "=", "np", ".", "ones", "(", "(", "len", "(", "hexs", ")", ",", "4", ")", ",", "np", ".", "float32", ")", "for", "hi", ",", "h", "in", "enumerate", "(", "hexs", ")", ":", "assert", "isinstance", "(", "h", ",", "string_types", ")", "off", "=", "1", "if", "h", "[", "0", "]", "==", "'#'", "else", "0", "assert", "len", "(", "h", ")", "in", "(", "6", "+", "off", ",", "8", "+", "off", ")", "e", "=", "(", "len", "(", "h", ")", "-", "off", ")", "//", "2", "out", "[", "hi", ",", ":", "e", "]", "=", "[", "int", "(", "h", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "/", "255.", "for", "i", "in", "range", "(", "off", ",", "len", "(", "h", ")", ",", "2", ")", "]", "return", "out" ]
Convert hex to rgba, permitting alpha values in hex
[ "Convert", "hex", "to", "rgba", "permitting", "alpha", "values", "in", "hex" ]
python
train
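A quick check of `_hex_to_rgba` above; the import path is taken from this record's path field, and the expected outputs follow directly from the code (128/255 is roughly 0.502).

# Sketch: hex -> float32 RGBA rows; alpha defaults to 1 when only RGB is given.
from glue_vispy_viewers.extern.vispy.color.color_space import _hex_to_rgba

print(_hex_to_rgba('#ff0000'))               # [[1. 0. 0. 1.]]
print(_hex_to_rgba('#ff000080'))             # [[1. 0. 0. ~0.502]] (alpha from trailing byte)
print(_hex_to_rgba(['00ff00', '#0000ff']))   # leading '#' is optional, lists are fine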
spyder-ide/spyder
spyder/plugins/console/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/console/plugin.py#L252-L276
def run_script(self, filename=None, silent=False, set_focus=False, args=None): """Run a Python script""" if filename is None: self.shell.interpreter.restore_stds() filename, _selfilter = getopenfilename( self, _("Run Python script"), getcwd_or_home(), _("Python scripts")+" (*.py ; *.pyw ; *.ipy)") self.shell.interpreter.redirect_stds() if filename: os.chdir( osp.dirname(filename) ) filename = osp.basename(filename) else: return logger.debug("Running script with %s", args) filename = osp.abspath(filename) rbs = remove_backslashes command = "runfile('%s', args='%s')" % (rbs(filename), rbs(args)) if set_focus: self.shell.setFocus() if self.dockwidget and not self.ismaximized: self.dockwidget.setVisible(True) self.dockwidget.raise_() self.shell.write(command+'\n') self.shell.run_command(command)
[ "def", "run_script", "(", "self", ",", "filename", "=", "None", ",", "silent", "=", "False", ",", "set_focus", "=", "False", ",", "args", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "self", ".", "shell", ".", "interpreter", ".", "restore_stds", "(", ")", "filename", ",", "_selfilter", "=", "getopenfilename", "(", "self", ",", "_", "(", "\"Run Python script\"", ")", ",", "getcwd_or_home", "(", ")", ",", "_", "(", "\"Python scripts\"", ")", "+", "\" (*.py ; *.pyw ; *.ipy)\"", ")", "self", ".", "shell", ".", "interpreter", ".", "redirect_stds", "(", ")", "if", "filename", ":", "os", ".", "chdir", "(", "osp", ".", "dirname", "(", "filename", ")", ")", "filename", "=", "osp", ".", "basename", "(", "filename", ")", "else", ":", "return", "logger", ".", "debug", "(", "\"Running script with %s\"", ",", "args", ")", "filename", "=", "osp", ".", "abspath", "(", "filename", ")", "rbs", "=", "remove_backslashes", "command", "=", "\"runfile('%s', args='%s')\"", "%", "(", "rbs", "(", "filename", ")", ",", "rbs", "(", "args", ")", ")", "if", "set_focus", ":", "self", ".", "shell", ".", "setFocus", "(", ")", "if", "self", ".", "dockwidget", "and", "not", "self", ".", "ismaximized", ":", "self", ".", "dockwidget", ".", "setVisible", "(", "True", ")", "self", ".", "dockwidget", ".", "raise_", "(", ")", "self", ".", "shell", ".", "write", "(", "command", "+", "'\\n'", ")", "self", ".", "shell", ".", "run_command", "(", "command", ")" ]
Run a Python script
[ "Run", "a", "Python", "script" ]
python
train
click-contrib/click-configfile
tasks/_vendor/pathlib.py
https://github.com/click-contrib/click-configfile/blob/a616204cb9944125fd5051556f27a7ccef611e22/tasks/_vendor/pathlib.py#L1222-L1233
def is_block_device(self): """ Whether this path is a block device. """ try: return S_ISBLK(self.stat().st_mode) except OSError as e: if e.errno != ENOENT: raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False
[ "def", "is_block_device", "(", "self", ")", ":", "try", ":", "return", "S_ISBLK", "(", "self", ".", "stat", "(", ")", ".", "st_mode", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "!=", "ENOENT", ":", "raise", "# Path doesn't exist or is a broken symlink", "# (see https://bitbucket.org/pitrou/pathlib/issue/12/)", "return", "False" ]
Whether this path is a block device.
[ "Whether", "this", "path", "is", "a", "block", "device", "." ]
python
train
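The stdlib pathlib, which this vendored copy tracks, behaves the same way; a sketch of the two interesting branches (the device path is Linux-typical).

# Sketch using stdlib pathlib, which the vendored module mirrors.
from pathlib import Path

print(Path('/dev/sda').is_block_device())        # True on a typical Linux host
print(Path('/no/such/path').is_block_device())   # False: ENOENT is swallowed, not raised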
metagriffin/fso
fso/filesystemoverlay.py
https://github.com/metagriffin/fso/blob/c37701fbfdfde359a2044eb9420abe569a7b35e4/fso/filesystemoverlay.py#L465-L472
def fso_rmdir(self, path): 'overlays os.rmdir()' st = self.fso_lstat(path) if not stat.S_ISDIR(st.st_mode): raise OSError(20, 'Not a directory', path) if len(self.fso_listdir(path)) > 0: raise OSError(39, 'Directory not empty', path) self._addentry(OverlayEntry(self, path, None))
[ "def", "fso_rmdir", "(", "self", ",", "path", ")", ":", "st", "=", "self", ".", "fso_lstat", "(", "path", ")", "if", "not", "stat", ".", "S_ISDIR", "(", "st", ".", "st_mode", ")", ":", "raise", "OSError", "(", "20", ",", "'Not a directory'", ",", "path", ")", "if", "len", "(", "self", ".", "fso_listdir", "(", "path", ")", ")", ">", "0", ":", "raise", "OSError", "(", "39", ",", "'Directory not empty'", ",", "path", ")", "self", ".", "_addentry", "(", "OverlayEntry", "(", "self", ",", "path", ",", "None", ")", ")" ]
overlays os.rmdir()
[ "overlays", "os", ".", "rmdir", "()" ]
python
valid
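The overlay mirrors os.rmdir's errno contract; for reference, a sketch of the real call producing the same "Directory not empty" error (39 is the Linux value of ENOTEMPTY that the overlay hardcodes).

# Sketch: os.rmdir raises the errno values fso_rmdir reproduces.
import errno
import os
import tempfile

d = tempfile.mkdtemp()
open(os.path.join(d, 'keep'), 'w').close()
try:
    os.rmdir(d)
except OSError as e:
    assert e.errno == errno.ENOTEMPTY   # 39 on Linux, matching the overlay's error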
theonion/django-bulbs
bulbs/content/custom_search.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/custom_search.py#L162-L203
def groups_filter_from_query(query, field_map={}): """Creates an F object for the groups of a search query.""" f = None # filter groups for group in query.get("groups", []): group_f = MatchAll() for condition in group.get("conditions", []): field_name = condition["field"] field_name = field_map.get(field_name, field_name) operation = condition["type"] values = condition["values"] if values: values = [v["value"] for v in values] if operation == "all": # NOTE: is there a better way to express this? for value in values: if "." in field_name: path = field_name.split(".")[0] group_f &= Nested(path=path, filter=Term(**{field_name: value})) else: group_f &= Term(**{field_name: value}) elif operation == "any": if "." in field_name: path = field_name.split(".")[0] group_f &= Nested(path=path, filter=Terms(**{field_name: values})) else: group_f &= Terms(**{field_name: values}) elif operation == "none": if "." in field_name: path = field_name.split(".")[0] group_f &= ~Nested(path=path, filter=Terms(**{field_name: values})) else: group_f &= ~Terms(**{field_name: values}) date_range = group.get("time") if date_range: group_f &= date_range_filter(date_range) if f: f |= group_f else: f = group_f return f
[ "def", "groups_filter_from_query", "(", "query", ",", "field_map", "=", "{", "}", ")", ":", "f", "=", "None", "# filter groups", "for", "group", "in", "query", ".", "get", "(", "\"groups\"", ",", "[", "]", ")", ":", "group_f", "=", "MatchAll", "(", ")", "for", "condition", "in", "group", ".", "get", "(", "\"conditions\"", ",", "[", "]", ")", ":", "field_name", "=", "condition", "[", "\"field\"", "]", "field_name", "=", "field_map", ".", "get", "(", "field_name", ",", "field_name", ")", "operation", "=", "condition", "[", "\"type\"", "]", "values", "=", "condition", "[", "\"values\"", "]", "if", "values", ":", "values", "=", "[", "v", "[", "\"value\"", "]", "for", "v", "in", "values", "]", "if", "operation", "==", "\"all\"", ":", "# NOTE: is there a better way to express this?", "for", "value", "in", "values", ":", "if", "\".\"", "in", "field_name", ":", "path", "=", "field_name", ".", "split", "(", "\".\"", ")", "[", "0", "]", "group_f", "&=", "Nested", "(", "path", "=", "path", ",", "filter", "=", "Term", "(", "*", "*", "{", "field_name", ":", "value", "}", ")", ")", "else", ":", "group_f", "&=", "Term", "(", "*", "*", "{", "field_name", ":", "value", "}", ")", "elif", "operation", "==", "\"any\"", ":", "if", "\".\"", "in", "field_name", ":", "path", "=", "field_name", ".", "split", "(", "\".\"", ")", "[", "0", "]", "group_f", "&=", "Nested", "(", "path", "=", "path", ",", "filter", "=", "Terms", "(", "*", "*", "{", "field_name", ":", "values", "}", ")", ")", "else", ":", "group_f", "&=", "Terms", "(", "*", "*", "{", "field_name", ":", "values", "}", ")", "elif", "operation", "==", "\"none\"", ":", "if", "\".\"", "in", "field_name", ":", "path", "=", "field_name", ".", "split", "(", "\".\"", ")", "[", "0", "]", "group_f", "&=", "~", "Nested", "(", "path", "=", "path", ",", "filter", "=", "Terms", "(", "*", "*", "{", "field_name", ":", "values", "}", ")", ")", "else", ":", "group_f", "&=", "~", "Terms", "(", "*", "*", "{", "field_name", ":", "values", "}", ")", "date_range", "=", "group", ".", "get", "(", "\"time\"", ")", "if", "date_range", ":", "group_f", "&=", "date_range_filter", "(", "date_range", ")", "if", "f", ":", "f", "|=", "group_f", "else", ":", "f", "=", "group_f", "return", "f" ]
Creates an F object for the groups of a search query.
[ "Creates", "an", "F", "object", "for", "the", "groups", "of", "a", "search", "query", "." ]
python
train
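The query schema can be read off the accessors above; a hypothetical input illustrating it (the field names and the "time" value are made up, not from the source):

# Hypothetical query shape accepted by groups_filter_from_query().
query = {
    "groups": [{
        "conditions": [
            {"field": "tags.slug", "type": "any",
             "values": [{"value": "news"}, {"value": "sports"}]},   # nested field -> Nested(Terms)
            {"field": "feature_type", "type": "none",
             "values": [{"value": "op-ed"}]},                       # negated Terms
        ],
        "time": "Past day",   # assumed format; handed to date_range_filter()
    }],
}
f = groups_filter_from_query(query, field_map={"feature_type": "feature_type.raw"})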
Jaymon/prom
prom/cli/generate.py
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/cli/generate.py#L12-L28
def get_table_info(*table_names):
    """Yields (table_name, interface, fields) tuples for each table found

    :param *table_names: the tables you are searching for; if omitted, every
        table on every configured interface is yielded
    """
    if table_names:
        for table_name in table_names:
            for name, inter in get_interfaces().items():
                if inter.has_table(table_name):
                    yield table_name, inter, inter.get_fields(table_name)

    else:
        for name, inter in get_interfaces().items():
            table_names = inter.get_tables()
            for table_name in table_names:
                yield table_name, inter, inter.get_fields(table_name)
[ "def", "get_table_info", "(", "*", "table_names", ")", ":", "ret", "=", "{", "}", "if", "table_names", ":", "for", "table_name", "in", "table_names", ":", "for", "name", ",", "inter", "in", "get_interfaces", "(", ")", ".", "items", "(", ")", ":", "if", "inter", ".", "has_table", "(", "table_name", ")", ":", "yield", "table_name", ",", "inter", ",", "inter", ".", "get_fields", "(", "table_name", ")", "else", ":", "for", "name", ",", "inter", "in", "get_interfaces", "(", ")", ".", "items", "(", ")", ":", "table_names", "=", "inter", ".", "get_tables", "(", ")", "for", "table_name", "in", "table_names", ":", "yield", "table_name", ",", "inter", ",", "inter", ".", "get_fields", "(", "table_name", ")" ]
Yields (table_name, interface, fields) tuples for each table found

:param *table_names: the tables you are searching for; if omitted, every
    table on every configured interface is yielded
[ "Returns", "a", "dict", "with", "table_name", "keys", "mapped", "to", "the", "Interface", "that", "table", "exists", "in" ]
python
train
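Since the function is a generator (see the corrected docstring), consumption is plain iteration; the table names below are illustrative, and the fields mapping is whatever `inter.get_fields()` returns.

# Sketch: iterating the generator; one row per matching table.
for table_name, interface, fields in get_table_info("user", "order"):
    print(table_name, interface, sorted(fields))

# With no arguments it walks every table on every configured interface.
for table_name, interface, fields in get_table_info():
    print(table_name)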
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L5739-L5826
def initialize(self, modulo=None): """reset logger, overwrite original files, `modulo`: log only every modulo call""" if modulo is not None: self.modulo = modulo try: es = self.es # must have been registered except AttributeError: pass # TODO: revise usage of es... that this can pass raise _Error('call register() before initialize()') self.counter = 0 # number of calls of add self.last_iteration = 0 # some lines are only written if iteration>last_iteration # write headers for output fn = self.name_prefix + 'fit.dat' strseedtime = 'seed=%d, %s' % (es.opts['seed'], time.asctime()) try: with open(fn, 'w') as f: f.write('% # columns="iteration, evaluation, sigma, axis ratio, ' + 'bestever, best, median, worst objective function value, ' + 'further objective values of best", ' + strseedtime + # strftime("%Y/%m/%d %H:%M:%S", localtime()) + # just asctime() would do '\n') except (IOError, OSError): print('could not open file ' + fn) fn = self.name_prefix + 'axlen.dat' try: with open(fn, 'w') as f: f.write('% columns="iteration, evaluation, sigma, ' + 'max axis length, ' + ' min axis length, all principle axes lengths ' + ' (sorted square roots of eigenvalues of C)", ' + strseedtime + '\n') except (IOError, OSError): print('could not open/write file ' + fn) fn = self.name_prefix + 'axlencorr.dat' try: with open(fn, 'w') as f: f.write('% columns="iteration, evaluation, min max(neg(.)) min(pos(.))' + ' max correlation, correlation matrix principle axes lengths ' + ' (sorted square roots of eigenvalues of correlation matrix)", ' + strseedtime + '\n') except (IOError, OSError): print('could not open file ' + fn) fn = self.name_prefix + 'stddev.dat' try: with open(fn, 'w') as f: f.write('% # columns=["iteration, evaluation, sigma, void, void, ' + ' stds==sigma*sqrt(diag(C))", ' + strseedtime + '\n') except (IOError, OSError): print('could not open file ' + fn) fn = self.name_prefix + 'xmean.dat' try: with open(fn, 'w') as f: f.write('% # columns="iteration, evaluation, void, void, void, xmean", ' + strseedtime) f.write(' # scaling_of_variables: ') if np.size(es.gp.scales) > 1: f.write(' '.join(map(str, es.gp.scales))) else: f.write(str(es.gp.scales)) f.write(', typical_x: ') if np.size(es.gp.typical_x) > 1: f.write(' '.join(map(str, es.gp.typical_x))) else: f.write(str(es.gp.typical_x)) f.write('\n') except (IOError, OSError): print('could not open/write file ' + fn) fn = self.name_prefix + 'xrecentbest.dat' try: with open(fn, 'w') as f: f.write('% # iter+eval+sigma+0+fitness+xbest, ' + strseedtime + '\n') except (IOError, OSError): print('could not open/write file ' + fn) return self
[ "def", "initialize", "(", "self", ",", "modulo", "=", "None", ")", ":", "if", "modulo", "is", "not", "None", ":", "self", ".", "modulo", "=", "modulo", "try", ":", "es", "=", "self", ".", "es", "# must have been registered", "except", "AttributeError", ":", "pass", "# TODO: revise usage of es... that this can pass", "raise", "_Error", "(", "'call register() before initialize()'", ")", "self", ".", "counter", "=", "0", "# number of calls of add", "self", ".", "last_iteration", "=", "0", "# some lines are only written if iteration>last_iteration", "# write headers for output", "fn", "=", "self", ".", "name_prefix", "+", "'fit.dat'", "strseedtime", "=", "'seed=%d, %s'", "%", "(", "es", ".", "opts", "[", "'seed'", "]", ",", "time", ".", "asctime", "(", ")", ")", "try", ":", "with", "open", "(", "fn", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'% # columns=\"iteration, evaluation, sigma, axis ratio, '", "+", "'bestever, best, median, worst objective function value, '", "+", "'further objective values of best\", '", "+", "strseedtime", "+", "# strftime(\"%Y/%m/%d %H:%M:%S\", localtime()) + # just asctime() would do", "'\\n'", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "print", "(", "'could not open file '", "+", "fn", ")", "fn", "=", "self", ".", "name_prefix", "+", "'axlen.dat'", "try", ":", "with", "open", "(", "fn", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'% columns=\"iteration, evaluation, sigma, '", "+", "'max axis length, '", "+", "' min axis length, all principle axes lengths '", "+", "' (sorted square roots of eigenvalues of C)\", '", "+", "strseedtime", "+", "'\\n'", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "print", "(", "'could not open/write file '", "+", "fn", ")", "fn", "=", "self", ".", "name_prefix", "+", "'axlencorr.dat'", "try", ":", "with", "open", "(", "fn", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'% columns=\"iteration, evaluation, min max(neg(.)) min(pos(.))'", "+", "' max correlation, correlation matrix principle axes lengths '", "+", "' (sorted square roots of eigenvalues of correlation matrix)\", '", "+", "strseedtime", "+", "'\\n'", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "print", "(", "'could not open file '", "+", "fn", ")", "fn", "=", "self", ".", "name_prefix", "+", "'stddev.dat'", "try", ":", "with", "open", "(", "fn", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'% # columns=[\"iteration, evaluation, sigma, void, void, '", "+", "' stds==sigma*sqrt(diag(C))\", '", "+", "strseedtime", "+", "'\\n'", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "print", "(", "'could not open file '", "+", "fn", ")", "fn", "=", "self", ".", "name_prefix", "+", "'xmean.dat'", "try", ":", "with", "open", "(", "fn", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'% # columns=\"iteration, evaluation, void, void, void, xmean\", '", "+", "strseedtime", ")", "f", ".", "write", "(", "' # scaling_of_variables: '", ")", "if", "np", ".", "size", "(", "es", ".", "gp", ".", "scales", ")", ">", "1", ":", "f", ".", "write", "(", "' '", ".", "join", "(", "map", "(", "str", ",", "es", ".", "gp", ".", "scales", ")", ")", ")", "else", ":", "f", ".", "write", "(", "str", "(", "es", ".", "gp", ".", "scales", ")", ")", "f", ".", "write", "(", "', typical_x: '", ")", "if", "np", ".", "size", "(", "es", ".", "gp", ".", "typical_x", ")", ">", "1", ":", "f", ".", "write", "(", "' '", ".", "join", "(", "map", "(", "str", ",", "es", ".", "gp", ".", "typical_x", ")", ")", ")", 
"else", ":", "f", ".", "write", "(", "str", "(", "es", ".", "gp", ".", "typical_x", ")", ")", "f", ".", "write", "(", "'\\n'", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "print", "(", "'could not open/write file '", "+", "fn", ")", "fn", "=", "self", ".", "name_prefix", "+", "'xrecentbest.dat'", "try", ":", "with", "open", "(", "fn", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'% # iter+eval+sigma+0+fitness+xbest, '", "+", "strseedtime", "+", "'\\n'", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "print", "(", "'could not open/write file '", "+", "fn", ")", "return", "self" ]
reset logger, overwrite original files, `modulo`: log only every modulo call
[ "reset", "logger", "overwrite", "original", "files", "modulo", ":", "log", "only", "every", "modulo", "call" ]
python
train
fhcrc/nestly
nestly/scripts/nestrun.py
https://github.com/fhcrc/nestly/blob/4d7818b5950f405d2067a6b8577d5afb7527c9ff/nestly/scripts/nestrun.py#L186-L192
def complete(self, return_code): """ Mark the process as complete with provided return_code """ self.return_code = return_code self.status = 'COMPLETE' if not return_code else 'FAILED' self.end_time = datetime.datetime.now()
[ "def", "complete", "(", "self", ",", "return_code", ")", ":", "self", ".", "return_code", "=", "return_code", "self", ".", "status", "=", "'COMPLETE'", "if", "not", "return_code", "else", "'FAILED'", "self", ".", "end_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")" ]
Mark the process as complete with provided return_code
[ "Mark", "the", "process", "as", "complete", "with", "provided", "return_code" ]
python
train
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L1659-L1665
def update_payload(self, fields=None): """Wrap submitted data within an extra dict.""" payload = super(JobTemplate, self).update_payload(fields) effective_user = payload.pop(u'effective_user', None) if effective_user: payload[u'ssh'] = {u'effective_user': effective_user} return {u'job_template': payload}
[ "def", "update_payload", "(", "self", ",", "fields", "=", "None", ")", ":", "payload", "=", "super", "(", "JobTemplate", ",", "self", ")", ".", "update_payload", "(", "fields", ")", "effective_user", "=", "payload", ".", "pop", "(", "u'effective_user'", ",", "None", ")", "if", "effective_user", ":", "payload", "[", "u'ssh'", "]", "=", "{", "u'effective_user'", ":", "effective_user", "}", "return", "{", "u'job_template'", ":", "payload", "}" ]
Wrap submitted data within an extra dict.
[ "Wrap", "submitted", "data", "within", "an", "extra", "dict", "." ]
python
train
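The wrapping can be traced with plain dicts; a sketch of the transformation the method performs (the payload contents are illustrative):

# Effect of JobTemplate.update_payload, traced with plain dicts.
payload = {'name': 'Run Command', 'effective_user': 'deploy'}
effective_user = payload.pop('effective_user', None)
if effective_user:
    payload['ssh'] = {'effective_user': effective_user}
wrapped = {'job_template': payload}
# -> {'job_template': {'name': 'Run Command', 'ssh': {'effective_user': 'deploy'}}}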
pyQode/pyqode.core
pyqode/core/api/code_edit.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/api/code_edit.py#L890-L898
def copy(self):
        """
        Copy the selected text to the clipboard. If no text was selected, the
        entire line is copied (this feature can be turned off by setting
        :attr:`select_line_on_copy_empty` to False).
        """
        if self.select_line_on_copy_empty and not self.textCursor().hasSelection():
            TextHelper(self).select_whole_line()
        super(CodeEdit, self).copy()
[ "def", "copy", "(", "self", ")", ":", "if", "self", ".", "select_line_on_copy_empty", "and", "not", "self", ".", "textCursor", "(", ")", ".", "hasSelection", "(", ")", ":", "TextHelper", "(", "self", ")", ".", "select_whole_line", "(", ")", "super", "(", "CodeEdit", ",", "self", ")", ".", "copy", "(", ")" ]
Copy the selected text to the clipboard. If no text was selected, the entire line is copied (this feature can be turned off by setting :attr:`select_line_on_copy_empty` to False).
[ "Copy", "the", "selected", "text", "to", "the", "clipboard", ".", "If", "no", "text", "was", "selected", "the", "entire", "line", "is", "copied", "(", "this", "feature", "can", "be", "turned", "off", "by", "setting", ":", "attr", ":", "select_line_on_copy_empty", "to", "False", "." ]
python
train
summa-tx/riemann
riemann/encoding/base58.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/base58.py#L32-L39
def encode(data, checksum=True): """Convert binary to base58 using BASE58_ALPHABET.""" if checksum: data = data + utils.hash256(data)[:4] v, prefix = to_long(256, lambda x: x, iter(data)) data = from_long(v, prefix, BASE58_BASE, lambda v: BASE58_ALPHABET[v]) return data.decode("utf8")
[ "def", "encode", "(", "data", ",", "checksum", "=", "True", ")", ":", "if", "checksum", ":", "data", "=", "data", "+", "utils", ".", "hash256", "(", "data", ")", "[", ":", "4", "]", "v", ",", "prefix", "=", "to_long", "(", "256", ",", "lambda", "x", ":", "x", ",", "iter", "(", "data", ")", ")", "data", "=", "from_long", "(", "v", ",", "prefix", ",", "BASE58_BASE", ",", "lambda", "v", ":", "BASE58_ALPHABET", "[", "v", "]", ")", "return", "data", ".", "decode", "(", "\"utf8\"", ")" ]
Convert binary to base58 using BASE58_ALPHABET.
[ "Convert", "binary", "to", "base58", "using", "BASE58_ALPHABET", "." ]
python
train
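A worked example: Base58Check of a version byte plus twenty zero bytes yields the well-known Bitcoin "burn" address, so the call doubles as a self-test. The import path is taken from this record.

# Sketch: Base58Check-style encoding with the record's encode().
from riemann.encoding.base58 import encode

payload = b'\x00' + bytes(20)   # version byte 0x00 + 20-byte zero hash
print(encode(payload))          # '1111111111111111111114oLvT2' (checksum appended internally)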
pgmpy/pgmpy
pgmpy/models/BayesianModel.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/BayesianModel.py#L511-L567
def predict(self, data): """ Predicts states of all the missing variables. Parameters ---------- data : pandas DataFrame object A DataFrame object with column names same as the variables in the model. Examples -------- >>> import numpy as np >>> import pandas as pd >>> from pgmpy.models import BayesianModel >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)), ... columns=['A', 'B', 'C', 'D', 'E']) >>> train_data = values[:800] >>> predict_data = values[800:] >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')]) >>> model.fit(values) >>> predict_data = predict_data.copy() >>> predict_data.drop('E', axis=1, inplace=True) >>> y_pred = model.predict(predict_data) >>> y_pred E 800 0 801 1 802 1 803 1 804 0 ... ... 993 0 994 0 995 1 996 1 997 0 998 0 999 0 """ from pgmpy.inference import VariableElimination if set(data.columns) == set(self.nodes()): raise ValueError("No variable missing in data. Nothing to predict") elif set(data.columns) - set(self.nodes()): raise ValueError("Data has variables which are not in the model") missing_variables = set(self.nodes()) - set(data.columns) pred_values = defaultdict(list) # Send state_names dict from one of the estimated CPDs to the inference class. model_inference = VariableElimination(self, state_names=self.get_cpds()[0].state_names) for index, data_point in data.iterrows(): states_dict = model_inference.map_query(variables=missing_variables, evidence=data_point.to_dict()) for k, v in states_dict.items(): pred_values[k].append(v) return pd.DataFrame(pred_values, index=data.index)
[ "def", "predict", "(", "self", ",", "data", ")", ":", "from", "pgmpy", ".", "inference", "import", "VariableElimination", "if", "set", "(", "data", ".", "columns", ")", "==", "set", "(", "self", ".", "nodes", "(", ")", ")", ":", "raise", "ValueError", "(", "\"No variable missing in data. Nothing to predict\"", ")", "elif", "set", "(", "data", ".", "columns", ")", "-", "set", "(", "self", ".", "nodes", "(", ")", ")", ":", "raise", "ValueError", "(", "\"Data has variables which are not in the model\"", ")", "missing_variables", "=", "set", "(", "self", ".", "nodes", "(", ")", ")", "-", "set", "(", "data", ".", "columns", ")", "pred_values", "=", "defaultdict", "(", "list", ")", "# Send state_names dict from one of the estimated CPDs to the inference class.", "model_inference", "=", "VariableElimination", "(", "self", ",", "state_names", "=", "self", ".", "get_cpds", "(", ")", "[", "0", "]", ".", "state_names", ")", "for", "index", ",", "data_point", "in", "data", ".", "iterrows", "(", ")", ":", "states_dict", "=", "model_inference", ".", "map_query", "(", "variables", "=", "missing_variables", ",", "evidence", "=", "data_point", ".", "to_dict", "(", ")", ")", "for", "k", ",", "v", "in", "states_dict", ".", "items", "(", ")", ":", "pred_values", "[", "k", "]", ".", "append", "(", "v", ")", "return", "pd", ".", "DataFrame", "(", "pred_values", ",", "index", "=", "data", ".", "index", ")" ]
Predicts states of all the missing variables. Parameters ---------- data : pandas DataFrame object A DataFrame object with column names same as the variables in the model. Examples -------- >>> import numpy as np >>> import pandas as pd >>> from pgmpy.models import BayesianModel >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)), ... columns=['A', 'B', 'C', 'D', 'E']) >>> train_data = values[:800] >>> predict_data = values[800:] >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')]) >>> model.fit(values) >>> predict_data = predict_data.copy() >>> predict_data.drop('E', axis=1, inplace=True) >>> y_pred = model.predict(predict_data) >>> y_pred E 800 0 801 1 802 1 803 1 804 0 ... ... 993 0 994 0 995 1 996 1 997 0 998 0 999 0
[ "Predicts", "states", "of", "all", "the", "missing", "variables", "." ]
python
train
tumblr/pytumblr
pytumblr/__init__.py
https://github.com/tumblr/pytumblr/blob/4a5cd7c4b8ae78d12811d9fd52620afa1692a415/pytumblr/__init__.py#L114-L134
def posts(self, blogname, type=None, **kwargs): """ Gets a list of posts from a particular blog :param blogname: a string, the blogname you want to look up posts for. eg: codingjester.tumblr.com :param id: an int, the id of the post you are looking for on the blog :param tag: a string, the tag you are looking for on posts :param limit: an int, the number of results you want :param offset: an int, the offset of the posts you want to start at. :param before: an int, the timestamp for posts you want before. :param filter: the post format you want returned: HTML, text or raw. :param type: the type of posts you want returned, e.g. video. If omitted returns all post types. :returns: a dict created from the JSON response """ if type is None: url = '/v2/blog/{}/posts'.format(blogname) else: url = '/v2/blog/{}/posts/{}'.format(blogname, type) return self.send_api_request("get", url, kwargs, ['id', 'tag', 'limit', 'offset', 'before', 'reblog_info', 'notes_info', 'filter', 'api_key'], True)
[ "def", "posts", "(", "self", ",", "blogname", ",", "type", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "type", "is", "None", ":", "url", "=", "'/v2/blog/{}/posts'", ".", "format", "(", "blogname", ")", "else", ":", "url", "=", "'/v2/blog/{}/posts/{}'", ".", "format", "(", "blogname", ",", "type", ")", "return", "self", ".", "send_api_request", "(", "\"get\"", ",", "url", ",", "kwargs", ",", "[", "'id'", ",", "'tag'", ",", "'limit'", ",", "'offset'", ",", "'before'", ",", "'reblog_info'", ",", "'notes_info'", ",", "'filter'", ",", "'api_key'", "]", ",", "True", ")" ]
Gets a list of posts from a particular blog :param blogname: a string, the blogname you want to look up posts for. eg: codingjester.tumblr.com :param id: an int, the id of the post you are looking for on the blog :param tag: a string, the tag you are looking for on posts :param limit: an int, the number of results you want :param offset: an int, the offset of the posts you want to start at. :param before: an int, the timestamp for posts you want before. :param filter: the post format you want returned: HTML, text or raw. :param type: the type of posts you want returned, e.g. video. If omitted returns all post types. :returns: a dict created from the JSON response
[ "Gets", "a", "list", "of", "posts", "from", "a", "particular", "blog" ]
python
train
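A hypothetical call; the four OAuth credentials are placeholders, and the response shape (a dict holding a 'posts' list) follows from the JSON the method returns.

# Hypothetical client usage; credentials are placeholders.
import pytumblr

client = pytumblr.TumblrRestClient('consumer_key', 'consumer_secret',
                                   'oauth_token', 'oauth_secret')
resp = client.posts('codingjester.tumblr.com', type='photo', limit=5, tag='gif')
for post in resp.get('posts', []):
    print(post['id'], post['post_url'])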
RedisJSON/rejson-py
rejson/client.py
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L106-L123
def jsonget(self, name, *args): """ Get the object stored as a JSON value at key ``name`` ``args`` is zero or more paths, and defaults to root path """ pieces = [name] if len(args) == 0: pieces.append(Path.rootPath()) else: for p in args: pieces.append(str_path(p)) # Handle case where key doesn't exist. The JSONDecoder would raise a # TypeError exception since it can't decode None try: return self.execute_command('JSON.GET', *pieces) except TypeError: return None
[ "def", "jsonget", "(", "self", ",", "name", ",", "*", "args", ")", ":", "pieces", "=", "[", "name", "]", "if", "len", "(", "args", ")", "==", "0", ":", "pieces", ".", "append", "(", "Path", ".", "rootPath", "(", ")", ")", "else", ":", "for", "p", "in", "args", ":", "pieces", ".", "append", "(", "str_path", "(", "p", ")", ")", "# Handle case where key doesn't exist. The JSONDecoder would raise a", "# TypeError exception since it can't decode None", "try", ":", "return", "self", ".", "execute_command", "(", "'JSON.GET'", ",", "*", "pieces", ")", "except", "TypeError", ":", "return", "None" ]
Get the object stored as a JSON value at key ``name`` ``args`` is zero or more paths, and defaults to root path
[ "Get", "the", "object", "stored", "as", "a", "JSON", "value", "at", "key", "name", "args", "is", "zero", "or", "more", "paths", "and", "defaults", "to", "root", "path" ]
python
train
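A sketch against a local ReJSON-enabled Redis; the key and paths are illustrative. The last call exercises the swallowed-TypeError branch, which returns None for a missing key.

# Sketch: jsonget with the default root path and an explicit sub-path.
from rejson import Client, Path

rj = Client(host='localhost', port=6379, decode_responses=True)
rj.jsonset('obj', Path.rootPath(), {'answer': 42, 'nested': {'ok': True}})

print(rj.jsonget('obj'))                       # whole document (root path default)
print(rj.jsonget('obj', Path('.nested.ok')))   # single sub-path
print(rj.jsonget('no-such-key'))               # None: decoding a missing key is caught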
hawkular/hawkular-client-python
hawkular/alerts/triggers.py
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L212-L222
def group_members(self, group_id, include_orphans=False):
        """
        Find all group member trigger definitions

        :param group_id: group trigger id
        :param include_orphans: If True, include orphan members

        :return: list of associated group members as trigger objects
        """
        params = {'includeOrphans': str(include_orphans).lower()}
        url = self._service_url(['triggers', 'groups', group_id, 'members'], params=params)
        return Trigger.list_to_object_list(self._get(url))
[ "def", "group_members", "(", "self", ",", "group_id", ",", "include_orphans", "=", "False", ")", ":", "params", "=", "{", "'includeOrphans'", ":", "str", "(", "include_orphans", ")", ".", "lower", "(", ")", "}", "url", "=", "self", ".", "_service_url", "(", "[", "'triggers'", ",", "'groups'", ",", "group_id", ",", "'members'", "]", ",", "params", "=", "params", ")", "return", "Trigger", ".", "list_to_object_list", "(", "self", ".", "_get", "(", "url", ")", ")" ]
Find all group member trigger definitions

:param group_id: group trigger id
:param include_orphans: If True, include orphan members

:return: list of associated group members as trigger objects
[ "Find", "all", "group", "member", "trigger", "definitions" ]
python
train
spdx/tools-python
spdx/parsers/tagvaluebuilders.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/tagvaluebuilders.py#L907-L924
def set_concluded_license(self, doc, lic): """ Raises OrderError if no package or file defined. Raises CardinalityError if already set. Raises SPDXValueError if malformed. """ if self.has_package(doc) and self.has_file(doc): if not self.file_conc_lics_set: self.file_conc_lics_set = True if validations.validate_lics_conc(lic): self.file(doc).conc_lics = lic return True else: raise SPDXValueError('File::ConcludedLicense') else: raise CardinalityError('File::ConcludedLicense') else: raise OrderError('File::ConcludedLicense')
[ "def", "set_concluded_license", "(", "self", ",", "doc", ",", "lic", ")", ":", "if", "self", ".", "has_package", "(", "doc", ")", "and", "self", ".", "has_file", "(", "doc", ")", ":", "if", "not", "self", ".", "file_conc_lics_set", ":", "self", ".", "file_conc_lics_set", "=", "True", "if", "validations", ".", "validate_lics_conc", "(", "lic", ")", ":", "self", ".", "file", "(", "doc", ")", ".", "conc_lics", "=", "lic", "return", "True", "else", ":", "raise", "SPDXValueError", "(", "'File::ConcludedLicense'", ")", "else", ":", "raise", "CardinalityError", "(", "'File::ConcludedLicense'", ")", "else", ":", "raise", "OrderError", "(", "'File::ConcludedLicense'", ")" ]
Raises OrderError if no package or file defined. Raises CardinalityError if already set. Raises SPDXValueError if malformed.
[ "Raises", "OrderError", "if", "no", "package", "or", "file", "defined", ".", "Raises", "CardinalityError", "if", "already", "set", ".", "Raises", "SPDXValueError", "if", "malformed", "." ]
python
valid
totalgood/nlpia
src/nlpia/web.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/web.py#L224-L252
def download_file_from_google_drive(driveid, filename=None, destination=os.path.curdir):
    """ Download script for google drive shared links

    Thank you @turdus-merula and Andrew Hundt!
    https://stackoverflow.com/a/39225039/623735
    """
    if '&id=' in driveid:
        # https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs # dailymail_stories.tgz
        driveid = driveid.split('&id=')[-1]
    if '?id=' in driveid:
        # 'https://drive.google.com/open?id=14mELuzm0OvXnwjb0mzAiG-Ake9_NP_LQ' # SSD pretrained keras model
        driveid = driveid.split('?id=')[-1]

    URL = "https://docs.google.com/uc?export=download"

    session = requests.Session()

    response = session.get(URL, params={'id': driveid}, stream=True)
    token = get_response_confirmation_token(response)

    if token:
        params = {'id': driveid, 'confirm': token}
        response = session.get(URL, params=params, stream=True)

    filename = filename or get_url_filename(driveid=driveid)
    full_destination_path = save_response_content(response, filename=filename, destination=destination)

    return full_destination_path
[ "def", "download_file_from_google_drive", "(", "driveid", ",", "filename", "=", "None", ",", "destination", "=", "os", ".", "path", ".", "curdir", ")", ":", "if", "'&id='", "in", "driveid", ":", "# https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs # dailymail_stories.tgz", "driveid", "=", "driveid", ".", "split", "(", "'&id='", ")", "[", "-", "1", "]", "if", "'?id='", "in", "driveid", ":", "# 'https://drive.google.com/open?id=14mELuzm0OvXnwjb0mzAiG-Ake9_NP_LQ' # SSD pretrainined keras model", "driveid", "=", "driveid", ".", "split", "(", "'?id='", ")", "[", "-", "1", "]", "URL", "=", "\"https://docs.google.com/uc?export=download\"", "session", "=", "requests", ".", "Session", "(", ")", "response", "=", "session", ".", "get", "(", "URL", ",", "params", "=", "{", "'id'", ":", "driveid", "}", ",", "stream", "=", "True", ")", "token", "=", "get_response_confirmation_token", "(", "response", ")", "if", "token", ":", "params", "=", "{", "'id'", ":", "driveid", ",", "'confirm'", ":", "token", "}", "response", "=", "session", ".", "get", "(", "URL", ",", "params", "=", "params", ",", "stream", "=", "True", ")", "filename", "=", "filename", "or", "get_url_filename", "(", "driveid", "=", "driveid", ")", "full_destination_path", "=", "save_response_content", "(", "response", ",", "filename", "=", "fileanme", ",", "destination", "=", "destination", ")", "return", "os", ".", "path", ".", "abspath", "(", "destination", ")" ]
Download script for google drive shared links Thank you @turdus-merula and Andrew Hundt! https://stackoverflow.com/a/39225039/623735
[ "Download", "script", "for", "google", "drive", "shared", "links" ]
python
train
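Both share-link shapes reduce to the bare id before the request is made; a sketch (the link and filename are placeholders, and a Drive file with that id must actually exist for the download to succeed).

# Sketch: either link style works, since the id is split out first.
url = 'https://drive.google.com/open?id=FAKE_FILE_ID'   # placeholder id
path = download_file_from_google_drive(url, filename='model.h5', destination='/tmp')
print(path)   # path of the saved file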
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L20-L38
def proposal_metrics(iou): """ Add summaries for RPN proposals. Args: iou: nxm, #proposal x #gt """ # find best roi for each gt, for summary only best_iou = tf.reduce_max(iou, axis=0) mean_best_iou = tf.reduce_mean(best_iou, name='best_iou_per_gt') summaries = [mean_best_iou] with tf.device('/cpu:0'): for th in [0.3, 0.5]: recall = tf.truediv( tf.count_nonzero(best_iou >= th), tf.size(best_iou, out_type=tf.int64), name='recall_iou{}'.format(th)) summaries.append(recall) add_moving_summary(*summaries)
[ "def", "proposal_metrics", "(", "iou", ")", ":", "# find best roi for each gt, for summary only", "best_iou", "=", "tf", ".", "reduce_max", "(", "iou", ",", "axis", "=", "0", ")", "mean_best_iou", "=", "tf", ".", "reduce_mean", "(", "best_iou", ",", "name", "=", "'best_iou_per_gt'", ")", "summaries", "=", "[", "mean_best_iou", "]", "with", "tf", ".", "device", "(", "'/cpu:0'", ")", ":", "for", "th", "in", "[", "0.3", ",", "0.5", "]", ":", "recall", "=", "tf", ".", "truediv", "(", "tf", ".", "count_nonzero", "(", "best_iou", ">=", "th", ")", ",", "tf", ".", "size", "(", "best_iou", ",", "out_type", "=", "tf", ".", "int64", ")", ",", "name", "=", "'recall_iou{}'", ".", "format", "(", "th", ")", ")", "summaries", ".", "append", "(", "recall", ")", "add_moving_summary", "(", "*", "summaries", ")" ]
Add summaries for RPN proposals. Args: iou: nxm, #proposal x #gt
[ "Add", "summaries", "for", "RPN", "proposals", "." ]
python
train
sdispater/orator
orator/schema/blueprint.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/schema/blueprint.py#L61-L71
def _add_implied_commands(self): """ Add the commands that are implied by the blueprint. """ if len(self.get_added_columns()) and not self._creating(): self._commands.insert(0, self._create_command("add")) if len(self.get_changed_columns()) and not self._creating(): self._commands.insert(0, self._create_command("change")) return self._add_fluent_indexes()
[ "def", "_add_implied_commands", "(", "self", ")", ":", "if", "len", "(", "self", ".", "get_added_columns", "(", ")", ")", "and", "not", "self", ".", "_creating", "(", ")", ":", "self", ".", "_commands", ".", "insert", "(", "0", ",", "self", ".", "_create_command", "(", "\"add\"", ")", ")", "if", "len", "(", "self", ".", "get_changed_columns", "(", ")", ")", "and", "not", "self", ".", "_creating", "(", ")", ":", "self", ".", "_commands", ".", "insert", "(", "0", ",", "self", ".", "_create_command", "(", "\"change\"", ")", ")", "return", "self", ".", "_add_fluent_indexes", "(", ")" ]
Add the commands that are implied by the blueprint.
[ "Add", "the", "commands", "that", "are", "implied", "by", "the", "blueprint", "." ]
python
train
bokeh/bokeh
bokeh/embed/standalone.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/embed/standalone.py#L111-L248
def components(models, wrap_script=True, wrap_plot_info=True, theme=FromCurdoc): ''' Return HTML components to embed a Bokeh plot. The data for the plot is stored directly in the returned HTML. An example can be found in examples/embed/embed_multiple.py The returned components assume that BokehJS resources are **already loaded**. The html template in which they will be embedded needs to include the following links and scripts tags. The widgets and tables resources are only necessary if the components make use of widgets and tables. .. code-block:: html <link href="http://cdn.pydata.org/bokeh/release/bokeh-x.y.z.min.css" rel="stylesheet" type="text/css"> <link href="http://cdn.pydata.org/bokeh/release/bokeh-widgets-x.y.z.min.css" rel="stylesheet" type="text/css"> <link href="http://cdn.pydata.org/bokeh/release/bokeh-tables-x.y.z.min.css" rel="stylesheet" type="text/css"> <script src="http://cdn.pydata.org/bokeh/release/bokeh-x.y.z.min.js"></script> <script src="http://cdn.pydata.org/bokeh/release/bokeh-widgets-x.y.z.min.js"></script> <script src="http://cdn.pydata.org/bokeh/release/bokeh-tables-x.y.z.min.js"></script> Note that in Jupyter Notebooks, it is not possible to use components and show in the same notebook cell. Args: models (Model|list|dict|tuple) : A single Model, a list/tuple of Models, or a dictionary of keys and Models. wrap_script (boolean, optional) : If True, the returned javascript is wrapped in a script tag. (default: True) wrap_plot_info (boolean, optional) : If True, returns ``<div>`` strings. Otherwise, return dicts that can be used to build your own divs. (default: True) If False, the returned dictionary contains the following information: .. code-block:: python { 'modelid': 'The model ID, used with Document.get_model_by_id', 'elementid': 'The css identifier the BokehJS will look for to target the plot', 'docid': 'Used by Bokeh to find the doc embedded in the returned script', } theme (Theme, optional) : Defaults to the ``Theme`` instance in the current document. Setting this to ``None`` uses the default theme or the theme already specified in the document. Any other value must be an instance of the ``Theme`` class. Returns: UTF-8 encoded *(script, div[s])* or *(raw_script, plot_info[s])* Examples: With default wrapping parameter values: .. code-block:: python components(plot) # => (script, plot_div) components((plot1, plot2)) # => (script, (plot1_div, plot2_div)) components({"Plot 1": plot1, "Plot 2": plot2}) # => (script, {"Plot 1": plot1_div, "Plot 2": plot2_div}) Examples: With wrapping parameters set to ``False``: .. code-block:: python components(plot, wrap_script=False, wrap_plot_info=False) # => (javascript, plot_dict) components((plot1, plot2), wrap_script=False, wrap_plot_info=False) # => (javascript, (plot1_dict, plot2_dict)) components({"Plot 1": plot1, "Plot 2": plot2}, wrap_script=False, wrap_plot_info=False) # => (javascript, {"Plot 1": plot1_dict, "Plot 2": plot2_dict}) ''' # 1) Convert single items and dicts into list was_single_object = isinstance(models, Model) or isinstance(models, Document) models = _check_models_or_docs(models) # now convert dict to list, saving keys in the same order model_keys = None dict_type = None if isinstance(models, dict): model_keys = models.keys() dict_type = models.__class__ values = [] # don't just use .values() to ensure we are in the same order as key list for k in model_keys: values.append(models[k]) models = values # 2) Append models to one document. 
# Either pre-existing or new and render
    with OutputDocumentFor(models, apply_theme=theme):
        (docs_json, [render_item]) = standalone_docs_json_and_render_items(models)

    script = bundle_all_models() or ""
    script += script_for_render_items(docs_json, [render_item])
    if wrap_script:
        script = wrap_in_script_tag(script)
    script = encode_utf8(script)

    def div_for_root(root):
        return ROOT_DIV.render(root=root, macros=MACROS)

    if wrap_plot_info:
        results = list(div_for_root(root) for root in render_item.roots)
    else:
        results = render_item.roots

    # 3) convert back to the input shape
    if was_single_object:
        result = results[0]
    elif model_keys is not None:
        result = dict_type(zip(model_keys, results))
    else:
        result = tuple(results)

    return script, result
[ "def", "components", "(", "models", ",", "wrap_script", "=", "True", ",", "wrap_plot_info", "=", "True", ",", "theme", "=", "FromCurdoc", ")", ":", "# 1) Convert single items and dicts into list", "was_single_object", "=", "isinstance", "(", "models", ",", "Model", ")", "or", "isinstance", "(", "models", ",", "Document", ")", "models", "=", "_check_models_or_docs", "(", "models", ")", "# now convert dict to list, saving keys in the same order", "model_keys", "=", "None", "dict_type", "=", "None", "if", "isinstance", "(", "models", ",", "dict", ")", ":", "model_keys", "=", "models", ".", "keys", "(", ")", "dict_type", "=", "models", ".", "__class__", "values", "=", "[", "]", "# don't just use .values() to ensure we are in the same order as key list", "for", "k", "in", "model_keys", ":", "values", ".", "append", "(", "models", "[", "k", "]", ")", "models", "=", "values", "# 2) Append models to one document. Either pre-existing or new and render", "with", "OutputDocumentFor", "(", "models", ",", "apply_theme", "=", "theme", ")", ":", "(", "docs_json", ",", "[", "render_item", "]", ")", "=", "standalone_docs_json_and_render_items", "(", "models", ")", "script", "=", "bundle_all_models", "(", ")", "or", "\"\"", "script", "+=", "script_for_render_items", "(", "docs_json", ",", "[", "render_item", "]", ")", "if", "wrap_script", ":", "script", "=", "wrap_in_script_tag", "(", "script", ")", "script", "=", "encode_utf8", "(", "script", ")", "def", "div_for_root", "(", "root", ")", ":", "return", "ROOT_DIV", ".", "render", "(", "root", "=", "root", ",", "macros", "=", "MACROS", ")", "if", "wrap_plot_info", ":", "results", "=", "list", "(", "div_for_root", "(", "root", ")", "for", "root", "in", "render_item", ".", "roots", ")", "else", ":", "results", "=", "render_item", ".", "roots", "# 3) convert back to the input shape", "if", "was_single_object", ":", "result", "=", "results", "[", "0", "]", "elif", "model_keys", "is", "not", "None", ":", "result", "=", "dict_type", "(", "zip", "(", "model_keys", ",", "results", ")", ")", "else", ":", "result", "=", "tuple", "(", "results", ")", "return", "script", ",", "result" ]
Return HTML components to embed a Bokeh plot. The data for the plot is stored directly in the returned HTML. An example can be found in examples/embed/embed_multiple.py The returned components assume that BokehJS resources are **already loaded**. The html template in which they will be embedded needs to include the following links and scripts tags. The widgets and tables resources are only necessary if the components make use of widgets and tables. .. code-block:: html <link href="http://cdn.pydata.org/bokeh/release/bokeh-x.y.z.min.css" rel="stylesheet" type="text/css"> <link href="http://cdn.pydata.org/bokeh/release/bokeh-widgets-x.y.z.min.css" rel="stylesheet" type="text/css"> <link href="http://cdn.pydata.org/bokeh/release/bokeh-tables-x.y.z.min.css" rel="stylesheet" type="text/css"> <script src="http://cdn.pydata.org/bokeh/release/bokeh-x.y.z.min.js"></script> <script src="http://cdn.pydata.org/bokeh/release/bokeh-widgets-x.y.z.min.js"></script> <script src="http://cdn.pydata.org/bokeh/release/bokeh-tables-x.y.z.min.js"></script> Note that in Jupyter Notebooks, it is not possible to use components and show in the same notebook cell. Args: models (Model|list|dict|tuple) : A single Model, a list/tuple of Models, or a dictionary of keys and Models. wrap_script (boolean, optional) : If True, the returned javascript is wrapped in a script tag. (default: True) wrap_plot_info (boolean, optional) : If True, returns ``<div>`` strings. Otherwise, return dicts that can be used to build your own divs. (default: True) If False, the returned dictionary contains the following information: .. code-block:: python { 'modelid': 'The model ID, used with Document.get_model_by_id', 'elementid': 'The css identifier the BokehJS will look for to target the plot', 'docid': 'Used by Bokeh to find the doc embedded in the returned script', } theme (Theme, optional) : Defaults to the ``Theme`` instance in the current document. Setting this to ``None`` uses the default theme or the theme already specified in the document. Any other value must be an instance of the ``Theme`` class. Returns: UTF-8 encoded *(script, div[s])* or *(raw_script, plot_info[s])* Examples: With default wrapping parameter values: .. code-block:: python components(plot) # => (script, plot_div) components((plot1, plot2)) # => (script, (plot1_div, plot2_div)) components({"Plot 1": plot1, "Plot 2": plot2}) # => (script, {"Plot 1": plot1_div, "Plot 2": plot2_div}) Examples: With wrapping parameters set to ``False``: .. code-block:: python components(plot, wrap_script=False, wrap_plot_info=False) # => (javascript, plot_dict) components((plot1, plot2), wrap_script=False, wrap_plot_info=False) # => (javascript, (plot1_dict, plot2_dict)) components({"Plot 1": plot1, "Plot 2": plot2}, wrap_script=False, wrap_plot_info=False) # => (javascript, {"Plot 1": plot1_dict, "Plot 2": plot2_dict})
[ "Return", "HTML", "components", "to", "embed", "a", "Bokeh", "plot", ".", "The", "data", "for", "the", "plot", "is", "stored", "directly", "in", "the", "returned", "HTML", "." ]
python
train
mvcisback/py-aiger
aiger/common.py
https://github.com/mvcisback/py-aiger/blob/475ae75bd19a54ac5a71aa4dadb8369a009a1627/aiger/common.py#L126-L133
def _ite(test: str, in1: str, in0: str, output: str = None): r"test -> in1 /\ ~test -> in0" assert len({test, in0, in1}) == 3 true_out = bit_flipper([test]) >> or_gate([test, in1], 'true_out') false_out = or_gate([test, in0], 'false_out') return (true_out | false_out) >> and_gate(['true_out', 'false_out'], output)
[ "def", "_ite", "(", "test", ":", "str", ",", "in1", ":", "str", ",", "in0", ":", "str", ",", "output", ":", "str", "=", "None", ")", ":", "assert", "len", "(", "{", "test", ",", "in0", ",", "in1", "}", ")", "==", "3", "true_out", "=", "bit_flipper", "(", "[", "test", "]", ")", ">>", "or_gate", "(", "[", "test", ",", "in1", "]", ",", "'true_out'", ")", "false_out", "=", "or_gate", "(", "[", "test", ",", "in0", "]", ",", "'false_out'", ")", "return", "(", "true_out", "|", "false_out", ")", ">>", "and_gate", "(", "[", "'true_out'", ",", "'false_out'", "]", ",", "output", ")" ]
r"test -> in1 /\ ~test -> in0
[ "r", "test", "-", ">", "in1", "/", "\\", "~test", "-", ">", "in0" ]
python
train
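The construction computes (~test | in1) & (test | in0), which is exactly if-then-else; a plain-Python truth-table check of that identity (no aiger objects needed):

# Truth-table check: (~t | a) & (t | b) == (a if t else b) for all inputs.
for t in (False, True):
    for a in (False, True):
        for b in (False, True):
            lhs = ((not t) or a) and (t or b)
            assert lhs == (a if t else b)
print('ite identity holds')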
shopkick/flawless
flawless/client/client.py
https://github.com/shopkick/flawless/blob/c54b63ca1991c153e6f75080536f6df445aacc64/flawless/client/client.py#L173-L228
def record_error(hostname, exc_info, preceding_stack=None, error_threshold=None, additional_info=None): ''' Helper function to record errors to the flawless backend ''' stack = [] exc_type, exc_value, sys_traceback = exc_info while sys_traceback is not None: stack.append(sys_traceback) sys_traceback = sys_traceback.tb_next stack_lines = [] for row in preceding_stack or []: stack_lines.append( api_ttypes.StackLine(filename=os.path.abspath(row[0]), line_number=row[1], function_name=row[2], text=row[3]) ) for index, tb in enumerate(stack): filename = tb.tb_frame.f_code.co_filename func_name = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno line = linecache.getline(filename, lineno, tb.tb_frame.f_globals) frame_locals = None if index >= (len(stack) - NUM_FRAMES_TO_SAVE): # Include some limits on max string length & number of variables to keep things from getting # out of hand frame_locals = dict((k, _myrepr(k, v)) for k, v in list(tb.tb_frame.f_locals.items())[:MAX_LOCALS] if k != "self") if "self" in tb.tb_frame.f_locals and hasattr(tb.tb_frame.f_locals["self"], "__dict__"): frame_locals.update(dict(("self." + k, _myrepr(k, v)) for k, v in list(tb.tb_frame.f_locals["self"].__dict__.items())[:MAX_LOCALS] if k != "self")) stack_lines.append( api_ttypes.StackLine(filename=os.path.abspath(filename), line_number=lineno, function_name=func_name, text=line, frame_locals=frame_locals) ) # Check LRU cache & potentially do not send error report if this client has already reported this error # several times. key = CachedErrorInfo.get_hash_key(stack_lines) info = ERROR_CACHE.get(key) or CachedErrorInfo() info.increment() ERROR_CACHE[key] = info if info.should_report(): error_count = info.mark_reported() _send_request( api_ttypes.RecordErrorRequest( traceback=stack_lines, exception_message=repr(exc_value), exception_type=exc_type.__module__ + "." + exc_type.__name__, hostname=hostname, error_threshold=error_threshold, additional_info=additional_info, error_count=error_count, ) )
[ "def", "record_error", "(", "hostname", ",", "exc_info", ",", "preceding_stack", "=", "None", ",", "error_threshold", "=", "None", ",", "additional_info", "=", "None", ")", ":", "stack", "=", "[", "]", "exc_type", ",", "exc_value", ",", "sys_traceback", "=", "exc_info", "while", "sys_traceback", "is", "not", "None", ":", "stack", ".", "append", "(", "sys_traceback", ")", "sys_traceback", "=", "sys_traceback", ".", "tb_next", "stack_lines", "=", "[", "]", "for", "row", "in", "preceding_stack", "or", "[", "]", ":", "stack_lines", ".", "append", "(", "api_ttypes", ".", "StackLine", "(", "filename", "=", "os", ".", "path", ".", "abspath", "(", "row", "[", "0", "]", ")", ",", "line_number", "=", "row", "[", "1", "]", ",", "function_name", "=", "row", "[", "2", "]", ",", "text", "=", "row", "[", "3", "]", ")", ")", "for", "index", ",", "tb", "in", "enumerate", "(", "stack", ")", ":", "filename", "=", "tb", ".", "tb_frame", ".", "f_code", ".", "co_filename", "func_name", "=", "tb", ".", "tb_frame", ".", "f_code", ".", "co_name", "lineno", "=", "tb", ".", "tb_lineno", "line", "=", "linecache", ".", "getline", "(", "filename", ",", "lineno", ",", "tb", ".", "tb_frame", ".", "f_globals", ")", "frame_locals", "=", "None", "if", "index", ">=", "(", "len", "(", "stack", ")", "-", "NUM_FRAMES_TO_SAVE", ")", ":", "# Include some limits on max string length & number of variables to keep things from getting", "# out of hand", "frame_locals", "=", "dict", "(", "(", "k", ",", "_myrepr", "(", "k", ",", "v", ")", ")", "for", "k", ",", "v", "in", "list", "(", "tb", ".", "tb_frame", ".", "f_locals", ".", "items", "(", ")", ")", "[", ":", "MAX_LOCALS", "]", "if", "k", "!=", "\"self\"", ")", "if", "\"self\"", "in", "tb", ".", "tb_frame", ".", "f_locals", "and", "hasattr", "(", "tb", ".", "tb_frame", ".", "f_locals", "[", "\"self\"", "]", ",", "\"__dict__\"", ")", ":", "frame_locals", ".", "update", "(", "dict", "(", "(", "\"self.\"", "+", "k", ",", "_myrepr", "(", "k", ",", "v", ")", ")", "for", "k", ",", "v", "in", "list", "(", "tb", ".", "tb_frame", ".", "f_locals", "[", "\"self\"", "]", ".", "__dict__", ".", "items", "(", ")", ")", "[", ":", "MAX_LOCALS", "]", "if", "k", "!=", "\"self\"", ")", ")", "stack_lines", ".", "append", "(", "api_ttypes", ".", "StackLine", "(", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", ",", "line_number", "=", "lineno", ",", "function_name", "=", "func_name", ",", "text", "=", "line", ",", "frame_locals", "=", "frame_locals", ")", ")", "# Check LRU cache & potentially do not send error report if this client has already reported this error", "# several times.", "key", "=", "CachedErrorInfo", ".", "get_hash_key", "(", "stack_lines", ")", "info", "=", "ERROR_CACHE", ".", "get", "(", "key", ")", "or", "CachedErrorInfo", "(", ")", "info", ".", "increment", "(", ")", "ERROR_CACHE", "[", "key", "]", "=", "info", "if", "info", ".", "should_report", "(", ")", ":", "error_count", "=", "info", ".", "mark_reported", "(", ")", "_send_request", "(", "api_ttypes", ".", "RecordErrorRequest", "(", "traceback", "=", "stack_lines", ",", "exception_message", "=", "repr", "(", "exc_value", ")", ",", "exception_type", "=", "exc_type", ".", "__module__", "+", "\".\"", "+", "exc_type", ".", "__name__", ",", "hostname", "=", "hostname", ",", "error_threshold", "=", "error_threshold", ",", "additional_info", "=", "additional_info", ",", "error_count", "=", "error_count", ",", ")", ")" ]
Helper function to record errors to the flawless backend
[ "Helper", "function", "to", "record", "errors", "to", "the", "flawless", "backend" ]
python
test
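A sketch of feeding record_error from an except block; the hostname comes from socket, and the threshold and info values are arbitrary placeholders.

# Sketch: capturing the active exception for the flawless backend.
import socket
import sys

try:
    1 / 0
except Exception:
    record_error(hostname=socket.gethostname(),
                 exc_info=sys.exc_info(),
                 error_threshold=1,
                 additional_info='manual capture sketch')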
inveniosoftware-attic/invenio-comments
invenio_comments/api.py
https://github.com/inveniosoftware-attic/invenio-comments/blob/62bb6e07c146baf75bf8de80b5896ab2a01a8423/invenio_comments/api.py#L954-L1121
def query_add_comment_or_remark(reviews=0, recID=0, uid=-1, msg="",
                                note="", score=0, priority=0,
                                client_ip_address='', editor_type='textarea',
                                req=None, reply_to=None, attached_files=None):
    """
    Private function
    Insert a comment/review or remark into the database
    :param recID: record id
    :param uid: user id
    :param msg: comment body
    :param note: comment title
    :param score: review star score
    :param priority: remark priority #!FIXME
    :param editor_type: the kind of editor used to submit the comment:
                        'textarea', 'ckeditor'
    :param req: request object. If provided, email notifications are sent
                after we reply to the user request.
    :param reply_to: the id of the comment we are replying to with this
                     inserted comment.
    :return: integer >0 representing id if successful, integer 0 if not
    """
    current_date = calculate_start_date('0d')
    # change utf-8 message into general unicode
    msg = msg.decode('utf-8')
    note = note.decode('utf-8')
    # change general unicode back to utf-8
    msg = msg.encode('utf-8')
    note = note.encode('utf-8')
    (restriction, round_name) = get_record_status(recID)
    if attached_files is None:
        attached_files = {}
    if reply_to and CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH >= 0:
        # Check that we have not reached max depth
        comment_ancestors = get_comment_ancestors(reply_to)
        if len(comment_ancestors) >= CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH:
            if CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH == 0:
                reply_to = None
            else:
                reply_to = comment_ancestors[
                    CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH - 1]
        # Inherit restriction and group/round of 'parent'
        comment = query_get_comment(reply_to)
        if comment:
            (round_name, restriction) = comment[10:12]
    if editor_type == 'ckeditor':
        # Here we remove the line feeds introduced by CKEditor (they
        # have no meaning for the user) and replace the HTML line
        # breaks by linefeeds, so that we are close to an input that
        # would be done without the CKEditor. That's much better if a
        # reply to a comment is made with a browser that does not
        # support CKEditor.
        msg = msg.replace('\n', '').replace('\r', '')

        # We clean the quotes that could have been introduced by
        # CKEditor when clicking the 'quote' button, as well as those
        # that we have introduced when quoting the original message.
# We can however not use directly '>>' chars to quote, as it # will be washed/fixed when calling tidy_html(): double-escape # all &gt; first, and use &gt;&gt; msg = msg.replace('&gt;', '&amp;gt;') msg = re.sub('^\s*<blockquote', '<br/> <blockquote', msg) msg = re.sub('<blockquote.*?>\s*<(p|div).*?>', '&gt;&gt;', msg) msg = re.sub('</(p|div)>\s*</blockquote>', '', msg) # Then definitely remove any blockquote, whatever it is msg = re.sub('<blockquote.*?>', '<div>', msg) msg = re.sub('</blockquote>', '</div>', msg) # Tidy up the HTML msg = tidy_html(msg) # We remove EOL that might have been introduced when tidying msg = msg.replace('\n', '').replace('\r', '') # Now that HTML has been cleaned, unescape &gt; msg = msg.replace('&gt;', '>') msg = msg.replace('&amp;gt;', '&gt;') msg = re.sub('<br .*?(/>)', '\n', msg) msg = msg.replace('&nbsp;', ' ') # In case additional <p> or <div> got inserted, interpret # these as new lines (with a sad trick to do it only once) # (note that it has been deactivated, as it is messing up # indentation with >>) #msg = msg.replace('</div><', '</div>\n<') #msg = msg.replace('</p><', '</p>\n<') query = """INSERT INTO "cmtRECORDCOMMENT" (id_bibrec, id_user, body, date_creation, star_score, nb_votes_total, title, round_name, restriction, "in_reply_to_id_cmtRECORDCOMMENT") VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""" params = ( recID, uid, msg, current_date, score, 0, note, round_name, restriction, reply_to or 0) res = run_sql(query, params) if res: new_comid = int(res) move_attached_files_to_storage(attached_files, recID, new_comid) parent_reply_order = run_sql( """SELECT reply_order_cached_data from "cmtRECORDCOMMENT" where id=%s""", (reply_to, )) if not parent_reply_order or parent_reply_order[0][0] is None: # This is not a reply, but a first 0-level comment parent_reply_order = '' else: parent_reply_order = parent_reply_order[0][0] run_sql( """UPDATE "cmtRECORDCOMMENT" SET reply_order_cached_data=%s WHERE id=%s""", (parent_reply_order + get_reply_order_cache_data(new_comid), new_comid)) action_code = CFG_WEBCOMMENT_ACTION_CODE[ reviews and 'ADD_REVIEW' or 'ADD_COMMENT'] action_time = convert_datestruct_to_datetext(time.localtime()) query2 = """INSERT INTO "cmtACTIONHISTORY" ("id_cmtRECORDCOMMENT", id_bibrec, id_user, client_host, action_time, action_code) VALUES (%s, %s, %s, inet_aton(%s), %s, %s)""" params2 = ( res, recID, uid, client_ip_address, action_time, action_code) run_sql(query2, params2) def notify_subscribers_callback(data): """ Define a callback that retrieves subscribed users, and notify them by email. :param data: contains the necessary parameters in a tuple: (recid, uid, comid, msg, note, score, editor_type, reviews) """ recid, uid, comid, msg, note, score, editor_type, reviews = data # Email this comment to 'subscribers' (subscribers_emails1, subscribers_emails2) = \ get_users_subscribed_to_discussion(recid) email_subscribers_about_new_comment( recid, reviews=reviews, emails1=subscribers_emails1, emails2=subscribers_emails2, comID=comid, msg=msg, note=note, score=score, editor_type=editor_type, uid=uid) # Register our callback to notify subscribed people after # having replied to our current user. data = (recID, uid, res, msg, note, score, editor_type, reviews) if req: req.register_cleanup(notify_subscribers_callback, data) else: notify_subscribers_callback(data) return int(res)
[ "def", "query_add_comment_or_remark", "(", "reviews", "=", "0", ",", "recID", "=", "0", ",", "uid", "=", "-", "1", ",", "msg", "=", "\"\"", ",", "note", "=", "\"\"", ",", "score", "=", "0", ",", "priority", "=", "0", ",", "client_ip_address", "=", "''", ",", "editor_type", "=", "'textarea'", ",", "req", "=", "None", ",", "reply_to", "=", "None", ",", "attached_files", "=", "None", ")", ":", "current_date", "=", "calculate_start_date", "(", "'0d'", ")", "# change utf-8 message into general unicode", "msg", "=", "msg", ".", "decode", "(", "'utf-8'", ")", "note", "=", "note", ".", "decode", "(", "'utf-8'", ")", "# change general unicode back to utf-8", "msg", "=", "msg", ".", "encode", "(", "'utf-8'", ")", "note", "=", "note", ".", "encode", "(", "'utf-8'", ")", "(", "restriction", ",", "round_name", ")", "=", "get_record_status", "(", "recID", ")", "if", "attached_files", "is", "None", ":", "attached_files", "=", "{", "}", "if", "reply_to", "and", "CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH", ">=", "0", ":", "# Check that we have not reached max depth", "comment_ancestors", "=", "get_comment_ancestors", "(", "reply_to", ")", "if", "len", "(", "comment_ancestors", ")", ">=", "CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH", ":", "if", "CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH", "==", "0", ":", "reply_to", "=", "None", "else", ":", "reply_to", "=", "comment_ancestors", "[", "CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH", "-", "1", "]", "# Inherit restriction and group/round of 'parent'", "comment", "=", "query_get_comment", "(", "reply_to", ")", "if", "comment", ":", "(", "round_name", ",", "restriction", ")", "=", "comment", "[", "10", ":", "12", "]", "if", "editor_type", "==", "'ckeditor'", ":", "# Here we remove the line feeds introduced by CKEditor (they", "# have no meaning for the user) and replace the HTML line", "# breaks by linefeeds, so that we are close to an input that", "# would be done without the CKEditor. 
That's much better if a", "# reply to a comment is made with a browser that does not", "# support CKEditor.", "msg", "=", "msg", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "replace", "(", "'\\r'", ",", "''", ")", "# We clean the quotes that could have been introduced by", "# CKEditor when clicking the 'quote' button, as well as those", "# that we have introduced when quoting the original message.", "# We can however not use directly '>>' chars to quote, as it", "# will be washed/fixed when calling tidy_html(): double-escape", "# all &gt; first, and use &gt;&gt;", "msg", "=", "msg", ".", "replace", "(", "'&gt;'", ",", "'&amp;gt;'", ")", "msg", "=", "re", ".", "sub", "(", "'^\\s*<blockquote'", ",", "'<br/> <blockquote'", ",", "msg", ")", "msg", "=", "re", ".", "sub", "(", "'<blockquote.*?>\\s*<(p|div).*?>'", ",", "'&gt;&gt;'", ",", "msg", ")", "msg", "=", "re", ".", "sub", "(", "'</(p|div)>\\s*</blockquote>'", ",", "''", ",", "msg", ")", "# Then definitely remove any blockquote, whatever it is", "msg", "=", "re", ".", "sub", "(", "'<blockquote.*?>'", ",", "'<div>'", ",", "msg", ")", "msg", "=", "re", ".", "sub", "(", "'</blockquote>'", ",", "'</div>'", ",", "msg", ")", "# Tidy up the HTML", "msg", "=", "tidy_html", "(", "msg", ")", "# We remove EOL that might have been introduced when tidying", "msg", "=", "msg", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "replace", "(", "'\\r'", ",", "''", ")", "# Now that HTML has been cleaned, unescape &gt;", "msg", "=", "msg", ".", "replace", "(", "'&gt;'", ",", "'>'", ")", "msg", "=", "msg", ".", "replace", "(", "'&amp;gt;'", ",", "'&gt;'", ")", "msg", "=", "re", ".", "sub", "(", "'<br .*?(/>)'", ",", "'\\n'", ",", "msg", ")", "msg", "=", "msg", ".", "replace", "(", "'&nbsp;'", ",", "' '", ")", "# In case additional <p> or <div> got inserted, interpret", "# these as new lines (with a sad trick to do it only once)", "# (note that it has been deactivated, as it is messing up", "# indentation with >>)", "#msg = msg.replace('</div><', '</div>\\n<')", "#msg = msg.replace('</p><', '</p>\\n<')", "query", "=", "\"\"\"INSERT INTO \"cmtRECORDCOMMENT\" (id_bibrec,\n id_user,\n body,\n date_creation,\n star_score,\n nb_votes_total,\n title,\n round_name,\n restriction,\n \"in_reply_to_id_cmtRECORDCOMMENT\")\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"", "params", "=", "(", "recID", ",", "uid", ",", "msg", ",", "current_date", ",", "score", ",", "0", ",", "note", ",", "round_name", ",", "restriction", ",", "reply_to", "or", "0", ")", "res", "=", "run_sql", "(", "query", ",", "params", ")", "if", "res", ":", "new_comid", "=", "int", "(", "res", ")", "move_attached_files_to_storage", "(", "attached_files", ",", "recID", ",", "new_comid", ")", "parent_reply_order", "=", "run_sql", "(", "\"\"\"SELECT reply_order_cached_data from \"cmtRECORDCOMMENT\" where id=%s\"\"\"", ",", "(", "reply_to", ",", ")", ")", "if", "not", "parent_reply_order", "or", "parent_reply_order", "[", "0", "]", "[", "0", "]", "is", "None", ":", "# This is not a reply, but a first 0-level comment", "parent_reply_order", "=", "''", "else", ":", "parent_reply_order", "=", "parent_reply_order", "[", "0", "]", "[", "0", "]", "run_sql", "(", "\"\"\"UPDATE \"cmtRECORDCOMMENT\" SET reply_order_cached_data=%s WHERE id=%s\"\"\"", ",", "(", "parent_reply_order", "+", "get_reply_order_cache_data", "(", "new_comid", ")", ",", "new_comid", ")", ")", "action_code", "=", "CFG_WEBCOMMENT_ACTION_CODE", "[", "reviews", "and", "'ADD_REVIEW'", "or", "'ADD_COMMENT'", "]", "action_time", "=", 
"convert_datestruct_to_datetext", "(", "time", ".", "localtime", "(", ")", ")", "query2", "=", "\"\"\"INSERT INTO \"cmtACTIONHISTORY\" (\"id_cmtRECORDCOMMENT\",\n id_bibrec, id_user, client_host, action_time, action_code)\n VALUES (%s, %s, %s, inet_aton(%s), %s, %s)\"\"\"", "params2", "=", "(", "res", ",", "recID", ",", "uid", ",", "client_ip_address", ",", "action_time", ",", "action_code", ")", "run_sql", "(", "query2", ",", "params2", ")", "def", "notify_subscribers_callback", "(", "data", ")", ":", "\"\"\"\n Define a callback that retrieves subscribed users, and\n notify them by email.\n\n :param data: contains the necessary parameters in a tuple:\n (recid, uid, comid, msg, note, score, editor_type, reviews)\n \"\"\"", "recid", ",", "uid", ",", "comid", ",", "msg", ",", "note", ",", "score", ",", "editor_type", ",", "reviews", "=", "data", "# Email this comment to 'subscribers'", "(", "subscribers_emails1", ",", "subscribers_emails2", ")", "=", "get_users_subscribed_to_discussion", "(", "recid", ")", "email_subscribers_about_new_comment", "(", "recid", ",", "reviews", "=", "reviews", ",", "emails1", "=", "subscribers_emails1", ",", "emails2", "=", "subscribers_emails2", ",", "comID", "=", "comid", ",", "msg", "=", "msg", ",", "note", "=", "note", ",", "score", "=", "score", ",", "editor_type", "=", "editor_type", ",", "uid", "=", "uid", ")", "# Register our callback to notify subscribed people after", "# having replied to our current user.", "data", "=", "(", "recID", ",", "uid", ",", "res", ",", "msg", ",", "note", ",", "score", ",", "editor_type", ",", "reviews", ")", "if", "req", ":", "req", ".", "register_cleanup", "(", "notify_subscribers_callback", ",", "data", ")", "else", ":", "notify_subscribers_callback", "(", "data", ")", "return", "int", "(", "res", ")" ]
Private function Insert a comment/review or remark into the database :param recID: record id :param uid: user id :param msg: comment body :param note: comment title :param score: review star score :param priority: remark priority #!FIXME :param editor_type: the kind of editor used to submit the comment: 'textarea', 'ckeditor' :param req: request object. If provided, email notifications are sent after we reply to user request. :param reply_to: the id of the comment we are replying to with this inserted comment. :return: integer >0 representing id if successful, integer 0 if not
[ "Private", "function", "Insert", "a", "comment", "/", "review", "or", "remarkinto", "the", "database", ":", "param", "recID", ":", "record", "id", ":", "param", "uid", ":", "user", "id", ":", "param", "msg", ":", "comment", "body", ":", "param", "note", ":", "comment", "title", ":", "param", "score", ":", "review", "star", "score", ":", "param", "priority", ":", "remark", "priority", "#!FIXME", ":", "param", "editor_type", ":", "the", "kind", "of", "editor", "used", "to", "submit", "the", "comment", ":", "textarea", "ckeditor", ":", "param", "req", ":", "request", "object", ".", "If", "provided", "email", "notification", "are", "sent", "after", "we", "reply", "to", "user", "request", ".", ":", "param", "reply_to", ":", "the", "id", "of", "the", "comment", "we", "are", "replying", "to", "with", "this", "inserted", "comment", ".", ":", "return", ":", "integer", ">", "0", "representing", "id", "if", "successful", "integer", "0", "if", "not" ]
python
train
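The trickiest part of the record above is the CKEditor quote cleanup. A standalone sketch of just that rewrite, with the external tidy_html() step omitted since it is not part of this snippet:

import re

def strip_ckeditor_quotes(msg):
    # Double-escape pre-existing &gt; so genuine '>' characters survive
    # the quote rewrite, exactly as the function above does.
    msg = msg.replace('&gt;', '&amp;gt;')
    msg = re.sub(r'^\s*<blockquote', '<br/> <blockquote', msg)
    msg = re.sub(r'<blockquote.*?>\s*<(p|div).*?>', '&gt;&gt;', msg)
    msg = re.sub(r'</(p|div)>\s*</blockquote>', '', msg)
    # Any remaining blockquotes become plain divs.
    msg = re.sub(r'<blockquote.*?>', '<div>', msg)
    msg = re.sub(r'</blockquote>', '</div>', msg)
    # Unescape in two steps, mirroring the original ordering.
    msg = msg.replace('&gt;', '>')
    msg = msg.replace('&amp;gt;', '&gt;')
    return msg

print(strip_ckeditor_quotes('<blockquote><p>quoted text</p></blockquote>'))
# -> '<br/> >>quoted text'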
rodluger/everest
everest/basecamp.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L57-L70
def show(self): """Show the overfitting PDF summary.""" try: if platform.system().lower().startswith('darwin'): subprocess.call(['open', self.pdf]) elif os.name == 'nt': os.startfile(self.pdf) elif os.name == 'posix': subprocess.call(['xdg-open', self.pdf]) else: raise IOError("") except IOError: log.info("Unable to open the pdf. Try opening it manually:") log.info(self.pdf)
[ "def", "show", "(", "self", ")", ":", "try", ":", "if", "platform", ".", "system", "(", ")", ".", "lower", "(", ")", ".", "startswith", "(", "'darwin'", ")", ":", "subprocess", ".", "call", "(", "[", "'open'", ",", "self", ".", "pdf", "]", ")", "elif", "os", ".", "name", "==", "'nt'", ":", "os", ".", "startfile", "(", "self", ".", "pdf", ")", "elif", "os", ".", "name", "==", "'posix'", ":", "subprocess", ".", "call", "(", "[", "'xdg-open'", ",", "self", ".", "pdf", "]", ")", "else", ":", "raise", "IOError", "(", "\"\"", ")", "except", "IOError", ":", "log", ".", "info", "(", "\"Unable to open the pdf. Try opening it manually:\"", ")", "log", ".", "info", "(", "self", ".", "pdf", ")" ]
Show the overfitting PDF summary.
[ "Show", "the", "overfitting", "PDF", "summary", "." ]
python
train
openvax/varcode
varcode/effects/effect_collection.py
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L272-L301
def to_dataframe(self): """Build a dataframe from the effect collection""" # list of properties to extract from Variant objects if they're # not None variant_properties = [ "contig", "start", "ref", "alt", "is_snv", "is_transversion", "is_transition" ] def row_from_effect(effect): row = OrderedDict() row['variant'] = str(effect.variant.short_description) for field_name in variant_properties: # if effect.variant is None then this column value will be None row[field_name] = getattr(effect.variant, field_name, None) row['gene_id'] = effect.gene_id row['gene_name'] = effect.gene_name row['transcript_id'] = effect.transcript_id row['transcript_name'] = effect.transcript_name row['effect_type'] = effect.__class__.__name__ row['effect'] = effect.short_description return row return pd.DataFrame.from_records([row_from_effect(effect) for effect in self])
[ "def", "to_dataframe", "(", "self", ")", ":", "# list of properties to extract from Variant objects if they're", "# not None", "variant_properties", "=", "[", "\"contig\"", ",", "\"start\"", ",", "\"ref\"", ",", "\"alt\"", ",", "\"is_snv\"", ",", "\"is_transversion\"", ",", "\"is_transition\"", "]", "def", "row_from_effect", "(", "effect", ")", ":", "row", "=", "OrderedDict", "(", ")", "row", "[", "'variant'", "]", "=", "str", "(", "effect", ".", "variant", ".", "short_description", ")", "for", "field_name", "in", "variant_properties", ":", "# if effect.variant is None then this column value will be None", "row", "[", "field_name", "]", "=", "getattr", "(", "effect", ".", "variant", ",", "field_name", ",", "None", ")", "row", "[", "'gene_id'", "]", "=", "effect", ".", "gene_id", "row", "[", "'gene_name'", "]", "=", "effect", ".", "gene_name", "row", "[", "'transcript_id'", "]", "=", "effect", ".", "transcript_id", "row", "[", "'transcript_name'", "]", "=", "effect", ".", "transcript_name", "row", "[", "'effect_type'", "]", "=", "effect", ".", "__class__", ".", "__name__", "row", "[", "'effect'", "]", "=", "effect", ".", "short_description", "return", "row", "return", "pd", ".", "DataFrame", ".", "from_records", "(", "[", "row_from_effect", "(", "effect", ")", "for", "effect", "in", "self", "]", ")" ]
Build a dataframe from the effect collection
[ "Build", "a", "dataframe", "from", "the", "effect", "collection" ]
python
train
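A self-contained sketch of the row-building pattern used above; the stub classes stand in for real varcode effect objects, which is why getattr() with a None default matters:

from collections import OrderedDict
import pandas as pd

class StubVariant(object):
    contig, start, ref, alt = "chr1", 100, "A", "T"
    short_description = "chr1 g.100A>T"

class StubEffect(object):
    variant = StubVariant()
    gene_name = "TP53"
    short_description = "p.R175H"

def row_from_effect(effect):
    row = OrderedDict()
    row["variant"] = str(effect.variant.short_description)
    # getattr with a default tolerates stubs (or None variants) that lack
    # a property, just like the real row builder.
    row["contig"] = getattr(effect.variant, "contig", None)
    row["gene_name"] = effect.gene_name
    row["effect"] = effect.short_description
    return row

print(pd.DataFrame.from_records([row_from_effect(StubEffect())]))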
tcalmant/ipopo
samples/rsa/helloimpl.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/samples/rsa/helloimpl.py#L58-L71
def sayHelloPromise(self, name="Not given", message="nothing"): """ Implementation of IHello.sayHelloPromise. This method will be executed via some thread, and the remote caller will not block. """ print( "Python.sayHelloPromise called by: {0} " "with message: '{1}'".format(name, message) ) return ( "PythonPromise says: Howdy {0} " "that's a nice runtime you got there".format(name) )
[ "def", "sayHelloPromise", "(", "self", ",", "name", "=", "\"Not given\"", ",", "message", "=", "\"nothing\"", ")", ":", "print", "(", "\"Python.sayHelloPromise called by: {0} \"", "\"with message: '{1}'\"", ".", "format", "(", "name", ",", "message", ")", ")", "return", "(", "\"PythonPromise says: Howdy {0} \"", "\"that's a nice runtime you got there\"", ".", "format", "(", "name", ")", ")" ]
Implementation of IHello.sayHelloPromise. This method will be executed via some thread, and the remote caller will not block.
[ "Implementation", "of", "IHello", ".", "sayHelloPromise", ".", "This", "method", "will", "be", "executed", "via", "some", "thread", "and", "the", "remote", "caller", "will", "not", "block", "." ]
python
train
geophysics-ubonn/crtomo_tools
lib/crtomo/grid.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L417-L427
def get_electrode_node(self, electrode): """ For a given electrode (e.g. from a config.dat file), return the true node number as in self.nodes['sorted'] """ elec_node_raw = int(self.electrodes[electrode - 1][0]) if(self.header['cutmck']): elec_node = self.nodes['rev_cutmck_index'][elec_node_raw] else: elec_node = elec_node_raw - 1 return int(elec_node)
[ "def", "get_electrode_node", "(", "self", ",", "electrode", ")", ":", "elec_node_raw", "=", "int", "(", "self", ".", "electrodes", "[", "electrode", "-", "1", "]", "[", "0", "]", ")", "if", "(", "self", ".", "header", "[", "'cutmck'", "]", ")", ":", "elec_node", "=", "self", ".", "nodes", "[", "'rev_cutmck_index'", "]", "[", "elec_node_raw", "]", "else", ":", "elec_node", "=", "elec_node_raw", "-", "1", "return", "int", "(", "elec_node", ")" ]
For a given electrode (e.g. from a config.dat file), return the true node number as in self.nodes['sorted']
[ "For", "a", "given", "electrode", "(", "e", ".", "g", ".", "from", "a", "config", ".", "dat", "file", ")", "return", "the", "true", "node", "number", "as", "in", "self", ".", "nodes", "[", "sorted", "]" ]
python
train
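A toy illustration of the lookup above — the electrode table and the reverse Cuthill-McKee index are invented numbers, only meant to show the two code paths:

nodes = {"rev_cutmck_index": {5: 2, 7: 0, 9: 1}}
electrodes = [(5, 0.0, 0.0), (7, 1.0, 0.0), (9, 2.0, 0.0)]

def electrode_node(electrode, cutmck):
    # Electrode numbers are 1-based; raw node ids come from the grid file.
    raw = int(electrodes[electrode - 1][0])
    if cutmck:
        return nodes["rev_cutmck_index"][raw]
    return raw - 1

print(electrode_node(1, cutmck=True))   # -> 2 (translated through the index)
print(electrode_node(1, cutmck=False))  # -> 4 (just shifted to 0-based)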
arkottke/pysra
pysra/propagation.py
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/propagation.py#L284-L338
def _calc_waves(self, angular_freqs, profile): """Compute the wave numbers and amplitudes (up- and down-going). Parameters ---------- angular_freqs: :class:`numpy.ndarray` Angular frequency at which the waves are computed. profile: :class:`~.base.site.Profile` Site profile. """ # Compute the complex wave numbers of the system wave_nums = np.empty((len(profile), len(angular_freqs)), np.complex) for i, l in enumerate(profile): wave_nums[i, :] = angular_freqs / l.comp_shear_vel # Compute the waves. In the top surface layer, the up-going and # down-going waves have an amplitude of 1 as they are completely # reflected at the surface. waves_a = np.ones_like(wave_nums, np.complex) waves_b = np.ones_like(wave_nums, np.complex) for i, l in enumerate(profile[:-1]): # Complex impedance -- wave number can be zero which causes an # error. with np.errstate(invalid='ignore'): cimped = ((wave_nums[i] * l.comp_shear_mod) / (wave_nums[i + 1] * profile[i + 1].comp_shear_mod)) # Complex term to simplify equations -- uses full layer height cterm = 1j * wave_nums[i, :] * l.thickness waves_a[i + 1, :] = ( 0.5 * waves_a[i] * (1 + cimped) * np.exp(cterm) + 0.5 * waves_b[i] * (1 - cimped) * np.exp(-cterm)) waves_b[i + 1, :] = ( 0.5 * waves_a[i] * (1 - cimped) * np.exp(cterm) + 0.5 * waves_b[i] * (1 + cimped) * np.exp(-cterm)) # Set wave amplitudes with zero frequency to 1 mask = ~np.isfinite(cimped) waves_a[i + 1, mask] = 1. waves_b[i + 1, mask] = 1. # fixme: Better way to handle this? # Set wave amplitudes to 1 at frequencies near 0 mask = np.isclose(angular_freqs, 0) waves_a[-1, mask] = 1. waves_b[-1, mask] = 1. self._waves_a = waves_a self._waves_b = waves_b self._wave_nums = wave_nums
[ "def", "_calc_waves", "(", "self", ",", "angular_freqs", ",", "profile", ")", ":", "# Compute the complex wave numbers of the system", "wave_nums", "=", "np", ".", "empty", "(", "(", "len", "(", "profile", ")", ",", "len", "(", "angular_freqs", ")", ")", ",", "np", ".", "complex", ")", "for", "i", ",", "l", "in", "enumerate", "(", "profile", ")", ":", "wave_nums", "[", "i", ",", ":", "]", "=", "angular_freqs", "/", "l", ".", "comp_shear_vel", "# Compute the waves. In the top surface layer, the up-going and", "# down-going waves have an amplitude of 1 as they are completely", "# reflected at the surface.", "waves_a", "=", "np", ".", "ones_like", "(", "wave_nums", ",", "np", ".", "complex", ")", "waves_b", "=", "np", ".", "ones_like", "(", "wave_nums", ",", "np", ".", "complex", ")", "for", "i", ",", "l", "in", "enumerate", "(", "profile", "[", ":", "-", "1", "]", ")", ":", "# Complex impedance -- wave number can be zero which causes an", "# error.", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ")", ":", "cimped", "=", "(", "(", "wave_nums", "[", "i", "]", "*", "l", ".", "comp_shear_mod", ")", "/", "(", "wave_nums", "[", "i", "+", "1", "]", "*", "profile", "[", "i", "+", "1", "]", ".", "comp_shear_mod", ")", ")", "# Complex term to simplify equations -- uses full layer height", "cterm", "=", "1j", "*", "wave_nums", "[", "i", ",", ":", "]", "*", "l", ".", "thickness", "waves_a", "[", "i", "+", "1", ",", ":", "]", "=", "(", "0.5", "*", "waves_a", "[", "i", "]", "*", "(", "1", "+", "cimped", ")", "*", "np", ".", "exp", "(", "cterm", ")", "+", "0.5", "*", "waves_b", "[", "i", "]", "*", "(", "1", "-", "cimped", ")", "*", "np", ".", "exp", "(", "-", "cterm", ")", ")", "waves_b", "[", "i", "+", "1", ",", ":", "]", "=", "(", "0.5", "*", "waves_a", "[", "i", "]", "*", "(", "1", "-", "cimped", ")", "*", "np", ".", "exp", "(", "cterm", ")", "+", "0.5", "*", "waves_b", "[", "i", "]", "*", "(", "1", "+", "cimped", ")", "*", "np", ".", "exp", "(", "-", "cterm", ")", ")", "# Set wave amplitudes with zero frequency to 1", "mask", "=", "~", "np", ".", "isfinite", "(", "cimped", ")", "waves_a", "[", "i", "+", "1", ",", "mask", "]", "=", "1.", "waves_b", "[", "i", "+", "1", ",", "mask", "]", "=", "1.", "# fixme: Better way to handle this?", "# Set wave amplitudes to 1 at frequencies near 0", "mask", "=", "np", ".", "isclose", "(", "angular_freqs", ",", "0", ")", "waves_a", "[", "-", "1", ",", "mask", "]", "=", "1.", "waves_b", "[", "-", "1", ",", "mask", "]", "=", "1.", "self", ".", "_waves_a", "=", "waves_a", "self", ".", "_waves_b", "=", "waves_b", "self", ".", "_wave_nums", "=", "wave_nums" ]
Compute the wave numbers and amplitudes (up- and down-going). Parameters ---------- angular_freqs: :class:`numpy.ndarray` Angular frequency at which the waves are computed. profile: :class:`~.base.site.Profile` Site profile.
[ "Compute", "the", "wave", "numbers", "and", "amplitudes", "(", "up", "-", "and", "down", "-", "going", ")", "." ]
python
train
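A single-interface, single-frequency sketch of the recursion above. The layer properties are invented; the point is the complex wavenumber k = omega / Vs* and the impedance-weighted transfer of the up- and down-going amplitudes:

import numpy as np

omega = 2 * np.pi * 5.0                       # one angular frequency (5 Hz)
vs = np.array([200 + 4j, 600 + 6j])           # complex shear-wave velocities
mod = np.array([7.2e7 + 1e6j, 6.5e8 + 2e6j])  # complex shear moduli
h0 = 10.0                                     # top layer thickness (m)

k = omega / vs                                # complex wavenumbers
cimped = (k[0] * mod[0]) / (k[1] * mod[1])    # complex impedance ratio
cterm = 1j * k[0] * h0

# At the free surface both wave amplitudes are 1 (total reflection).
a1 = 0.5 * (1 + cimped) * np.exp(cterm) + 0.5 * (1 - cimped) * np.exp(-cterm)
b1 = 0.5 * (1 - cimped) * np.exp(cterm) + 0.5 * (1 + cimped) * np.exp(-cterm)
print(abs(a1), abs(b1))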
evhub/coconut
coconut/compiler/matching.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/matching.py#L223-L226
def match_all_in(self, matches, item): """Matches all matches to elements of item.""" for i, match in enumerate(matches): self.match(match, item + "[" + str(i) + "]")
[ "def", "match_all_in", "(", "self", ",", "matches", ",", "item", ")", ":", "for", "i", ",", "match", "in", "enumerate", "(", "matches", ")", ":", "self", ".", "match", "(", "match", ",", "item", "+", "\"[\"", "+", "str", "(", "i", ")", "+", "\"]\"", ")" ]
Matches all matches to elements of item.
[ "Matches", "all", "matches", "to", "elements", "of", "item", "." ]
python
train
PyGithub/PyGithub
github/MainClass.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/MainClass.py#L206-L219
def get_licenses(self): """ :calls: `GET /licenses <https://developer.github.com/v3/licenses/#list-all-licenses>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.License.License` """ url_parameters = dict() return github.PaginatedList.PaginatedList( github.License.License, self.__requester, "/licenses", url_parameters )
[ "def", "get_licenses", "(", "self", ")", ":", "url_parameters", "=", "dict", "(", ")", "return", "github", ".", "PaginatedList", ".", "PaginatedList", "(", "github", ".", "License", ".", "License", ",", "self", ".", "__requester", ",", "\"/licenses\"", ",", "url_parameters", ")" ]
:calls: `GET /licenses <https://developer.github.com/v3/licenses/#list-all-licenses>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.License.License`
[ ":", "calls", ":", "GET", "/", "licenses", "<https", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "licenses", "/", "#list", "-", "all", "-", "licenses", ">", "_", ":", "rtype", ":", ":", "class", ":", "github", ".", "PaginatedList", ".", "PaginatedList", "of", ":", "class", ":", "github", ".", "License", ".", "License" ]
python
train
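A likely usage sketch, assuming stock PyGithub; iteration over the PaginatedList is lazy, so pages of /licenses are only fetched as the loop advances:

from github import Github

g = Github()  # anonymous access; pass a token for higher rate limits
for lic in g.get_licenses():
    print(lic.key, "-", lic.name)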
tensorpack/tensorpack
tensorpack/tfutils/common.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/common.py#L167-L245
def collect_env_info(): """ Returns: str - a table containing important information about the environment """ data = [] data.append(("sys.platform", sys.platform)) data.append(("Python", sys.version.replace("\n", ""))) data.append(("Tensorpack", __git_version__)) data.append(("Numpy", np.__version__)) data.append(("TensorFlow", tfv1.VERSION + "/" + tfv1.GIT_VERSION)) data.append(("TF Compiler Version", tfv1.COMPILER_VERSION)) has_cuda = tf.test.is_built_with_cuda() data.append(("TF CUDA support", has_cuda)) try: from tensorflow.python.framework import test_util data.append(("TF MKL support", test_util.IsMklEnabled())) except Exception: pass try: from tensorflow.python.framework import test_util data.append(("TF XLA support", test_util.is_xla_enabled())) except Exception: pass if has_cuda: data.append(("Nvidia Driver", find_library("nvidia-ml"))) data.append(("CUDA", find_library("cudart"))) data.append(("CUDNN", find_library("cudnn"))) data.append(("NCCL", find_library("nccl"))) # List devices with NVML data.append( ("CUDA_VISIBLE_DEVICES", os.environ.get("CUDA_VISIBLE_DEVICES", str(None)))) try: devs = defaultdict(list) with NVMLContext() as ctx: for idx, dev in enumerate(ctx.devices()): devs[dev.name()].append(str(idx)) for devname, devids in devs.items(): data.append( ("GPU " + ",".join(devids), devname)) except Exception: data.append(("GPU", "Not found with NVML")) vram = psutil.virtual_memory() data.append(("Free RAM", "{:.2f}/{:.2f} GB".format(vram.available / 1024**3, vram.total / 1024**3))) data.append(("CPU Count", psutil.cpu_count())) # Other important dependencies: try: import horovod data.append(("horovod", horovod.__version__)) except ImportError: pass try: import cv2 data.append(("cv2", cv2.__version__)) except ImportError: pass import msgpack data.append(("msgpack", ".".join([str(x) for x in msgpack.version]))) has_prctl = True try: import prctl _ = prctl.set_pdeathsig # noqa except Exception: has_prctl = False data.append(("python-prctl", has_prctl)) return tabulate(data)
[ "def", "collect_env_info", "(", ")", ":", "data", "=", "[", "]", "data", ".", "append", "(", "(", "\"sys.platform\"", ",", "sys", ".", "platform", ")", ")", "data", ".", "append", "(", "(", "\"Python\"", ",", "sys", ".", "version", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", ")", ")", "data", ".", "append", "(", "(", "\"Tensorpack\"", ",", "__git_version__", ")", ")", "data", ".", "append", "(", "(", "\"Numpy\"", ",", "np", ".", "__version__", ")", ")", "data", ".", "append", "(", "(", "\"TensorFlow\"", ",", "tfv1", ".", "VERSION", "+", "\"/\"", "+", "tfv1", ".", "GIT_VERSION", ")", ")", "data", ".", "append", "(", "(", "\"TF Compiler Version\"", ",", "tfv1", ".", "COMPILER_VERSION", ")", ")", "has_cuda", "=", "tf", ".", "test", ".", "is_built_with_cuda", "(", ")", "data", ".", "append", "(", "(", "\"TF CUDA support\"", ",", "has_cuda", ")", ")", "try", ":", "from", "tensorflow", ".", "python", ".", "framework", "import", "test_util", "data", ".", "append", "(", "(", "\"TF MKL support\"", ",", "test_util", ".", "IsMklEnabled", "(", ")", ")", ")", "except", "Exception", ":", "pass", "try", ":", "from", "tensorflow", ".", "python", ".", "framework", "import", "test_util", "data", ".", "append", "(", "(", "\"TF XLA support\"", ",", "test_util", ".", "is_xla_enabled", "(", ")", ")", ")", "except", "Exception", ":", "pass", "if", "has_cuda", ":", "data", ".", "append", "(", "(", "\"Nvidia Driver\"", ",", "find_library", "(", "\"nvidia-ml\"", ")", ")", ")", "data", ".", "append", "(", "(", "\"CUDA\"", ",", "find_library", "(", "\"cudart\"", ")", ")", ")", "data", ".", "append", "(", "(", "\"CUDNN\"", ",", "find_library", "(", "\"cudnn\"", ")", ")", ")", "data", ".", "append", "(", "(", "\"NCCL\"", ",", "find_library", "(", "\"nccl\"", ")", ")", ")", "# List devices with NVML", "data", ".", "append", "(", "(", "\"CUDA_VISIBLE_DEVICES\"", ",", "os", ".", "environ", ".", "get", "(", "\"CUDA_VISIBLE_DEVICES\"", ",", "str", "(", "None", ")", ")", ")", ")", "try", ":", "devs", "=", "defaultdict", "(", "list", ")", "with", "NVMLContext", "(", ")", "as", "ctx", ":", "for", "idx", ",", "dev", "in", "enumerate", "(", "ctx", ".", "devices", "(", ")", ")", ":", "devs", "[", "dev", ".", "name", "(", ")", "]", ".", "append", "(", "str", "(", "idx", ")", ")", "for", "devname", ",", "devids", "in", "devs", ".", "items", "(", ")", ":", "data", ".", "append", "(", "(", "\"GPU \"", "+", "\",\"", ".", "join", "(", "devids", ")", ",", "devname", ")", ")", "except", "Exception", ":", "data", ".", "append", "(", "(", "\"GPU\"", ",", "\"Not found with NVML\"", ")", ")", "vram", "=", "psutil", ".", "virtual_memory", "(", ")", "data", ".", "append", "(", "(", "\"Free RAM\"", ",", "\"{:.2f}/{:.2f} GB\"", ".", "format", "(", "vram", ".", "available", "/", "1024", "**", "3", ",", "vram", ".", "total", "/", "1024", "**", "3", ")", ")", ")", "data", ".", "append", "(", "(", "\"CPU Count\"", ",", "psutil", ".", "cpu_count", "(", ")", ")", ")", "# Other important dependencies:", "try", ":", "import", "horovod", "data", ".", "append", "(", "(", "\"horovod\"", ",", "horovod", ".", "__version__", ")", ")", "except", "ImportError", ":", "pass", "try", ":", "import", "cv2", "data", ".", "append", "(", "(", "\"cv2\"", ",", "cv2", ".", "__version__", ")", ")", "except", "ImportError", ":", "pass", "import", "msgpack", "data", ".", "append", "(", "(", "\"msgpack\"", ",", "\".\"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "msgpack", ".", "version", "]", ")", ")", ")", "has_prctl", "=", "True", "try", ":", 
"import", "prctl", "_", "=", "prctl", ".", "set_pdeathsig", "# noqa", "except", "Exception", ":", "has_prctl", "=", "False", "data", ".", "append", "(", "(", "\"python-prctl\"", ",", "has_prctl", ")", ")", "return", "tabulate", "(", "data", ")" ]
Returns: str - a table containing important information about the environment
[ "Returns", ":", "str", "-", "a", "table", "contains", "important", "information", "about", "the", "environment" ]
python
train
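Typical use is to dump the table at the top of a training log; a minimal sketch, assuming tensorpack is installed and importable at the path this record shows:

from tensorpack.tfutils.common import collect_env_info

if __name__ == "__main__":
    print(collect_env_info())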
flowersteam/explauto
explauto/sensorimotor_model/inverse/inverse.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/inverse.py#L81-L93
def _guess_x_kmeans(self, y_desired, **kwargs): """Provide initial guesses for a probable x from y""" k = kwargs.get('k', self.k) _, indexes = self.fmodel.dataset.nn_y(y_desired, k=k) X = np.array([self.fmodel.get_x(i) for i in indexes]) if np.sum(X) == 0.: centroids = [self.fmodel.get_x(indexes[0])] else: try: centroids, _ = kmeans2(X, 2) except np.linalg.linalg.LinAlgError: centroids = [self.fmodel.get_x(indexes[0])] return centroids
[ "def", "_guess_x_kmeans", "(", "self", ",", "y_desired", ",", "*", "*", "kwargs", ")", ":", "k", "=", "kwargs", ".", "get", "(", "'k'", ",", "self", ".", "k", ")", "_", ",", "indexes", "=", "self", ".", "fmodel", ".", "dataset", ".", "nn_y", "(", "y_desired", ",", "k", "=", "k", ")", "X", "=", "np", ".", "array", "(", "[", "self", ".", "fmodel", ".", "get_x", "(", "i", ")", "for", "i", "in", "indexes", "]", ")", "if", "np", ".", "sum", "(", "X", ")", "==", "0.", ":", "centroids", "=", "[", "self", ".", "fmodel", ".", "get_x", "(", "indexes", "[", "0", "]", ")", "]", "else", ":", "try", ":", "centroids", ",", "_", "=", "kmeans2", "(", "X", ",", "2", ")", "except", "np", ".", "linalg", ".", "linalg", ".", "LinAlgError", ":", "centroids", "=", "[", "self", ".", "fmodel", ".", "get_x", "(", "indexes", "[", "0", "]", ")", "]", "return", "centroids" ]
Provide initial guesses for a probable x from y
[ "Provide", "an", "initial", "guesses", "for", "a", "probable", "x", "from", "y" ]
python
train
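The guessing strategy above, reduced to a standalone sketch: cluster the k nearest stored x's into two candidate starting points. The sample points are synthetic; the real code pulls neighbours from fmodel.dataset:

import numpy as np
from scipy.cluster.vq import kmeans2

X = np.array([[0.10, 0.20], [0.12, 0.19], [0.90, 0.80], [0.88, 0.83]])
try:
    centroids, _ = kmeans2(X, 2)
except np.linalg.LinAlgError:
    # Degenerate neighbourhoods fall back to a single stored point.
    centroids = [X[0]]
print(centroids)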
merll/docker-map
dockermap/map/runner/image.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/runner/image.py#L51-L71
def pull(self, action, image_name, **kwargs): """ Pulls an image for a container configuration :param action: Action configuration. :type action: dockermap.map.runner.ActionConfig :param image_name: Image name. :type image_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict """ config_id = action.config_id registry, __, image = config_id.config_name.rpartition('/') if registry and '.' in registry and registry not in self._login_registries: self.login(action, registry, insecure_registry=kwargs.get('insecure_registry')) log.info("Pulling image %s:%s.", config_id.config_name, config_id.instance_name) res = action.client.pull(repository=config_id.config_name, tag=config_id.instance_name, **kwargs) log.debug("Done pulling image %s:%s.", config_id.config_name, config_id.instance_name) self._policy.images[action.client_name].refresh_repo(config_id.config_name) log.debug("Refreshed image cache for repo %s.", config_id.config_name) return res
[ "def", "pull", "(", "self", ",", "action", ",", "image_name", ",", "*", "*", "kwargs", ")", ":", "config_id", "=", "action", ".", "config_id", "registry", ",", "__", ",", "image", "=", "config_id", ".", "config_name", ".", "rpartition", "(", "'/'", ")", "if", "registry", "and", "'.'", "in", "registry", "and", "registry", "not", "in", "self", ".", "_login_registries", ":", "self", ".", "login", "(", "action", ",", "registry", ",", "insecure_registry", "=", "kwargs", ".", "get", "(", "'insecure_registry'", ")", ")", "log", ".", "info", "(", "\"Pulling image %s:%s.\"", ",", "config_id", ".", "config_name", ",", "config_id", ".", "instance_name", ")", "res", "=", "action", ".", "client", ".", "pull", "(", "repository", "=", "config_id", ".", "config_name", ",", "tag", "=", "config_id", ".", "instance_name", ",", "*", "*", "kwargs", ")", "log", ".", "debug", "(", "\"Done pulling image %s:%s.\"", ",", "config_id", ".", "config_name", ",", "config_id", ".", "instance_name", ")", "self", ".", "_policy", ".", "images", "[", "action", ".", "client_name", "]", ".", "refresh_repo", "(", "config_id", ".", "config_name", ")", "log", ".", "debug", "(", "\"Refreshed image cache for repo %s.\"", ",", "config_id", ".", "config_name", ")", "return", "res" ]
Pulls an image for a container configuration :param action: Action configuration. :type action: dockermap.map.runner.ActionConfig :param image_name: Image name. :type image_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict
[ "Pulls", "an", "image", "for", "a", "container", "configuration" ]
python
train
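How the registry detection above behaves on a concrete name (made up for illustration) — note that rpartition keeps any namespace path inside the 'registry' part, and the '.' test is what distinguishes a registry host from a plain namespace:

config_name = "registry.example.com/team/app"
registry, _, image = config_name.rpartition("/")
needs_login = bool(registry) and "." in registry
print(registry)      # -> registry.example.com/team
print(image)         # -> app
print(needs_login)   # -> True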
jut-io/jut-python-tools
jut/api/accounts.py
https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/accounts.py#L130-L147
def user_exists(username, token_manager=None, app_url=defaults.APP_URL): """ check if the user exists with the specified username """ headers = token_manager.get_access_token_headers() auth_url = environment.get_auth_url(app_url=app_url) url = "%s/api/v1/accounts?username=%s" % (auth_url, username) response = requests.get(url, headers=headers) if response.status_code == 404: return False elif response.status_code == 200: return True else: raise JutException('Error %s: %s' % (response.status_code, response.text))
[ "def", "user_exists", "(", "username", ",", "token_manager", "=", "None", ",", "app_url", "=", "defaults", ".", "APP_URL", ")", ":", "headers", "=", "token_manager", ".", "get_access_token_headers", "(", ")", "auth_url", "=", "environment", ".", "get_auth_url", "(", "app_url", "=", "app_url", ")", "url", "=", "\"%s/api/v1/accounts?username=%s\"", "%", "(", "auth_url", ",", "username", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "if", "response", ".", "status_code", "==", "404", ":", "return", "False", "elif", "response", ".", "status_code", "==", "200", ":", "return", "True", "else", ":", "raise", "JutException", "(", "'Error %s: %s'", "%", "(", "response", ".", "status_code", ",", "response", ".", "text", ")", ")" ]
check if the user exists with the specified username
[ "check", "if", "the", "user", "exists", "with", "the", "specified", "username" ]
python
train
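The same 200/404 contract, extracted into a generic sketch so it can run without jut credentials; the httpbin URL is just a stand-in test endpoint:

import requests

def resource_exists(url, headers=None):
    response = requests.get(url, headers=headers or {})
    if response.status_code == 404:
        return False
    elif response.status_code == 200:
        return True
    # Anything else is unexpected, as in user_exists above.
    raise RuntimeError('Error %s: %s' % (response.status_code, response.text))

print(resource_exists('https://httpbin.org/status/404'))  # -> False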
The-Politico/politico-civic-election
election/models/candidate.py
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/candidate.py#L71-L77
def get_election_electoral_votes(self, election): """Get all electoral votes for this candidate in an election.""" candidate_election = CandidateElection.objects.get( candidate=self, election=election ) return candidate_election.electoral_votes.all()
[ "def", "get_election_electoral_votes", "(", "self", ",", "election", ")", ":", "candidate_election", "=", "CandidateElection", ".", "objects", ".", "get", "(", "candidate", "=", "self", ",", "election", "=", "election", ")", "return", "candidate_election", ".", "electoral_votes", ".", "all", "(", ")" ]
Get all electoral votes for this candidate in an election.
[ "Get", "all", "electoral", "votes", "for", "this", "candidate", "in", "an", "election", "." ]
python
train
prthkms/alex
alex/duckduckgo.py
https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/duckduckgo.py#L30-L42
def query(string): """query(user string) -- make http request to duckduckgo api, to get result in json format, then call parse_result. """ url = "https://api.duckduckgo.com/?q=" formating = "&format=json" query_string = url+'+'.join(string)+formating try: result = json.loads(requests.get(query_string).text) except: print "I'm sorry! Something went wrong. Maybe we could try again later." return return parse_result(result)
[ "def", "query", "(", "string", ")", ":", "url", "=", "\"https://api.duckduckgo.com/?q=\"", "formating", "=", "\"&format=json\"", "query_string", "=", "url", "+", "'+'", ".", "join", "(", "string", ")", "+", "formating", "try", ":", "result", "=", "json", ".", "loads", "(", "requests", ".", "get", "(", "query_string", ")", ".", "text", ")", "except", ":", "print", "\"I'm sorry! Something went wrong. Maybe we could try again later.\"", "return", "parse_result", "(", "result", ")" ]
query(user string) -- make http request to duckduckgo api, to get result in json format, then call parse_result.
[ "query", "(", "user", "string", ")", "--", "make", "http", "request", "to", "duckduckgo", "api", "to", "get", "result", "in", "json", "format", "then", "call", "parse_result", "." ]
python
train
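A Python 3 rendering of the request the function builds; the Instant Answer endpoint and format parameter are the same ones used above, while AbstractText is one of the keys the API commonly returns:

import json
import requests

terms = ["python", "language"]
query_string = "https://api.duckduckgo.com/?q=" + "+".join(terms) + "&format=json"
result = json.loads(requests.get(query_string).text)
print(result.get("AbstractText", "no instant answer"))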
pysam-developers/pysam
pysam/Pileup.py
https://github.com/pysam-developers/pysam/blob/9961bebd4cd1f2bf5e42817df25699a6e6344b5a/pysam/Pileup.py#L198-L253
def vcf2pileup(vcf, sample): '''convert vcf record to pileup record.''' chromosome = vcf.contig pos = vcf.pos reference = vcf.ref allelles = [reference] + vcf.alt data = vcf[sample] # get genotype genotypes = data["GT"] if len(genotypes) > 1: raise ValueError("only single genotype per position, %s" % (str(vcf))) genotypes = genotypes[0] # not a variant if genotypes[0] == ".": return None genotypes = [allelles[int(x)] for x in genotypes if x != "/"] # snp_quality is "genotype quality" snp_quality = consensus_quality = data.get("GQ", [0])[0] mapping_quality = vcf.info.get("MQ", [0])[0] coverage = data.get("DP", 0) if len(reference) > 1 or max([len(x) for x in vcf.alt]) > 1: # indel genotype, offset = translateIndelGenotypeFromVCF(genotypes, reference) return PileupIndel(chromosome, pos + offset, "*", genotype, consensus_quality, snp_quality, mapping_quality, coverage, genotype, "<" * len(genotype), 0, 0, 0) else: genotype = encodeGenotype("".join(genotypes)) read_bases = "" base_qualities = "" return PileupSubstitution(chromosome, pos, reference, genotype, consensus_quality, snp_quality, mapping_quality, coverage, read_bases, base_qualities)
[ "def", "vcf2pileup", "(", "vcf", ",", "sample", ")", ":", "chromosome", "=", "vcf", ".", "contig", "pos", "=", "vcf", ".", "pos", "reference", "=", "vcf", ".", "ref", "allelles", "=", "[", "reference", "]", "+", "vcf", ".", "alt", "data", "=", "vcf", "[", "sample", "]", "# get genotype", "genotypes", "=", "data", "[", "\"GT\"", "]", "if", "len", "(", "genotypes", ")", ">", "1", ":", "raise", "ValueError", "(", "\"only single genotype per position, %s\"", "%", "(", "str", "(", "vcf", ")", ")", ")", "genotypes", "=", "genotypes", "[", "0", "]", "# not a variant", "if", "genotypes", "[", "0", "]", "==", "\".\"", ":", "return", "None", "genotypes", "=", "[", "allelles", "[", "int", "(", "x", ")", "]", "for", "x", "in", "genotypes", "if", "x", "!=", "\"/\"", "]", "# snp_quality is \"genotype quality\"", "snp_quality", "=", "consensus_quality", "=", "data", ".", "get", "(", "\"GQ\"", ",", "[", "0", "]", ")", "[", "0", "]", "mapping_quality", "=", "vcf", ".", "info", ".", "get", "(", "\"MQ\"", ",", "[", "0", "]", ")", "[", "0", "]", "coverage", "=", "data", ".", "get", "(", "\"DP\"", ",", "0", ")", "if", "len", "(", "reference", ")", ">", "1", "or", "max", "(", "[", "len", "(", "x", ")", "for", "x", "in", "vcf", ".", "alt", "]", ")", ">", "1", ":", "# indel", "genotype", ",", "offset", "=", "translateIndelGenotypeFromVCF", "(", "genotypes", ",", "reference", ")", "return", "PileupIndel", "(", "chromosome", ",", "pos", "+", "offset", ",", "\"*\"", ",", "genotype", ",", "consensus_quality", ",", "snp_quality", ",", "mapping_quality", ",", "coverage", ",", "genotype", ",", "\"<\"", "*", "len", "(", "genotype", ")", ",", "0", ",", "0", ",", "0", ")", "else", ":", "genotype", "=", "encodeGenotype", "(", "\"\"", ".", "join", "(", "genotypes", ")", ")", "read_bases", "=", "\"\"", "base_qualities", "=", "\"\"", "return", "PileupSubstitution", "(", "chromosome", ",", "pos", ",", "reference", ",", "genotype", ",", "consensus_quality", ",", "snp_quality", ",", "mapping_quality", ",", "coverage", ",", "read_bases", ",", "base_qualities", ")" ]
convert vcf record to pileup record.
[ "convert", "vcf", "record", "to", "pileup", "record", "." ]
python
train
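Decoding a VCF GT field the way the function above does, with toy values — index 0 is the reference allele, higher indexes point into ALT:

reference = "A"
alt = ["G"]
alleles = [reference] + alt
gt = "0/1"
decoded = [alleles[int(x)] for x in gt if x != "/"]
print(decoded)  # -> ['A', 'G']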
fracpete/python-weka-wrapper3
python/weka/core/classes.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/classes.py#L398-L407
def logger(self): """ Returns the logger object. :return: the logger :rtype: logger """ if self._logger is None: self._logger = self.new_logger() return self._logger
[ "def", "logger", "(", "self", ")", ":", "if", "self", ".", "_logger", "is", "None", ":", "self", ".", "_logger", "=", "self", ".", "new_logger", "(", ")", "return", "self", ".", "_logger" ]
Returns the logger object. :return: the logger :rtype: logger
[ "Returns", "the", "logger", "object", "." ]
python
train
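The same lazy-initialization property pattern, shown standalone with Python's logging module standing in for the wrapper's new_logger():

import logging

class Widget(object):
    _logger = None

    @property
    def logger(self):
        # Created on first access, cached for every later access.
        if self._logger is None:
            self._logger = logging.getLogger(self.__class__.__name__)
        return self._logger

Widget().logger.warning("created on first access")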
kislyuk/aegea
aegea/packages/github3/github.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/github.py#L76-L86
def authorization(self, id_num): """Get information about authorization ``id``. :param int id_num: (required), unique id of the authorization :returns: :class:`Authorization <Authorization>` """ json = None if int(id_num) > 0: url = self._build_url('authorizations', str(id_num)) json = self._json(self._get(url), 200) return Authorization(json, self) if json else None
[ "def", "authorization", "(", "self", ",", "id_num", ")", ":", "json", "=", "None", "if", "int", "(", "id_num", ")", ">", "0", ":", "url", "=", "self", ".", "_build_url", "(", "'authorizations'", ",", "str", "(", "id_num", ")", ")", "json", "=", "self", ".", "_json", "(", "self", ".", "_get", "(", "url", ")", ",", "200", ")", "return", "Authorization", "(", "json", ",", "self", ")", "if", "json", "else", "None" ]
Get information about authorization ``id``. :param int id_num: (required), unique id of the authorization :returns: :class:`Authorization <Authorization>`
[ "Get", "information", "about", "authorization", "id", "." ]
python
train
Genida/django-meerkat
src/meerkat/utils/list.py
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/utils/list.py#L6-L18
def distinct(l): """ Return a generator where the duplicates have been removed. Args: l (list): the list to filter. Returns: generator: the same items without duplicates, in their original order. """ seen = set() seen_add = seen.add return (_ for _ in l if not (_ in seen or seen_add(_)))
[ "def", "distinct", "(", "l", ")", ":", "seen", "=", "set", "(", ")", "seen_add", "=", "seen", ".", "add", "return", "(", "_", "for", "_", "in", "l", "if", "not", "(", "_", "in", "seen", "or", "seen_add", "(", "_", ")", ")", ")" ]
Return a generator where the duplicates have been removed. Args: l (list): the list to filter. Returns: generator: the same items without duplicates, in their original order.
[ "Return", "a", "list", "where", "the", "duplicates", "have", "been", "removed", "." ]
python
train
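A short demonstration of the generator semantics documented above — first-occurrence order is kept, and the result is single-use:

items = [3, 1, 3, 2, 1]
gen = distinct(items)
print(list(gen))  # -> [3, 1, 2]
print(list(gen))  # -> [] (the generator is already exhausted)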
chrisspen/dtree
dtree.py
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L429-L473
def get_gain(data, attr, class_attr, method=DEFAULT_DISCRETE_METRIC, only_sub=0, prefer_fewer_values=False, entropy_func=None): """ Calculates the information gain (reduction in entropy) that would result by splitting the data on the chosen attribute (attr). Parameters: prefer_fewer_values := Weights the gain by the count of the attribute's unique values. If multiple attributes have the same gain, but one has slightly fewer unique values, this will cause the one with fewer values to be preferred. """ entropy_func = entropy_func or entropy val_freq = defaultdict(float) subset_entropy = 0.0 # Calculate the frequency of each of the values in the target attribute for record in data: val_freq[record.get(attr)] += 1.0 # Calculate the sum of the entropy for each subset of records weighted # by their probability of occurring in the training set. for val in val_freq.keys(): val_prob = val_freq[val] / sum(val_freq.values()) data_subset = [record for record in data if record.get(attr) == val] e = entropy_func(data_subset, class_attr, method=method) subset_entropy += val_prob * e if only_sub: return subset_entropy # Subtract the entropy of the chosen attribute from the entropy of the # whole data set with respect to the target attribute (and return it) main_entropy = entropy_func(data, class_attr, method=method) # Prefer gains on attributes with fewer values. if prefer_fewer_values: # n = len(val_freq) # w = (n+1)/float(n)/2 #return (main_entropy - subset_entropy)*w return ((main_entropy - subset_entropy), 1./len(val_freq)) else: return (main_entropy - subset_entropy)
[ "def", "get_gain", "(", "data", ",", "attr", ",", "class_attr", ",", "method", "=", "DEFAULT_DISCRETE_METRIC", ",", "only_sub", "=", "0", ",", "prefer_fewer_values", "=", "False", ",", "entropy_func", "=", "None", ")", ":", "entropy_func", "=", "entropy_func", "or", "entropy", "val_freq", "=", "defaultdict", "(", "float", ")", "subset_entropy", "=", "0.0", "# Calculate the frequency of each of the values in the target attribute", "for", "record", "in", "data", ":", "val_freq", "[", "record", ".", "get", "(", "attr", ")", "]", "+=", "1.0", "# Calculate the sum of the entropy for each subset of records weighted", "# by their probability of occuring in the training set.", "for", "val", "in", "val_freq", ".", "keys", "(", ")", ":", "val_prob", "=", "val_freq", "[", "val", "]", "/", "sum", "(", "val_freq", ".", "values", "(", ")", ")", "data_subset", "=", "[", "record", "for", "record", "in", "data", "if", "record", ".", "get", "(", "attr", ")", "==", "val", "]", "e", "=", "entropy_func", "(", "data_subset", ",", "class_attr", ",", "method", "=", "method", ")", "subset_entropy", "+=", "val_prob", "*", "e", "if", "only_sub", ":", "return", "subset_entropy", "# Subtract the entropy of the chosen attribute from the entropy of the", "# whole data set with respect to the target attribute (and return it)", "main_entropy", "=", "entropy_func", "(", "data", ",", "class_attr", ",", "method", "=", "method", ")", "# Prefer gains on attributes with fewer values.", "if", "prefer_fewer_values", ":", "# n = len(val_freq)", "# w = (n+1)/float(n)/2", "#return (main_entropy - subset_entropy)*w", "return", "(", "(", "main_entropy", "-", "subset_entropy", ")", ",", "1.", "/", "len", "(", "val_freq", ")", ")", "else", ":", "return", "(", "main_entropy", "-", "subset_entropy", ")" ]
Calculates the information gain (reduction in entropy) that would result by splitting the data on the chosen attribute (attr). Parameters: prefer_fewer_values := Weights the gain by the count of the attribute's unique values. If multiple attributes have the same gain, but one has slightly fewer unique values, this will cause the one with fewer values to be preferred.
[ "Calculates", "the", "information", "gain", "(", "reduction", "in", "entropy", ")", "that", "would", "result", "by", "splitting", "the", "data", "on", "the", "chosen", "attribute", "(", "attr", ")", ".", "Parameters", ":", "prefer_fewer_values", ":", "=", "Weights", "the", "gain", "by", "the", "count", "of", "the", "attribute", "s", "unique", "values", ".", "If", "multiple", "attributes", "have", "the", "same", "gain", "but", "one", "has", "slightly", "fewer", "attributes", "this", "will", "cause", "the", "one", "with", "fewer", "attributes", "to", "be", "preferred", "." ]
python
train
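A tiny worked example of the quantity get_gain computes, using Shannon entropy directly; the four-row dataset is invented:

import math

data = [
    {"outlook": "sunny", "play": "no"},
    {"outlook": "sunny", "play": "no"},
    {"outlook": "rain", "play": "yes"},
    {"outlook": "rain", "play": "yes"},
]

def H(rows, target):
    counts = {}
    for r in rows:
        counts[r[target]] = counts.get(r[target], 0) + 1
    total = float(len(rows))
    return -sum(c / total * math.log(c / total, 2) for c in counts.values())

main_entropy = H(data, "play")  # 1.0 bit: an even yes/no split
subset_entropy = 0.0
for val in ("sunny", "rain"):
    sub = [r for r in data if r["outlook"] == val]
    subset_entropy += len(sub) / float(len(data)) * H(sub, "play")

print(main_entropy - subset_entropy)  # -> 1.0, since each subset is pure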
spdx/tools-python
spdx/parsers/tagvaluebuilders.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/tagvaluebuilders.py#L248-L262
def build_person(self, doc, entity): """Builds a person object out of a string representation. Returns built person. Raises SPDXValueError if failed to extract name. """ match = self.person_re.match(entity) if match and validations.validate_person_name(match.group(self.PERSON_NAME_GROUP)): name = match.group(self.PERSON_NAME_GROUP).strip() email = match.group(self.PERSON_EMAIL_GROUP) if (email is not None) and (len(email) != 0): return creationinfo.Person(name=name, email=email.strip()) else: return creationinfo.Person(name=name, email=None) else: raise SPDXValueError('Failed to extract person name')
[ "def", "build_person", "(", "self", ",", "doc", ",", "entity", ")", ":", "match", "=", "self", ".", "person_re", ".", "match", "(", "entity", ")", "if", "match", "and", "validations", ".", "validate_person_name", "(", "match", ".", "group", "(", "self", ".", "PERSON_NAME_GROUP", ")", ")", ":", "name", "=", "match", ".", "group", "(", "self", ".", "PERSON_NAME_GROUP", ")", ".", "strip", "(", ")", "email", "=", "match", ".", "group", "(", "self", ".", "PERSON_EMAIL_GROUP", ")", "if", "(", "email", "is", "not", "None", ")", "and", "(", "len", "(", "email", ")", "!=", "0", ")", ":", "return", "creationinfo", ".", "Person", "(", "name", "=", "name", ",", "email", "=", "email", ".", "strip", "(", ")", ")", "else", ":", "return", "creationinfo", ".", "Person", "(", "name", "=", "name", ",", "email", "=", "None", ")", "else", ":", "raise", "SPDXValueError", "(", "'Failed to extract person name'", ")" ]
Builds a person object out of a string representation. Returns built person. Raises SPDXValueError if failed to extract name.
[ "Builds", "an", "organization", "object", "of", "of", "a", "string", "representation", ".", "Returns", "built", "organization", ".", "Raises", "SPDXValueError", "if", "failed", "to", "extract", "name", "." ]
python
valid
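An illustrative regex in the spirit of person_re above — the actual pattern lives on the builder class and may differ, and named groups replace the class's numeric group constants:

import re

person_re = re.compile(r'Person:\s*(?P<name>[^(]+)(?:\((?P<email>[^)]*)\))?')

match = person_re.match('Person: Jane Doe ([email protected])')
if match:
    name = match.group('name').strip()
    email = match.group('email')
    print(name, '/', email or 'no email')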
kytos/kytos-utils
kytos/cli/commands/napps/api.py
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/cli/commands/napps/api.py#L284-L300
def reload(cls, args): """Reload NApps code.""" LOG.info('Reloading NApps...') mgr = NAppsManager() try: if args['all']: mgr.reload(None) else: napps = args['<napp>'] mgr.reload(napps) LOG.info('\tReloaded.') except requests.HTTPError as exception: if exception.response.status_code != 200: msg = json.loads(exception.response.content) LOG.error('\tServer error: %s - ', msg['error'])
[ "def", "reload", "(", "cls", ",", "args", ")", ":", "LOG", ".", "info", "(", "'Reloading NApps...'", ")", "mgr", "=", "NAppsManager", "(", ")", "try", ":", "if", "args", "[", "'all'", "]", ":", "mgr", ".", "reload", "(", "None", ")", "else", ":", "napps", "=", "args", "[", "'<napp>'", "]", "mgr", ".", "reload", "(", "napps", ")", "LOG", ".", "info", "(", "'\\tReloaded.'", ")", "except", "requests", ".", "HTTPError", "as", "exception", ":", "if", "exception", ".", "response", ".", "status_code", "!=", "200", ":", "msg", "=", "json", ".", "loads", "(", "exception", ".", "response", ".", "content", ")", "LOG", ".", "error", "(", "'\\tServer error: %s - '", ",", "msg", "[", "'error'", "]", ")" ]
Reload NApps code.
[ "Reload", "NApps", "code", "." ]
python
train
akissa/spamc
examples/example1.py
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/examples/example1.py#L32-L109
def runit(): """run things""" parser = OptionParser() parser.add_option('-s', '--server', help='The spamassassin spamd server to connect to', dest='server', type='str', default='standalone.home.topdog-software.com') parser.add_option('-p', '--port', help='The spamassassin spamd server port to connect to', dest='port', type='int', default=783) parser.add_option('-u', '--unix-socket', help='The spamassassin spamd unix socket to connect to', dest='socket_path', type='str') parser.add_option('-t', '--tls', help='Use TLS', dest='tls', action='store_true', default=False) parser.add_option('-z', '--use-zlib-compression', help='Use Zlib compression', dest='gzip', action='store_true', default=False) parser.add_option('-l', '--zlib-compression-level', help='Zlib compression level', dest='compress_level', type='choice', choices=[str(val) for val in range(0, 10)], default=6) parser.add_option('-a', '--user', help=('''Username of the user on whose behalf''' '''this scan is being performed'''), dest='user', type='str', default='exim') options, _ = parser.parse_args() sslopts = {} if options.tls: sslopts = dict(ssl_version=PROTOCOL_TLSv1) if options.socket_path and os.path.exists(options.socket_path): options.server = None client = SpamC( options.server, port=options.port, socket_file=options.socket_path, user=options.user, gzip=options.gzip, compress_level=int(options.compress_level), is_ssl=options.tls, **sslopts) pprint.pprint(client.ping()) path = os.path.dirname(__file__) for test in FILES: filename = os.path.join(path, test['name']) print "File => %s" % filename fileobj = open(filename) print "=" * 10, "client.check()" pprint.pprint(client.check(fileobj)) print "=" * 10, "client.symbols()" pprint.pprint(client.symbols(fileobj)) print "=" * 10, "client.report()" pprint.pprint(client.report(fileobj)) print "=" * 10, "client.report_ifspam()" pprint.pprint(client.report_ifspam(fileobj)) print "=" * 10, "client.process()" pprint.pprint(client.process(fileobj)) print "=" * 10, "client.headers()" pprint.pprint(client.headers(fileobj)) print "=" * 10, "client.learn()" pprint.pprint(client.learn(fileobj, test['type'])) print "=" * 10, "client.tell()" pprint.pprint(client.tell(fileobj, 'forget')) print "=" * 10, "client.revoke()" pprint.pprint(client.revoke(fileobj))
[ "def", "runit", "(", ")", ":", "parser", "=", "OptionParser", "(", ")", "parser", ".", "add_option", "(", "'-s'", ",", "'--server'", ",", "help", "=", "'The spamassassin spamd server to connect to'", ",", "dest", "=", "'server'", ",", "type", "=", "'str'", ",", "default", "=", "'standalone.home.topdog-software.com'", ")", "parser", ".", "add_option", "(", "'-p'", ",", "'--port'", ",", "help", "=", "'The spamassassin spamd server port to connect to'", ",", "dest", "=", "'port'", ",", "type", "=", "'int'", ",", "default", "=", "783", ")", "parser", ".", "add_option", "(", "'-u'", ",", "'--unix-socket'", ",", "help", "=", "'The spamassassin spamd unix socket to connect to'", ",", "dest", "=", "'socket_path'", ",", "type", "=", "'str'", ")", "parser", ".", "add_option", "(", "'-t'", ",", "'--tls'", ",", "help", "=", "'Use TLS'", ",", "dest", "=", "'tls'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ")", "parser", ".", "add_option", "(", "'-z'", ",", "'--use-zlib-compression'", ",", "help", "=", "'Use Zlib compression'", ",", "dest", "=", "'gzip'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ")", "parser", ".", "add_option", "(", "'-l'", ",", "'--zlib-compression-level'", ",", "help", "=", "'Zlib compression level'", ",", "dest", "=", "'compress_level'", ",", "type", "=", "'choice'", ",", "choices", "=", "[", "str", "(", "val", ")", "for", "val", "in", "range", "(", "0", ",", "10", ")", "]", ",", "default", "=", "6", ")", "parser", ".", "add_option", "(", "'-a'", ",", "'--user'", ",", "help", "=", "(", "'''Username of the user on whose behalf'''", "'''this scan is being performed'''", ")", ",", "dest", "=", "'user'", ",", "type", "=", "'str'", ",", "default", "=", "'exim'", ")", "options", ",", "_", "=", "parser", ".", "parse_args", "(", ")", "sslopts", "=", "{", "}", "if", "options", ".", "tls", ":", "sslopts", "=", "dict", "(", "ssl_version", "=", "PROTOCOL_TLSv1", ")", "if", "options", ".", "socket_path", "and", "os", ".", "path", ".", "exists", "(", "options", ".", "socket_path", ")", ":", "options", ".", "server", "=", "None", "client", "=", "SpamC", "(", "options", ".", "server", ",", "port", "=", "options", ".", "port", ",", "socket_file", "=", "options", ".", "socket_path", ",", "user", "=", "options", ".", "user", ",", "gzip", "=", "options", ".", "gzip", ",", "compress_level", "=", "int", "(", "options", ".", "compress_level", ")", ",", "is_ssl", "=", "options", ".", "tls", ",", "*", "*", "sslopts", ")", "pprint", ".", "pprint", "(", "client", ".", "ping", "(", ")", ")", "path", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "for", "test", "in", "FILES", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "path", ",", "test", "[", "'name'", "]", ")", "print", "\"File => %s\"", "%", "filename", "fileobj", "=", "open", "(", "filename", ")", "print", "\"=\"", "*", "10", ",", "\"client.check()\"", "pprint", ".", "pprint", "(", "client", ".", "check", "(", "fileobj", ")", ")", "print", "\"=\"", "*", "10", ",", "\"client.symbols()\"", "pprint", ".", "pprint", "(", "client", ".", "symbols", "(", "fileobj", ")", ")", "print", "\"=\"", "*", "10", ",", "\"client.report()\"", "pprint", ".", "pprint", "(", "client", ".", "report", "(", "fileobj", ")", ")", "print", "\"=\"", "*", "10", ",", "\"client.report_ifspam()\"", "pprint", ".", "pprint", "(", "client", ".", "report_ifspam", "(", "fileobj", ")", ")", "print", "\"=\"", "*", "10", ",", "\"client.process()\"", "pprint", ".", "pprint", "(", "client", ".", "process", "(", 
"fileobj", ")", ")", "print", "\"=\"", "*", "10", ",", "\"client.headers()\"", "pprint", ".", "pprint", "(", "client", ".", "headers", "(", "fileobj", ")", ")", "print", "\"=\"", "*", "10", ",", "\"client.learn()\"", "pprint", ".", "pprint", "(", "client", ".", "learn", "(", "fileobj", ",", "test", "[", "'type'", "]", ")", ")", "print", "\"=\"", "*", "10", ",", "\"client.tell()\"", "pprint", ".", "pprint", "(", "client", ".", "tell", "(", "fileobj", ",", "'forget'", ")", ")", "print", "\"=\"", "*", "10", ",", "\"client.revoke()\"", "pprint", ".", "pprint", "(", "client", ".", "revoke", "(", "fileobj", ")", ")" ]
run things
[ "run", "things" ]
python
train
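The runit() script above is Python 2 (bare print statements). A minimal Python 3 sketch of the same check loop, assuming the SpamC client interface shown in the record; run_checks and its arguments are hypothetical stand-ins:

import os
import pprint

def run_checks(client, files, path):
    # Hypothetical helper: exercise a few spamd operations per test file.
    pprint.pprint(client.ping())
    for test in files:
        filename = os.path.join(path, test['name'])
        print("File => %s" % filename)
        with open(filename) as fileobj:
            for label in ("check", "symbols", "report"):
                print("=" * 10, "client.%s()" % label)
                # Rewinding between calls is an assumption about the client;
                # the original script reuses one file object as-is.
                fileobj.seek(0)
                pprint.pprint(getattr(client, label)(fileobj))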
nilp0inter/cpe
cpe/cpelang2_3.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpelang2_3.py#L172-L277
def language_match(self, cpeset, cpel_dom=None): """ Accepts a set of known CPE Names and an expression in the CPE language, and delivers the answer True if the expression matches with the set. Otherwise, it returns False. :param CPELanguage self: An expression in the CPE Applicability Language, represented as the XML infoset for the platform element. :param CPESet cpeset: CPE set object to match with self expression. :param string cpel_dom: An expression in the CPE Applicability Language, represented as DOM tree. :returns: True if self expression can be satisfied by language matching against cpeset, False otherwise. :rtype: boolean """ # Root element tag TAG_ROOT = '#document' # A container for child platform definitions TAG_PLATSPEC = 'cpe:platform-specification' # Information about a platform definition TAG_PLATFORM = 'cpe:platform' TAG_LOGITEST = 'cpe:logical-test' TAG_CPE = 'cpe:fact-ref' TAG_CHECK_CPE = 'check-fact-ref' # Tag attributes ATT_NAME = 'name' ATT_OP = 'operator' ATT_NEGATE = 'negate' # Attribute values ATT_OP_AND = 'AND' ATT_OP_OR = 'OR' ATT_NEGATE_TRUE = 'TRUE' # Constant associated with an error in language matching ERROR = 2 if cpel_dom is None: cpel_dom = self.document # Identify the root element if cpel_dom.nodeName == TAG_ROOT or cpel_dom.nodeName == TAG_PLATSPEC: for node in cpel_dom.childNodes: if node.nodeName == TAG_PLATSPEC: return self.language_match(cpeset, node) if node.nodeName == TAG_PLATFORM: return self.language_match(cpeset, node) # Identify a platform element elif cpel_dom.nodeName == TAG_PLATFORM: # Parse through E's elements and ignore all but logical-test for node in cpel_dom.childNodes: if node.nodeName == TAG_LOGITEST: # Call the function again, but with logical-test # as the root element return self.language_match(cpeset, node) # Identify a CPE element elif cpel_dom.nodeName == TAG_CPE: # fact-ref's name attribute is a bound name, # so we unbind it to a WFN before passing it cpename = cpel_dom.getAttribute(ATT_NAME) wfn = CPELanguage2_3._unbind(cpename) return CPELanguage2_3._fact_ref_eval(cpeset, wfn) # Identify a check of CPE names (OVAL, OCIL...) elif cpel_dom.nodeName == TAG_CHECK_CPE: return CPELanguage2_3._check_fact_ref_Eval(cpel_dom) # Identify a logical operator element elif cpel_dom.nodeName == TAG_LOGITEST: count = 0 len = 0 answer = False for node in cpel_dom.childNodes: if node.nodeName.find("#") == 0: continue len = len + 1 result = self.language_match(cpeset, node) if result: count = count + 1 elif result == ERROR: answer = ERROR operator = cpel_dom.getAttribute(ATT_OP).upper() if operator == ATT_OP_AND: if count == len: answer = True elif operator == ATT_OP_OR: if count > 0: answer = True operator_not = cpel_dom.getAttribute(ATT_NEGATE) if operator_not: if ((operator_not.upper() == ATT_NEGATE_TRUE) and (answer != ERROR)): answer = not answer return answer else: return False
[ "def", "language_match", "(", "self", ",", "cpeset", ",", "cpel_dom", "=", "None", ")", ":", "# Root element tag", "TAG_ROOT", "=", "'#document'", "# A container for child platform definitions", "TAG_PLATSPEC", "=", "'cpe:platform-specification'", "# Information about a platform definition", "TAG_PLATFORM", "=", "'cpe:platform'", "TAG_LOGITEST", "=", "'cpe:logical-test'", "TAG_CPE", "=", "'cpe:fact-ref'", "TAG_CHECK_CPE", "=", "'check-fact-ref'", "# Tag attributes", "ATT_NAME", "=", "'name'", "ATT_OP", "=", "'operator'", "ATT_NEGATE", "=", "'negate'", "# Attribute values", "ATT_OP_AND", "=", "'AND'", "ATT_OP_OR", "=", "'OR'", "ATT_NEGATE_TRUE", "=", "'TRUE'", "# Constant associated with an error in language matching", "ERROR", "=", "2", "if", "cpel_dom", "is", "None", ":", "cpel_dom", "=", "self", ".", "document", "# Identify the root element", "if", "cpel_dom", ".", "nodeName", "==", "TAG_ROOT", "or", "cpel_dom", ".", "nodeName", "==", "TAG_PLATSPEC", ":", "for", "node", "in", "cpel_dom", ".", "childNodes", ":", "if", "node", ".", "nodeName", "==", "TAG_PLATSPEC", ":", "return", "self", ".", "language_match", "(", "cpeset", ",", "node", ")", "if", "node", ".", "nodeName", "==", "TAG_PLATFORM", ":", "return", "self", ".", "language_match", "(", "cpeset", ",", "node", ")", "# Identify a platform element", "elif", "cpel_dom", ".", "nodeName", "==", "TAG_PLATFORM", ":", "# Parse through E's elements and ignore all but logical-test", "for", "node", "in", "cpel_dom", ".", "childNodes", ":", "if", "node", ".", "nodeName", "==", "TAG_LOGITEST", ":", "# Call the function again, but with logical-test", "# as the root element", "return", "self", ".", "language_match", "(", "cpeset", ",", "node", ")", "# Identify a CPE element", "elif", "cpel_dom", ".", "nodeName", "==", "TAG_CPE", ":", "# fact-ref's name attribute is a bound name,", "# so we unbind it to a WFN before passing it", "cpename", "=", "cpel_dom", ".", "getAttribute", "(", "ATT_NAME", ")", "wfn", "=", "CPELanguage2_3", ".", "_unbind", "(", "cpename", ")", "return", "CPELanguage2_3", ".", "_fact_ref_eval", "(", "cpeset", ",", "wfn", ")", "# Identify a check of CPE names (OVAL, OCIL...)", "elif", "cpel_dom", ".", "nodeName", "==", "TAG_CHECK_CPE", ":", "return", "CPELanguage2_3", ".", "_check_fact_ref_Eval", "(", "cpel_dom", ")", "# Identify a logical operator element", "elif", "cpel_dom", ".", "nodeName", "==", "TAG_LOGITEST", ":", "count", "=", "0", "len", "=", "0", "answer", "=", "False", "for", "node", "in", "cpel_dom", ".", "childNodes", ":", "if", "node", ".", "nodeName", ".", "find", "(", "\"#\"", ")", "==", "0", ":", "continue", "len", "=", "len", "+", "1", "result", "=", "self", ".", "language_match", "(", "cpeset", ",", "node", ")", "if", "result", ":", "count", "=", "count", "+", "1", "elif", "result", "==", "ERROR", ":", "answer", "=", "ERROR", "operator", "=", "cpel_dom", ".", "getAttribute", "(", "ATT_OP", ")", ".", "upper", "(", ")", "if", "operator", "==", "ATT_OP_AND", ":", "if", "count", "==", "len", ":", "answer", "=", "True", "elif", "operator", "==", "ATT_OP_OR", ":", "if", "count", ">", "0", ":", "answer", "=", "True", "operator_not", "=", "cpel_dom", ".", "getAttribute", "(", "ATT_NEGATE", ")", "if", "operator_not", ":", "if", "(", "(", "operator_not", ".", "upper", "(", ")", "==", "ATT_NEGATE_TRUE", ")", "and", "(", "answer", "!=", "ERROR", ")", ")", ":", "answer", "=", "not", "answer", "return", "answer", "else", ":", "return", "False" ]
Accepts a set of known CPE Names and an expression in the CPE language, and delivers the answer True if the expression matches with the set. Otherwise, it returns False. :param CPELanguage self: An expression in the CPE Applicability Language, represented as the XML infoset for the platform element. :param CPESet cpeset: CPE set object to match with self expression. :param string cpel_dom: An expression in the CPE Applicability Language, represented as DOM tree. :returns: True if self expression can be satisfied by language matching against cpeset, False otherwise. :rtype: boolean
[ "Accepts", "a", "set", "of", "known", "CPE", "Names", "and", "an", "expression", "in", "the", "CPE", "language", "and", "delivers", "the", "answer", "True", "if", "the", "expression", "matches", "with", "the", "set", ".", "Otherwise", "it", "returns", "False", "." ]
python
train
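The logical-test branch above reduces child results with AND/OR plus an optional negate attribute. A simplified standalone sketch of those semantics; eval_logical is hypothetical and ERROR mirrors the record's sentinel:

ERROR = 2  # sentinel the record uses for a matching error

def eval_logical(operator, negate, child_results):
    # Count true children; propagate ERROR unless an operator check wins.
    count = sum(1 for r in child_results if r is True)
    answer = ERROR if ERROR in child_results else False
    if operator == "AND" and count == len(child_results):
        answer = True
    elif operator == "OR" and count > 0:
        answer = True
    if negate and answer != ERROR:
        answer = not answer
    return answer

assert eval_logical("AND", False, [True, True]) is True
assert eval_logical("OR", True, [False, False]) is True  # negated OR of all-False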
omtinez/pddb
pddb/pddb.py
https://github.com/omtinez/pddb/blob/a24cee0702c8286c5c466c51ca65cf8dbc2c183c/pddb/pddb.py#L142-L147
def get_table_schema(self, tname): ''' Returns a list of column names of the provided table name ''' tname = self._check_tname(tname, noload=True) if tname not in self._schemas: raise ValueError('Table "%s" not found in schema store' % tname) return list(self._schemas[tname])
[ "def", "get_table_schema", "(", "self", ",", "tname", ")", ":", "tname", "=", "self", ".", "_check_tname", "(", "tname", ",", "noload", "=", "True", ")", "if", "tname", "not", "in", "self", ".", "_schemas", ":", "raise", "ValueError", "(", "'Table \"%s\" not found in schema store'", "%", "tname", ")", "return", "list", "(", "self", ".", "_schemas", "[", "tname", "]", ")" ]
Returns a list of column names of the provided table name
[ "Returns", "a", "list", "of", "column", "names", "of", "the", "provided", "table", "name" ]
python
train
adafruit/Adafruit_Python_CharLCD
Adafruit_CharLCD/Adafruit_CharLCD.py
https://github.com/adafruit/Adafruit_Python_CharLCD/blob/c126e6b673074c12a03f4bd36afb2fe40272341e/Adafruit_CharLCD/Adafruit_CharLCD.py#L233-L241
def autoscroll(self, autoscroll): """Autoscroll will 'right justify' text from the cursor if set True, otherwise it will 'left justify' the text. """ if autoscroll: self.displaymode |= LCD_ENTRYSHIFTINCREMENT else: self.displaymode &= ~LCD_ENTRYSHIFTINCREMENT self.write8(LCD_ENTRYMODESET | self.displaymode)
[ "def", "autoscroll", "(", "self", ",", "autoscroll", ")", ":", "if", "autoscroll", ":", "self", ".", "displaymode", "|=", "LCD_ENTRYSHIFTINCREMENT", "else", ":", "self", ".", "displaymode", "&=", "~", "LCD_ENTRYSHIFTINCREMENT", "self", ".", "write8", "(", "LCD_ENTRYMODESET", "|", "self", ".", "displaymode", ")" ]
Autoscroll will 'right justify' text from the cursor if set True, otherwise it will 'left justify' the text.
[ "Autoscroll", "will", "right", "justify", "text", "from", "the", "cursor", "if", "set", "True", "otherwise", "it", "will", "left", "justify", "the", "text", "." ]
python
train
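autoscroll follows the usual HD44780 pattern: flip one bit in a cached mode byte, then rewrite the whole entry-mode register. The bit manipulation in isolation, with the constant value per the HD44780 datasheet:

LCD_ENTRYSHIFTINCREMENT = 0x01  # entry-mode "shift display" bit

def set_flag(mode, flag, enabled):
    # Set or clear a single bit flag in a mode byte.
    return mode | flag if enabled else mode & ~flag

mode = 0x02                                            # I/D = increment
mode = set_flag(mode, LCD_ENTRYSHIFTINCREMENT, True)   # -> 0x03
mode = set_flag(mode, LCD_ENTRYSHIFTINCREMENT, False)  # -> 0x02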
mattjj/pylds
pylds/util.py
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L163-L196
def logdet_symm_block_tridiag(H_diag, H_upper_diag): """ compute the log determinant of a positive definite, symmetric block tridiag matrix. Use the Kalman info filter to do so. Specifically, the KF computes the normalizer: log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from this we solve for log |J|. """ T, D, _ = H_diag.shape assert H_diag.ndim == 3 and H_diag.shape[2] == D assert H_upper_diag.shape == (T - 1, D, D) J_init = J_11 = J_22 = np.zeros((D, D)) h_init = h_1 = h_2 = np.zeros((D,)) log_Z_init = 0 J_21 = np.swapaxes(H_upper_diag, -1, -2) log_Z_pair = 0 J_node = H_diag h_node = np.zeros((T, D)) log_Z_node = 0 logZ, _, _ = kalman_info_filter(J_init, h_init, log_Z_init, J_11, J_21, J_22, h_1, h_2, log_Z_pair, J_node, h_node, log_Z_node) # logZ = -1/2 log |J| + n/2 log 2 \pi logdetJ = -2 * (logZ - (T*D) / 2 * np.log(2 * np.pi)) return logdetJ
[ "def", "logdet_symm_block_tridiag", "(", "H_diag", ",", "H_upper_diag", ")", ":", "T", ",", "D", ",", "_", "=", "H_diag", ".", "shape", "assert", "H_diag", ".", "ndim", "==", "3", "and", "H_diag", ".", "shape", "[", "2", "]", "==", "D", "assert", "H_upper_diag", ".", "shape", "==", "(", "T", "-", "1", ",", "D", ",", "D", ")", "J_init", "=", "J_11", "=", "J_22", "=", "np", ".", "zeros", "(", "(", "D", ",", "D", ")", ")", "h_init", "=", "h_1", "=", "h_2", "=", "np", ".", "zeros", "(", "(", "D", ",", ")", ")", "log_Z_init", "=", "0", "J_21", "=", "np", ".", "swapaxes", "(", "H_upper_diag", ",", "-", "1", ",", "-", "2", ")", "log_Z_pair", "=", "0", "J_node", "=", "H_diag", "h_node", "=", "np", ".", "zeros", "(", "(", "T", ",", "D", ")", ")", "log_Z_node", "=", "0", "logZ", ",", "_", ",", "_", "=", "kalman_info_filter", "(", "J_init", ",", "h_init", ",", "log_Z_init", ",", "J_11", ",", "J_21", ",", "J_22", ",", "h_1", ",", "h_2", ",", "log_Z_pair", ",", "J_node", ",", "h_node", ",", "log_Z_node", ")", "# logZ = -1/2 log |J| + n/2 log 2 \\pi", "logdetJ", "=", "-", "2", "*", "(", "logZ", "-", "(", "T", "*", "D", ")", "/", "2", "*", "np", ".", "log", "(", "2", "*", "np", ".", "pi", ")", ")", "return", "logdetJ" ]
Compute the log determinant of a positive definite, symmetric block-tridiagonal
    matrix. Use the Kalman information filter to do so.

    Specifically, the KF computes the normalizer:

        log Z = 1/2 h^T J^{-1} h - 1/2 log |J| + n/2 log 2\pi

    We set h = 0 to get -1/2 log |J| + n/2 log 2\pi, and from this
    we solve for log |J|.
[ "Compute", "the", "log", "determinant", "of", "a", "positive", "definite", "symmetric", "block", "-", "tridiagonal", "matrix", ".", "Use", "the", "Kalman", "information", "filter", "to", "do", "so", "." ]
python
train
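The identity used by logdet_symm_block_tridiag can be sanity-checked on a small problem by assembling the dense matrix from its blocks and comparing against numpy's slogdet; a sketch, assuming the blocks below are diagonally dominant enough to keep the assembled matrix positive definite:

import numpy as np

def dense_from_blocks(H_diag, H_upper_diag):
    # Assemble the dense symmetric block-tridiagonal matrix.
    T, D, _ = H_diag.shape
    H = np.zeros((T * D, T * D))
    for t in range(T):
        H[t*D:(t+1)*D, t*D:(t+1)*D] = H_diag[t]
    for t in range(T - 1):
        H[t*D:(t+1)*D, (t+1)*D:(t+2)*D] = H_upper_diag[t]
        H[(t+1)*D:(t+2)*D, t*D:(t+1)*D] = H_upper_diag[t].T
    return H

rng = np.random.default_rng(0)
T, D = 5, 2
B = rng.standard_normal((T, D, D))
H_diag = 2.0 * np.eye(D) + 0.1 * (B + np.swapaxes(B, -1, -2))  # symmetric blocks
H_upper = 0.1 * rng.standard_normal((T - 1, D, D))
sign, ref = np.linalg.slogdet(dense_from_blocks(H_diag, H_upper))
# logdet_symm_block_tridiag(H_diag, H_upper) should agree with `ref`.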
saulpw/visidata
visidata/canvas.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/canvas.py#L417-L426
def qcurve(self, vertexes, attr=0, row=None): 'quadratic curve from vertexes[0] to vertexes[2] with control point at vertexes[1]' assert len(vertexes) == 3, len(vertexes) x1, y1 = vertexes[0] x2, y2 = vertexes[1] x3, y3 = vertexes[2] self.point(x1, y1, attr, row) self._recursive_bezier(x1, y1, x2, y2, x3, y3, attr, row) self.point(x3, y3, attr, row)
[ "def", "qcurve", "(", "self", ",", "vertexes", ",", "attr", "=", "0", ",", "row", "=", "None", ")", ":", "assert", "len", "(", "vertexes", ")", "==", "3", ",", "len", "(", "vertexes", ")", "x1", ",", "y1", "=", "vertexes", "[", "0", "]", "x2", ",", "y2", "=", "vertexes", "[", "1", "]", "x3", ",", "y3", "=", "vertexes", "[", "2", "]", "self", ".", "point", "(", "x1", ",", "y1", ",", "attr", ",", "row", ")", "self", ".", "_recursive_bezier", "(", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "x3", ",", "y3", ",", "attr", ",", "row", ")", "self", ".", "point", "(", "x3", ",", "y3", ",", "attr", ",", "row", ")" ]
quadratic curve from vertexes[0] to vertexes[2] with control point at vertexes[1]
[ "quadratic", "curve", "from", "vertexes", "[", "0", "]", "to", "vertexes", "[", "2", "]", "with", "control", "point", "at", "vertexes", "[", "1", "]" ]
python
train
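_recursive_bezier rasterizes the quadratic Bézier B(t) = (1-t)^2 P0 + 2(1-t)t P1 + t^2 P2 between the two plotted endpoints; direct evaluation of the same curve for comparison:

def qbezier_point(p0, p1, p2, t):
    # Evaluate a quadratic Bezier curve at parameter t in [0, 1].
    mt = 1.0 - t
    return (mt * mt * p0[0] + 2 * mt * t * p1[0] + t * t * p2[0],
            mt * mt * p0[1] + 2 * mt * t * p1[1] + t * t * p2[1])

# Eleven samples along a curve from (0, 0) to (2, 0) bent toward (1, 2)
pts = [qbezier_point((0, 0), (1, 2), (2, 0), i / 10) for i in range(11)]
assert pts[0] == (0.0, 0.0) and pts[-1] == (2.0, 0.0)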
readbeyond/aeneas
aeneas/ttswrappers/basettswrapper.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/ttswrappers/basettswrapper.py#L670-L767
def _synthesize_multiple_generic(self, helper_function, text_file, output_file_path, quit_after=None, backwards=False): """ Synthesize multiple fragments, generic function. The ``helper_function`` is a function that takes parameters ``(text, voice_code, output_file_path)`` and returns a tuple ``(result, (audio_length, audio_sample_rate, audio_format, audio_samples))``. :rtype: tuple (result, (anchors, current_time, num_chars)) """ self.log(u"Calling TTS engine using multiple generic function...") # get sample rate and codec self.log(u"Determining codec and sample rate...") if (self.OUTPUT_AUDIO_FORMAT is None) or (len(self.OUTPUT_AUDIO_FORMAT) != 3): self.log(u"Determining codec and sample rate with dummy text...") succeeded, data = helper_function( text=u"Dummy text to get sample_rate", voice_code=self._language_to_voice_code(self.DEFAULT_LANGUAGE), output_file_path=None ) if not succeeded: self.log_crit(u"An unexpected error occurred in helper_function") return (False, None) du_nu, sample_rate, codec, da_nu = data self.log(u"Determining codec and sample rate with dummy text... done") else: self.log(u"Reading codec and sample rate from OUTPUT_AUDIO_FORMAT") codec, channels_nu, sample_rate = self.OUTPUT_AUDIO_FORMAT self.log(u"Determining codec and sample rate... done") self.log([u" codec: %s", codec]) self.log([u" sample rate: %d", sample_rate]) # open output file output_file = AudioFile(rconf=self.rconf, logger=self.logger) output_file.audio_format = codec output_file.audio_channels = 1 output_file.audio_sample_rate = sample_rate # create output anchors = [] current_time = TimeValue("0.000") num_chars = 0 fragments = text_file.fragments if backwards: fragments = fragments[::-1] loop_function = self._loop_use_cache if self.use_cache else self._loop_no_cache for num, fragment in enumerate(fragments): succeeded, data = loop_function( helper_function=helper_function, num=num, fragment=fragment ) if not succeeded: self.log_crit(u"An unexpected error occurred in loop_function") return (False, None) duration, sr_nu, enc_nu, samples = data # store for later output anchors.append([current_time, fragment.identifier, fragment.text]) # increase the character counter num_chars += fragment.characters # concatenate new samples self.log([u"Fragment %d starts at: %.3f", num, current_time]) if duration > 0: self.log([u"Fragment %d duration: %.3f", num, duration]) current_time += duration output_file.add_samples(samples, reverse=backwards) else: self.log([u"Fragment %d has zero duration", num]) # check if we must stop synthesizing because we have enough audio if (quit_after is not None) and (current_time > quit_after): self.log([u"Quitting after reached duration %.3f", current_time]) break # minimize memory self.log(u"Minimizing memory...") output_file.minimize_memory() self.log(u"Minimizing memory... done") # if backwards, we need to reverse the audio samples again if backwards: self.log(u"Reversing audio samples...") output_file.reverse() self.log(u"Reversing audio samples... done") # write output file self.log([u"Writing audio file '%s'", output_file_path]) output_file.write(file_path=output_file_path) # return output if backwards: self.log_warn(u"Please note that anchor time values do not make sense since backwards=True") self.log([u"Returning %d time anchors", len(anchors)]) self.log([u"Current time %.3f", current_time]) self.log([u"Synthesized %d characters", num_chars]) self.log(u"Calling TTS engine using multiple generic function... done") return (True, (anchors, current_time, num_chars))
[ "def", "_synthesize_multiple_generic", "(", "self", ",", "helper_function", ",", "text_file", ",", "output_file_path", ",", "quit_after", "=", "None", ",", "backwards", "=", "False", ")", ":", "self", ".", "log", "(", "u\"Calling TTS engine using multiple generic function...\"", ")", "# get sample rate and codec", "self", ".", "log", "(", "u\"Determining codec and sample rate...\"", ")", "if", "(", "self", ".", "OUTPUT_AUDIO_FORMAT", "is", "None", ")", "or", "(", "len", "(", "self", ".", "OUTPUT_AUDIO_FORMAT", ")", "!=", "3", ")", ":", "self", ".", "log", "(", "u\"Determining codec and sample rate with dummy text...\"", ")", "succeeded", ",", "data", "=", "helper_function", "(", "text", "=", "u\"Dummy text to get sample_rate\"", ",", "voice_code", "=", "self", ".", "_language_to_voice_code", "(", "self", ".", "DEFAULT_LANGUAGE", ")", ",", "output_file_path", "=", "None", ")", "if", "not", "succeeded", ":", "self", ".", "log_crit", "(", "u\"An unexpected error occurred in helper_function\"", ")", "return", "(", "False", ",", "None", ")", "du_nu", ",", "sample_rate", ",", "codec", ",", "da_nu", "=", "data", "self", ".", "log", "(", "u\"Determining codec and sample rate with dummy text... done\"", ")", "else", ":", "self", ".", "log", "(", "u\"Reading codec and sample rate from OUTPUT_AUDIO_FORMAT\"", ")", "codec", ",", "channels_nu", ",", "sample_rate", "=", "self", ".", "OUTPUT_AUDIO_FORMAT", "self", ".", "log", "(", "u\"Determining codec and sample rate... done\"", ")", "self", ".", "log", "(", "[", "u\" codec: %s\"", ",", "codec", "]", ")", "self", ".", "log", "(", "[", "u\" sample rate: %d\"", ",", "sample_rate", "]", ")", "# open output file", "output_file", "=", "AudioFile", "(", "rconf", "=", "self", ".", "rconf", ",", "logger", "=", "self", ".", "logger", ")", "output_file", ".", "audio_format", "=", "codec", "output_file", ".", "audio_channels", "=", "1", "output_file", ".", "audio_sample_rate", "=", "sample_rate", "# create output", "anchors", "=", "[", "]", "current_time", "=", "TimeValue", "(", "\"0.000\"", ")", "num_chars", "=", "0", "fragments", "=", "text_file", ".", "fragments", "if", "backwards", ":", "fragments", "=", "fragments", "[", ":", ":", "-", "1", "]", "loop_function", "=", "self", ".", "_loop_use_cache", "if", "self", ".", "use_cache", "else", "self", ".", "_loop_no_cache", "for", "num", ",", "fragment", "in", "enumerate", "(", "fragments", ")", ":", "succeeded", ",", "data", "=", "loop_function", "(", "helper_function", "=", "helper_function", ",", "num", "=", "num", ",", "fragment", "=", "fragment", ")", "if", "not", "succeeded", ":", "self", ".", "log_crit", "(", "u\"An unexpected error occurred in loop_function\"", ")", "return", "(", "False", ",", "None", ")", "duration", ",", "sr_nu", ",", "enc_nu", ",", "samples", "=", "data", "# store for later output", "anchors", ".", "append", "(", "[", "current_time", ",", "fragment", ".", "identifier", ",", "fragment", ".", "text", "]", ")", "# increase the character counter", "num_chars", "+=", "fragment", ".", "characters", "# concatenate new samples", "self", ".", "log", "(", "[", "u\"Fragment %d starts at: %.3f\"", ",", "num", ",", "current_time", "]", ")", "if", "duration", ">", "0", ":", "self", ".", "log", "(", "[", "u\"Fragment %d duration: %.3f\"", ",", "num", ",", "duration", "]", ")", "current_time", "+=", "duration", "output_file", ".", "add_samples", "(", "samples", ",", "reverse", "=", "backwards", ")", "else", ":", "self", ".", "log", "(", "[", "u\"Fragment %d has zero duration\"", ",", "num", "]", ")", 
"# check if we must stop synthesizing because we have enough audio", "if", "(", "quit_after", "is", "not", "None", ")", "and", "(", "current_time", ">", "quit_after", ")", ":", "self", ".", "log", "(", "[", "u\"Quitting after reached duration %.3f\"", ",", "current_time", "]", ")", "break", "# minimize memory", "self", ".", "log", "(", "u\"Minimizing memory...\"", ")", "output_file", ".", "minimize_memory", "(", ")", "self", ".", "log", "(", "u\"Minimizing memory... done\"", ")", "# if backwards, we need to reverse the audio samples again", "if", "backwards", ":", "self", ".", "log", "(", "u\"Reversing audio samples...\"", ")", "output_file", ".", "reverse", "(", ")", "self", ".", "log", "(", "u\"Reversing audio samples... done\"", ")", "# write output file", "self", ".", "log", "(", "[", "u\"Writing audio file '%s'\"", ",", "output_file_path", "]", ")", "output_file", ".", "write", "(", "file_path", "=", "output_file_path", ")", "# return output", "if", "backwards", ":", "self", ".", "log_warn", "(", "u\"Please note that anchor time values do not make sense since backwards=True\"", ")", "self", ".", "log", "(", "[", "u\"Returning %d time anchors\"", ",", "len", "(", "anchors", ")", "]", ")", "self", ".", "log", "(", "[", "u\"Current time %.3f\"", ",", "current_time", "]", ")", "self", ".", "log", "(", "[", "u\"Synthesized %d characters\"", ",", "num_chars", "]", ")", "self", ".", "log", "(", "u\"Calling TTS engine using multiple generic function... done\"", ")", "return", "(", "True", ",", "(", "anchors", ",", "current_time", ",", "num_chars", ")", ")" ]
Synthesize multiple fragments, generic function. The ``helper_function`` is a function that takes parameters ``(text, voice_code, output_file_path)`` and returns a tuple ``(result, (audio_length, audio_sample_rate, audio_format, audio_samples))``. :rtype: tuple (result, (anchors, current_time, num_chars))
[ "Synthesize", "multiple", "fragments", "generic", "function", "." ]
python
train
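Stripped of the TTS calls, the anchor bookkeeping in _synthesize_multiple_generic is a running sum of fragment durations. A minimal sketch of that accumulation; the fragment tuples are hypothetical stand-ins for the real fragment objects:

from decimal import Decimal

def build_anchors(fragments):
    # Each anchor records the start time, identifier, and text of a fragment.
    anchors, current_time = [], Decimal("0.000")
    for identifier, text, duration in fragments:
        anchors.append([current_time, identifier, text])
        if duration > 0:
            current_time += duration
    return anchors, current_time

anchors, total = build_anchors([("f001", "Hello", Decimal("1.250")),
                                ("f002", "world", Decimal("0.875"))])
# anchors[1][0] == Decimal("1.250"), total == Decimal("2.125")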
spyder-ide/spyder
spyder/plugins/editor/widgets/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L353-L359
def select_row(self, steps):
        """Move selected row a number of steps.

        Wraps around cyclically.
        """
        row = (self.currentRow() + steps) % self.count()
        self.setCurrentRow(row)
[ "def", "select_row", "(", "self", ",", "steps", ")", ":", "row", "=", "(", "self", ".", "currentRow", "(", ")", "+", "steps", ")", "%", "self", ".", "count", "(", ")", "self", ".", "setCurrentRow", "(", "row", ")" ]
Move selected row a number of steps.

        Wraps around cyclically.
[ "Move", "selected", "row", "a", "number", "of", "steps", ".", "Wraps", "around", "cyclically", "." ]
python
train
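The modulo makes the selection wrap in both directions, because Python's % always returns a result with the sign of the divisor:

count = 5
assert (4 + 1) % count == 0  # stepping past the last row wraps to row 0
assert (0 - 1) % count == 4  # stepping before row 0 wraps to the last row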
vslutov/turingmarkov
turingmarkov/__main__.py
https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/__main__.py#L29-L35
def load_turing(argv, stdin): """Load and return turing machine.""" if len(argv) > 3: with open(argv[3]) as input_file: return build_machine(input_file.readlines()) else: return build_machine(stdin.readlines())
[ "def", "load_turing", "(", "argv", ",", "stdin", ")", ":", "if", "len", "(", "argv", ")", ">", "3", ":", "with", "open", "(", "argv", "[", "3", "]", ")", "as", "input_file", ":", "return", "build_machine", "(", "input_file", ".", "readlines", "(", ")", ")", "else", ":", "return", "build_machine", "(", "stdin", ".", "readlines", "(", ")", ")" ]
Load and return turing machine.
[ "Load", "and", "return", "turing", "machine", "." ]
python
train
pettarin/ipapy
ipapy/__init__.py
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__init__.py#L26-L70
def split_using_dictionary(string, dictionary, max_key_length, single_char_parsing=False): """ Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest string starting at the current index that is a key in the dictionary, or 2. a single character that is not a key in the dictionary. If ``single_char_parsing`` is ``False``, parse the string one Unicode character at a time, that is, do not perform the greedy parsing. :param iterable string: the iterable object ("string") to split into atoms :param dict dictionary: the dictionary mapping atoms ("characters") to something else :param int max_key_length: the length of a longest key, in number of characters :param bool single_char_parsing: if ``True``, parse one Unicode character at a time """ def substring(string, i, j): if isinstance(string[i], tuple): # transform list of tuples with one element in a tuple with all elements return tuple([string[k][0] for k in range(i, j)]) # just return substring return string[i:j] if string is None: return None if (single_char_parsing) or (max_key_length < 2): return [c for c in string] acc = [] l = len(string) i = 0 while i < l: found = False for j in range(min(i + max_key_length, l), i, -1): sub = substring(string, i, j) if sub in dictionary: found = True acc.append(sub) i = j break if not found: acc.append(string[i]) i += 1 return acc
[ "def", "split_using_dictionary", "(", "string", ",", "dictionary", ",", "max_key_length", ",", "single_char_parsing", "=", "False", ")", ":", "def", "substring", "(", "string", ",", "i", ",", "j", ")", ":", "if", "isinstance", "(", "string", "[", "i", "]", ",", "tuple", ")", ":", "# transform list of tuples with one element in a tuple with all elements", "return", "tuple", "(", "[", "string", "[", "k", "]", "[", "0", "]", "for", "k", "in", "range", "(", "i", ",", "j", ")", "]", ")", "# just return substring", "return", "string", "[", "i", ":", "j", "]", "if", "string", "is", "None", ":", "return", "None", "if", "(", "single_char_parsing", ")", "or", "(", "max_key_length", "<", "2", ")", ":", "return", "[", "c", "for", "c", "in", "string", "]", "acc", "=", "[", "]", "l", "=", "len", "(", "string", ")", "i", "=", "0", "while", "i", "<", "l", ":", "found", "=", "False", "for", "j", "in", "range", "(", "min", "(", "i", "+", "max_key_length", ",", "l", ")", ",", "i", ",", "-", "1", ")", ":", "sub", "=", "substring", "(", "string", ",", "i", ",", "j", ")", "if", "sub", "in", "dictionary", ":", "found", "=", "True", "acc", ".", "append", "(", "sub", ")", "i", "=", "j", "break", "if", "not", "found", ":", "acc", ".", "append", "(", "string", "[", "i", "]", ")", "i", "+=", "1", "return", "acc" ]
Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest string starting at the current index that is a key in the dictionary, or 2. a single character that is not a key in the dictionary. If ``single_char_parsing`` is ``False``, parse the string one Unicode character at a time, that is, do not perform the greedy parsing. :param iterable string: the iterable object ("string") to split into atoms :param dict dictionary: the dictionary mapping atoms ("characters") to something else :param int max_key_length: the length of a longest key, in number of characters :param bool single_char_parsing: if ``True``, parse one Unicode character at a time
[ "Return", "a", "list", "of", "(", "non", "-", "empty", ")", "substrings", "of", "the", "given", "string", "where", "each", "substring", "is", "either", ":", "1", ".", "the", "longest", "string", "starting", "at", "the", "current", "index", "that", "is", "a", "key", "in", "the", "dictionary", "or", "2", ".", "a", "single", "character", "that", "is", "not", "a", "key", "in", "the", "dictionary", "." ]
python
train
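The greedy longest-match behaviour is easiest to see on a toy dictionary; a self-contained sketch of the same loop, with illustrative keys:

def greedy_split(s, keys, max_len):
    # Prefer the longest dictionary key starting at each index.
    out, i = [], 0
    while i < len(s):
        for j in range(min(i + max_len, len(s)), i, -1):
            if s[i:j] in keys:
                out.append(s[i:j])
                i = j
                break
        else:
            out.append(s[i])  # no key matched: emit a single character
            i += 1
    return out

assert greedy_split("tsa", {"ts", "a"}, 2) == ["ts", "a"]
assert greedy_split("tsx", {"ts", "a"}, 2) == ["ts", "x"]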
ModisWorks/modis
modis/discord_modis/modules/music/api_music.py
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/modules/music/api_music.py#L48-L64
def build_sc_api(): """Build the SoundCloud API for future use""" data = datatools.get_data() if "soundcloud_client_id" not in data["discord"]["keys"]: logger.warning("No API key found with name 'soundcloud_client_id'") logger.info("Please add your SoundCloud client id with name 'soundcloud_client_id' " "in data.json to use Soundcloud features of the music module") return False try: global scclient scclient = soundcloud.Client(client_id=data["discord"]["keys"]["soundcloud_client_id"]) logger.debug("SoundCloud build successful") return True except Exception as e: logger.exception(e) return False
[ "def", "build_sc_api", "(", ")", ":", "data", "=", "datatools", ".", "get_data", "(", ")", "if", "\"soundcloud_client_id\"", "not", "in", "data", "[", "\"discord\"", "]", "[", "\"keys\"", "]", ":", "logger", ".", "warning", "(", "\"No API key found with name 'soundcloud_client_id'\"", ")", "logger", ".", "info", "(", "\"Please add your SoundCloud client id with name 'soundcloud_client_id' \"", "\"in data.json to use Soundcloud features of the music module\"", ")", "return", "False", "try", ":", "global", "scclient", "scclient", "=", "soundcloud", ".", "Client", "(", "client_id", "=", "data", "[", "\"discord\"", "]", "[", "\"keys\"", "]", "[", "\"soundcloud_client_id\"", "]", ")", "logger", ".", "debug", "(", "\"SoundCloud build successful\"", ")", "return", "True", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "False" ]
Build the SoundCloud API for future use
[ "Build", "the", "SoundCloud", "API", "for", "future", "use" ]
python
train
foutaise/texttable
texttable.py
https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L686-L714
def _splitit(self, line, isheader): """Split each element of line to fit the column width Each element is turned into a list, result of the wrapping of the string to the desired width """ line_wrapped = [] for cell, width in zip(line, self._width): array = [] for c in cell.split('\n'): if c.strip() == "": array.append("") else: array.extend(textwrapper(c, width)) line_wrapped.append(array) max_cell_lines = reduce(max, list(map(len, line_wrapped))) for cell, valign in zip(line_wrapped, self._valign): if isheader: valign = "t" if valign == "m": missing = max_cell_lines - len(cell) cell[:0] = [""] * int(missing / 2) cell.extend([""] * int(missing / 2 + missing % 2)) elif valign == "b": cell[:0] = [""] * (max_cell_lines - len(cell)) else: cell.extend([""] * (max_cell_lines - len(cell))) return line_wrapped
[ "def", "_splitit", "(", "self", ",", "line", ",", "isheader", ")", ":", "line_wrapped", "=", "[", "]", "for", "cell", ",", "width", "in", "zip", "(", "line", ",", "self", ".", "_width", ")", ":", "array", "=", "[", "]", "for", "c", "in", "cell", ".", "split", "(", "'\\n'", ")", ":", "if", "c", ".", "strip", "(", ")", "==", "\"\"", ":", "array", ".", "append", "(", "\"\"", ")", "else", ":", "array", ".", "extend", "(", "textwrapper", "(", "c", ",", "width", ")", ")", "line_wrapped", ".", "append", "(", "array", ")", "max_cell_lines", "=", "reduce", "(", "max", ",", "list", "(", "map", "(", "len", ",", "line_wrapped", ")", ")", ")", "for", "cell", ",", "valign", "in", "zip", "(", "line_wrapped", ",", "self", ".", "_valign", ")", ":", "if", "isheader", ":", "valign", "=", "\"t\"", "if", "valign", "==", "\"m\"", ":", "missing", "=", "max_cell_lines", "-", "len", "(", "cell", ")", "cell", "[", ":", "0", "]", "=", "[", "\"\"", "]", "*", "int", "(", "missing", "/", "2", ")", "cell", ".", "extend", "(", "[", "\"\"", "]", "*", "int", "(", "missing", "/", "2", "+", "missing", "%", "2", ")", ")", "elif", "valign", "==", "\"b\"", ":", "cell", "[", ":", "0", "]", "=", "[", "\"\"", "]", "*", "(", "max_cell_lines", "-", "len", "(", "cell", ")", ")", "else", ":", "cell", ".", "extend", "(", "[", "\"\"", "]", "*", "(", "max_cell_lines", "-", "len", "(", "cell", ")", ")", ")", "return", "line_wrapped" ]
Split each element of line to fit the column width Each element is turned into a list, result of the wrapping of the string to the desired width
[ "Split", "each", "element", "of", "line", "to", "fit", "the", "column", "width" ]
python
train
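The vertical-alignment step can be isolated: pad each cell's wrapped lines to the row height at the top, the bottom, or both ends. A condensed sketch of the same padding rules:

def pad_cell(lines, height, valign):
    # "m" centers, "b" bottom-aligns, anything else top-aligns.
    missing = height - len(lines)
    if valign == "m":
        top = missing // 2
        return [""] * top + lines + [""] * (missing - top)
    if valign == "b":
        return [""] * missing + lines
    return lines + [""] * missing

assert pad_cell(["x"], 4, "m") == ["", "x", "", ""]
assert pad_cell(["x"], 3, "b") == ["", "", "x"]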
PredixDev/predixpy
predix/admin/cf/services.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/cf/services.py#L134-L142
def get_service_plan_for_service(self, service_name): """ Return the service plans available for a given service. """ services = self.get_services() for service in services['resources']: if service['entity']['label'] == service_name: response = self.api.get(service['entity']['service_plans_url']) return response['resources']
[ "def", "get_service_plan_for_service", "(", "self", ",", "service_name", ")", ":", "services", "=", "self", ".", "get_services", "(", ")", "for", "service", "in", "services", "[", "'resources'", "]", ":", "if", "service", "[", "'entity'", "]", "[", "'label'", "]", "==", "service_name", ":", "response", "=", "self", ".", "api", ".", "get", "(", "service", "[", "'entity'", "]", "[", "'service_plans_url'", "]", ")", "return", "response", "[", "'resources'", "]" ]
Return the service plans available for a given service.
[ "Return", "the", "service", "plans", "available", "for", "a", "given", "service", "." ]
python
train
tcalmant/ipopo
pelix/threadpool.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/threadpool.py#L402-L429
def enqueue(self, method, *args, **kwargs): """ Queues a task in the pool :param method: Method to call :return: A FutureResult object, to get the result of the task :raise ValueError: Invalid method :raise Full: The task queue is full """ if not hasattr(method, "__call__"): raise ValueError( "{0} has no __call__ member.".format(method.__name__) ) # Prepare the future result object future = FutureResult(self._logger) # Use a lock, as we might be "resetting" the queue with self.__lock: # Add the task to the queue self._queue.put((method, args, kwargs, future), True, self._timeout) self.__nb_pending_task += 1 if self.__nb_pending_task > self.__nb_threads: # All threads are taken: start a new one self.__start_thread() return future
[ "def", "enqueue", "(", "self", ",", "method", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "hasattr", "(", "method", ",", "\"__call__\"", ")", ":", "raise", "ValueError", "(", "\"{0} has no __call__ member.\"", ".", "format", "(", "method", ".", "__name__", ")", ")", "# Prepare the future result object", "future", "=", "FutureResult", "(", "self", ".", "_logger", ")", "# Use a lock, as we might be \"resetting\" the queue", "with", "self", ".", "__lock", ":", "# Add the task to the queue", "self", ".", "_queue", ".", "put", "(", "(", "method", ",", "args", ",", "kwargs", ",", "future", ")", ",", "True", ",", "self", ".", "_timeout", ")", "self", ".", "__nb_pending_task", "+=", "1", "if", "self", ".", "__nb_pending_task", ">", "self", ".", "__nb_threads", ":", "# All threads are taken: start a new one", "self", ".", "__start_thread", "(", ")", "return", "future" ]
Queues a task in the pool :param method: Method to call :return: A FutureResult object, to get the result of the task :raise ValueError: Invalid method :raise Full: The task queue is full
[ "Queues", "a", "task", "in", "the", "pool" ]
python
train
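Typical use is fire-and-collect: hand enqueue a callable plus its arguments and keep the returned future. A hedged usage sketch; the pool constructor and the future's accessor are assumptions about the pelix API and may differ:

pool = ThreadPool(5)               # assumed: maximum worker count
future = pool.enqueue(pow, 2, 10)  # schedules pow(2, 10), returns a FutureResult
result = future.result(timeout=5)  # accessor name is an assumption -> 1024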
saltstack/salt
salt/states/host.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/host.py#L206-L254
def only(name, hostnames): ''' Ensure that only the given hostnames are associated with the given IP address. .. versionadded:: 2016.3.0 name The IP address to associate with the given hostnames. hostnames Either a single hostname or a list of hostnames to associate with the given IP address in the given order. Any other hostname associated with the IP address is removed. If no hostnames are specified, all hostnames associated with the given IP address are removed. ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if isinstance(hostnames, six.string_types): hostnames = [hostnames] old = ' '.join(__salt__['hosts.get_alias'](name)) new = ' '.join((x.strip() for x in hostnames)) if old == new: ret['comment'] = 'IP address {0} already set to "{1}"'.format( name, new) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'Would change {0} from "{1}" to "{2}"'.format( name, old, new) return ret ret['result'] = __salt__['hosts.set_host'](name, new) if not ret['result']: ret['comment'] = ('hosts.set_host failed to change {0}' + ' from "{1}" to "{2}"').format(name, old, new) return ret ret['comment'] = 'successfully changed {0} from "{1}" to "{2}"'.format( name, old, new) ret['changes'] = {name: {'old': old, 'new': new}} return ret
[ "def", "only", "(", "name", ",", "hostnames", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "if", "isinstance", "(", "hostnames", ",", "six", ".", "string_types", ")", ":", "hostnames", "=", "[", "hostnames", "]", "old", "=", "' '", ".", "join", "(", "__salt__", "[", "'hosts.get_alias'", "]", "(", "name", ")", ")", "new", "=", "' '", ".", "join", "(", "(", "x", ".", "strip", "(", ")", "for", "x", "in", "hostnames", ")", ")", "if", "old", "==", "new", ":", "ret", "[", "'comment'", "]", "=", "'IP address {0} already set to \"{1}\"'", ".", "format", "(", "name", ",", "new", ")", "ret", "[", "'result'", "]", "=", "True", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Would change {0} from \"{1}\" to \"{2}\"'", ".", "format", "(", "name", ",", "old", ",", "new", ")", "return", "ret", "ret", "[", "'result'", "]", "=", "__salt__", "[", "'hosts.set_host'", "]", "(", "name", ",", "new", ")", "if", "not", "ret", "[", "'result'", "]", ":", "ret", "[", "'comment'", "]", "=", "(", "'hosts.set_host failed to change {0}'", "+", "' from \"{1}\" to \"{2}\"'", ")", ".", "format", "(", "name", ",", "old", ",", "new", ")", "return", "ret", "ret", "[", "'comment'", "]", "=", "'successfully changed {0} from \"{1}\" to \"{2}\"'", ".", "format", "(", "name", ",", "old", ",", "new", ")", "ret", "[", "'changes'", "]", "=", "{", "name", ":", "{", "'old'", ":", "old", ",", "'new'", ":", "new", "}", "}", "return", "ret" ]
Ensure that only the given hostnames are associated with the given IP address. .. versionadded:: 2016.3.0 name The IP address to associate with the given hostnames. hostnames Either a single hostname or a list of hostnames to associate with the given IP address in the given order. Any other hostname associated with the IP address is removed. If no hostnames are specified, all hostnames associated with the given IP address are removed.
[ "Ensure", "that", "only", "the", "given", "hostnames", "are", "associated", "with", "the", "given", "IP", "address", "." ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAARP/QAAccount.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L558-L577
def start_date(self):
        """The account's first trading date (only used in backtesting).

        Raises:
            RuntimeWarning -- [description]

        Returns:
            [type] -- [description]
        """
        if self.start_ is None:
            if len(self.time_index_max) > 0:
                return str(min(self.time_index_max))[0:10]
            else:
                print(
                    RuntimeWarning(
                        'QAACCOUNT: THIS ACCOUNT DOES NOT HAVE ANY TRADE'
                    )
                )
        else:
            return self.start_
[ "def", "start_date", "(", "self", ")", ":", "if", "self", ".", "start_", "is", "None", ":", "if", "len", "(", "self", ".", "time_index_max", ")", ">", "0", ":", "return", "str", "(", "min", "(", "self", ".", "time_index_max", ")", ")", "[", "0", ":", "10", "]", "else", ":", "print", "(", "RuntimeWarning", "(", "'QAACCOUNT: THIS ACCOUNT DOES NOT HAVE ANY TRADE'", ")", ")", "else", ":", "return", "self", ".", "start_" ]
The account's first trading date (only used in backtesting).

        Raises:
            RuntimeWarning -- [description]

        Returns:
            [type] -- [description]
[ "The", "account", "s", "first", "trading", "date", "(", "only", "used", "in", "backtesting", ")", "." ]
python
train
saltstack/salt
salt/grains/core.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/core.py#L465-L485
def _osx_memdata(): ''' Return the memory information for BSD-like systems ''' grains = {'mem_total': 0, 'swap_total': 0} sysctl = salt.utils.path.which('sysctl') if sysctl: mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl)) swap_total = __salt__['cmd.run']('{0} -n vm.swapusage'.format(sysctl)).split()[2].replace(',', '.') if swap_total.endswith('K'): _power = 2**10 elif swap_total.endswith('M'): _power = 2**20 elif swap_total.endswith('G'): _power = 2**30 swap_total = float(swap_total[:-1]) * _power grains['mem_total'] = int(mem) // 1024 // 1024 grains['swap_total'] = int(swap_total) // 1024 // 1024 return grains
[ "def", "_osx_memdata", "(", ")", ":", "grains", "=", "{", "'mem_total'", ":", "0", ",", "'swap_total'", ":", "0", "}", "sysctl", "=", "salt", ".", "utils", ".", "path", ".", "which", "(", "'sysctl'", ")", "if", "sysctl", ":", "mem", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'{0} -n hw.memsize'", ".", "format", "(", "sysctl", ")", ")", "swap_total", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'{0} -n vm.swapusage'", ".", "format", "(", "sysctl", ")", ")", ".", "split", "(", ")", "[", "2", "]", ".", "replace", "(", "','", ",", "'.'", ")", "if", "swap_total", ".", "endswith", "(", "'K'", ")", ":", "_power", "=", "2", "**", "10", "elif", "swap_total", ".", "endswith", "(", "'M'", ")", ":", "_power", "=", "2", "**", "20", "elif", "swap_total", ".", "endswith", "(", "'G'", ")", ":", "_power", "=", "2", "**", "30", "swap_total", "=", "float", "(", "swap_total", "[", ":", "-", "1", "]", ")", "*", "_power", "grains", "[", "'mem_total'", "]", "=", "int", "(", "mem", ")", "//", "1024", "//", "1024", "grains", "[", "'swap_total'", "]", "=", "int", "(", "swap_total", ")", "//", "1024", "//", "1024", "return", "grains" ]
Return the memory information for macOS (Darwin) systems
[ "Return", "the", "memory", "information", "for", "macOS", "(", "Darwin", ")", "systems" ]
python
train
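The suffix handling above assumes one of K/M/G is always present (otherwise _power is never assigned); the same conversion as a defensive standalone sketch:

def parse_size(value):
    # Convert a sysctl size string such as "1024,00M" or "2G" to bytes.
    value = value.replace(",", ".")
    units = {"K": 2 ** 10, "M": 2 ** 20, "G": 2 ** 30}
    if value and value[-1] in units:
        return int(float(value[:-1]) * units[value[-1]])
    return int(float(value))

assert parse_size("2G") == 2 * 2 ** 30
assert parse_size("1024,00M") == 1024 * 2 ** 20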
openstack/monasca-common
monasca_common/kafka_lib/partitioner/hashed.py
https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/partitioner/hashed.py#L54-L122
def murmur2(key): """Pure-python Murmur2 implementation. Based on java client, see org.apache.kafka.common.utils.Utils.murmur2 Args: key: if not a bytes type, encoded using default encoding Returns: MurmurHash2 of key bytearray """ # Convert key to bytes or bytearray if isinstance(key, bytearray) or (six.PY3 and isinstance(key, bytes)): data = key else: data = bytearray(str(key).encode()) length = len(data) seed = 0x9747b28c # 'm' and 'r' are mixing constants generated offline. # They're not really 'magic', they just happen to work well. m = 0x5bd1e995 r = 24 # Initialize the hash to a random value h = seed ^ length length4 = length // 4 for i in range(length4): i4 = i * 4 k = ((data[i4 + 0] & 0xff) + ((data[i4 + 1] & 0xff) << 8) + ((data[i4 + 2] & 0xff) << 16) + ((data[i4 + 3] & 0xff) << 24)) k &= 0xffffffff k *= m k &= 0xffffffff k ^= (k % 0x100000000) >> r # k ^= k >>> r k &= 0xffffffff k *= m k &= 0xffffffff h *= m h &= 0xffffffff h ^= k h &= 0xffffffff # Handle the last few bytes of the input array extra_bytes = length % 4 if extra_bytes >= 3: h ^= (data[(length & ~3) + 2] & 0xff) << 16 h &= 0xffffffff if extra_bytes >= 2: h ^= (data[(length & ~3) + 1] & 0xff) << 8 h &= 0xffffffff if extra_bytes >= 1: h ^= (data[length & ~3] & 0xff) h &= 0xffffffff h *= m h &= 0xffffffff h ^= (h % 0x100000000) >> 13 # h >>> 13; h &= 0xffffffff h *= m h &= 0xffffffff h ^= (h % 0x100000000) >> 15 # h >>> 15; h &= 0xffffffff return h
[ "def", "murmur2", "(", "key", ")", ":", "# Convert key to bytes or bytearray", "if", "isinstance", "(", "key", ",", "bytearray", ")", "or", "(", "six", ".", "PY3", "and", "isinstance", "(", "key", ",", "bytes", ")", ")", ":", "data", "=", "key", "else", ":", "data", "=", "bytearray", "(", "str", "(", "key", ")", ".", "encode", "(", ")", ")", "length", "=", "len", "(", "data", ")", "seed", "=", "0x9747b28c", "# 'm' and 'r' are mixing constants generated offline.", "# They're not really 'magic', they just happen to work well.", "m", "=", "0x5bd1e995", "r", "=", "24", "# Initialize the hash to a random value", "h", "=", "seed", "^", "length", "length4", "=", "length", "//", "4", "for", "i", "in", "range", "(", "length4", ")", ":", "i4", "=", "i", "*", "4", "k", "=", "(", "(", "data", "[", "i4", "+", "0", "]", "&", "0xff", ")", "+", "(", "(", "data", "[", "i4", "+", "1", "]", "&", "0xff", ")", "<<", "8", ")", "+", "(", "(", "data", "[", "i4", "+", "2", "]", "&", "0xff", ")", "<<", "16", ")", "+", "(", "(", "data", "[", "i4", "+", "3", "]", "&", "0xff", ")", "<<", "24", ")", ")", "k", "&=", "0xffffffff", "k", "*=", "m", "k", "&=", "0xffffffff", "k", "^=", "(", "k", "%", "0x100000000", ")", ">>", "r", "# k ^= k >>> r", "k", "&=", "0xffffffff", "k", "*=", "m", "k", "&=", "0xffffffff", "h", "*=", "m", "h", "&=", "0xffffffff", "h", "^=", "k", "h", "&=", "0xffffffff", "# Handle the last few bytes of the input array", "extra_bytes", "=", "length", "%", "4", "if", "extra_bytes", ">=", "3", ":", "h", "^=", "(", "data", "[", "(", "length", "&", "~", "3", ")", "+", "2", "]", "&", "0xff", ")", "<<", "16", "h", "&=", "0xffffffff", "if", "extra_bytes", ">=", "2", ":", "h", "^=", "(", "data", "[", "(", "length", "&", "~", "3", ")", "+", "1", "]", "&", "0xff", ")", "<<", "8", "h", "&=", "0xffffffff", "if", "extra_bytes", ">=", "1", ":", "h", "^=", "(", "data", "[", "length", "&", "~", "3", "]", "&", "0xff", ")", "h", "&=", "0xffffffff", "h", "*=", "m", "h", "&=", "0xffffffff", "h", "^=", "(", "h", "%", "0x100000000", ")", ">>", "13", "# h >>> 13;", "h", "&=", "0xffffffff", "h", "*=", "m", "h", "&=", "0xffffffff", "h", "^=", "(", "h", "%", "0x100000000", ")", ">>", "15", "# h >>> 15;", "h", "&=", "0xffffffff", "return", "h" ]
Pure-python Murmur2 implementation. Based on java client, see org.apache.kafka.common.utils.Utils.murmur2 Args: key: if not a bytes type, encoded using default encoding Returns: MurmurHash2 of key bytearray
[ "Pure", "-", "python", "Murmur2", "implementation", "." ]
python
train
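In the hashed partitioner this digest is reduced to a partition index. The Java client computes toPositive(murmur2(keyBytes)) % numPartitions; a sketch of that step, noting that whether this module applies the same mask is an assumption:

def partition_for(key, num_partitions):
    # Mask to non-negative (Java toPositive), then reduce modulo the count.
    return (murmur2(key) & 0x7FFFFFFF) % num_partitions

idx = partition_for(b"my-key", 12)
assert 0 <= idx < 12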
crytic/slither
examples/scripts/possible_paths.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/examples/scripts/possible_paths.py#L65-L73
def all_function_definitions(function):
    """
    Obtains a list representing this function and any base definitions
    :param function: The function to obtain all definitions at and beneath.
    :return: Returns a list composed of the provided function definition and any base definitions.
    """
    return [function] + [f for c in function.contract.inheritance
                         for f in c.functions_and_modifiers_not_inherited
                         if f.full_name == function.full_name]
[ "def", "all_function_definitions", "(", "function", ")", ":", "return", "[", "function", "]", "+", "[", "f", "for", "c", "in", "function", ".", "contract", ".", "inheritance", "for", "f", "in", "c", ".", "functions_and_modifiers_not_inherited", "if", "f", ".", "full_name", "==", "function", ".", "full_name", "]" ]
Obtains a list representing this function and any base definitions
    :param function: The function to obtain all definitions at and beneath.
    :return: Returns a list composed of the provided function definition and any base definitions.
[ "Obtains", "a", "list", "representing", "this", "function", "and", "any", "base", "definitions", ":", "param", "function", ":", "The", "function", "to", "obtain", "all", "definitions", "at", "and", "beneath", ".", ":", "return", ":", "Returns", "a", "list", "composed", "of", "the", "provided", "function", "definition", "and", "any", "base", "definitions", "." ]
python
train
yyuu/botornado
boto/dynamodb/layer2.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/dynamodb/layer2.py#L75-L89
def dynamize_attribute_updates(self, pending_updates): """ Convert a set of pending item updates into the structure required by Layer1. """ d = {} for attr_name in pending_updates: action, value = pending_updates[attr_name] if value is None: # DELETE without an attribute value d[attr_name] = {"Action": action} else: d[attr_name] = {"Action": action, "Value": self.dynamize_value(value)} return d
[ "def", "dynamize_attribute_updates", "(", "self", ",", "pending_updates", ")", ":", "d", "=", "{", "}", "for", "attr_name", "in", "pending_updates", ":", "action", ",", "value", "=", "pending_updates", "[", "attr_name", "]", "if", "value", "is", "None", ":", "# DELETE without an attribute value", "d", "[", "attr_name", "]", "=", "{", "\"Action\"", ":", "action", "}", "else", ":", "d", "[", "attr_name", "]", "=", "{", "\"Action\"", ":", "action", ",", "\"Value\"", ":", "self", ".", "dynamize_value", "(", "value", ")", "}", "return", "d" ]
Convert a set of pending item updates into the structure required by Layer1.
[ "Convert", "a", "set", "of", "pending", "item", "updates", "into", "the", "structure", "required", "by", "Layer1", "." ]
python
train
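The output shape is easiest to see on a concrete input; the attribute names below are hypothetical, and the Value encodings show what dynamize_value would typically produce for Layer1:

pending_updates = {
    "views": ("ADD", 1),        # numeric increment
    "status": ("PUT", "done"),  # overwrite the attribute
    "draft": ("DELETE", None),  # DELETE with no value removes the attribute
}
# dynamize_attribute_updates(pending_updates) yields roughly:
# {"views":  {"Action": "ADD",    "Value": {"N": "1"}},
#  "status": {"Action": "PUT",    "Value": {"S": "done"}},
#  "draft":  {"Action": "DELETE"}}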
cs50/lib50
lib50/_api.py
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L700-L710
def _prompt_username(prompt="Username: ", prefill=None): """Prompt the user for username.""" if prefill: readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt).strip() except EOFError: print() finally: readline.set_startup_hook()
[ "def", "_prompt_username", "(", "prompt", "=", "\"Username: \"", ",", "prefill", "=", "None", ")", ":", "if", "prefill", ":", "readline", ".", "set_startup_hook", "(", "lambda", ":", "readline", ".", "insert_text", "(", "prefill", ")", ")", "try", ":", "return", "input", "(", "prompt", ")", ".", "strip", "(", ")", "except", "EOFError", ":", "print", "(", ")", "finally", ":", "readline", ".", "set_startup_hook", "(", ")" ]
Prompt the user for username.
[ "Prompt", "the", "user", "for", "username", "." ]
python
train
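The prefill trick generalizes: a readline startup hook inserts text into the line buffer before input() reads it, and the hook must be cleared afterwards. The same pattern for any prompt:

import readline

def input_with_prefill(prompt, prefill=""):
    # Pre-populate the editable input line, then always clear the hook.
    readline.set_startup_hook(lambda: readline.insert_text(prefill))
    try:
        return input(prompt)
    finally:
        readline.set_startup_hook()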
sloria/sphinx-issues
sphinx_issues.py
https://github.com/sloria/sphinx-issues/blob/0a9597472645dc728c2aef12e0653aabfdb68ab2/sphinx_issues.py#L13-L43
def user_role(name, rawtext, text, lineno, inliner, options=None, content=None): """Sphinx role for linking to a user profile. Defaults to linking to Github profiles, but the profile URIS can be configured via the ``issues_user_uri`` config value. Examples: :: :user:`sloria` Anchor text also works: :: :user:`Steven Loria <sloria>` """ options = options or {} content = content or [] has_explicit_title, title, target = split_explicit_title(text) target = utils.unescape(target).strip() title = utils.unescape(title).strip() config = inliner.document.settings.env.app.config if config.issues_user_uri: ref = config.issues_user_uri.format(user=target) else: ref = "https://github.com/{0}".format(target) if has_explicit_title: text = title else: text = "@{0}".format(target) link = nodes.reference(text=text, refuri=ref, **options) return [link], []
[ "def", "user_role", "(", "name", ",", "rawtext", ",", "text", ",", "lineno", ",", "inliner", ",", "options", "=", "None", ",", "content", "=", "None", ")", ":", "options", "=", "options", "or", "{", "}", "content", "=", "content", "or", "[", "]", "has_explicit_title", ",", "title", ",", "target", "=", "split_explicit_title", "(", "text", ")", "target", "=", "utils", ".", "unescape", "(", "target", ")", ".", "strip", "(", ")", "title", "=", "utils", ".", "unescape", "(", "title", ")", ".", "strip", "(", ")", "config", "=", "inliner", ".", "document", ".", "settings", ".", "env", ".", "app", ".", "config", "if", "config", ".", "issues_user_uri", ":", "ref", "=", "config", ".", "issues_user_uri", ".", "format", "(", "user", "=", "target", ")", "else", ":", "ref", "=", "\"https://github.com/{0}\"", ".", "format", "(", "target", ")", "if", "has_explicit_title", ":", "text", "=", "title", "else", ":", "text", "=", "\"@{0}\"", ".", "format", "(", "target", ")", "link", "=", "nodes", ".", "reference", "(", "text", "=", "text", ",", "refuri", "=", "ref", ",", "*", "*", "options", ")", "return", "[", "link", "]", ",", "[", "]" ]
Sphinx role for linking to a user profile. Defaults to linking to Github
    profiles, but the profile URIs can be configured via the ``issues_user_uri``
    config value.

    Examples: ::

        :user:`sloria`

    Anchor text also works: ::

        :user:`Steven Loria <sloria>`
[ "Sphinx", "role", "for", "linking", "to", "a", "user", "profile", ".", "Defaults", "to", "linking", "to", "Github", "profiles", "but", "the", "profile", "URIs", "can", "be", "configured", "via", "the", "issues_user_uri", "config", "value", "." ]
python
train
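Pointing the role somewhere other than GitHub is a one-line conf.py setting; the GitLab pattern below is only an example:

# conf.py
extensions = ["sphinx_issues"]

# {user} is filled with the role target, so :user:`sloria` would link to
# https://gitlab.com/sloria
issues_user_uri = "https://gitlab.com/{user}"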
ray-project/ray
python/ray/tune/automl/genetic_searcher.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automl/genetic_searcher.py#L181-L220
def _crossover(candidate): """Perform crossover action to candidates. For example, new gene = 60% sample_1 + 40% sample_2. Args: candidate: List of candidate genes (encodings). Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]]) >>> new_gene = _crossover([gene1, gene2]) >>> # new_gene could be the first [n=1] parameters of >>> # gene1 + the rest of gene2 >>> # in which case: >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene2[1] >>> # new_gene[2] = gene1[1] Returns: New gene (encoding) """ sample_index1 = np.random.choice(len(candidate)) sample_index2 = np.random.choice(len(candidate)) sample_1 = candidate[sample_index1] sample_2 = candidate[sample_index2] cross_index = int(len(sample_1) * np.random.uniform(low=0.3, high=0.7)) logger.info( LOGGING_PREFIX + "Perform crossover between %sth and %sth at index=%s", sample_index1, sample_index2, cross_index) next_gen = [] for i in range(len(sample_1)): if i > cross_index: next_gen.append(sample_2[i]) else: next_gen.append(sample_1[i]) return next_gen
[ "def", "_crossover", "(", "candidate", ")", ":", "sample_index1", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "candidate", ")", ")", "sample_index2", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "candidate", ")", ")", "sample_1", "=", "candidate", "[", "sample_index1", "]", "sample_2", "=", "candidate", "[", "sample_index2", "]", "cross_index", "=", "int", "(", "len", "(", "sample_1", ")", "*", "np", ".", "random", ".", "uniform", "(", "low", "=", "0.3", ",", "high", "=", "0.7", ")", ")", "logger", ".", "info", "(", "LOGGING_PREFIX", "+", "\"Perform crossover between %sth and %sth at index=%s\"", ",", "sample_index1", ",", "sample_index2", ",", "cross_index", ")", "next_gen", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "sample_1", ")", ")", ":", "if", "i", ">", "cross_index", ":", "next_gen", ".", "append", "(", "sample_2", "[", "i", "]", ")", "else", ":", "next_gen", ".", "append", "(", "sample_1", "[", "i", "]", ")", "return", "next_gen" ]
Perform crossover action to candidates. For example, new gene = 60% sample_1 + 40% sample_2. Args: candidate: List of candidate genes (encodings). Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]]) >>> new_gene = _crossover([gene1, gene2]) >>> # new_gene could be the first [n=1] parameters of >>> # gene1 + the rest of gene2 >>> # in which case: >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene2[1] >>> # new_gene[2] = gene1[1] Returns: New gene (encoding)
[ "Perform", "crossover", "action", "to", "candidates", "." ]
python
train
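The crossover above can be exercised without the Ray machinery; this self-contained sketch reproduces the same single-cut-point logic with plain lists, so the names here are illustrative rather than the module's API.

import numpy as np

def crossover(candidate):
    s1 = candidate[np.random.choice(len(candidate))]
    s2 = candidate[np.random.choice(len(candidate))]
    cut = int(len(s1) * np.random.uniform(low=0.3, high=0.7))
    # Parameters up to and including `cut` come from s1, the rest
    # from s2, mirroring the i > cross_index branch in the original.
    return [s1[i] if i <= cut else s2[i] for i in range(len(s1))]

gene1 = [[0, 0, 1], [0, 1], [1, 0]]
gene2 = [[0, 1, 0], [1, 0], [0, 1]]
child = crossover([gene1, gene2])  # e.g. [[0, 0, 1], [1, 0], [0, 1]]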
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/gui/qt_b26_gui.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/gui/qt_b26_gui.py#L413-L452
def plot_clicked(self, mouse_event): """ gets activated when the user clicks on a plot Args: mouse_event: """ if isinstance(self.current_script, SelectPoints) and self.current_script.is_running: if (not (mouse_event.xdata == None)): if (mouse_event.button == 1): pt = np.array([mouse_event.xdata, mouse_event.ydata]) self.current_script.toggle_NV(pt) self.current_script.plot([self.matplotlibwidget_1.figure]) self.matplotlibwidget_1.draw() item = self.tree_scripts.currentItem() if item is not None: if item.is_point(): item_x = item.child(1) if mouse_event.xdata is not None: self.tree_scripts.setCurrentItem(item_x) item_x.value = float(mouse_event.xdata) item_x.setText(1, '{:0.3f}'.format(float(mouse_event.xdata))) item_y = item.child(0) if mouse_event.ydata is not None: self.tree_scripts.setCurrentItem(item_y) item_y.value = float(mouse_event.ydata) item_y.setText(1, '{:0.3f}'.format(float(mouse_event.ydata))) # focus back on item self.tree_scripts.setCurrentItem(item) else: if item.parent() is not None: if item.parent().is_point(): if item == item.parent().child(1): if mouse_event.xdata is not None: item.setData(1, 2, float(mouse_event.xdata)) if item == item.parent().child(0): if mouse_event.ydata is not None: item.setData(1, 2, float(mouse_event.ydata))
[ "def", "plot_clicked", "(", "self", ",", "mouse_event", ")", ":", "if", "isinstance", "(", "self", ".", "current_script", ",", "SelectPoints", ")", "and", "self", ".", "current_script", ".", "is_running", ":", "if", "(", "not", "(", "mouse_event", ".", "xdata", "==", "None", ")", ")", ":", "if", "(", "mouse_event", ".", "button", "==", "1", ")", ":", "pt", "=", "np", ".", "array", "(", "[", "mouse_event", ".", "xdata", ",", "mouse_event", ".", "ydata", "]", ")", "self", ".", "current_script", ".", "toggle_NV", "(", "pt", ")", "self", ".", "current_script", ".", "plot", "(", "[", "self", ".", "matplotlibwidget_1", ".", "figure", "]", ")", "self", ".", "matplotlibwidget_1", ".", "draw", "(", ")", "item", "=", "self", ".", "tree_scripts", ".", "currentItem", "(", ")", "if", "item", "is", "not", "None", ":", "if", "item", ".", "is_point", "(", ")", ":", "item_x", "=", "item", ".", "child", "(", "1", ")", "if", "mouse_event", ".", "xdata", "is", "not", "None", ":", "self", ".", "tree_scripts", ".", "setCurrentItem", "(", "item_x", ")", "item_x", ".", "value", "=", "float", "(", "mouse_event", ".", "xdata", ")", "item_x", ".", "setText", "(", "1", ",", "'{:0.3f}'", ".", "format", "(", "float", "(", "mouse_event", ".", "xdata", ")", ")", ")", "item_y", "=", "item", ".", "child", "(", "0", ")", "if", "mouse_event", ".", "ydata", "is", "not", "None", ":", "self", ".", "tree_scripts", ".", "setCurrentItem", "(", "item_y", ")", "item_y", ".", "value", "=", "float", "(", "mouse_event", ".", "ydata", ")", "item_y", ".", "setText", "(", "1", ",", "'{:0.3f}'", ".", "format", "(", "float", "(", "mouse_event", ".", "ydata", ")", ")", ")", "# focus back on item", "self", ".", "tree_scripts", ".", "setCurrentItem", "(", "item", ")", "else", ":", "if", "item", ".", "parent", "(", ")", "is", "not", "None", ":", "if", "item", ".", "parent", "(", ")", ".", "is_point", "(", ")", ":", "if", "item", "==", "item", ".", "parent", "(", ")", ".", "child", "(", "1", ")", ":", "if", "mouse_event", ".", "xdata", "is", "not", "None", ":", "item", ".", "setData", "(", "1", ",", "2", ",", "float", "(", "mouse_event", ".", "xdata", ")", ")", "if", "item", "==", "item", ".", "parent", "(", ")", ".", "child", "(", "0", ")", ":", "if", "mouse_event", ".", "ydata", "is", "not", "None", ":", "item", ".", "setData", "(", "1", ",", "2", ",", "float", "(", "mouse_event", ".", "ydata", ")", ")" ]
gets activated when the user clicks on a plot Args: mouse_event:
[ "gets", "activated", "when", "the", "user", "clicks", "on", "a", "plot", "Args", ":", "mouse_event", ":" ]
python
train
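plot_clicked is driven by matplotlib mouse events; the Qt tree-widget plumbing aside, attaching such a handler takes one mpl_connect call. A framework-free sketch, with an illustrative handler body rather than the class's actual logic:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()

def on_click(mouse_event):
    # xdata/ydata are None when the click lands outside the axes.
    if mouse_event.xdata is None or mouse_event.ydata is None:
        return
    if mouse_event.button == 1:  # left mouse button, as in plot_clicked
        print("clicked at ({:0.3f}, {:0.3f})".format(
            mouse_event.xdata, mouse_event.ydata))

fig.canvas.mpl_connect("button_press_event", on_click)
plt.show()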
deepmind/sonnet
sonnet/python/modules/relational_memory.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/relational_memory.py#L120-L161
def _multihead_attention(self, memory): """Perform multi-head attention from 'Attention is All You Need'. Implementation of the attention mechanism from https://arxiv.org/abs/1706.03762. Args: memory: Memory tensor to perform attention on. Returns: new_memory: New memory tensor. """ key_size = self._key_size value_size = self._head_size qkv_size = 2 * key_size + value_size total_size = qkv_size * self._num_heads # Denote as F. qkv = basic.BatchApply(basic.Linear(total_size))(memory) qkv = basic.BatchApply(layer_norm.LayerNorm())(qkv) mem_slots = memory.get_shape().as_list()[1] # Denoted as N. # [B, N, F] -> [B, N, H, F/H] qkv_reshape = basic.BatchReshape([mem_slots, self._num_heads, qkv_size])(qkv) # [B, N, H, F/H] -> [B, H, N, F/H] qkv_transpose = tf.transpose(qkv_reshape, [0, 2, 1, 3]) q, k, v = tf.split(qkv_transpose, [key_size, key_size, value_size], -1) q *= key_size ** -0.5 dot_product = tf.matmul(q, k, transpose_b=True) # [B, H, N, N] weights = tf.nn.softmax(dot_product) output = tf.matmul(weights, v) # [B, H, N, V] # [B, H, N, V] -> [B, N, H, V] output_transpose = tf.transpose(output, [0, 2, 1, 3]) # [B, N, H, V] -> [B, N, H * V] new_memory = basic.BatchFlatten(preserve_dims=2)(output_transpose) return new_memory
[ "def", "_multihead_attention", "(", "self", ",", "memory", ")", ":", "key_size", "=", "self", ".", "_key_size", "value_size", "=", "self", ".", "_head_size", "qkv_size", "=", "2", "*", "key_size", "+", "value_size", "total_size", "=", "qkv_size", "*", "self", ".", "_num_heads", "# Denote as F.", "qkv", "=", "basic", ".", "BatchApply", "(", "basic", ".", "Linear", "(", "total_size", ")", ")", "(", "memory", ")", "qkv", "=", "basic", ".", "BatchApply", "(", "layer_norm", ".", "LayerNorm", "(", ")", ")", "(", "qkv", ")", "mem_slots", "=", "memory", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "1", "]", "# Denoted as N.", "# [B, N, F] -> [B, N, H, F/H]", "qkv_reshape", "=", "basic", ".", "BatchReshape", "(", "[", "mem_slots", ",", "self", ".", "_num_heads", ",", "qkv_size", "]", ")", "(", "qkv", ")", "# [B, N, H, F/H] -> [B, H, N, F/H]", "qkv_transpose", "=", "tf", ".", "transpose", "(", "qkv_reshape", ",", "[", "0", ",", "2", ",", "1", ",", "3", "]", ")", "q", ",", "k", ",", "v", "=", "tf", ".", "split", "(", "qkv_transpose", ",", "[", "key_size", ",", "key_size", ",", "value_size", "]", ",", "-", "1", ")", "q", "*=", "key_size", "**", "-", "0.5", "dot_product", "=", "tf", ".", "matmul", "(", "q", ",", "k", ",", "transpose_b", "=", "True", ")", "# [B, H, N, N]", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "dot_product", ")", "output", "=", "tf", ".", "matmul", "(", "weights", ",", "v", ")", "# [B, H, N, V]", "# [B, H, N, V] -> [B, N, H, V]", "output_transpose", "=", "tf", ".", "transpose", "(", "output", ",", "[", "0", ",", "2", ",", "1", ",", "3", "]", ")", "# [B, N, H, V] -> [B, N, H * V]", "new_memory", "=", "basic", ".", "BatchFlatten", "(", "preserve_dims", "=", "2", ")", "(", "output_transpose", ")", "return", "new_memory" ]
Perform multi-head attention from 'Attention is All You Need'. Implementation of the attention mechanism from https://arxiv.org/abs/1706.03762. Args: memory: Memory tensor to perform attention on. Returns: new_memory: New memory tensor.
[ "Perform", "multi", "-", "head", "attention", "from", "Attention", "is", "All", "You", "Need", "." ]
python
train
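Stripped of the Sonnet modules, _multihead_attention is standard scaled dot-product attention. A framework-free numpy sketch of the same arithmetic, with shapes matching the original's comments ([B, N, F] memory split into H heads); the dimension values are arbitrary examples:

import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

B, N, H, key_size, value_size = 2, 4, 3, 8, 16
q = np.random.randn(B, H, N, key_size) * key_size ** -0.5  # scaled queries
k = np.random.randn(B, H, N, key_size)
v = np.random.randn(B, H, N, value_size)

weights = softmax(q @ np.swapaxes(k, -1, -2))  # [B, H, N, N]
output = weights @ v                           # [B, H, N, V]
# [B, H, N, V] -> [B, N, H * V], matching the transpose + BatchFlatten step.
new_memory = np.transpose(output, (0, 2, 1, 3)).reshape(B, N, H * value_size)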
sorgerlab/indra
indra/belief/__init__.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L434-L462
def sample_statements(stmts, seed=None):
    """Return statements sampled according to belief.

    Statements are sampled independently according to their belief
    scores. For instance, a Statement with a belief score of 0.7
    will end up in the returned Statement list with probability 0.7.

    Parameters
    ----------
    stmts : list[indra.statements.Statement]
        A list of INDRA Statements to sample.
    seed : Optional[int]
        A seed for the random number generator used for sampling.

    Returns
    -------
    new_stmts : list[indra.statements.Statement]
        A list of INDRA Statements that were chosen by random sampling
        according to their respective belief scores.
    """
    if seed:
        numpy.random.seed(seed)
    new_stmts = []
    r = numpy.random.rand(len(stmts))
    for i, stmt in enumerate(stmts):
        if r[i] < stmt.belief:
            new_stmts.append(stmt)
    return new_stmts
[ "def", "sample_statements", "(", "stmts", ",", "seed", "=", "None", ")", ":", "if", "seed", ":", "numpy", ".", "random", ".", "seed", "(", "seed", ")", "new_stmts", "=", "[", "]", "r", "=", "numpy", ".", "random", ".", "rand", "(", "len", "(", "stmts", ")", ")", "for", "i", ",", "stmt", "in", "enumerate", "(", "stmts", ")", ":", "if", "r", "[", "i", "]", "<", "stmt", ".", "belief", ":", "new_stmts", ".", "append", "(", "stmt", ")", "return", "new_stmts" ]
Return statements sampled according to belief.

Statements are sampled independently according to their belief
scores. For instance, a Statement with a belief score of 0.7
will end up in the returned Statement list with probability 0.7.

Parameters
----------
stmts : list[indra.statements.Statement]
    A list of INDRA Statements to sample.
seed : Optional[int]
    A seed for the random number generator used for sampling.

Returns
-------
new_stmts : list[indra.statements.Statement]
    A list of INDRA Statements that were chosen by random sampling
    according to their respective belief scores.
[ "Return", "statements", "sampled", "according", "to", "belief", "." ]
python
train
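sample_statements only touches the .belief attribute, so it can be exercised without constructing real INDRA objects; the SimpleNamespace stand-ins below are an assumption for illustration, and sample_statements is assumed imported from indra.belief.

from types import SimpleNamespace

stmts = [SimpleNamespace(belief=b) for b in (0.9, 0.5, 0.1)]
kept = sample_statements(stmts, seed=42)
# Each statement survives independently with probability equal to its
# belief, so high-belief statements dominate the sampled list.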
zhmcclient/python-zhmcclient
zhmcclient/_adapter.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_adapter.py#L321-L334
def port_uris_prop(self): """ :term:`string`: Name of adapter property that specifies the adapter port URIs, or the empty string ('') for adapters without ports. For example, 'network-port-uris' for a network adapter. """ if self._port_uris_prop is None: family = self.get_property('adapter-family') try: self._port_uris_prop = self.port_uris_prop_by_family[family] except KeyError: self._port_uris_prop = '' return self._port_uris_prop
[ "def", "port_uris_prop", "(", "self", ")", ":", "if", "self", ".", "_port_uris_prop", "is", "None", ":", "family", "=", "self", ".", "get_property", "(", "'adapter-family'", ")", "try", ":", "self", ".", "_port_uris_prop", "=", "self", ".", "port_uris_prop_by_family", "[", "family", "]", "except", "KeyError", ":", "self", ".", "_port_uris_prop", "=", "''", "return", "self", ".", "_port_uris_prop" ]
:term:`string`: Name of adapter property that specifies the adapter port URIs, or the empty string ('') for adapters without ports. For example, 'network-port-uris' for a network adapter.
[ ":", "term", ":", "string", ":", "Name", "of", "adapter", "property", "that", "specifies", "the", "adapter", "port", "URIs", "or", "the", "empty", "string", "(", ")", "for", "adapters", "without", "ports", "." ]
python
train
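The property above amounts to a cached dictionary lookup with an empty-string fallback. A generic sketch of the same pattern in isolation; the family keys below are made up for illustration, not zhmcclient's actual table.

PORT_URIS_PROP_BY_FAMILY = {
    "osa": "network-port-uris",      # illustrative entries only
    "ficon": "storage-port-uris",
}

def port_uris_prop_for(family):
    # Unknown families yield '' instead of raising, matching the
    # try/except KeyError fallback in the property above.
    return PORT_URIS_PROP_BY_FAMILY.get(family, "")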
erdewit/ib_insync
ib_insync/util.py
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/util.py#L171-L197
def formatSI(n) -> str:
    """
    Format the integer or float n to 3 significant digits + SI prefix.
    """
    s = ''
    if n < 0:
        n = -n
        s += '-'
    if type(n) is int and n < 1000:
        s += str(n) + ' '
    elif n < 1e-22:
        s += '0.00 '
    else:
        assert n < 9.99e26
        log = int(math.floor(math.log10(n)))
        i, j = divmod(log, 3)
        for _try in range(2):
            templ = '%.{}f'.format(2 - j)
            val = templ % (n * 10 ** (-3 * i))
            if val != '1000':
                break
            i += 1
            j = 0
        s += val + ' '
        if i != 0:
            s += 'yzafpnum kMGTPEZY'[i + 8]
    return s
[ "def", "formatSI", "(", "n", ")", "->", "str", ":", "s", "=", "''", "if", "n", "<", "0", ":", "n", "=", "-", "n", "s", "+=", "'-'", "if", "type", "(", "n", ")", "is", "int", "and", "n", "<", "1000", ":", "s", "=", "str", "(", "n", ")", "+", "' '", "elif", "n", "<", "1e-22", ":", "s", "=", "'0.00 '", "else", ":", "assert", "n", "<", "9.99e26", "log", "=", "int", "(", "math", ".", "floor", "(", "math", ".", "log10", "(", "n", ")", ")", ")", "i", ",", "j", "=", "divmod", "(", "log", ",", "3", ")", "for", "_try", "in", "range", "(", "2", ")", ":", "templ", "=", "'%.{}f'", ".", "format", "(", "2", "-", "j", ")", "val", "=", "templ", "%", "(", "n", "*", "10", "**", "(", "-", "3", "*", "i", ")", ")", "if", "val", "!=", "'1000'", ":", "break", "i", "+=", "1", "j", "=", "0", "s", "+=", "val", "+", "' '", "if", "i", "!=", "0", ":", "s", "+=", "'yzafpnum kMGTPEZY'", "[", "i", "+", "8", "]", "return", "s" ]
Format the integer or float n to 3 significant digits + SI prefix.
[ "Format", "the", "integer", "or", "float", "n", "to", "3", "significant", "digits", "+", "SI", "prefix", "." ]
python
train
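A few illustrative calls to formatSI; the outputs in the comments follow from the 3-significant-digit rule and the 'yzafpnum kMGTPEZY' prefix table (note the space formatSI leaves before each prefix).

print(formatSI(0))         # '0 '
print(formatSI(1234))      # '1.23 k'
print(formatSI(0.000012))  # '12.0 u'  (micro rendered as 'u')
print(formatSI(-2.5e9))    # '-2.50 G'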