Dataset columns:

    repo       string (lengths 7 to 54)
    path       string (lengths 4 to 192)
    url        string (lengths 87 to 284)
    code       string (lengths 78 to 104k)
    docstring  string (lengths 1 to 46.9k)
    language   string (1 distinct value)
    partition  string (3 distinct values)

Each record below lists these fields in that order.
cloudera/cm_api
python/src/cm_api/endpoints/role_config_groups.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/role_config_groups.py#L82-L94
def get_all_role_config_groups(resource_root, service_name,
    cluster_name="default"):
  """
  Get all role config groups in the specified service.
  @param resource_root: The root Resource object.
  @param service_name: Service name.
  @param cluster_name: Cluster name.
  @return: A list of ApiRoleConfigGroup objects.
  @since: API v3
  """
  return call(resource_root.get,
      _get_role_config_groups_path(cluster_name, service_name),
      ApiRoleConfigGroup, True, api_version=3)
Get all role config groups in the specified service. @param resource_root: The root Resource object. @param service_name: Service name. @param cluster_name: Cluster name. @return: A list of ApiRoleConfigGroup objects. @since: API v3
python
train
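A minimal usage sketch for the record above; the host, credentials, and service/cluster names are placeholders, and the ApiResource entry point is assumed to come from the same cm_api package:

from cm_api.api_client import ApiResource
from cm_api.endpoints.role_config_groups import get_all_role_config_groups

# Hypothetical CM host and credentials -- replace with real values.
api = ApiResource('cm-host.example.com', username='admin', password='admin')
for group in get_all_role_config_groups(api, 'hdfs1', cluster_name='cluster1'):
    print(group.name)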
coremke/django-quill
quill/widgets.py
https://github.com/coremke/django-quill/blob/6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f/quill/widgets.py#L35-L48
def render(self, name, value, attrs=None):
    """Render the Quill WYSIWYG."""
    if value is None:
        value = ''
    # Avoid a mutable default argument for ``attrs``.
    final_attrs = self.build_attrs(attrs or {}, name=name)
    quill_app = apps.get_app_config('quill')
    quill_config = getattr(quill_app, self.config)
    return mark_safe(render_to_string(quill_config['template'], {
        'final_attrs': flatatt(final_attrs),
        'value': value,
        'id': final_attrs['id'],
        'config': self.config,
    }))
Render the Quill WYSIWYG.
python
valid
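As a small aside to the record above, flatatt (from django.forms.utils) is what turns the attribute dict into an HTML attribute string; a quick self-contained check:

from django.forms.utils import flatatt

# Renders a dict as an HTML attribute string, with a leading space.
print(flatatt({'id': 'quill_editor'}))  # -> ' id="quill_editor"'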
alvinwan/TexSoup
TexSoup/utils.py
https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/utils.py#L10-L20
def to_buffer(f):
    """
    Decorator converting all strings and iterators/iterables into
    Buffers.
    """
    @functools.wraps(f)
    def wrap(*args, **kwargs):
        # Accept the iterator either positionally or as a keyword, and pass
        # it on positionally exactly once (the original looked up
        # kwargs.get('iterator', args[0]), which evaluates args[0] eagerly
        # and double-passes the keyword form).
        if 'iterator' in kwargs:
            iterator, rest = kwargs.pop('iterator'), args
        else:
            iterator, rest = args[0], args[1:]
        if not isinstance(iterator, Buffer):
            iterator = Buffer(iterator)
        return f(iterator, *rest, **kwargs)
    return wrap
Decorator converting all strings and iterators/iterables into Buffers.
python
train
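A self-contained toy version of the same "coerce the first argument" decorator pattern, using a minimal stand-in Buffer rather than TexSoup's real class:

import functools

class Buffer:
    """Minimal stand-in: wraps an iterable and tracks a read position."""
    def __init__(self, iterable):
        self.items = list(iterable)
        self.pos = 0

    def forward(self, n=1):
        chunk = self.items[self.pos:self.pos + n]
        self.pos += n
        return ''.join(chunk)

def to_buffer(f):
    @functools.wraps(f)
    def wrap(*args, **kwargs):
        iterator = args[0]
        if not isinstance(iterator, Buffer):
            iterator = Buffer(iterator)
        return f(iterator, *args[1:], **kwargs)
    return wrap

@to_buffer
def first_two(buf):
    return buf.forward(2)

print(first_two('hello'))  # -> 'he'; the plain string was wrapped automatically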
raymondEhlers/pachyderm
pachyderm/utils.py
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/utils.py#L97-L115
def get_array_for_fit(observables: dict, track_pt_bin: int, jet_pt_bin: int) -> histogram.Histogram1D:
    """ Get a Histogram1D associated with the selected jet and track pt bins.

    This is often used to retrieve data for fitting.

    Args:
        observables (dict): The observables from which the hist should be retrieved.
        track_pt_bin (int): Track pt bin of the desired hist.
        jet_pt_bin (int): Jet pt bin of the desired hist.
    Returns:
        Histogram1D: Converted TH1 or uproot histogram.
    Raises:
        ValueError: If the requested observable couldn't be found.
    """
    for name, observable in observables.items():
        if observable.track_pt_bin == track_pt_bin and observable.jet_pt_bin == jet_pt_bin:
            return histogram.Histogram1D.from_existing_hist(observable.hist)
    # f-string prefix added: the original message never interpolated the bins.
    raise ValueError(f"Cannot find fit with jet pt bin {jet_pt_bin} and track pt bin {track_pt_bin}")
Get a Histogram1D associated with the selected jet and track pt bins. This is often used to retrieve data for fitting. Args: observables (dict): The observables from which the hist should be retrieved. track_pt_bin (int): Track pt bin of the desired hist. jet_pt_bin (int): Jet pt bin of the desired hist. Returns: Histogram1D: Converted TH1 or uproot histogram. Raises: ValueError: If the requested observable couldn't be found.
python
train
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L599-L625
def f1_curve(self, delta_tau=0.01):
    """ Computes the relationship between probability threshold
    and classification F1 score.
    """
    # NOTE: delta_tau is currently unused; thresholds are taken from the
    # sorted probabilities instead.
    orig_thresh = self.threshold
    sorted_labels, sorted_probs = self.sorted_values

    scores = []
    taus = []
    tau = 0
    for k in range(len(sorted_labels)):
        # compute new accuracy
        self.threshold = tau
        scores.append(self.f1_score)
        taus.append(tau)

        # update threshold
        tau = sorted_probs[k]

    # add last datapoint
    tau = 1.0
    self.threshold = tau
    scores.append(self.f1_score)
    taus.append(tau)

    self.threshold = orig_thresh
    return scores, taus
Computes the relationship between probability threshold and classification F1 score.
python
train
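A self-contained sketch of the same threshold sweep with made-up data, assuming scikit-learn is available for the F1 computation:

import numpy as np
from sklearn.metrics import f1_score

labels = np.array([0, 0, 1, 1, 1])
probs = np.array([0.10, 0.40, 0.35, 0.80, 0.90])

taus, scores = [], []
# Thresholds come from the sorted probabilities, as in the method above.
for tau in np.concatenate(([0.0], np.sort(probs), [1.0])):
    preds = (probs >= tau).astype(int)
    taus.append(tau)
    scores.append(f1_score(labels, preds, zero_division=0))

for tau, score in zip(taus, scores):
    print('tau=%.2f  F1=%.3f' % (tau, score))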
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L639-L672
def create_output(decoder_output, rows, cols, targets, hparams):
  """Creates output from decoder output and vars.

  Args:
    decoder_output: Tensor of shape [batch, ...], where ... can be any rank
      such that the number of elements is batch * rows * cols *
      hparams.hidden_size.
    rows: Integer representing number of rows in a 2-D data point.
    cols: Integer representing number of columns in a 2-D data point.
    targets: Tensor of shape [batch, hparams.img_len, hparams.img_len,
      hparams.num_channels].
    hparams: HParams set.

  Returns:
    Tensor of shape [batch, hparams.img_len, hparams.img_len,
      hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise
      [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256].
      In the special case of predict mode, it is a Tensor of rank 5.
  """
  del targets  # unused arg
  decoded_image = postprocess_image(decoder_output, rows, cols, hparams)
  batch = common_layers.shape_list(decoded_image)[0]
  depth = common_layers.shape_list(decoded_image)[-1]
  likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
  if hparams.mode == tf.estimator.ModeKeys.PREDICT:
    y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth])
    output = y[:, :rows, :, :, :]
  elif likelihood == DistributionType.CAT:
    # Unpack the cols dimension of the Categorical.
    channels = hparams.num_channels
    output = tf.reshape(decoded_image,
                        [batch, rows, cols // channels, channels, depth])
  else:
    output = decoded_image
  return output
Creates output from decoder output and vars. Args: decoder_output: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. targets: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_channels]. hparams: HParams set. Returns: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256]. In the special case of predict mode, it is a Tensor of rank 5.
python
train
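The categorical branch's reshape simply unpacks an interleaved channel axis; the same arithmetic in plain numpy with toy sizes:

import numpy as np

batch, rows, cols, channels, depth = 2, 4, 6, 3, 256
# Here ``cols`` counts columns * channels, as in the flattened decoder output.
decoded = np.zeros((batch, rows, cols, depth))
output = decoded.reshape(batch, rows, cols // channels, channels, depth)
print(output.shape)  # (2, 4, 2, 3, 256)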
Azure/azure-uamqp-python
uamqp/__init__.py
https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/__init__.py#L43-L67
def send_message(target, data, auth=None, debug=False):
    """Send a single message to AMQP endpoint.

    :param target: The target AMQP endpoint.
    :type target: str, bytes or ~uamqp.address.Target
    :param data: The contents of the message to send.
    :type data: str, bytes or ~uamqp.message.Message
    :param auth: The authentication credentials for the endpoint.
     This should be one of the subclasses of uamqp.authentication.AMQPAuth. Currently
     this includes:
        - uamqp.authentication.SASLAnonymous
        - uamqp.authentication.SASLPlain
        - uamqp.authentication.SASTokenAuth
     If no authentication is supplied, SASLAnonymous will be used by default.
    :type auth: ~uamqp.authentication.common.AMQPAuth
    :param debug: Whether to turn on network trace logs. If `True`, trace logs
     will be logged at INFO level. Default is `False`.
    :type debug: bool
    :return: A list of states for each message sent.
    :rtype: list[~uamqp.constants.MessageState]
    """
    message = data if isinstance(data, Message) else Message(body=data)
    with SendClient(target, auth=auth, debug=debug) as send_client:
        send_client.queue_message(message)
        return send_client.send_all_messages()
Send a single message to AMQP endpoint. :param target: The target AMQP endpoint. :type target: str, bytes or ~uamqp.address.Target :param data: The contents of the message to send. :type data: str, bytes or ~uamqp.message.Message :param auth: The authentication credentials for the endpoint. This should be one of the subclasses of uamqp.authentication.AMQPAuth. Currently this includes: - uamqp.authentication.SASLAnonymous - uamqp.authentication.SASLPlain - uamqp.authentication.SASTokenAuth If no authentication is supplied, SASLAnonymous will be used by default. :type auth: ~uamqp.authentication.common.AMQPAuth :param debug: Whether to turn on network trace logs. If `True`, trace logs will be logged at INFO level. Default is `False`. :type debug: bool :return: A list of states for each message sent. :rtype: list[~uamqp.constants.MessageState]
python
train
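A minimal usage sketch for send_message; the endpoint is a placeholder, a reachable AMQP broker is required, and authentication falls back to the SASLAnonymous default described in the docstring:

import uamqp

target = 'amqps://broker.example.com/my-queue'  # placeholder endpoint
states = uamqp.send_message(target, 'hello, world', debug=True)
print(states)  # one MessageState per message sent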
SuperCowPowers/bat
bat/utils/vt_query.py
https://github.com/SuperCowPowers/bat/blob/069e6bc52843dc07760969c531cc442ca7da8e0c/bat/utils/vt_query.py#L43-L56
def query_file(self, file_sha, verbose=False):
    """Query the VirusTotal Service

    Args:
        file_sha (str): The file sha1 or sha256 hash
    """
    # (The original docstring also documented a ``url`` parameter that does
    # not exist in this signature; it has been dropped.)

    # Sanity check sha hash input
    if len(file_sha) not in [64, 40]:  # sha256 and sha1 lengths
        print('File sha looks malformed: {:s}'.format(file_sha))
        return {'file_sha': file_sha, 'malformed': True}

    # Call and return the internal query method
    return self._query('file', file_sha, verbose)
Query the VirusTotal Service Args: file_sha (str): The file sha1 or sha256 hash
python
train
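The length check in the record above works because hex digests have fixed sizes; a quick self-contained confirmation:

import hashlib

data = b'example file contents'
print(len(hashlib.sha1(data).hexdigest()))    # 40
print(len(hashlib.sha256(data).hexdigest()))  # 64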
jslang/responsys
responsys/client.py
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L352-L367
def delete_table_records(self, table, query_column, ids_to_delete):
    """ Responsys.deleteTableRecords call

    Accepts:
        InteractObject table
        string query_column
            possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER'
        list ids_to_delete

    Returns a list of DeleteResult instances
    """
    table = table.get_soap_object(self.client)
    result = self.call('deleteTableRecords', table, query_column, ids_to_delete)
    if hasattr(result, '__iter__'):
        return [DeleteResult(delete_result) for delete_result in result]
    return [DeleteResult(result)]
Responsys.deleteTableRecords call Accepts: InteractObject table string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list ids_to_delete Returns a list of DeleteResult instances
python
train
senaite/senaite.core
bika/lims/content/analysisrequest.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisrequest.py#L1584-L1593
def getLate(self):
    """Return True if there is at least one late analysis in this Request
    """
    for analysis in self.getAnalyses():
        if analysis.review_state == "retracted":
            continue
        analysis_obj = api.get_object(analysis)
        if analysis_obj.isLateAnalysis():
            return True
    return False
Return True if there is at least one late analysis in this Request
python
train
juju/charm-helpers
charmhelpers/contrib/network/ufw.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/network/ufw.py#L72-L106
def is_ipv6_ok(soft_fail=False):
    """
    Check if IPv6 support is present and ip6tables functional

    :param soft_fail: If set to True and IPv6 support is broken, then reports
                      that the host doesn't have IPv6 support, otherwise a
                      UFWIPv6Error exception is raised.
    :returns: True if IPv6 is working, False otherwise
    """
    # do we have IPv6 in the machine?
    if os.path.isdir('/proc/sys/net/ipv6'):
        # is ip6tables kernel module loaded?
        if not is_module_loaded('ip6_tables'):
            # ip6tables support isn't complete, let's try to load it
            try:
                modprobe('ip6_tables')
                # great, we can load the module
                return True
            except subprocess.CalledProcessError as ex:
                hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
                            level="WARN")
                # we are in a world where ip6tables isn't working
                if soft_fail:
                    # so we inform that the machine doesn't have IPv6
                    return False
                else:
                    raise UFWIPv6Error("IPv6 firewall support broken")
        else:
            # the module is present :)
            return True
    else:
        # the system doesn't have IPv6
        return False
Check if IPv6 support is present and ip6tables functional :param soft_fail: If set to True and IPv6 support is broken, then reports that the host doesn't have IPv6 support, otherwise a UFWIPv6Error exception is raised. :returns: True if IPv6 is working, False otherwise
python
train
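For reference, the kind of check is_module_loaded performs can be sketched by scanning /proc/modules; this is a stand-in for illustration, not charm-helpers' actual implementation, and it only runs on Linux:

def is_module_loaded_sketch(module):
    """Return True if ``module`` appears in /proc/modules."""
    with open('/proc/modules') as f:
        return any(line.split()[0] == module for line in f)

print(is_module_loaded_sketch('ip6_tables'))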
totalgood/pugnlp
src/pugnlp/plots.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L284-L289
def show(self, block=False):
    """ Display the last image drawn """
    try:
        plt.show(block=block)
    except ValueError:
        plt.show()
Display the last image drawn
python
train
asascience-open/paegan-transport
paegan/transport/models/behaviors/lifestage.py
https://github.com/asascience-open/paegan-transport/blob/99a7f4ea24f0f42d9b34d1fb0e87ab2c49315bd3/paegan/transport/models/behaviors/lifestage.py#L141-L168
def move(self, particle, u, v, w, modelTimestep, **kwargs):
    """ I'm dead, so no behaviors should act on me """
    # Kill the particle if it isn't settled and isn't already dead.
    if not particle.settled and not particle.dead:
        particle.die()

    # Still save the temperature and salinity for the model output
    temp = kwargs.get('temperature', None)
    if temp is not None and math.isnan(temp):
        temp = None
    particle.temp = temp

    salt = kwargs.get('salinity', None)
    if salt is not None and math.isnan(salt):
        salt = None
    particle.salt = salt

    u = 0
    v = 0
    w = 0

    # Do the calculation to determine the new location
    result = AsaTransport.distance_from_location_using_u_v_w(
        u=u, v=v, w=w, timestep=modelTimestep, location=particle.location)
    result['u'] = u
    result['v'] = v
    result['w'] = w
    return result
I'm dead, so no behaviors should act on me
python
train
dtmilano/AndroidViewClient
src/com/dtmilano/android/adb/adbclient.py
https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/adb/adbclient.py#L1037-L1064
def percentSame(image1, image2):
    '''
    Returns the percent of pixels that are equal

    @author: catshoes
    '''
    # Note: despite the name, this returns a fraction in [0, 1], not 0-100.

    # If the images differ in size, return 0% same.
    size_x1, size_y1 = image1.size
    size_x2, size_y2 = image2.size
    if (size_x1 != size_x2 or size_y1 != size_y2):
        return 0

    # Images are the same size
    # Return the percent of pixels that are equal.
    numPixelsSame = 0
    numPixelsTotal = size_x1 * size_y1
    image1Pixels = image1.load()
    image2Pixels = image2.load()

    # Loop over all pixels, comparing pixel in image1 to image2
    for x in range(size_x1):
        for y in range(size_y1):
            if image1Pixels[x, y] == image2Pixels[x, y]:
                numPixelsSame += 1

    return numPixelsSame / float(numPixelsTotal)
Returns the percent of pixels that are equal @author: catshoes
python
train
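With the percentSame function above in scope, a self-contained check using Pillow, where two 10x10 images differ in exactly one pixel:

from PIL import Image

img1 = Image.new('RGB', (10, 10), 'white')
img2 = img1.copy()
img2.putpixel((0, 0), (0, 0, 0))

print(percentSame(img1, img1))  # 1.0
print(percentSame(img1, img2))  # 0.99 (99 of 100 pixels match)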
GluuFederation/oxd-python
oxdpython/client.py
https://github.com/GluuFederation/oxd-python/blob/a0448cda03b4384bc50a8c20bd65eacd983bceb8/oxdpython/client.py#L513-L536
def uma_rp_get_claims_gathering_url(self, ticket):
    """UMA RP function to get the claims gathering URL.

    Parameters:
        * **ticket (str):** ticket to pass to the auth server. for 90% of the
            cases, this will be obtained from 'need_info' error of get_rpt

    Returns:
        **string** specifying the claims gathering url
    """
    params = {
        'oxd_id': self.oxd_id,
        'claims_redirect_uri': self.config.get('client', 'claims_redirect_uri'),
        'ticket': ticket
    }
    logger.debug("Sending command `uma_rp_get_claims_gathering_url` with "
                 "params %s", params)
    response = self.msgr.request("uma_rp_get_claims_gathering_url", **params)
    logger.debug("Received response: %s", response)

    if response['status'] == 'error':
        raise OxdServerError(response['data'])

    return response['data']['url']
UMA RP function to get the claims gathering URL. Parameters: * **ticket (str):** ticket to pass to the auth server. for 90% of the cases, this will be obtained from 'need_info' error of get_rpt Returns: **string** specifying the claims gathering url
python
train
zhmcclient/python-zhmcclient
zhmcclient/_metrics.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_metrics.py#L200-L232
def create(self, properties):
    """
    Create a :term:`Metrics Context` resource in the HMC this client is
    connected to.

    Parameters:

      properties (dict): Initial property values.
        Allowable properties are defined in section 'Request body contents'
        in section 'Create Metrics Context' in the :term:`HMC API` book.

    Returns:

      :class:`~zhmcclient.MetricsContext`:
        The resource object for the new :term:`Metrics Context` resource.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    result = self.session.post('/api/services/metrics/context',
                               body=properties)
    mc_properties = properties.copy()
    mc_properties.update(result)
    new_metrics_context = MetricsContext(self,
                                         result['metrics-context-uri'],
                                         None,
                                         mc_properties)
    self._metrics_contexts.append(new_metrics_context)
    return new_metrics_context
Create a :term:`Metrics Context` resource in the HMC this client is connected to. Parameters: properties (dict): Initial property values. Allowable properties are defined in section 'Request body contents' in section 'Create Metrics Context' in the :term:`HMC API` book. Returns: :class:`~zhmcclient.MetricsContext`: The resource object for the new :term:`Metrics Context` resource. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
python
train
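A hedged usage sketch; the HMC address and credentials are placeholders, and the two request-body properties shown are assumptions based on the HMC API book the docstring points to, not values confirmed by this record:

import zhmcclient

session = zhmcclient.Session('hmc.example.com', 'user', 'password')
client = zhmcclient.Client(session)

mc = client.metrics_contexts.create({
    'anticipated-frequency-seconds': 15,             # assumed property name
    'metric-groups': ['dpm-system-usage-overview'],  # assumed group name
})
print(mc.uri)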
Contraz/demosys-py
demosys/opengl/vao.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/opengl/vao.py#L246-L294
def instance(self, program: moderngl.Program) -> moderngl.VertexArray:
    """
    Obtain the ``moderngl.VertexArray`` instance for the program.
    The instance is only created once and cached internally.

    Returns: ``moderngl.VertexArray`` instance
    """
    vao = self.vaos.get(program.glo)
    if vao:
        return vao

    program_attributes = [name for name, attr in program._members.items()
                          if isinstance(attr, moderngl.Attribute)]

    # Make sure all attributes are covered
    for attrib_name in program_attributes:
        # Ignore built in attributes for now
        if attrib_name.startswith('gl_'):
            continue

        # Do we have a buffer mapping to this attribute?
        if not sum(buffer.has_attribute(attrib_name) for buffer in self.buffers):
            raise VAOError("VAO {} doesn't have attribute {} for program {}".format(
                self.name, attrib_name, program.name))

    vao_content = []

    # Pick out the attributes we can actually map
    for buffer in self.buffers:
        content = buffer.content(program_attributes)
        if content:
            vao_content.append(content)

    # Any attribute left is not accounted for
    if program_attributes:
        for attrib_name in program_attributes:
            if attrib_name.startswith('gl_'):
                continue

            raise VAOError("Did not find a buffer mapping for {}".format(
                [n for n in program_attributes]))

    # Create the vao
    if self._index_buffer:
        vao = context.ctx().vertex_array(program, vao_content,
                                         self._index_buffer,
                                         self._index_element_size)
    else:
        vao = context.ctx().vertex_array(program, vao_content)
    self.vaos[program.glo] = vao

    return vao
Obtain the ``moderngl.VertexArray`` instance for the program. The instance is only created once and cached internally. Returns: ``moderngl.VertexArray`` instance
python
valid
hobson/pug-dj
pug/dj/crawlnmine/management/__init__.py
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawlnmine/management/__init__.py#L166-L187
def fetch_command(self, subcommand):
    """
    Tries to fetch the given subcommand, printing a message with the
    appropriate command called from the command line (usually
    "jira.py") if it can't be found.
    """
    # Get commands outside of try block to prevent swallowing exceptions
    commands = get_commands()
    try:
        app_name = commands[subcommand]
    except KeyError:
        # This might trigger ImproperlyConfigured (masked in get_commands)
        settings.INSTALLED_APPS
        sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n"
                         % (subcommand, self.prog_name))
        sys.exit(1)
    if isinstance(app_name, BaseCommand):
        # If the command is already loaded, use it directly.
        klass = app_name
    else:
        klass = load_command_class(app_name, subcommand)
    return klass
Tries to fetch the given subcommand, printing a message with the appropriate command called from the command line (usually "jira.py") if it can't be found.
python
train
jjgomera/iapws
iapws/iapws97.py
https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/iapws97.py#L706-L800
def _Region1(T, P):
    """Basic equation for region 1

    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [MPa]

    Returns
    -------
    prop : dict
        Dict with calculated properties. The available properties are:

            * v: Specific volume, [m³/kg]
            * h: Specific enthalpy, [kJ/kg]
            * s: Specific entropy, [kJ/kgK]
            * cp: Specific isobaric heat capacity, [kJ/kgK]
            * cv: Specific isocoric heat capacity, [kJ/kgK]
            * w: Speed of sound, [m/s]
            * alfav: Cubic expansion coefficient, [1/K]
            * kt: Isothermal compressibility, [1/MPa]

    References
    ----------
    IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
    Thermodynamic Properties of Water and Steam August 2007,
    http://www.iapws.org/relguide/IF97-Rev.html, Eq 7

    Examples
    --------
    >>> _Region1(300,3)["v"]
    0.00100215168
    >>> _Region1(300,3)["h"]
    115.331273
    >>> _Region1(300,3)["h"]-3000*_Region1(300,3)["v"]
    112.324818
    >>> _Region1(300,80)["s"]
    0.368563852
    >>> _Region1(300,80)["cp"]
    4.01008987
    >>> _Region1(300,80)["cv"]
    3.91736606
    >>> _Region1(500,3)["w"]
    1240.71337
    >>> _Region1(500,3)["alfav"]
    0.00164118128
    >>> _Region1(500,3)["kt"]
    0.00112892188
    """
    if P < 0:
        P = Pmin

    I = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3,
         4, 4, 4, 5, 8, 8, 21, 23, 29, 30, 31, 32]
    J = [-2, -1, 0, 1, 2, 3, 4, 5, -9, -7, -1, 0, 1, 3, -3, 0, 1, 3, 17, -4,
         0, 6, -5, -2, 10, -8, -11, -6, -29, -31, -38, -39, -40, -41]
    n = [0.14632971213167, -0.84548187169114, -0.37563603672040e1,
         0.33855169168385e1, -0.95791963387872, 0.15772038513228,
         -0.16616417199501e-1, 0.81214629983568e-3, 0.28319080123804e-3,
         -0.60706301565874e-3, -0.18990068218419e-1, -0.32529748770505e-1,
         -0.21841717175414e-1, -0.52838357969930e-4, -0.47184321073267e-3,
         -0.30001780793026e-3, 0.47661393906987e-4, -0.44141845330846e-5,
         -0.72694996297594e-15, -0.31679644845054e-4, -0.28270797985312e-5,
         -0.85205128120103e-9, -0.22425281908000e-5, -0.65171222895601e-6,
         -0.14341729937924e-12, -0.40516996860117e-6, -0.12734301741641e-8,
         -0.17424871230634e-9, -0.68762131295531e-18, 0.14478307828521e-19,
         0.26335781662795e-22, -0.11947622640071e-22, 0.18228094581404e-23,
         -0.93537087292458e-25]

    Tr = 1386/T
    Pr = P/16.53
    g = gp = gpp = gt = gtt = gpt = 0
    for i, j, ni in zip(I, J, n):
        g += ni * (7.1-Pr)**i * (Tr-1.222)**j
        gp -= ni*i * (7.1-Pr)**(i-1) * (Tr-1.222)**j
        gpp += ni*i*(i-1) * (7.1-Pr)**(i-2) * (Tr-1.222)**j
        gt += ni*j * (7.1-Pr)**i * (Tr-1.222)**(j-1)
        gtt += ni*j*(j-1) * (7.1-Pr)**i * (Tr-1.222)**(j-2)
        gpt -= ni*i*j * (7.1-Pr)**(i-1) * (Tr-1.222)**(j-1)

    propiedades = {}
    propiedades["T"] = T
    propiedades["P"] = P
    propiedades["v"] = Pr*gp*R*T/P/1000
    propiedades["h"] = Tr*gt*R*T
    propiedades["s"] = R*(Tr*gt-g)
    propiedades["cp"] = -R*Tr**2*gtt
    propiedades["cv"] = R*(-Tr**2*gtt+(gp-Tr*gpt)**2/gpp)
    propiedades["w"] = sqrt(R*T*1000*gp**2/((gp-Tr*gpt)**2/(Tr**2*gtt)-gpp))
    propiedades["alfav"] = (1-Tr*gpt/gp)/T
    propiedades["kt"] = -Pr*gpp/gp/P
    propiedades["region"] = 1
    propiedades["x"] = 0
    return propiedades
Basic equation for region 1 Parameters ---------- T : float Temperature, [K] P : float Pressure, [MPa] Returns ------- prop : dict Dict with calculated properties. The available properties are: * v: Specific volume, [m³/kg] * h: Specific enthalpy, [kJ/kg] * s: Specific entropy, [kJ/kgK] * cp: Specific isobaric heat capacity, [kJ/kgK] * cv: Specific isocoric heat capacity, [kJ/kgK] * w: Speed of sound, [m/s] * alfav: Cubic expansion coefficient, [1/K] * kt: Isothermal compressibility, [1/MPa] References ---------- IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam August 2007, http://www.iapws.org/relguide/IF97-Rev.html, Eq 7 Examples -------- >>> _Region1(300,3)["v"] 0.00100215168 >>> _Region1(300,3)["h"] 115.331273 >>> _Region1(300,3)["h"]-3000*_Region1(300,3)["v"] 112.324818 >>> _Region1(300,80)["s"] 0.368563852 >>> _Region1(300,80)["cp"] 4.01008987 >>> _Region1(300,80)["cv"] 3.91736606 >>> _Region1(500,3)["w"] 1240.71337 >>> _Region1(500,3)["alfav"] 0.00164118128 >>> _Region1(500,3)["kt"] 0.00112892188
python
train
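The doctests above already exercise _Region1 directly; for the package's public entry point, a short sketch using the IAPWS97 class (T in K, P in MPa), matching the first doctest's state point:

from iapws import IAPWS97

water = IAPWS97(T=300, P=3)  # liquid water in region 1
print(water.v, water.h, water.s)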
Crunch-io/crunch-cube
src/cr/cube/crunch_cube.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L1344-L1350
def _update_result(self, result, insertions, dimension_index):
    """Insert subtotals into resulting ndarray."""
    for j, (ind_insertion, value) in enumerate(insertions):
        result = np.insert(
            result, ind_insertion + j + 1, value, axis=dimension_index
        )
    return result
Insert subtotals into resulting ndarray.
python
train
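The ``+ j + 1`` offset compensates for earlier insertions shifting later positions; a self-contained numpy sketch with made-up subtotal values:

import numpy as np

result = np.array([10, 20, 30, 40])
insertions = [(0, 99), (2, 77)]  # (anchor index, subtotal value)

for j, (ind, value) in enumerate(insertions):
    # Every earlier insert shifts subsequent positions right by one,
    # hence the extra ``+ j``.
    result = np.insert(result, ind + j + 1, value, axis=0)

print(result)  # [10 99 20 30 77 40]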
maljovec/topopy
topopy/ContourTree.py
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L350-L472
def _process_tree(self, thisTree, thatTree):
    """ A function that will process either a split or join tree
        with reference to the other tree and store it as part of
        this CT instance.
        @ In, thisTree, a networkx.Graph instance representing
        a merge tree for which we will process all of its leaf
        nodes into this CT object
        @ In, thatTree, a networkx.Graph instance representing
        the opposing merge tree which will need to be updated as
        nodes from thisTree are processed
        @ Out, None
    """
    if self.debug:
        sys.stdout.write("Processing Tree: ")
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # closest modern replacement.
        start = time.perf_counter()

    # Get all of the leaf nodes that are not branches in the other
    # tree
    if len(thisTree.nodes()) > 1:
        leaves = set([
            v for v in thisTree.nodes()
            if thisTree.in_degree(v) == 0 and thatTree.in_degree(v) < 2
        ])
    else:
        leaves = set()

    while len(leaves) > 0:
        v = leaves.pop()

        # if self.debug:
        #     sys.stdout.write('\tProcessing {} -> {}\n'
        #                      .format(v, thisTree.edges(v)[0][1]))

        # Take the leaf and edge out of the input tree and place it
        # on the CT
        edges = list(thisTree.out_edges(v))
        if len(edges) != 1:
            warnings.warn("The node {} should have a single emanating "
                          "edge.\n".format(v))
        e1 = edges[0][0]
        e2 = edges[0][1]

        # This may be a bit beside the point, but if we want all of
        # our edges pointing 'up,' we can verify that the edges we
        # add have the lower vertex pointing to the upper vertex.
        # This is useful only for nicely plotting with some graph
        # tools (graphviz/networkx), and I guess for consistency
        # sake.
        if self.Y[e1] < self.Y[e2]:
            self.edges.append((e1, e2))
        else:
            self.edges.append((e2, e1))

        # Removing the node will remove its constituent edges from
        # thisTree
        thisTree.remove_node(v)

        # This is the root node of the other tree
        if thatTree.out_degree(v) == 0:
            thatTree.remove_node(v)
            # if self.debug:
            #     sys.stdout.write('\t\tRemoving root {} from other tree\n'
            #                      .format(v))
        # This is a "regular" node in the other tree, suppress it
        # there, but be sure to glue the upper and lower portions
        # together
        else:
            # The other ends of the node being removed are added to
            # "that" tree
            if len(thatTree.in_edges(v)) > 0:
                startNode = list(thatTree.in_edges(v))[0][0]
            else:
                # This means we are at the root of the other tree,
                # we can safely remove this node without connecting
                # its predecessor with its descendant
                startNode = None

            if len(thatTree.out_edges(v)) > 0:
                endNode = list(thatTree.out_edges(v))[0][1]
            else:
                # This means we are at a leaf of the other tree,
                # we can safely remove this node without connecting
                # its predecessor with its descendant
                endNode = None

            if startNode is not None and endNode is not None:
                thatTree.add_edge(startNode, endNode)

            thatTree.remove_node(v)
            # if self.debug:
            #     sys.stdout.write('\t\tSuppressing {} in other tree and '
            #                      'gluing {} to {}\n'
            #                      .format(v, startNode, endNode))

        if len(thisTree.nodes()) > 1:
            leaves = set([
                v for v in thisTree.nodes()
                if thisTree.in_degree(v) == 0 and thatTree.in_degree(v) < 2
            ])
        else:
            leaves = set()

        # if self.debug:
        #     myMessage = '\t\tValid leaves: '
        #     sep = ''
        #     for leaf in leaves:
        #         myMessage += sep + str(leaf)
        #         sep = ','
        #     sys.stdout.write(myMessage+'\n')

    if self.debug:
        end = time.perf_counter()
        sys.stdout.write("%f s\n" % (end - start))
A function that will process either a split or join tree with reference to the other tree and store it as part of this CT instance. @ In, thisTree, a networkx.Graph instance representing a merge tree for which we will process all of its leaf nodes into this CT object @ In, thatTree, a networkx.Graph instance representing the opposing merge tree which will need to be updated as nodes from thisTree are processed @ Out, None
python
train
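The leaf test used above (in-degree 0 in one tree, in-degree below 2 in the other) can be exercised on a toy pair of networkx digraphs; these are illustrative graphs, not real merge trees from topopy:

import networkx as nx

join = nx.DiGraph([(1, 3), (2, 3), (3, 4)])   # leaves 1 and 2 point to the root
split = nx.DiGraph([(4, 3), (3, 1), (3, 2)])  # the mirrored orientation

leaves = [v for v in join.nodes()
          if join.in_degree(v) == 0 and split.in_degree(v) < 2]
print(leaves)  # [1, 2]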
saltstack/salt
salt/modules/napalm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_network.py#L2238-L2262
def cancel_commit(jid):
    '''
    .. versionadded:: 2019.2.0

    Cancel a commit scheduled to be executed via the ``commit_in`` and
    ``commit_at`` arguments from the
    :py:func:`net.load_template <salt.modules.napalm_network.load_template>` or
    :py:func:`net.load_config <salt.modules.napalm_network.load_config>`
    execution functions. The commit ID is displayed when the commit is
    scheduled via the functions named above.

    CLI Example:

    .. code-block:: bash

        salt '*' net.cancel_commit 20180726083540640360
    '''
    job_name = '__napalm_commit_{}'.format(jid)
    removed = __salt__['schedule.delete'](job_name)
    if removed['result']:
        saved = __salt__['schedule.save']()
        removed['comment'] = 'Commit #{jid} cancelled.'.format(jid=jid)
    else:
        removed['comment'] = 'Unable to find commit #{jid}.'.format(jid=jid)
    return removed
.. versionadded:: 2019.2.0 Cancel a commit scheduled to be executed via the ``commit_in`` and ``commit_at`` arguments from the :py:func:`net.load_template <salt.modules.napalm_network.load_template>` or :py:func:`net.load_config <salt.modules.napalm_network.load_config>` execution functions. The commit ID is displayed when the commit is scheduled via the functions named above. CLI Example: .. code-block:: bash salt '*' net.cancel_commit 20180726083540640360
python
train
julienr/meshcut
meshcut.py
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/meshcut.py#L306-L347
def merge_close_vertices(verts, faces, close_epsilon=1e-5):
    """
    Will merge vertices that are closer than close_epsilon.

    Warning, this has a O(n^2) memory usage because we compute the full
    vert-to-vert distance matrix. If you have a large mesh, might want
    to use some kind of spatial search structure like an octree or some fancy
    hashing scheme

    Returns: new_verts, new_faces
    """
    # Pairwise distance between verts
    if USE_SCIPY:
        D = spdist.cdist(verts, verts)
    else:
        D = np.sqrt(np.abs(pdist_squareformed_numpy(verts)))

    # Compute a mapping from old to new : for each input vert, store the index
    # of the new vert it will be merged into. (Plain ``int``/``bool`` dtypes
    # replace the ``np.int``/``np.bool`` aliases removed from modern numpy.)
    old2new = np.zeros(D.shape[0], dtype=int)
    # A mask indicating if a vertex has already been merged into another
    merged_verts = np.zeros(D.shape[0], dtype=bool)
    new_verts = []
    for i in range(D.shape[0]):
        if merged_verts[i]:
            continue
        else:
            # The vertices that will be merged into this one
            merged = np.flatnonzero(D[i, :] < close_epsilon)
            old2new[merged] = len(new_verts)
            new_verts.append(verts[i])
            merged_verts[merged] = True

    new_verts = np.array(new_verts)

    # Recompute face indices to index in new_verts
    new_faces = np.zeros((len(faces), 3), dtype=int)
    for i, f in enumerate(faces):
        new_faces[i] = (old2new[f[0]], old2new[f[1]], old2new[f[2]])

    # again, plot with utils.trimesh3d(new_verts, new_faces)
    return new_verts, new_faces
[ "def", "merge_close_vertices", "(", "verts", ",", "faces", ",", "close_epsilon", "=", "1e-5", ")", ":", "# Pairwise distance between verts", "if", "USE_SCIPY", ":", "D", "=", "spdist", ".", "cdist", "(", "verts", ",", "verts", ")", "else", ":", "D", "=", "np", ".", "sqrt", "(", "np", ".", "abs", "(", "pdist_squareformed_numpy", "(", "verts", ")", ")", ")", "# Compute a mapping from old to new : for each input vert, store the index", "# of the new vert it will be merged into", "old2new", "=", "np", ".", "zeros", "(", "D", ".", "shape", "[", "0", "]", ",", "dtype", "=", "np", ".", "int", ")", "# A mask indicating if a vertex has already been merged into another", "merged_verts", "=", "np", ".", "zeros", "(", "D", ".", "shape", "[", "0", "]", ",", "dtype", "=", "np", ".", "bool", ")", "new_verts", "=", "[", "]", "for", "i", "in", "range", "(", "D", ".", "shape", "[", "0", "]", ")", ":", "if", "merged_verts", "[", "i", "]", ":", "continue", "else", ":", "# The vertices that will be merged into this one", "merged", "=", "np", ".", "flatnonzero", "(", "D", "[", "i", ",", ":", "]", "<", "close_epsilon", ")", "old2new", "[", "merged", "]", "=", "len", "(", "new_verts", ")", "new_verts", ".", "append", "(", "verts", "[", "i", "]", ")", "merged_verts", "[", "merged", "]", "=", "True", "new_verts", "=", "np", ".", "array", "(", "new_verts", ")", "# Recompute face indices to index in new_verts", "new_faces", "=", "np", ".", "zeros", "(", "(", "len", "(", "faces", ")", ",", "3", ")", ",", "dtype", "=", "np", ".", "int", ")", "for", "i", ",", "f", "in", "enumerate", "(", "faces", ")", ":", "new_faces", "[", "i", "]", "=", "(", "old2new", "[", "f", "[", "0", "]", "]", ",", "old2new", "[", "f", "[", "1", "]", "]", ",", "old2new", "[", "f", "[", "2", "]", "]", ")", "# again, plot with utils.trimesh3d(new_verts, new_faces)", "return", "new_verts", ",", "new_faces" ]
Will merge vertices that are closer than close_epsilon.

Warning, this has a O(n^2) memory usage because we compute the full
vert-to-vert distance matrix. If you have a large mesh, might want
to use some kind of spatial search structure like an octree or some
fancy hashing scheme

Returns: new_verts, new_faces
[ "Will", "merge", "vertices", "that", "are", "closer", "than", "close_epsilon", "." ]
python
train
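A minimal usage sketch for the record above, assuming `meshcut` is importable with `merge_close_vertices` at module level and an older NumPy release (the function relies on the long-removed `np.int`/`np.bool` aliases); the triangle data is invented for illustration:

import numpy as np
import meshcut

# Two triangles sharing an edge, with the shared corners duplicated and
# perturbed by less than the merge tolerance.
verts = np.array([
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
    [0.0, 1.0, 0.0],
    [1.0 + 1e-7, 0.0, 0.0],  # near-duplicate of vertex 1
    [0.0, 1.0 + 1e-7, 0.0],  # near-duplicate of vertex 2
    [1.0, 1.0, 0.0],
])
faces = np.array([[0, 1, 2], [3, 5, 4]])

new_verts, new_faces = meshcut.merge_close_vertices(verts, faces,
                                                    close_epsilon=1e-5)
print(len(new_verts))  # 4 -- the two near-duplicates were merged away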
GNS3/gns3-server
gns3server/compute/iou/iou_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/iou/iou_vm.py#L188-L214
def _check_requirements(self):
    """
    Checks the IOU image.
    """

    if not self._path:
        raise IOUError("IOU image is not configured")
    if not os.path.isfile(self._path) or not os.path.exists(self._path):
        if os.path.islink(self._path):
            raise IOUError("IOU image '{}' linked to '{}' is not accessible".format(self._path, os.path.realpath(self._path)))
        else:
            raise IOUError("IOU image '{}' is not accessible".format(self._path))

    try:
        with open(self._path, "rb") as f:
            # read the first 7 bytes of the file.
            elf_header_start = f.read(7)
    except OSError as e:
        raise IOUError("Cannot read ELF header for IOU image '{}': {}".format(self._path, e))

    # IOU images must start with the ELF magic number, be 32-bit or 64-bit, little endian
    # and have an ELF version of 1 normal IOS image are big endian!
    if elf_header_start != b'\x7fELF\x01\x01\x01' and elf_header_start != b'\x7fELF\x02\x01\x01':
        raise IOUError("'{}' is not a valid IOU image".format(self._path))

    if not os.access(self._path, os.X_OK):
        raise IOUError("IOU image '{}' is not executable".format(self._path))
[ "def", "_check_requirements", "(", "self", ")", ":", "if", "not", "self", ".", "_path", ":", "raise", "IOUError", "(", "\"IOU image is not configured\"", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "self", ".", "_path", ")", "or", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_path", ")", ":", "if", "os", ".", "path", ".", "islink", "(", "self", ".", "_path", ")", ":", "raise", "IOUError", "(", "\"IOU image '{}' linked to '{}' is not accessible\"", ".", "format", "(", "self", ".", "_path", ",", "os", ".", "path", ".", "realpath", "(", "self", ".", "_path", ")", ")", ")", "else", ":", "raise", "IOUError", "(", "\"IOU image '{}' is not accessible\"", ".", "format", "(", "self", ".", "_path", ")", ")", "try", ":", "with", "open", "(", "self", ".", "_path", ",", "\"rb\"", ")", "as", "f", ":", "# read the first 7 bytes of the file.", "elf_header_start", "=", "f", ".", "read", "(", "7", ")", "except", "OSError", "as", "e", ":", "raise", "IOUError", "(", "\"Cannot read ELF header for IOU image '{}': {}\"", ".", "format", "(", "self", ".", "_path", ",", "e", ")", ")", "# IOU images must start with the ELF magic number, be 32-bit or 64-bit, little endian", "# and have an ELF version of 1 normal IOS image are big endian!", "if", "elf_header_start", "!=", "b'\\x7fELF\\x01\\x01\\x01'", "and", "elf_header_start", "!=", "b'\\x7fELF\\x02\\x01\\x01'", ":", "raise", "IOUError", "(", "\"'{}' is not a valid IOU image\"", ".", "format", "(", "self", ".", "_path", ")", ")", "if", "not", "os", ".", "access", "(", "self", ".", "_path", ",", "os", ".", "X_OK", ")", ":", "raise", "IOUError", "(", "\"IOU image '{}' is not executable\"", ".", "format", "(", "self", ".", "_path", ")", ")" ]
Checks the IOU image.
[ "Checks", "the", "IOU", "image", "." ]
python
train
rueckstiess/mtools
mtools/util/profile_collection.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/profile_collection.py#L117-L131
def _calculate_bounds(self):
    """Calculate beginning and end of log events."""
    # get start datetime
    first = self.coll_handle.find_one(None, sort=[("ts", ASCENDING)])
    last = self.coll_handle.find_one(None, sort=[("ts", DESCENDING)])

    self._start = first['ts']
    if self._start.tzinfo is None:
        self._start = self._start.replace(tzinfo=tzutc())

    self._end = last['ts']
    if self._end.tzinfo is None:
        self._end = self._end.replace(tzinfo=tzutc())

    return True
[ "def", "_calculate_bounds", "(", "self", ")", ":", "# get start datetime", "first", "=", "self", ".", "coll_handle", ".", "find_one", "(", "None", ",", "sort", "=", "[", "(", "\"ts\"", ",", "ASCENDING", ")", "]", ")", "last", "=", "self", ".", "coll_handle", ".", "find_one", "(", "None", ",", "sort", "=", "[", "(", "\"ts\"", ",", "DESCENDING", ")", "]", ")", "self", ".", "_start", "=", "first", "[", "'ts'", "]", "if", "self", ".", "_start", ".", "tzinfo", "is", "None", ":", "self", ".", "_start", "=", "self", ".", "_start", ".", "replace", "(", "tzinfo", "=", "tzutc", "(", ")", ")", "self", ".", "_end", "=", "last", "[", "'ts'", "]", "if", "self", ".", "_end", ".", "tzinfo", "is", "None", ":", "self", ".", "_end", "=", "self", ".", "_end", ".", "replace", "(", "tzinfo", "=", "tzutc", "(", ")", ")", "return", "True" ]
Calculate beginning and end of log events.
[ "Calculate", "beginning", "and", "end", "of", "log", "events", "." ]
python
train
waleedka/hiddenlayer
hiddenlayer/utils.py
https://github.com/waleedka/hiddenlayer/blob/294f8732b271cbdd6310c55bdf5ce855cbf61c75/hiddenlayer/utils.py#L17-L35
def to_data(value):
    """Standardize data types. Converts PyTorch tensors to Numpy arrays,
    and Numpy scalars to Python scalars."""
    # TODO: Use get_framework() for better detection.
    if value.__class__.__module__.startswith("torch"):
        import torch
        if isinstance(value, torch.nn.parameter.Parameter):
            value = value.data
        if isinstance(value, torch.Tensor):
            if value.requires_grad:
                value = value.detach()
            value = value.cpu().numpy().copy()
            # If 0-dim array, convert to scalar
            if not value.shape:
                value = value.item()
    # Convert Numpy scalar types to Python types
    if value.__class__.__module__ == "numpy" and value.__class__.__name__ != "ndarray":
        value = value.item()
    return value
[ "def", "to_data", "(", "value", ")", ":", "# TODO: Use get_framework() for better detection.", "if", "value", ".", "__class__", ".", "__module__", ".", "startswith", "(", "\"torch\"", ")", ":", "import", "torch", "if", "isinstance", "(", "value", ",", "torch", ".", "nn", ".", "parameter", ".", "Parameter", ")", ":", "value", "=", "value", ".", "data", "if", "isinstance", "(", "value", ",", "torch", ".", "Tensor", ")", ":", "if", "value", ".", "requires_grad", ":", "value", "=", "value", ".", "detach", "(", ")", "value", "=", "value", ".", "cpu", "(", ")", ".", "numpy", "(", ")", ".", "copy", "(", ")", "# If 0-dim array, convert to scalar", "if", "not", "value", ".", "shape", ":", "value", "=", "value", ".", "item", "(", ")", "# Convert Numpy scalar types to Python types", "if", "value", ".", "__class__", ".", "__module__", "==", "\"numpy\"", "and", "value", ".", "__class__", ".", "__name__", "!=", "\"ndarray\"", ":", "value", "=", "value", ".", "item", "(", ")", "return", "value" ]
Standardize data types. Converts PyTorch tensors to Numpy arrays, and Numpy scalars to Python scalars.
[ "Standardize", "data", "types", ".", "Converts", "PyTorch", "tensors", "to", "Numpy", "arrays", "and", "Numpy", "scalars", "to", "Python", "scalars", "." ]
python
train
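A minimal usage sketch for `to_data` above, assuming it is importable as `hiddenlayer.utils.to_data` and that `torch` is installed:

import numpy as np
import torch
from hiddenlayer.utils import to_data

t = torch.ones(2, 2, requires_grad=True)
arr = to_data(t)             # detached, copied into a NumPy array
print(type(arr))             # <class 'numpy.ndarray'>

scalar = to_data(np.float32(3.5))
print(type(scalar))          # <class 'float'> -- NumPy scalar unwrapped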
ga4gh/ga4gh-server
ga4gh/server/datamodel/peers.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/peers.py#L17-L36
def isUrl(urlString):
    """
    Attempts to return whether a given URL string is valid by checking
    for the presence of the URL scheme and netloc using the urlparse
    module, and then using a regex.

    From http://stackoverflow.com/questions/7160737/
    """
    parsed = urlparse.urlparse(urlString)
    urlparseValid = parsed.netloc != '' and parsed.scheme != ''
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
        r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return regex.match(urlString) and urlparseValid
[ "def", "isUrl", "(", "urlString", ")", ":", "parsed", "=", "urlparse", ".", "urlparse", "(", "urlString", ")", "urlparseValid", "=", "parsed", ".", "netloc", "!=", "''", "and", "parsed", ".", "scheme", "!=", "''", "regex", "=", "re", ".", "compile", "(", "r'^(?:http|ftp)s?://'", "# http:// or https://", "r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)'", "r'+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'", "# domain...", "r'localhost|'", "# localhost...", "r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})'", "# ...or ip", "r'(?::\\d+)?'", "# optional port", "r'(?:/?|[/?]\\S+)$'", ",", "re", ".", "IGNORECASE", ")", "return", "regex", ".", "match", "(", "urlString", ")", "and", "urlparseValid" ]
Attempts to return whether a given URL string is valid by checking
for the presence of the URL scheme and netloc using the urlparse
module, and then using a regex.

From http://stackoverflow.com/questions/7160737/
[ "Attempts", "to", "return", "whether", "a", "given", "URL", "string", "is", "valid", "by", "checking", "for", "the", "presence", "of", "the", "URL", "scheme", "and", "netloc", "using", "the", "urlparse", "module", "and", "then", "using", "a", "regex", "." ]
python
train
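A minimal sketch of the behavior, assuming the `isUrl` function above has been pasted into a module that provides its two dependencies (`re`, plus a `urlparse` name; the record targets Python 2, so on Python 3 the name is aliased from `urllib.parse` as below):

import re
import urllib.parse as urlparse

print(bool(isUrl("http://example.com/path")))    # True
print(bool(isUrl("ftp://192.168.0.1:21/file")))  # True
print(bool(isUrl("not a url")))                  # False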
pymc-devs/pymc
pymc/PyMCObjects.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/PyMCObjects.py#L63-L86
def extend_parents(parents):
    """
    extend_parents(parents)

    Returns a set containing
    nearest conditionally stochastic (Stochastic, not Deterministic) ancestors.
    """
    new_parents = set()

    for parent in parents:
        new_parents.add(parent)

        if isinstance(parent, DeterministicBase):
            new_parents.remove(parent)
            new_parents |= parent.extended_parents

        elif isinstance(parent, ContainerBase):
            for contained_parent in parent.stochastics:
                new_parents.add(contained_parent)
            for contained_parent in parent.deterministics:
                new_parents |= contained_parent.extended_parents

    return new_parents
[ "def", "extend_parents", "(", "parents", ")", ":", "new_parents", "=", "set", "(", ")", "for", "parent", "in", "parents", ":", "new_parents", ".", "add", "(", "parent", ")", "if", "isinstance", "(", "parent", ",", "DeterministicBase", ")", ":", "new_parents", ".", "remove", "(", "parent", ")", "new_parents", "|=", "parent", ".", "extended_parents", "elif", "isinstance", "(", "parent", ",", "ContainerBase", ")", ":", "for", "contained_parent", "in", "parent", ".", "stochastics", ":", "new_parents", ".", "add", "(", "contained_parent", ")", "for", "contained_parent", "in", "parent", ".", "deterministics", ":", "new_parents", "|=", "contained_parent", ".", "extended_parents", "return", "new_parents" ]
extend_parents(parents)

Returns a set containing
nearest conditionally stochastic (Stochastic, not Deterministic) ancestors.
[ "extend_parents", "(", "parents", ")" ]
python
train
Xion/taipan
taipan/collections/dicts.py
https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/collections/dicts.py#L511-L524
def invert(dict_):
    """Return an inverted dictionary, where former values are keys
    and former keys are values.

    .. warning::

        If more than one key maps to any given value in input dictionary,
        it is undefined which one will be chosen for the result.

    :param dict_: Dictionary to swap keys and values in
    :return: Inverted dictionary
    """
    ensure_mapping(dict_)
    return dict_.__class__(izip(itervalues(dict_), iterkeys(dict_)))
[ "def", "invert", "(", "dict_", ")", ":", "ensure_mapping", "(", "dict_", ")", "return", "dict_", ".", "__class__", "(", "izip", "(", "itervalues", "(", "dict_", ")", ",", "iterkeys", "(", "dict_", ")", ")", ")" ]
Return an inverted dictionary, where former values are keys and former keys are values. .. warning:: If more than one key maps to any given value in input dictionary, it is undefined which one will be chosen for the result. :param dict_: Dictionary to swap keys and values in :return: Inverted dictionary
[ "Return", "an", "inverted", "dictionary", "where", "former", "values", "are", "keys", "and", "former", "keys", "are", "values", "." ]
python
train
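A minimal usage sketch, assuming `taipan` is installed so that `invert` is importable from `taipan.collections.dicts`:

from taipan.collections.dicts import invert

d = {'a': 1, 'b': 2}
print(invert(d))                 # {1: 'a', 2: 'b'}

# With duplicate values the winner is undefined, per the warning above:
print(invert({'x': 1, 'y': 1}))  # {1: 'x'} or {1: 'y'}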
tshlabs/tunic
tunic/core.py
https://github.com/tshlabs/tunic/blob/621f4398d59a9c9eb8dd602beadff11616048aa0/tunic/core.py#L329-L356
def get_previous_release(self):
    """Get the release ID of the deployment immediately before the
    "current" deployment, ``None`` if no previous release could be
    determined.

    This method performs two network operations.

    :return: The release ID of the release previous to the "current"
        release.
    :rtype: str
    """
    releases = self.get_releases()
    if not releases:
        return None

    current = self.get_current_release()
    if not current:
        return None

    try:
        current_idx = releases.index(current)
    except ValueError:
        return None

    try:
        return releases[current_idx + 1]
    except IndexError:
        return None
[ "def", "get_previous_release", "(", "self", ")", ":", "releases", "=", "self", ".", "get_releases", "(", ")", "if", "not", "releases", ":", "return", "None", "current", "=", "self", ".", "get_current_release", "(", ")", "if", "not", "current", ":", "return", "None", "try", ":", "current_idx", "=", "releases", ".", "index", "(", "current", ")", "except", "ValueError", ":", "return", "None", "try", ":", "return", "releases", "[", "current_idx", "+", "1", "]", "except", "IndexError", ":", "return", "None" ]
Get the release ID of the deployment immediately before the
"current" deployment, ``None`` if no previous release could be
determined.

This method performs two network operations.

:return: The release ID of the release previous to the "current"
    release.
:rtype: str
[ "Get", "the", "release", "ID", "of", "the", "deployment", "immediately", "before", "the", "current", "deployment", "None", "if", "no", "previous", "release", "could", "be", "determined", "." ]
python
train
inasafe/inasafe
scripts/create_api_docs.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/scripts/create_api_docs.py#L161-L182
def create_top_level_index(api_docs_path, packages, max_depth=2):
    """Create the top level index page (writing to file)

    :param api_docs_path: Path to the api-docs of inasafe documentation.
    :type api_docs_path: str

    :param packages: List of packages which want to be extracted their api/
    :type packages: list

    :param max_depth: The maximum depth of tree in the api docs.
    :type max_depth: int
    """
    page_text = INDEX_HEADER
    for package in packages:
        # Write top level index file entries for safe
        text = create_top_level_index_entry(
            title='Package %s' % package,
            max_depth=max_depth,
            subtitles=[package])
        page_text += '%s\n' % text
    write_rst_file(api_docs_path, 'index', page_text)
[ "def", "create_top_level_index", "(", "api_docs_path", ",", "packages", ",", "max_depth", "=", "2", ")", ":", "page_text", "=", "INDEX_HEADER", "for", "package", "in", "packages", ":", "# Write top level index file entries for safe", "text", "=", "create_top_level_index_entry", "(", "title", "=", "'Package %s'", "%", "package", ",", "max_depth", "=", "max_depth", ",", "subtitles", "=", "[", "package", "]", ")", "page_text", "+=", "'%s\\n'", "%", "text", "write_rst_file", "(", "api_docs_path", ",", "'index'", ",", "page_text", ")" ]
Create the top level index page (writing to file)

:param api_docs_path: Path to the api-docs of inasafe documentation.
:type api_docs_path: str

:param packages: List of packages which want to be extracted their api/
:type packages: list

:param max_depth: The maximum depth of tree in the api docs.
:type max_depth: int
[ "Create", "the", "top", "level", "index", "page", "(", "writing", "to", "file", ")" ]
python
train
vatlab/SoS
src/sos/step_executor.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/step_executor.py#L209-L221
def expand_depends_files(*args, **kwargs):
    '''handle directive depends'''
    args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
    kwargs = {
        x: (y.resolve() if isinstance(y, dynamic) else y)
        for x, y in kwargs.items()
    }
    return sos_targets(
        *args,
        **kwargs,
        _verify_existence=True,
        _undetermined=False,
        _source=env.sos_dict['step_name'])
[ "def", "expand_depends_files", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "x", ".", "resolve", "(", ")", "if", "isinstance", "(", "x", ",", "dynamic", ")", "else", "x", "for", "x", "in", "args", "]", "kwargs", "=", "{", "x", ":", "(", "y", ".", "resolve", "(", ")", "if", "isinstance", "(", "y", ",", "dynamic", ")", "else", "y", ")", "for", "x", ",", "y", "in", "kwargs", ".", "items", "(", ")", "}", "return", "sos_targets", "(", "*", "args", ",", "*", "*", "kwargs", ",", "_verify_existence", "=", "True", ",", "_undetermined", "=", "False", ",", "_source", "=", "env", ".", "sos_dict", "[", "'step_name'", "]", ")" ]
handle directive depends
[ "handle", "directive", "depends" ]
python
train
ethpm/py-ethpm
ethpm/utils/ipfs.py
https://github.com/ethpm/py-ethpm/blob/81ed58d7c636fe00c6770edeb0401812b1a5e8fc/ethpm/utils/ipfs.py#L44-L54
def is_ipfs_uri(value: str) -> bool:
    """
    Return a bool indicating whether or not the value is a valid IPFS URI.
    """
    parse_result = parse.urlparse(value)
    if parse_result.scheme != "ipfs":
        return False
    if not parse_result.netloc and not parse_result.path:
        return False

    return True
[ "def", "is_ipfs_uri", "(", "value", ":", "str", ")", "->", "bool", ":", "parse_result", "=", "parse", ".", "urlparse", "(", "value", ")", "if", "parse_result", ".", "scheme", "!=", "\"ipfs\"", ":", "return", "False", "if", "not", "parse_result", ".", "netloc", "and", "not", "parse_result", ".", "path", ":", "return", "False", "return", "True" ]
Return a bool indicating whether or not the value is a valid IPFS URI.
[ "Return", "a", "bool", "indicating", "whether", "or", "not", "the", "value", "is", "a", "valid", "IPFS", "URI", "." ]
python
train
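A minimal sketch of the behavior, assuming the `is_ipfs_uri` function above is in scope together with the record's `from urllib import parse` import; the hash below is an illustrative string, not a real content identifier:

from urllib import parse

print(is_ipfs_uri("ipfs:QmTKB75Y73zhNbD3Y73xeXGjYr"))    # True (path form)
print(is_ipfs_uri("ipfs://QmTKB75Y73zhNbD3Y73xeXGjYr"))  # True (netloc form)
print(is_ipfs_uri("http://example.com"))                 # False (wrong scheme)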
materialsproject/pymatgen
pymatgen/electronic_structure/boltztrap.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/boltztrap.py#L2013-L2076
def from_files(path_dir, dos_spin=1):
    """
    get a BoltztrapAnalyzer object from a set of files

    Args:
        path_dir: directory where the boltztrap files are
        dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down

    Returns:
        a BoltztrapAnalyzer object
    """
    run_type, warning, efermi, gap, doping_levels = \
        BoltztrapAnalyzer.parse_outputtrans(path_dir)

    vol = BoltztrapAnalyzer.parse_struct(path_dir)

    intrans = BoltztrapAnalyzer.parse_intrans(path_dir)

    if run_type == "BOLTZ":
        dos, pdos = BoltztrapAnalyzer.parse_transdos(
            path_dir, efermi, dos_spin=dos_spin, trim_dos=False)

        mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, \
            seebeck_doping, cond_doping, kappa_doping, hall_doping, \
            carrier_conc = BoltztrapAnalyzer. \
            parse_cond_and_hall(path_dir, doping_levels)

        return BoltztrapAnalyzer(
            gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
            mu_doping, seebeck_doping, cond_doping, kappa_doping,
            hall_doping, intrans, dos, pdos, carrier_conc, vol, warning)

    elif run_type == "DOS":
        trim = True if intrans["dos_type"] == "HISTO" else False
        dos, pdos = BoltztrapAnalyzer.parse_transdos(
            path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)

        return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos,
                                 warning=warning, vol=vol)

    elif run_type == "BANDS":
        bz_kpoints = np.loadtxt(
            os.path.join(path_dir, "boltztrap_band.dat"))[:, -3:]
        bz_bands = np.loadtxt(
            os.path.join(path_dir, "boltztrap_band.dat"))[:, 1:-6]
        return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints,
                                 warning=warning, vol=vol)

    elif run_type == "FERMI":
        """
        """
        if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')):
            fs_data = read_cube_file(
                os.path.join(path_dir, 'boltztrap_BZ.cube'))
        elif os.path.exists(os.path.join(path_dir, 'fort.30')):
            fs_data = read_cube_file(os.path.join(path_dir, 'fort.30'))
        else:
            raise BoltztrapError("No data file found for fermi surface")
        return BoltztrapAnalyzer(fermi_surface_data=fs_data)

    else:
        raise ValueError("Run type: {} not recognized!".format(run_type))
[ "def", "from_files", "(", "path_dir", ",", "dos_spin", "=", "1", ")", ":", "run_type", ",", "warning", ",", "efermi", ",", "gap", ",", "doping_levels", "=", "BoltztrapAnalyzer", ".", "parse_outputtrans", "(", "path_dir", ")", "vol", "=", "BoltztrapAnalyzer", ".", "parse_struct", "(", "path_dir", ")", "intrans", "=", "BoltztrapAnalyzer", ".", "parse_intrans", "(", "path_dir", ")", "if", "run_type", "==", "\"BOLTZ\"", ":", "dos", ",", "pdos", "=", "BoltztrapAnalyzer", ".", "parse_transdos", "(", "path_dir", ",", "efermi", ",", "dos_spin", "=", "dos_spin", ",", "trim_dos", "=", "False", ")", "mu_steps", ",", "cond", ",", "seebeck", ",", "kappa", ",", "hall", ",", "pn_doping_levels", ",", "mu_doping", ",", "seebeck_doping", ",", "cond_doping", ",", "kappa_doping", ",", "hall_doping", ",", "carrier_conc", "=", "BoltztrapAnalyzer", ".", "parse_cond_and_hall", "(", "path_dir", ",", "doping_levels", ")", "return", "BoltztrapAnalyzer", "(", "gap", ",", "mu_steps", ",", "cond", ",", "seebeck", ",", "kappa", ",", "hall", ",", "pn_doping_levels", ",", "mu_doping", ",", "seebeck_doping", ",", "cond_doping", ",", "kappa_doping", ",", "hall_doping", ",", "intrans", ",", "dos", ",", "pdos", ",", "carrier_conc", ",", "vol", ",", "warning", ")", "elif", "run_type", "==", "\"DOS\"", ":", "trim", "=", "True", "if", "intrans", "[", "\"dos_type\"", "]", "==", "\"HISTO\"", "else", "False", "dos", ",", "pdos", "=", "BoltztrapAnalyzer", ".", "parse_transdos", "(", "path_dir", ",", "efermi", ",", "dos_spin", "=", "dos_spin", ",", "trim_dos", "=", "trim", ")", "return", "BoltztrapAnalyzer", "(", "gap", "=", "gap", ",", "dos", "=", "dos", ",", "dos_partial", "=", "pdos", ",", "warning", "=", "warning", ",", "vol", "=", "vol", ")", "elif", "run_type", "==", "\"BANDS\"", ":", "bz_kpoints", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "path_dir", ",", "\"boltztrap_band.dat\"", ")", ")", "[", ":", ",", "-", "3", ":", "]", "bz_bands", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "path_dir", ",", "\"boltztrap_band.dat\"", ")", ")", "[", ":", ",", "1", ":", "-", "6", "]", "return", "BoltztrapAnalyzer", "(", "bz_bands", "=", "bz_bands", ",", "bz_kpoints", "=", "bz_kpoints", ",", "warning", "=", "warning", ",", "vol", "=", "vol", ")", "elif", "run_type", "==", "\"FERMI\"", ":", "\"\"\"\n \"\"\"", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path_dir", ",", "'boltztrap_BZ.cube'", ")", ")", ":", "fs_data", "=", "read_cube_file", "(", "os", ".", "path", ".", "join", "(", "path_dir", ",", "'boltztrap_BZ.cube'", ")", ")", "elif", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path_dir", ",", "'fort.30'", ")", ")", ":", "fs_data", "=", "read_cube_file", "(", "os", ".", "path", ".", "join", "(", "path_dir", ",", "'fort.30'", ")", ")", "else", ":", "raise", "BoltztrapError", "(", "\"No data file found for fermi surface\"", ")", "return", "BoltztrapAnalyzer", "(", "fermi_surface_data", "=", "fs_data", ")", "else", ":", "raise", "ValueError", "(", "\"Run type: {} not recognized!\"", ".", "format", "(", "run_type", ")", ")" ]
get a BoltztrapAnalyzer object from a set of files

Args:
    path_dir: directory where the boltztrap files are
    dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down

Returns:
    a BoltztrapAnalyzer object
[ "get", "a", "BoltztrapAnalyzer", "object", "from", "a", "set", "of", "files" ]
python
train
moonso/ped_parser
ped_parser/family.py
https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L214-L230
def get_phenotype(self, individual_id):
    """
    Return the phenotype of an individual

    If individual does not exist return 0

    Arguments:
        individual_id (str): Represents the individual id

    Returns:
        int : Integer that represents the phenotype
    """
    phenotype = 0  # This is if unknown phenotype
    if individual_id in self.individuals:
        phenotype = self.individuals[individual_id].phenotype

    return phenotype
[ "def", "get_phenotype", "(", "self", ",", "individual_id", ")", ":", "phenotype", "=", "0", "# This is if unknown phenotype", "if", "individual_id", "in", "self", ".", "individuals", ":", "phenotype", "=", "self", ".", "individuals", "[", "individual_id", "]", ".", "phenotype", "return", "phenotype" ]
Return the phenotype of an individual

If individual does not exist return 0

Arguments:
    individual_id (str): Represents the individual id

Returns:
    int : Integer that represents the phenotype
[ "Return", "the", "phenotype", "of", "an", "individual", "If", "individual", "does", "not", "exist", "return", "0", "Arguments", ":", "individual_id", "(", "str", ")", ":", "Represents", "the", "individual", "id", "Returns", ":", "int", ":", "Integer", "that", "represents", "the", "phenotype" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/thread.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/thread.py#L2005-L2024
def _add_thread(self, aThread):
    """
    Private method to add a thread object to the snapshot.

    @type  aThread: L{Thread}
    @param aThread: Thread object.
    """
##    if not isinstance(aThread, Thread):
##        if hasattr(aThread, '__class__'):
##            typename = aThread.__class__.__name__
##        else:
##            typename = str(type(aThread))
##        msg = "Expected Thread, got %s instead" % typename
##        raise TypeError(msg)
    dwThreadId = aThread.dwThreadId
##    if dwThreadId in self.__threadDict:
##        msg = "Already have a Thread object with ID %d" % dwThreadId
##        raise KeyError(msg)
    aThread.set_process(self)
    self.__threadDict[dwThreadId] = aThread
[ "def", "_add_thread", "(", "self", ",", "aThread", ")", ":", "## if not isinstance(aThread, Thread):", "## if hasattr(aThread, '__class__'):", "## typename = aThread.__class__.__name__", "## else:", "## typename = str(type(aThread))", "## msg = \"Expected Thread, got %s instead\" % typename", "## raise TypeError(msg)", "dwThreadId", "=", "aThread", ".", "dwThreadId", "## if dwThreadId in self.__threadDict:", "## msg = \"Already have a Thread object with ID %d\" % dwThreadId", "## raise KeyError(msg)", "aThread", ".", "set_process", "(", "self", ")", "self", ".", "__threadDict", "[", "dwThreadId", "]", "=", "aThread" ]
Private method to add a thread object to the snapshot.

@type  aThread: L{Thread}
@param aThread: Thread object.
[ "Private", "method", "to", "add", "a", "thread", "object", "to", "the", "snapshot", "." ]
python
train
apache/incubator-heron
heronpy/api/stream.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heronpy/api/stream.py#L84-L97
def is_grouping_sane(cls, gtype):
    """Checks if a given gtype is sane"""
    if gtype == cls.SHUFFLE or gtype == cls.ALL or gtype == cls.LOWEST or gtype == cls.NONE:
        return True
    elif isinstance(gtype, cls.FIELDS):
        return gtype.gtype == topology_pb2.Grouping.Value("FIELDS") and \
               gtype.fields is not None
    elif isinstance(gtype, cls.CUSTOM):
        return gtype.gtype == topology_pb2.Grouping.Value("CUSTOM") and \
               gtype.python_serialized is not None
    else:
        #pylint: disable=fixme
        #TODO: DIRECT are not supported yet
        return False
[ "def", "is_grouping_sane", "(", "cls", ",", "gtype", ")", ":", "if", "gtype", "==", "cls", ".", "SHUFFLE", "or", "gtype", "==", "cls", ".", "ALL", "or", "gtype", "==", "cls", ".", "LOWEST", "or", "gtype", "==", "cls", ".", "NONE", ":", "return", "True", "elif", "isinstance", "(", "gtype", ",", "cls", ".", "FIELDS", ")", ":", "return", "gtype", ".", "gtype", "==", "topology_pb2", ".", "Grouping", ".", "Value", "(", "\"FIELDS\"", ")", "and", "gtype", ".", "fields", "is", "not", "None", "elif", "isinstance", "(", "gtype", ",", "cls", ".", "CUSTOM", ")", ":", "return", "gtype", ".", "gtype", "==", "topology_pb2", ".", "Grouping", ".", "Value", "(", "\"CUSTOM\"", ")", "and", "gtype", ".", "python_serialized", "is", "not", "None", "else", ":", "#pylint: disable=fixme", "#TODO: DIRECT are not supported yet", "return", "False" ]
Checks if a given gtype is sane
[ "Checks", "if", "a", "given", "gtype", "is", "sane" ]
python
valid
facebook/pyre-check
sapp/sapp/models.py
https://github.com/facebook/pyre-check/blob/4a9604d943d28ef20238505a51acfb1f666328d7/sapp/sapp/models.py#L87-L94
def prepare(cls, session, pkgen, items):
    """This is called immediately before the items are written to the
    database. pkgen is passed in to allow last-minute resolving of ids.
    """
    for item in cls.merge(session, items):
        if hasattr(item, "id"):
            item.id.resolve(id=pkgen.get(cls), is_new=True)
        yield cls.to_dict(item)
[ "def", "prepare", "(", "cls", ",", "session", ",", "pkgen", ",", "items", ")", ":", "for", "item", "in", "cls", ".", "merge", "(", "session", ",", "items", ")", ":", "if", "hasattr", "(", "item", ",", "\"id\"", ")", ":", "item", ".", "id", ".", "resolve", "(", "id", "=", "pkgen", ".", "get", "(", "cls", ")", ",", "is_new", "=", "True", ")", "yield", "cls", ".", "to_dict", "(", "item", ")" ]
This is called immediately before the items are written to the database. pkgen is passed in to allow last-minute resolving of ids.
[ "This", "is", "called", "immediately", "before", "the", "items", "are", "written", "to", "the", "database", ".", "pkgen", "is", "passed", "in", "to", "allow", "last", "-", "minute", "resolving", "of", "ids", "." ]
python
train
portfors-lab/sparkle
sparkle/gui/stim/stimulusview.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stimulusview.py#L381-L393
def updateSelectionModel(self, components):
    """Creates a new selection model and adds *components* to it

    :param components: components in this view to add to the selection
    :type components: list<:class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
    """
    # selmodel = self.selectionModel()
    # selmodel.clearSelection()
    selmodel = ComponentSelectionModel(self.model())
    self.setSelectionModel(selmodel)
    for comp in components:
        selmodel.selectComponent(comp)
    self.viewport().update()
[ "def", "updateSelectionModel", "(", "self", ",", "components", ")", ":", "# selmodel = self.selectionModel()", "# selmodel.clearSelection()", "selmodel", "=", "ComponentSelectionModel", "(", "self", ".", "model", "(", ")", ")", "self", ".", "setSelectionModel", "(", "selmodel", ")", "for", "comp", "in", "components", ":", "selmodel", ".", "selectComponent", "(", "comp", ")", "self", ".", "viewport", "(", ")", ".", "update", "(", ")" ]
Creates a new selection model and adds *components* to it

:param components: components in this view to add to the selection
:type components: list<:class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
[ "Creates", "a", "new", "selection", "model", "and", "adds", "*", "components", "*", "to", "it" ]
python
train
emillon/mixcloud
mixcloud/__init__.py
https://github.com/emillon/mixcloud/blob/da4c7a70444c7f1712ee13e3a93eb1cd9c3f4ab8/mixcloud/__init__.py#L57-L71
def exchange_token(self, code):
    """ Exchange the authorization code for an access token. """
    access_token_url = OAUTH_ROOT + '/access_token'
    params = {
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'redirect_uri': self.redirect_uri,
        'code': code,
    }
    resp = requests.get(access_token_url, params=params)
    if not resp.ok:
        raise MixcloudOauthError("Could not get access token.")
    return resp.json()['access_token']
[ "def", "exchange_token", "(", "self", ",", "code", ")", ":", "access_token_url", "=", "OAUTH_ROOT", "+", "'/access_token'", "params", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'client_secret'", ":", "self", ".", "client_secret", ",", "'redirect_uri'", ":", "self", ".", "redirect_uri", ",", "'code'", ":", "code", ",", "}", "resp", "=", "requests", ".", "get", "(", "access_token_url", ",", "params", "=", "params", ")", "if", "not", "resp", ".", "ok", ":", "raise", "MixcloudOauthError", "(", "\"Could not get access token.\"", ")", "return", "resp", ".", "json", "(", ")", "[", "'access_token'", "]" ]
Exchange the authorization code for an access token.
[ "Exchange", "the", "authorization", "code", "for", "an", "access", "token", "." ]
python
valid
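The record's `OAUTH_ROOT` constant is not shown, so the URL below is an assumption, as are the credentials; this sketch replays the same token exchange with plain `requests` outside the class:

import requests

# Hypothetical values -- substitute your registered app's credentials
# and the real OAUTH_ROOT used by the library.
params = {
    'client_id': 'my-client-id',
    'client_secret': 'my-client-secret',
    'redirect_uri': 'https://example.com/callback',
    'code': 'code-from-the-oauth-redirect',
}
resp = requests.get('https://www.mixcloud.com/oauth/access_token',
                    params=params)
if resp.ok:
    access_token = resp.json()['access_token']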
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle3.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle3.py#L308-L318
def match(self, uri):
    ''' Matches an URL and returns a (handler, target) tuple '''
    if uri in self.static:
        return self.static[uri], {}
    for combined, subroutes in self.dynamic:
        match = combined.match(uri)
        if not match:
            continue
        target, groups = subroutes[match.lastindex - 1]
        groups = groups.match(uri).groupdict() if groups else {}
        return target, groups
    return None, {}
[ "def", "match", "(", "self", ",", "uri", ")", ":", "if", "uri", "in", "self", ".", "static", ":", "return", "self", ".", "static", "[", "uri", "]", ",", "{", "}", "for", "combined", ",", "subroutes", "in", "self", ".", "dynamic", ":", "match", "=", "combined", ".", "match", "(", "uri", ")", "if", "not", "match", ":", "continue", "target", ",", "groups", "=", "subroutes", "[", "match", ".", "lastindex", "-", "1", "]", "groups", "=", "groups", ".", "match", "(", "uri", ")", ".", "groupdict", "(", ")", "if", "groups", "else", "{", "}", "return", "target", ",", "groups", "return", "None", ",", "{", "}" ]
Matches an URL and returns a (handler, target) tuple
[ "Matches", "an", "URL", "and", "returns", "a", "(", "handler", "target", ")", "tuple" ]
python
train
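A minimal sketch that exercises `match` by hand-building the structures it consumes. In real use bottle populates `static` and `dynamic` via its routing machinery, so the shapes below are inferred from the method body rather than taken from a documented API; it assumes the `match` function above has been defined at module level:

import re

class TinyRouter:
    pass

TinyRouter.match = match  # attach the record's function as a method

r = TinyRouter()
r.static = {'/health': 'health_handler'}
# One dynamic route: the combined regex has a single capturing group per
# route, the per-route regex carries the named parameter groups.
combined = re.compile(r'(^/user/[^/]+$)')
name_re = re.compile(r'^/user/(?P<name>[^/]+)$')
r.dynamic = [(combined, [('user_handler', name_re)])]

print(r.match('/health'))      # ('health_handler', {})
print(r.match('/user/alice'))  # ('user_handler', {'name': 'alice'})
print(r.match('/nope'))        # (None, {})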
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L7675-L7712
def check_signature(self, msgbuf, srcSystem, srcComponent):
    '''check signature on incoming message'''
    if isinstance(msgbuf, array.array):
        msgbuf = msgbuf.tostring()
    timestamp_buf = msgbuf[-12:-6]
    link_id = msgbuf[-13]
    (tlow, thigh) = struct.unpack('<IH', timestamp_buf)
    timestamp = tlow + (thigh<<32)

    # see if the timestamp is acceptable
    stream_key = (link_id,srcSystem,srcComponent)
    if stream_key in self.signing.stream_timestamps:
        if timestamp <= self.signing.stream_timestamps[stream_key]:
            # reject old timestamp
            # print('old timestamp')
            return False
    else:
        # a new stream has appeared. Accept the timestamp if it is at most
        # one minute behind our current timestamp
        if timestamp + 6000*1000 < self.signing.timestamp:
            # print('bad new stream ', timestamp/(100.0*1000*60*60*24*365), self.signing.timestamp/(100.0*1000*60*60*24*365))
            return False
        self.signing.stream_timestamps[stream_key] = timestamp
        # print('new stream')

    h = hashlib.new('sha256')
    h.update(self.signing.secret_key)
    h.update(msgbuf[:-6])
    sig1 = str(h.digest())[:6]
    sig2 = str(msgbuf)[-6:]
    if sig1 != sig2:
        # print('sig mismatch')
        return False

    # the timestamp we next send with is the max of the received timestamp and
    # our current timestamp
    self.signing.timestamp = max(self.signing.timestamp, timestamp)
    return True
[ "def", "check_signature", "(", "self", ",", "msgbuf", ",", "srcSystem", ",", "srcComponent", ")", ":", "if", "isinstance", "(", "msgbuf", ",", "array", ".", "array", ")", ":", "msgbuf", "=", "msgbuf", ".", "tostring", "(", ")", "timestamp_buf", "=", "msgbuf", "[", "-", "12", ":", "-", "6", "]", "link_id", "=", "msgbuf", "[", "-", "13", "]", "(", "tlow", ",", "thigh", ")", "=", "struct", ".", "unpack", "(", "'<IH'", ",", "timestamp_buf", ")", "timestamp", "=", "tlow", "+", "(", "thigh", "<<", "32", ")", "# see if the timestamp is acceptable", "stream_key", "=", "(", "link_id", ",", "srcSystem", ",", "srcComponent", ")", "if", "stream_key", "in", "self", ".", "signing", ".", "stream_timestamps", ":", "if", "timestamp", "<=", "self", ".", "signing", ".", "stream_timestamps", "[", "stream_key", "]", ":", "# reject old timestamp", "# print('old timestamp')", "return", "False", "else", ":", "# a new stream has appeared. Accept the timestamp if it is at most", "# one minute behind our current timestamp", "if", "timestamp", "+", "6000", "*", "1000", "<", "self", ".", "signing", ".", "timestamp", ":", "# print('bad new stream ', timestamp/(100.0*1000*60*60*24*365), self.signing.timestamp/(100.0*1000*60*60*24*365))", "return", "False", "self", ".", "signing", ".", "stream_timestamps", "[", "stream_key", "]", "=", "timestamp", "# print('new stream')", "h", "=", "hashlib", ".", "new", "(", "'sha256'", ")", "h", ".", "update", "(", "self", ".", "signing", ".", "secret_key", ")", "h", ".", "update", "(", "msgbuf", "[", ":", "-", "6", "]", ")", "sig1", "=", "str", "(", "h", ".", "digest", "(", ")", ")", "[", ":", "6", "]", "sig2", "=", "str", "(", "msgbuf", ")", "[", "-", "6", ":", "]", "if", "sig1", "!=", "sig2", ":", "# print('sig mismatch')", "return", "False", "# the timestamp we next send with is the max of the received timestamp and", "# our current timestamp", "self", ".", "signing", ".", "timestamp", "=", "max", "(", "self", ".", "signing", ".", "timestamp", ",", "timestamp", ")", "return", "True" ]
check signature on incoming message
[ "check", "signature", "on", "incoming", "message" ]
python
train
marrow/util
marrow/util/object.py
https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/object.py#L253-L308
def getargspec(obj):
    """An improved inspect.getargspec.

    Has a slightly different return value from the default getargspec.

    Returns a tuple of:
        required, optional, args, kwargs
        list, dict, bool, bool

    Required is a list of required named arguments.
    Optional is a dictionary mapping optional arguments to defaults.
    Args and kwargs are True for the respective unlimited argument type.
    """

    argnames, varargs, varkw, _defaults = None, None, None, None

    if inspect.isfunction(obj) or inspect.ismethod(obj):
        argnames, varargs, varkw, _defaults = inspect.getargspec(obj)

    elif inspect.isclass(obj):
        if inspect.ismethoddescriptor(obj.__init__):
            argnames, varargs, varkw, _defaults = [], False, False, None
        else:
            argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__init__)

    elif hasattr(obj, '__call__'):
        argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__call__)

    else:
        raise TypeError("Object not callable?")

    # Need test case to prove this is even possible.
    # if (argnames, varargs, varkw, defaults) is (None, None, None, None):
    #     raise InspectionFailed()

    if argnames and argnames[0] == 'self':
        del argnames[0]

    if _defaults is None:
        _defaults = []
        defaults = dict()
    else:
        # Create a mapping dictionary of defaults; this is slightly more useful.
        defaults = dict()
        _defaults = list(_defaults)
        _defaults.reverse()
        argnames.reverse()
        for i, default in enumerate(_defaults):
            defaults[argnames[i]] = default
        argnames.reverse()
        # del argnames[-len(_defaults):]

    return argnames, defaults, True if varargs else False, True if varkw else False
[ "def", "getargspec", "(", "obj", ")", ":", "argnames", ",", "varargs", ",", "varkw", ",", "_defaults", "=", "None", ",", "None", ",", "None", ",", "None", "if", "inspect", ".", "isfunction", "(", "obj", ")", "or", "inspect", ".", "ismethod", "(", "obj", ")", ":", "argnames", ",", "varargs", ",", "varkw", ",", "_defaults", "=", "inspect", ".", "getargspec", "(", "obj", ")", "elif", "inspect", ".", "isclass", "(", "obj", ")", ":", "if", "inspect", ".", "ismethoddescriptor", "(", "obj", ".", "__init__", ")", ":", "argnames", ",", "varargs", ",", "varkw", ",", "_defaults", "=", "[", "]", ",", "False", ",", "False", ",", "None", "else", ":", "argnames", ",", "varargs", ",", "varkw", ",", "_defaults", "=", "inspect", ".", "getargspec", "(", "obj", ".", "__init__", ")", "elif", "hasattr", "(", "obj", ",", "'__call__'", ")", ":", "argnames", ",", "varargs", ",", "varkw", ",", "_defaults", "=", "inspect", ".", "getargspec", "(", "obj", ".", "__call__", ")", "else", ":", "raise", "TypeError", "(", "\"Object not callable?\"", ")", "# Need test case to prove this is even possible.", "# if (argnames, varargs, varkw, defaults) is (None, None, None, None):", "# raise InspectionFailed()", "if", "argnames", "and", "argnames", "[", "0", "]", "==", "'self'", ":", "del", "argnames", "[", "0", "]", "if", "_defaults", "is", "None", ":", "_defaults", "=", "[", "]", "defaults", "=", "dict", "(", ")", "else", ":", "# Create a mapping dictionary of defaults; this is slightly more useful.", "defaults", "=", "dict", "(", ")", "_defaults", "=", "list", "(", "_defaults", ")", "_defaults", ".", "reverse", "(", ")", "argnames", ".", "reverse", "(", ")", "for", "i", ",", "default", "in", "enumerate", "(", "_defaults", ")", ":", "defaults", "[", "argnames", "[", "i", "]", "]", "=", "default", "argnames", ".", "reverse", "(", ")", "# del argnames[-len(_defaults):]", "return", "argnames", ",", "defaults", ",", "True", "if", "varargs", "else", "False", ",", "True", "if", "varkw", "else", "False" ]
An improved inspect.getargspec.

Has a slightly different return value from the default getargspec.

Returns a tuple of:
    required, optional, args, kwargs
    list, dict, bool, bool

Required is a list of required named arguments.
Optional is a dictionary mapping optional arguments to defaults.
Args and kwargs are True for the respective unlimited argument type.
[ "An", "improved", "inspect", ".", "getargspec", "." ]
python
train
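A minimal usage sketch, assuming the `getargspec` function above is in scope with `import inspect`, on an interpreter where `inspect.getargspec` still exists (it was removed in Python 3.11). Note that the first element keeps all positional names, not just the required ones, because the trimming line is commented out in the record:

import inspect

def example(a, b, c=3, *args, **kwargs):
    pass

names, optional, has_args, has_kwargs = getargspec(example)
print(names)       # ['a', 'b', 'c']
print(optional)    # {'c': 3}
print(has_args)    # True
print(has_kwargs)  # True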
GNS3/gns3-server
gns3server/controller/node.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/node.py#L418-L450
def _node_data(self, properties=None):
    """
    Prepare node data to send to the remote controller

    :param properties: If properties is None use actual property otherwise use the parameter
    """
    if properties:
        data = copy.copy(properties)
    else:
        data = copy.copy(self._properties)
        # We replace the startup script name by the content of the file
        mapping = {
            "base_script_file": "startup_script",
            "startup_config": "startup_config_content",
            "private_config": "private_config_content",
        }
        for k, v in mapping.items():
            if k in list(self._properties.keys()):
                data[v] = self._base_config_file_content(self._properties[k])
                del data[k]
                del self._properties[k]
                # We send the file only one time
    data["name"] = self._name
    if self._console:
        # console is optional for builtin nodes
        data["console"] = self._console
    if self._console_type:
        data["console_type"] = self._console_type

    # None properties are not be send. Because it can mean the emulator doesn't support it
    for key in list(data.keys()):
        if data[key] is None or data[key] is {} or key in self.CONTROLLER_ONLY_PROPERTIES:
            del data[key]
    return data
[ "def", "_node_data", "(", "self", ",", "properties", "=", "None", ")", ":", "if", "properties", ":", "data", "=", "copy", ".", "copy", "(", "properties", ")", "else", ":", "data", "=", "copy", ".", "copy", "(", "self", ".", "_properties", ")", "# We replace the startup script name by the content of the file", "mapping", "=", "{", "\"base_script_file\"", ":", "\"startup_script\"", ",", "\"startup_config\"", ":", "\"startup_config_content\"", ",", "\"private_config\"", ":", "\"private_config_content\"", ",", "}", "for", "k", ",", "v", "in", "mapping", ".", "items", "(", ")", ":", "if", "k", "in", "list", "(", "self", ".", "_properties", ".", "keys", "(", ")", ")", ":", "data", "[", "v", "]", "=", "self", ".", "_base_config_file_content", "(", "self", ".", "_properties", "[", "k", "]", ")", "del", "data", "[", "k", "]", "del", "self", ".", "_properties", "[", "k", "]", "# We send the file only one time", "data", "[", "\"name\"", "]", "=", "self", ".", "_name", "if", "self", ".", "_console", ":", "# console is optional for builtin nodes", "data", "[", "\"console\"", "]", "=", "self", ".", "_console", "if", "self", ".", "_console_type", ":", "data", "[", "\"console_type\"", "]", "=", "self", ".", "_console_type", "# None properties are not be send. Because it can mean the emulator doesn't support it", "for", "key", "in", "list", "(", "data", ".", "keys", "(", ")", ")", ":", "if", "data", "[", "key", "]", "is", "None", "or", "data", "[", "key", "]", "is", "{", "}", "or", "key", "in", "self", ".", "CONTROLLER_ONLY_PROPERTIES", ":", "del", "data", "[", "key", "]", "return", "data" ]
Prepare node data to send to the remote controller

:param properties: If properties is None use actual property otherwise use the parameter
[ "Prepare", "node", "data", "to", "send", "to", "the", "remote", "controller" ]
python
train
avelino/bottle-auth
bottle_auth/core/httputil.py
https://github.com/avelino/bottle-auth/blob/db07e526864aeac05ee68444b47e5db29540ce18/bottle_auth/core/httputil.py#L179-L189
def url_concat(url, args):
    """Concatenate url and argument dictionary regardless of whether
    url has existing query parameters.

    >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
    'http://example.com/foo?a=b&c=d'
    """
    if not args:
        return url
    if url[-1] not in ('?', '&'):
        url += '&' if ('?' in url) else '?'
    return url + urllib.urlencode(args)
[ "def", "url_concat", "(", "url", ",", "args", ")", ":", "if", "not", "args", ":", "return", "url", "if", "url", "[", "-", "1", "]", "not", "in", "(", "'?'", ",", "'&'", ")", ":", "url", "+=", "'&'", "if", "(", "'?'", "in", "url", ")", "else", "'?'", "return", "url", "+", "urllib", ".", "urlencode", "(", "args", ")" ]
Concatenate url and argument dictionary regardless of whether
url has existing query parameters.

>>> url_concat("http://example.com/foo?a=b", dict(c="d"))
'http://example.com/foo?a=b&c=d'
[ "Concatenate", "url", "and", "argument", "dictionary", "regardless", "of", "whether", "url", "has", "existing", "query", "parameters", "." ]
python
test
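A minimal sketch of the behavior, assuming the `url_concat` function above is in scope; the record relies on Python 2's `urllib.urlencode` (on Python 3 the equivalent lives in `urllib.parse`):

print(url_concat("http://example.com/foo", {"a": "b"}))
# http://example.com/foo?a=b
print(url_concat("http://example.com/foo?a=b", {"c": "d"}))
# http://example.com/foo?a=b&c=d
print(url_concat("http://example.com/foo?", {"c": "d"}))
# http://example.com/foo?c=d  -- trailing '?' reused, no extra separator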
alex-sherman/unsync
examples/mixing_methods.py
https://github.com/alex-sherman/unsync/blob/a52a0b04980dcaf6dc2fd734aa9d7be9d8960bbe/examples/mixing_methods.py#L16-L22
async def result_continuation(task):
    """A preliminary result processor we'll chain on to the original task

    This will get executed wherever the source task was executed, in this
    case one of the threads in the ThreadPoolExecutor"""
    await asyncio.sleep(0.1)
    num, res = task.result()
    return num, res * 2
[ "async", "def", "result_continuation", "(", "task", ")", ":", "await", "asyncio", ".", "sleep", "(", "0.1", ")", "num", ",", "res", "=", "task", ".", "result", "(", ")", "return", "num", ",", "res", "*", "2" ]
A preliminary result processor we'll chain on to the original task This will get executed wherever the source task was executed, in this case one of the threads in the ThreadPoolExecutor
[ "A", "preliminary", "result", "processor", "we", "ll", "chain", "on", "to", "the", "original", "task", "This", "will", "get", "executed", "wherever", "the", "source", "task", "was", "executed", "in", "this", "case", "one", "of", "the", "threads", "in", "the", "ThreadPoolExecutor" ]
python
train
rueckstiess/mtools
mtools/util/logevent.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L701-L719
def _extract_level(self):
    """Extract level and component if available (lazy)."""
    if self._level is None:
        split_tokens = self.split_tokens
        if not split_tokens:
            self._level = False
            self._component = False
            return

        x = (self.log_levels.index(split_tokens[1])
             if split_tokens[1] in self.log_levels else None)

        if x is not None:
            self._level = split_tokens[1]
            self._component = split_tokens[2]
        else:
            self._level = False
            self._component = False
[ "def", "_extract_level", "(", "self", ")", ":", "if", "self", ".", "_level", "is", "None", ":", "split_tokens", "=", "self", ".", "split_tokens", "if", "not", "split_tokens", ":", "self", ".", "_level", "=", "False", "self", ".", "_component", "=", "False", "return", "x", "=", "(", "self", ".", "log_levels", ".", "index", "(", "split_tokens", "[", "1", "]", ")", "if", "split_tokens", "[", "1", "]", "in", "self", ".", "log_levels", "else", "None", ")", "if", "x", "is", "not", "None", ":", "self", ".", "_level", "=", "split_tokens", "[", "1", "]", "self", ".", "_component", "=", "split_tokens", "[", "2", "]", "else", ":", "self", ".", "_level", "=", "False", "self", ".", "_component", "=", "False" ]
Extract level and component if available (lazy).
[ "Extract", "level", "and", "component", "if", "available", "(", "lazy", ")", "." ]
python
train
boakley/robotframework-hub
rfhub/blueprints/doc/__init__.py
https://github.com/boakley/robotframework-hub/blob/f3dc7562fe6218a7b8d7aac7b9ef234e1a573f7c/rfhub/blueprints/doc/__init__.py#L129-L136
def get_collections(kwdb, libtype="*"):
    """Get list of collections from kwdb, then add urls necessary for hyperlinks"""
    collections = kwdb.get_collections(libtype=libtype)
    for result in collections:
        url = flask.url_for(".doc_for_library", collection_id=result["collection_id"])
        result["url"] = url
    return collections
[ "def", "get_collections", "(", "kwdb", ",", "libtype", "=", "\"*\"", ")", ":", "collections", "=", "kwdb", ".", "get_collections", "(", "libtype", "=", "libtype", ")", "for", "result", "in", "collections", ":", "url", "=", "flask", ".", "url_for", "(", "\".doc_for_library\"", ",", "collection_id", "=", "result", "[", "\"collection_id\"", "]", ")", "result", "[", "\"url\"", "]", "=", "url", "return", "collections" ]
Get list of collections from kwdb, then add urls necessary for hyperlinks
[ "Get", "list", "of", "collections", "from", "kwdb", "then", "add", "urls", "necessary", "for", "hyperlinks" ]
python
train
KE-works/pykechain
pykechain/models/service.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L232-L236
def service(self):
    """Retrieve the `Service` object to which this execution is associated."""
    if not self._service:
        self._service = self._client.service(id=self.service_id)
    return self._service
[ "def", "service", "(", "self", ")", ":", "if", "not", "self", ".", "_service", ":", "self", ".", "_service", "=", "self", ".", "_client", ".", "service", "(", "id", "=", "self", ".", "service_id", ")", "return", "self", ".", "_service" ]
Retrieve the `Service` object to which this execution is associated.
[ "Retrieve", "the", "Service", "object", "to", "which", "this", "execution", "is", "associated", "." ]
python
train
cmap/cmapPy
cmapPy/pandasGEXpress/parse_gctx.py
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L371-L391
def get_row_metadata(gctx_file_path, convert_neg_666=True):
    """ Opens .gctx file and returns only row metadata

    Input:
        Mandatory:
        - gctx_file_path (str): full path to gctx file you want to parse.

        Optional:
        - convert_neg_666 (bool): whether to convert -666 values to num

    Output:
        - row_meta (pandas DataFrame): a DataFrame of all row metadata values.
    """
    full_path = os.path.expanduser(gctx_file_path)
    # open file
    gctx_file = h5py.File(full_path, "r")
    row_dset = gctx_file[row_meta_group_node]
    row_meta = parse_metadata_df("row", row_dset, convert_neg_666)
    gctx_file.close()
    return row_meta
[ "def", "get_row_metadata", "(", "gctx_file_path", ",", "convert_neg_666", "=", "True", ")", ":", "full_path", "=", "os", ".", "path", ".", "expanduser", "(", "gctx_file_path", ")", "# open file", "gctx_file", "=", "h5py", ".", "File", "(", "full_path", ",", "\"r\"", ")", "row_dset", "=", "gctx_file", "[", "row_meta_group_node", "]", "row_meta", "=", "parse_metadata_df", "(", "\"row\"", ",", "row_dset", ",", "convert_neg_666", ")", "gctx_file", ".", "close", "(", ")", "return", "row_meta" ]
Opens .gctx file and returns only row metadata Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to num Output: - row_meta (pandas DataFrame): a DataFrame of all row metadata values.
[ "Opens", ".", "gctx", "file", "and", "returns", "only", "row", "metadata" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/utils/diet.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/diet.py#L253-L257
def _dequantize(q, params):
    """Dequantize q according to params."""
    if not params.quantize:
        return q
    return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale
[ "def", "_dequantize", "(", "q", ",", "params", ")", ":", "if", "not", "params", ".", "quantize", ":", "return", "q", "return", "tf", ".", "to_float", "(", "tf", ".", "bitcast", "(", "q", ",", "tf", ".", "int16", ")", ")", "*", "params", ".", "quantization_scale" ]
Dequantize q according to params.
[ "Dequantize", "q", "according", "to", "params", "." ]
python
train
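In the record, `tf.bitcast(q, tf.int16)` reinterprets the stored float16 bit patterns as signed integers before scaling (`tf.to_float` is TF1-era API). A NumPy re-creation of the same arithmetic, with an invented scale value for illustration:

import numpy as np

quantization_scale = 10.0 / 32768.0  # made-up scale, not from the record

def dequantize_np(q):
    # Reinterpret the float16 bits as int16, then scale back to float.
    return q.view(np.int16).astype(np.float32) * quantization_scale

q = np.array([1024, -2048], dtype=np.int16).view(np.float16)
print(dequantize_np(q))  # [ 0.3125 -0.625 ]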
njsmith/colorspacious
colorspacious/conversion.py
https://github.com/njsmith/colorspacious/blob/59e0226003fb1b894597c5081e8ca5a3aa4fcefd/colorspacious/conversion.py#L222-L232
def cspace_convert(arr, start, end):
    """Converts the colors in ``arr`` from colorspace ``start`` to colorspace
    ``end``.

    :param arr: An array-like of colors.
    :param start, end: Any supported colorspace specifiers. See
        :ref:`supported-colorspaces` for details.
    """
    converter = cspace_converter(start, end)
    return converter(arr)
[ "def", "cspace_convert", "(", "arr", ",", "start", ",", "end", ")", ":", "converter", "=", "cspace_converter", "(", "start", ",", "end", ")", "return", "converter", "(", "arr", ")" ]
Converts the colors in ``arr`` from colorspace ``start`` to colorspace ``end``. :param arr: An array-like of colors. :param start, end: Any supported colorspace specifiers. See :ref:`supported-colorspaces` for details.
[ "Converts", "the", "colors", "in", "arr", "from", "colorspace", "start", "to", "colorspace", "end", "." ]
python
train
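A minimal usage sketch, assuming `colorspacious` is installed; `"sRGB255"` and `"CAM02-UCS"` are among its documented colorspace specifiers:

import numpy as np
from colorspacious import cspace_convert

# sRGB (0-255) -> CAM02-UCS, a perceptually uniform space
rgb = np.array([[255, 0, 0], [0, 255, 0]])
jab = cspace_convert(rgb, "sRGB255", "CAM02-UCS")
print(jab.shape)  # (2, 3) -- one J'a'b' triple per input color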
hMatoba/Piexif
piexif/_insert.py
https://github.com/hMatoba/Piexif/blob/afd0d232cf05cf530423f4b2a82ab291f150601a/piexif/_insert.py#L9-L60
def insert(exif, image, new_file=None):
    """
    py:function:: piexif.insert(exif_bytes, filename)

    Insert exif into JPEG.

    :param bytes exif_bytes: Exif as bytes
    :param str filename: JPEG
    """
    if exif[0:6] != b"\x45\x78\x69\x66\x00\x00":
        raise ValueError("Given data is not exif data")

    output_file = False
    # Prevents "UnicodeWarning: Unicode equal comparison failed" warnings on Python 2
    maybe_image = sys.version_info >= (3,0,0) or isinstance(image, str)
    if maybe_image and image[0:2] == b"\xff\xd8":
        image_data = image
        file_type = "jpeg"
    elif maybe_image and image[0:4] == b"RIFF" and image[8:12] == b"WEBP":
        image_data = image
        file_type = "webp"
    else:
        with open(image, 'rb') as f:
            image_data = f.read()
        if image_data[0:2] == b"\xff\xd8":
            file_type = "jpeg"
        elif image_data[0:4] == b"RIFF" and image_data[8:12] == b"WEBP":
            file_type = "webp"
        else:
            raise InvalidImageDataError
        output_file = True

    if file_type == "jpeg":
        exif = b"\xff\xe1" + struct.pack(">H", len(exif) + 2) + exif
        segments = split_into_segments(image_data)
        new_data = merge_segments(segments, exif)
    elif file_type == "webp":
        exif = exif[6:]
        new_data = _webp.insert(image_data, exif)

    if isinstance(new_file, io.BytesIO):
        new_file.write(new_data)
        new_file.seek(0)
    elif new_file:
        with open(new_file, "wb+") as f:
            f.write(new_data)
    elif output_file:
        with open(image, "wb+") as f:
            f.write(new_data)
    else:
        raise ValueError("Give a 3rd argument to 'insert' to output file")
[ "def", "insert", "(", "exif", ",", "image", ",", "new_file", "=", "None", ")", ":", "if", "exif", "[", "0", ":", "6", "]", "!=", "b\"\\x45\\x78\\x69\\x66\\x00\\x00\"", ":", "raise", "ValueError", "(", "\"Given data is not exif data\"", ")", "output_file", "=", "False", "# Prevents \"UnicodeWarning: Unicode equal comparison failed\" warnings on Python 2", "maybe_image", "=", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ",", "0", ")", "or", "isinstance", "(", "image", ",", "str", ")", "if", "maybe_image", "and", "image", "[", "0", ":", "2", "]", "==", "b\"\\xff\\xd8\"", ":", "image_data", "=", "image", "file_type", "=", "\"jpeg\"", "elif", "maybe_image", "and", "image", "[", "0", ":", "4", "]", "==", "b\"RIFF\"", "and", "image", "[", "8", ":", "12", "]", "==", "b\"WEBP\"", ":", "image_data", "=", "image", "file_type", "=", "\"webp\"", "else", ":", "with", "open", "(", "image", ",", "'rb'", ")", "as", "f", ":", "image_data", "=", "f", ".", "read", "(", ")", "if", "image_data", "[", "0", ":", "2", "]", "==", "b\"\\xff\\xd8\"", ":", "file_type", "=", "\"jpeg\"", "elif", "image_data", "[", "0", ":", "4", "]", "==", "b\"RIFF\"", "and", "image_data", "[", "8", ":", "12", "]", "==", "b\"WEBP\"", ":", "file_type", "=", "\"webp\"", "else", ":", "raise", "InvalidImageDataError", "output_file", "=", "True", "if", "file_type", "==", "\"jpeg\"", ":", "exif", "=", "b\"\\xff\\xe1\"", "+", "struct", ".", "pack", "(", "\">H\"", ",", "len", "(", "exif", ")", "+", "2", ")", "+", "exif", "segments", "=", "split_into_segments", "(", "image_data", ")", "new_data", "=", "merge_segments", "(", "segments", ",", "exif", ")", "elif", "file_type", "==", "\"webp\"", ":", "exif", "=", "exif", "[", "6", ":", "]", "new_data", "=", "_webp", ".", "insert", "(", "image_data", ",", "exif", ")", "if", "isinstance", "(", "new_file", ",", "io", ".", "BytesIO", ")", ":", "new_file", ".", "write", "(", "new_data", ")", "new_file", ".", "seek", "(", "0", ")", "elif", "new_file", ":", "with", "open", "(", "new_file", ",", "\"wb+\"", ")", "as", "f", ":", "f", ".", "write", "(", "new_data", ")", "elif", "output_file", ":", "with", "open", "(", "image", ",", "\"wb+\"", ")", "as", "f", ":", "f", ".", "write", "(", "new_data", ")", "else", ":", "raise", "ValueError", "(", "\"Give a 3rd argument to 'insert' to output file\"", ")" ]
py:function:: piexif.insert(exif_bytes, filename) Insert exif into JPEG. :param bytes exif_bytes: Exif as bytes :param str filename: JPEG
[ "py", ":", "function", "::", "piexif", ".", "insert", "(", "exif_bytes", "filename", ")" ]
python
train
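A minimal usage sketch for the insert function above, assuming the piexif package is installed; the tag value and file names are placeholders.

import piexif

# Build Exif bytes with piexif.dump, then embed them into a JPEG on disk.
exif_dict = {"0th": {piexif.ImageIFD.Make: u"ExampleCam"}}  # placeholder tag value
exif_bytes = piexif.dump(exif_dict)
piexif.insert(exif_bytes, "input.jpg", "output.jpg")  # placeholder file names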
Chilipp/psyplot
psyplot/plotter.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/plotter.py#L2263-L2302
def unshare_me(self, keys=None, auto_update=False, draw=None,
               update_other=True):
    """
    Close the sharing connection of this plotter with others

    This method undoes the sharing connections made by the :meth:`share`
    method and releases this plotter again.

    Parameters
    ----------
    keys: string or iterable of strings
        The formatoptions to unshare, or group names of formatoptions to
        unshare all formatoptions of that group (see the :attr:`fmt_groups`
        property). If None, all formatoptions of this plotter are unshared.
    %(InteractiveBase.start_update.parameters.draw)s
    %(InteractiveBase.update.parameters.auto_update)s

    See Also
    --------
    share, unshare"""
    auto_update = auto_update or not self.no_auto_update
    keys = self._set_sharing_keys(keys)
    to_update = []
    for key in keys:
        fmto = getattr(self, key)
        try:
            other_fmto = self._shared.pop(key)
        except KeyError:
            pass
        else:
            other_fmto.shared.remove(fmto)
            if update_other:
                other_fmto.plotter._register_update(
                    force=[other_fmto.key])
                to_update.append(other_fmto.plotter)
    self.update(force=keys, draw=draw, auto_update=auto_update)
    if update_other and auto_update:
        for plotter in to_update:
            plotter.start_update(draw=draw)
[ "def", "unshare_me", "(", "self", ",", "keys", "=", "None", ",", "auto_update", "=", "False", ",", "draw", "=", "None", ",", "update_other", "=", "True", ")", ":", "auto_update", "=", "auto_update", "or", "not", "self", ".", "no_auto_update", "keys", "=", "self", ".", "_set_sharing_keys", "(", "keys", ")", "to_update", "=", "[", "]", "for", "key", "in", "keys", ":", "fmto", "=", "getattr", "(", "self", ",", "key", ")", "try", ":", "other_fmto", "=", "self", ".", "_shared", ".", "pop", "(", "key", ")", "except", "KeyError", ":", "pass", "else", ":", "other_fmto", ".", "shared", ".", "remove", "(", "fmto", ")", "if", "update_other", ":", "other_fmto", ".", "plotter", ".", "_register_update", "(", "force", "=", "[", "other_fmto", ".", "key", "]", ")", "to_update", ".", "append", "(", "other_fmto", ".", "plotter", ")", "self", ".", "update", "(", "force", "=", "keys", ",", "draw", "=", "draw", ",", "auto_update", "=", "auto_update", ")", "if", "update_other", "and", "auto_update", ":", "for", "plotter", "in", "to_update", ":", "plotter", ".", "start_update", "(", "draw", "=", "draw", ")" ]
Close the sharing connection of this plotter with others

This method undoes the sharing connections made by the :meth:`share`
method and releases this plotter again.

Parameters
----------
keys: string or iterable of strings
    The formatoptions to unshare, or group names of formatoptions to
    unshare all formatoptions of that group (see the :attr:`fmt_groups`
    property). If None, all formatoptions of this plotter are unshared.
%(InteractiveBase.start_update.parameters.draw)s
%(InteractiveBase.update.parameters.auto_update)s

See Also
--------
share, unshare
[ "Close", "the", "sharing", "connection", "of", "this", "plotter", "with", "others" ]
python
train
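A hedged sketch of the share/unshare_me round trip described above; p1 and p2 stand for two psyplot Plotter instances from an open project (not constructed here), and 'cmap' is an assumed formatoption key that both plotters define.

# Assumes p1 and p2 are existing Plotter instances; 'cmap' is an assumed key.
p1.share(p2, keys=['cmap'])   # p2's colormap now follows p1
p2.unshare_me(keys=['cmap'])  # close the sharing connection and release p2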
quantmind/pulsar
pulsar/async/protocols.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/async/protocols.py#L353-L360
async def close(self): """Stop serving the :attr:`.Server.sockets` and close all concurrent connections. """ if self._server: self._server.close() self._server = None self.event('stop').fire()
[ "async", "def", "close", "(", "self", ")", ":", "if", "self", ".", "_server", ":", "self", ".", "_server", ".", "close", "(", ")", "self", ".", "_server", "=", "None", "self", ".", "event", "(", "'stop'", ")", ".", "fire", "(", ")" ]
Stop serving the :attr:`.Server.sockets` and close all concurrent connections.
[ "Stop", "serving", "the", ":", "attr", ":", ".", "Server", ".", "sockets", "and", "close", "all", "concurrent", "connections", "." ]
python
train
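A minimal sketch of shutting such a server down from a coroutine; it relies only on the close method shown above and assumes server is an already-serving instance.

async def shutdown(server):
    # Stops the listening sockets and fires the 'stop' event.
    await server.close()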
Karaage-Cluster/karaage
karaage/people/emails.py
https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/people/emails.py#L86-L100
def send_confirm_password_email(person): """Sends an email to user allowing them to confirm their password.""" url = '%s/profile/login/%s/' % ( settings.REGISTRATION_BASE_URL, person.username) context = CONTEXT.copy() context.update({ 'url': url, 'receiver': person, }) to_email = person.email subject, body = render_email('confirm_password', context) send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email])
[ "def", "send_confirm_password_email", "(", "person", ")", ":", "url", "=", "'%s/profile/login/%s/'", "%", "(", "settings", ".", "REGISTRATION_BASE_URL", ",", "person", ".", "username", ")", "context", "=", "CONTEXT", ".", "copy", "(", ")", "context", ".", "update", "(", "{", "'url'", ":", "url", ",", "'receiver'", ":", "person", ",", "}", ")", "to_email", "=", "person", ".", "email", "subject", ",", "body", "=", "render_email", "(", "'confirm_password'", ",", "context", ")", "send_mail", "(", "subject", ",", "body", ",", "settings", ".", "ACCOUNTS_EMAIL", ",", "[", "to_email", "]", ")" ]
Sends an email to user allowing them to confirm their password.
[ "Sends", "an", "email", "to", "user", "allowing", "them", "to", "confirm", "their", "password", "." ]
python
train
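A hedged usage sketch, assuming a configured Django/karaage project; the username is a placeholder.

from karaage.people.models import Person
from karaage.people.emails import send_confirm_password_email

# Look up an existing person and send them the confirm-password link.
person = Person.objects.get(username='jbloggs')  # placeholder username
send_confirm_password_email(person)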
klmitch/appathy
appathy/response.py
https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/response.py#L135-L155
def _serialize(self): """ Serialize the ResponseObject. Returns a webob `Response` object. """ # Do something appropriate if the response object is unbound if self._defcode is None: raise exceptions.UnboundResponse() # Build the response resp = self.response_class(request=self.req, status=self.code, headerlist=self._headers.items()) # Do we have a body? if self.result: resp.content_type = self.content_type resp.body = self.serializer(self.result) # Return the response return resp
[ "def", "_serialize", "(", "self", ")", ":", "# Do something appropriate if the response object is unbound", "if", "self", ".", "_defcode", "is", "None", ":", "raise", "exceptions", ".", "UnboundResponse", "(", ")", "# Build the response", "resp", "=", "self", ".", "response_class", "(", "request", "=", "self", ".", "req", ",", "status", "=", "self", ".", "code", ",", "headerlist", "=", "self", ".", "_headers", ".", "items", "(", ")", ")", "# Do we have a body?", "if", "self", ".", "result", ":", "resp", ".", "content_type", "=", "self", ".", "content_type", "resp", ".", "body", "=", "self", ".", "serializer", "(", "self", ".", "result", ")", "# Return the response", "return", "resp" ]
Serialize the ResponseObject. Returns a webob `Response` object.
[ "Serialize", "the", "ResponseObject", ".", "Returns", "a", "webob", "Response", "object", "." ]
python
train
edibledinos/pwnypack
pwnypack/shellcode/base.py
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/shellcode/base.py#L102-L115
def alloc_buffer(self, length): """ Allocate a buffer (a range of uninitialized memory). Arguments: length(int): The length of the buffer to allocate. Returns: ~pwnypack.types.Buffer: The object used to address this buffer. """ buf = Buffer(sum(len(v) for v in six.iterkeys(self.data)) + sum(v.length for v in self.buffers), length) self.buffers.append(buf) return buf
[ "def", "alloc_buffer", "(", "self", ",", "length", ")", ":", "buf", "=", "Buffer", "(", "sum", "(", "len", "(", "v", ")", "for", "v", "in", "six", ".", "iterkeys", "(", "self", ".", "data", ")", ")", "+", "sum", "(", "v", ".", "length", "for", "v", "in", "self", ".", "buffers", ")", ",", "length", ")", "self", ".", "buffers", ".", "append", "(", "buf", ")", "return", "buf" ]
Allocate a buffer (a range of uninitialized memory). Arguments: length(int): The length of the buffer to allocate. Returns: ~pwnypack.types.Buffer: The object used to address this buffer.
[ "Allocate", "a", "buffer", "(", "a", "range", "of", "uninitialized", "memory", ")", "." ]
python
train
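A hedged sketch of reserving scratch space; env stands for an instance of a pwnypack shellcode environment class that provides alloc_buffer (not constructed here).

# Reserve 64 uninitialized bytes; the returned Buffer addresses them.
buf = env.alloc_buffer(64)
print(buf.length)  # -> 64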
markrwilliams/txdarn
txdarn/protocol.py
https://github.com/markrwilliams/txdarn/blob/154d25a1ac78c4e2877c0656e3b9cea4332eda57/txdarn/protocol.py#L129-L133
def _connectionEstablished(self, transport): '''Store a reference to our transport and write an open frame.''' self.transport = transport self.transport.writeOpen() self.heartbeater.schedule()
[ "def", "_connectionEstablished", "(", "self", ",", "transport", ")", ":", "self", ".", "transport", "=", "transport", "self", ".", "transport", ".", "writeOpen", "(", ")", "self", ".", "heartbeater", ".", "schedule", "(", ")" ]
Store a reference to our transport and write an open frame.
[ "Store", "a", "reference", "to", "our", "transport", "and", "write", "an", "open", "frame", "." ]
python
train
CiscoTestAutomation/yang
connector/setup.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/connector/setup.py#L110-L116
def find_version(*paths): '''reads a file and returns the defined __version__ value''' version_match = re.search(r"^__version__ ?= ?['\"]([^'\"]*)['\"]", read(*paths), re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.")
[ "def", "find_version", "(", "*", "paths", ")", ":", "version_match", "=", "re", ".", "search", "(", "r\"^__version__ ?= ?['\\\"]([^'\\\"]*)['\\\"]\"", ",", "read", "(", "*", "paths", ")", ",", "re", ".", "M", ")", "if", "version_match", ":", "return", "version_match", ".", "group", "(", "1", ")", "raise", "RuntimeError", "(", "\"Unable to find version string.\"", ")" ]
reads a file and returns the defined __version__ value
[ "reads", "a", "file", "and", "returns", "the", "defined", "__version__", "value" ]
python
train
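A usage sketch for find_version; the path segments are placeholders, and read is the sibling helper this setup.py defines.

# Reads src/mypackage/__init__.py and extracts its __version__ value.
version = find_version('src', 'mypackage', '__init__.py')  # placeholder path
print(version)  # e.g. '1.0.0'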
soimort/you-get
src/you_get/__main__.py
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/__main__.py#L24-L85
def main_dev(**kwargs): """Main entry point. you-get-dev """ # Get (branch, commit) if running from a git repo. head = git.get_head(kwargs['repo_path']) # Get options and arguments. try: opts, args = getopt.getopt(sys.argv[1:], _short_options, _options) except getopt.GetoptError as e: log.wtf(""" [Fatal] {}. Try '{} --help' for more options.""".format(e, script_name)) if not opts and not args: # Display help. print(_help) # Enter GUI mode. #from .gui import gui_main #gui_main() else: conf = {} for opt, arg in opts: if opt in ('-h', '--help'): # Display help. print(_help) elif opt in ('-V', '--version'): # Display version. log.println("you-get:", log.BOLD) log.println(" version: {}".format(__version__)) if head is not None: log.println(" branch: {}\n commit: {}".format(*head)) else: log.println(" branch: {}\n commit: {}".format("(stable)", "(tag v{})".format(__version__))) log.println(" platform: {}".format(platform.platform())) log.println(" python: {}".format(sys.version.split('\n')[0])) elif opt in ('-g', '--gui'): # Run using GUI. conf['gui'] = True elif opt in ('-f', '--force'): # Force download. conf['force'] = True elif opt in ('-l', '--playlist', '--playlists'): # Download playlist whenever possible. conf['playlist'] = True if args: if 'gui' in conf and conf['gui']: # Enter GUI mode. from .gui import gui_main gui_main(*args, **conf) else: # Enter console mode. from .console import console_main console_main(*args, **conf)
[ "def", "main_dev", "(", "*", "*", "kwargs", ")", ":", "# Get (branch, commit) if running from a git repo.", "head", "=", "git", ".", "get_head", "(", "kwargs", "[", "'repo_path'", "]", ")", "# Get options and arguments.", "try", ":", "opts", ",", "args", "=", "getopt", ".", "getopt", "(", "sys", ".", "argv", "[", "1", ":", "]", ",", "_short_options", ",", "_options", ")", "except", "getopt", ".", "GetoptError", "as", "e", ":", "log", ".", "wtf", "(", "\"\"\"\n [Fatal] {}.\n Try '{} --help' for more options.\"\"\"", ".", "format", "(", "e", ",", "script_name", ")", ")", "if", "not", "opts", "and", "not", "args", ":", "# Display help.", "print", "(", "_help", ")", "# Enter GUI mode.", "#from .gui import gui_main", "#gui_main()", "else", ":", "conf", "=", "{", "}", "for", "opt", ",", "arg", "in", "opts", ":", "if", "opt", "in", "(", "'-h'", ",", "'--help'", ")", ":", "# Display help.", "print", "(", "_help", ")", "elif", "opt", "in", "(", "'-V'", ",", "'--version'", ")", ":", "# Display version.", "log", ".", "println", "(", "\"you-get:\"", ",", "log", ".", "BOLD", ")", "log", ".", "println", "(", "\" version: {}\"", ".", "format", "(", "__version__", ")", ")", "if", "head", "is", "not", "None", ":", "log", ".", "println", "(", "\" branch: {}\\n commit: {}\"", ".", "format", "(", "*", "head", ")", ")", "else", ":", "log", ".", "println", "(", "\" branch: {}\\n commit: {}\"", ".", "format", "(", "\"(stable)\"", ",", "\"(tag v{})\"", ".", "format", "(", "__version__", ")", ")", ")", "log", ".", "println", "(", "\" platform: {}\"", ".", "format", "(", "platform", ".", "platform", "(", ")", ")", ")", "log", ".", "println", "(", "\" python: {}\"", ".", "format", "(", "sys", ".", "version", ".", "split", "(", "'\\n'", ")", "[", "0", "]", ")", ")", "elif", "opt", "in", "(", "'-g'", ",", "'--gui'", ")", ":", "# Run using GUI.", "conf", "[", "'gui'", "]", "=", "True", "elif", "opt", "in", "(", "'-f'", ",", "'--force'", ")", ":", "# Force download.", "conf", "[", "'force'", "]", "=", "True", "elif", "opt", "in", "(", "'-l'", ",", "'--playlist'", ",", "'--playlists'", ")", ":", "# Download playlist whenever possible.", "conf", "[", "'playlist'", "]", "=", "True", "if", "args", ":", "if", "'gui'", "in", "conf", "and", "conf", "[", "'gui'", "]", ":", "# Enter GUI mode.", "from", ".", "gui", "import", "gui_main", "gui_main", "(", "*", "args", ",", "*", "*", "conf", ")", "else", ":", "# Enter console mode.", "from", ".", "console", "import", "console_main", "console_main", "(", "*", "args", ",", "*", "*", "conf", ")" ]
Main entry point. you-get-dev
[ "Main", "entry", "point", ".", "you", "-", "get", "-", "dev" ]
python
test
apache/incubator-mxnet
example/gluon/dc_gan/dcgan.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/dc_gan/dcgan.py#L165-L191
def get_netG(): """Get net G""" # build the generator netG = nn.Sequential() with netG.name_scope(): # input is Z, going into a convolution netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False)) netG.add(nn.BatchNorm()) netG.add(nn.Activation('relu')) # state size. (ngf*8) x 4 x 4 netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False)) netG.add(nn.BatchNorm()) netG.add(nn.Activation('relu')) # state size. (ngf*4) x 8 x 8 netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False)) netG.add(nn.BatchNorm()) netG.add(nn.Activation('relu')) # state size. (ngf*2) x 16 x 16 netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False)) netG.add(nn.BatchNorm()) netG.add(nn.Activation('relu')) # state size. (ngf) x 32 x 32 netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False)) netG.add(nn.Activation('tanh')) # state size. (nc) x 64 x 64 return netG
[ "def", "get_netG", "(", ")", ":", "# build the generator", "netG", "=", "nn", ".", "Sequential", "(", ")", "with", "netG", ".", "name_scope", "(", ")", ":", "# input is Z, going into a convolution", "netG", ".", "add", "(", "nn", ".", "Conv2DTranspose", "(", "ngf", "*", "8", ",", "4", ",", "1", ",", "0", ",", "use_bias", "=", "False", ")", ")", "netG", ".", "add", "(", "nn", ".", "BatchNorm", "(", ")", ")", "netG", ".", "add", "(", "nn", ".", "Activation", "(", "'relu'", ")", ")", "# state size. (ngf*8) x 4 x 4", "netG", ".", "add", "(", "nn", ".", "Conv2DTranspose", "(", "ngf", "*", "4", ",", "4", ",", "2", ",", "1", ",", "use_bias", "=", "False", ")", ")", "netG", ".", "add", "(", "nn", ".", "BatchNorm", "(", ")", ")", "netG", ".", "add", "(", "nn", ".", "Activation", "(", "'relu'", ")", ")", "# state size. (ngf*4) x 8 x 8", "netG", ".", "add", "(", "nn", ".", "Conv2DTranspose", "(", "ngf", "*", "2", ",", "4", ",", "2", ",", "1", ",", "use_bias", "=", "False", ")", ")", "netG", ".", "add", "(", "nn", ".", "BatchNorm", "(", ")", ")", "netG", ".", "add", "(", "nn", ".", "Activation", "(", "'relu'", ")", ")", "# state size. (ngf*2) x 16 x 16", "netG", ".", "add", "(", "nn", ".", "Conv2DTranspose", "(", "ngf", ",", "4", ",", "2", ",", "1", ",", "use_bias", "=", "False", ")", ")", "netG", ".", "add", "(", "nn", ".", "BatchNorm", "(", ")", ")", "netG", ".", "add", "(", "nn", ".", "Activation", "(", "'relu'", ")", ")", "# state size. (ngf) x 32 x 32", "netG", ".", "add", "(", "nn", ".", "Conv2DTranspose", "(", "nc", ",", "4", ",", "2", ",", "1", ",", "use_bias", "=", "False", ")", ")", "netG", ".", "add", "(", "nn", ".", "Activation", "(", "'tanh'", ")", ")", "# state size. (nc) x 64 x 64", "return", "netG" ]
Get net G
[ "Get", "net", "G" ]
python
train
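A minimal sketch of sampling from the generator above, assuming the module-level ngf and nc globals are set (e.g. 64 and 3) and a 100-dimensional latent vector, as in the DCGAN example.

import mxnet as mx
from mxnet import nd

netG = get_netG()
netG.initialize(mx.init.Normal(0.02))       # DCGAN-style weight init
z = nd.random.normal(shape=(1, 100, 1, 1))  # one latent vector
fake = netG(z)
print(fake.shape)                           # (1, nc, 64, 64)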
senaite/senaite.core
bika/lims/api/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/__init__.py#L756-L774
def get_workflow_status_of(brain_or_object, state_var="review_state"):
    """Get the current workflow status of the given brain or context.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :param state_var: The name of the state variable
    :type state_var: string
    :returns: Status
    :rtype: str
    """
    # Try to get the state from the catalog brain first
    if is_brain(brain_or_object):
        if state_var in brain_or_object.schema():
            return brain_or_object[state_var]
    # Retrieve the state from the object
    workflow = get_tool("portal_workflow")
    obj = get_object(brain_or_object)
    return workflow.getInfoFor(ob=obj, name=state_var, default='')
[ "def", "get_workflow_status_of", "(", "brain_or_object", ",", "state_var", "=", "\"review_state\"", ")", ":", "# Try to get the state from the catalog brain first", "if", "is_brain", "(", "brain_or_object", ")", ":", "if", "state_var", "in", "brain_or_object", ".", "schema", "(", ")", ":", "return", "brain_or_object", "[", "state_var", "]", "# Retrieve the state from the object", "workflow", "=", "get_tool", "(", "\"portal_workflow\"", ")", "obj", "=", "get_object", "(", "brain_or_object", ")", "return", "workflow", ".", "getInfoFor", "(", "ob", "=", "obj", ",", "name", "=", "state_var", ",", "default", "=", "''", ")" ]
Get the current workflow status of the given brain or context. :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param state_var: The name of the state variable :type state_var: string :returns: Status :rtype: str
[ "Get", "the", "current", "workflow", "status", "of", "the", "given", "brain", "or", "context", "." ]
python
train
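A hedged sketch, assuming a running senaite.core instance; the catalog query is a placeholder.

from bika.lims import api

# Print the review state of every Analysis Request brain.
brains = api.search({'portal_type': 'AnalysisRequest'})  # placeholder query
for brain in brains:
    print(api.get_workflow_status_of(brain))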
etscrivner/nose-perfdump
perfdump/plugin.py
https://github.com/etscrivner/nose-perfdump/blob/a203a68495d30346fab43fb903cb60cd29b17d49/perfdump/plugin.py#L130-L144
def report(self, stream): """Displays the slowest tests""" self.db.commit() stream.writeln() self.draw_header(stream, "10 SLOWEST SETUPS") self.display_slowest_setups(stream) stream.writeln() self.draw_header(stream, "10 SLOWEST TESTS") self.display_slowest_tests(stream) stream.writeln() if self.html_output_file: HtmlReport.write(self.html_output_file)
[ "def", "report", "(", "self", ",", "stream", ")", ":", "self", ".", "db", ".", "commit", "(", ")", "stream", ".", "writeln", "(", ")", "self", ".", "draw_header", "(", "stream", ",", "\"10 SLOWEST SETUPS\"", ")", "self", ".", "display_slowest_setups", "(", "stream", ")", "stream", ".", "writeln", "(", ")", "self", ".", "draw_header", "(", "stream", ",", "\"10 SLOWEST TESTS\"", ")", "self", ".", "display_slowest_tests", "(", "stream", ")", "stream", ".", "writeln", "(", ")", "if", "self", ".", "html_output_file", ":", "HtmlReport", ".", "write", "(", "self", ".", "html_output_file", ")" ]
Displays the slowest tests
[ "Displays", "the", "slowest", "tests" ]
python
train
berkeley-cocosci/Wallace
wallace/models.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/models.py#L655-L703
def vectors(self, direction="all", failed=False): """Get vectors that connect at this node. Direction can be "incoming", "outgoing" or "all" (default). Failed can be True, False or all """ # check direction if direction not in ["all", "incoming", "outgoing"]: raise ValueError( "{} is not a valid vector direction. " "Must be all, incoming or outgoing.".format(direction)) if failed not in ["all", False, True]: raise ValueError("{} is not a valid vector failed".format(failed)) # get the vectors if failed == "all": if direction == "all": return Vector.query\ .filter(or_(Vector.destination_id == self.id, Vector.origin_id == self.id))\ .all() if direction == "incoming": return Vector.query\ .filter_by(destination_id=self.id)\ .all() if direction == "outgoing": return Vector.query\ .filter_by(origin_id=self.id)\ .all() else: if direction == "all": return Vector.query\ .filter(and_(Vector.failed == failed, or_(Vector.destination_id == self.id, Vector.origin_id == self.id)))\ .all() if direction == "incoming": return Vector.query\ .filter_by(destination_id=self.id, failed=failed)\ .all() if direction == "outgoing": return Vector.query\ .filter_by(origin_id=self.id, failed=failed)\ .all()
[ "def", "vectors", "(", "self", ",", "direction", "=", "\"all\"", ",", "failed", "=", "False", ")", ":", "# check direction", "if", "direction", "not", "in", "[", "\"all\"", ",", "\"incoming\"", ",", "\"outgoing\"", "]", ":", "raise", "ValueError", "(", "\"{} is not a valid vector direction. \"", "\"Must be all, incoming or outgoing.\"", ".", "format", "(", "direction", ")", ")", "if", "failed", "not", "in", "[", "\"all\"", ",", "False", ",", "True", "]", ":", "raise", "ValueError", "(", "\"{} is not a valid vector failed\"", ".", "format", "(", "failed", ")", ")", "# get the vectors", "if", "failed", "==", "\"all\"", ":", "if", "direction", "==", "\"all\"", ":", "return", "Vector", ".", "query", ".", "filter", "(", "or_", "(", "Vector", ".", "destination_id", "==", "self", ".", "id", ",", "Vector", ".", "origin_id", "==", "self", ".", "id", ")", ")", ".", "all", "(", ")", "if", "direction", "==", "\"incoming\"", ":", "return", "Vector", ".", "query", ".", "filter_by", "(", "destination_id", "=", "self", ".", "id", ")", ".", "all", "(", ")", "if", "direction", "==", "\"outgoing\"", ":", "return", "Vector", ".", "query", ".", "filter_by", "(", "origin_id", "=", "self", ".", "id", ")", ".", "all", "(", ")", "else", ":", "if", "direction", "==", "\"all\"", ":", "return", "Vector", ".", "query", ".", "filter", "(", "and_", "(", "Vector", ".", "failed", "==", "failed", ",", "or_", "(", "Vector", ".", "destination_id", "==", "self", ".", "id", ",", "Vector", ".", "origin_id", "==", "self", ".", "id", ")", ")", ")", ".", "all", "(", ")", "if", "direction", "==", "\"incoming\"", ":", "return", "Vector", ".", "query", ".", "filter_by", "(", "destination_id", "=", "self", ".", "id", ",", "failed", "=", "failed", ")", ".", "all", "(", ")", "if", "direction", "==", "\"outgoing\"", ":", "return", "Vector", ".", "query", ".", "filter_by", "(", "origin_id", "=", "self", ".", "id", ",", "failed", "=", "failed", ")", ".", "all", "(", ")" ]
Get vectors that connect at this node. Direction can be "incoming", "outgoing" or "all" (default). Failed can be True, False or all
[ "Get", "vectors", "that", "connect", "at", "this", "node", "." ]
python
train
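A hedged sketch, assuming node is a persisted Wallace Node with some vectors attached; it uses only the method shown above.

incoming = node.vectors(direction="incoming")            # non-failed only (default)
all_edges = node.vectors(direction="all", failed="all")  # include failed vectors
print(len(incoming), len(all_edges))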
senaite/senaite.core
bika/lims/browser/analysisrequest/add2.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analysisrequest/add2.py#L486-L510
def get_service_categories(self, restricted=True): """Return all service categories in the right order :param restricted: Client settings restrict categories :type restricted: bool :returns: Category catalog results :rtype: brains """ bsc = api.get_tool("bika_setup_catalog") query = { "portal_type": "AnalysisCategory", "is_active": True, "sort_on": "sortable_title", } categories = bsc(query) client = self.get_client() if client and restricted: restricted_categories = client.getRestrictedCategories() restricted_category_ids = map( lambda c: c.getId(), restricted_categories) # keep correct order of categories if restricted_category_ids: categories = filter( lambda c: c.getId in restricted_category_ids, categories) return categories
[ "def", "get_service_categories", "(", "self", ",", "restricted", "=", "True", ")", ":", "bsc", "=", "api", ".", "get_tool", "(", "\"bika_setup_catalog\"", ")", "query", "=", "{", "\"portal_type\"", ":", "\"AnalysisCategory\"", ",", "\"is_active\"", ":", "True", ",", "\"sort_on\"", ":", "\"sortable_title\"", ",", "}", "categories", "=", "bsc", "(", "query", ")", "client", "=", "self", ".", "get_client", "(", ")", "if", "client", "and", "restricted", ":", "restricted_categories", "=", "client", ".", "getRestrictedCategories", "(", ")", "restricted_category_ids", "=", "map", "(", "lambda", "c", ":", "c", ".", "getId", "(", ")", ",", "restricted_categories", ")", "# keep correct order of categories", "if", "restricted_category_ids", ":", "categories", "=", "filter", "(", "lambda", "c", ":", "c", ".", "getId", "in", "restricted_category_ids", ",", "categories", ")", "return", "categories" ]
Return all service categories in the right order :param restricted: Client settings restrict categories :type restricted: bool :returns: Category catalog results :rtype: brains
[ "Return", "all", "service", "categories", "in", "the", "right", "order" ]
python
train
ContextLab/hypertools
hypertools/tools/cluster.py
https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/cluster.py#L28-L100
def cluster(x, cluster='KMeans', n_clusters=3, ndims=None, format_data=True):
    """
    Performs clustering analysis and returns a list of cluster labels

    Parameters
    ----------
    x : A Numpy array, Pandas Dataframe or list of arrays/dfs
        The data to be clustered.  You can pass a single array/df or a list.
        If a list is passed, the arrays will be stacked and the clustering
        will be performed across all lists (i.e. not within each list).

    cluster : str or dict
        Model to use to discover clusters.  Supported algorithms are: KMeans,
        MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
        SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
        string, but for finer control of the model parameters, pass as a
        dictionary, e.g. cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
        See scikit-learn specific model docs for details on parameters supported
        for each model.

    n_clusters : int
        Number of clusters to discover. Not required for HDBSCAN.

    format_data : bool
        Whether or not to first call the format_data function (default: True).

    ndims : None
        Deprecated argument.  Please use new analyze function to perform
        combinations of transformations

    Returns
    ----------
    cluster_labels : list
        A list of cluster labels

    """
    if cluster == None:
        return x
    elif (isinstance(cluster, six.string_types) and cluster=='HDBSCAN') or \
        (isinstance(cluster, dict) and cluster['model']=='HDBSCAN'):
        if not _has_hdbscan:
            raise ImportError('HDBSCAN is not installed. Please install hdbscan>=0.8.11')

    if ndims != None:
        warnings.warn('The ndims argument is now deprecated. Ignoring dimensionality reduction step.')

    if format_data:
        x = formatter(x, ppca=True)

    # if reduce is a string, find the corresponding model
    if isinstance(cluster, six.string_types):
        model = models[cluster]
        if cluster != 'HDBSCAN':
            model_params = {
                'n_clusters' : n_clusters
            }
        else:
            model_params = {}
    # if its a dict, use custom params
    elif type(cluster) is dict:
        if isinstance(cluster['model'], six.string_types):
            model = models[cluster['model']]
            model_params = cluster['params']

    # initialize model
    model = model(**model_params)

    # fit the model
    model.fit(np.vstack(x))

    # return the labels
    return list(model.labels_)
[ "def", "cluster", "(", "x", ",", "cluster", "=", "'KMeans'", ",", "n_clusters", "=", "3", ",", "ndims", "=", "None", ",", "format_data", "=", "True", ")", ":", "if", "cluster", "==", "None", ":", "return", "x", "elif", "(", "isinstance", "(", "cluster", ",", "six", ".", "string_types", ")", "and", "cluster", "==", "'HDBSCAN'", ")", "or", "(", "isinstance", "(", "cluster", ",", "dict", ")", "and", "cluster", "[", "'model'", "]", "==", "'HDBSCAN'", ")", ":", "if", "not", "_has_hdbscan", ":", "raise", "ImportError", "(", "'HDBSCAN is not installed. Please install hdbscan>=0.8.11'", ")", "if", "ndims", "!=", "None", ":", "warnings", ".", "warn", "(", "'The ndims argument is now deprecated. Ignoring dimensionality reduction step.'", ")", "if", "format_data", ":", "x", "=", "formatter", "(", "x", ",", "ppca", "=", "True", ")", "# if reduce is a string, find the corresponding model", "if", "isinstance", "(", "cluster", ",", "six", ".", "string_types", ")", ":", "model", "=", "models", "[", "cluster", "]", "if", "cluster", "!=", "'HDBSCAN'", ":", "model_params", "=", "{", "'n_clusters'", ":", "n_clusters", "}", "else", ":", "model_params", "=", "{", "}", "# if its a dict, use custom params", "elif", "type", "(", "cluster", ")", "is", "dict", ":", "if", "isinstance", "(", "cluster", "[", "'model'", "]", ",", "six", ".", "string_types", ")", ":", "model", "=", "models", "[", "cluster", "[", "'model'", "]", "]", "model_params", "=", "cluster", "[", "'params'", "]", "# initialize model", "model", "=", "model", "(", "*", "*", "model_params", ")", "# fit the model", "model", ".", "fit", "(", "np", ".", "vstack", "(", "x", ")", ")", "# return the labels", "return", "list", "(", "model", ".", "labels_", ")" ]
Performs clustering analysis and returns a list of cluster labels

Parameters
----------
x : A Numpy array, Pandas Dataframe or list of arrays/dfs
    The data to be clustered.  You can pass a single array/df or a list.
    If a list is passed, the arrays will be stacked and the clustering
    will be performed across all lists (i.e. not within each list).

cluster : str or dict
    Model to use to discover clusters.  Supported algorithms are: KMeans,
    MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
    SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
    string, but for finer control of the model parameters, pass as a
    dictionary, e.g. cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
    See scikit-learn specific model docs for details on parameters supported
    for each model.

n_clusters : int
    Number of clusters to discover. Not required for HDBSCAN.

format_data : bool
    Whether or not to first call the format_data function (default: True).

ndims : None
    Deprecated argument.  Please use new analyze function to perform
    combinations of transformations

Returns
----------
cluster_labels : list
    A list of cluster labels
[ "Performs", "clustering", "analysis", "and", "returns", "a", "list", "of", "cluster", "labels" ]
python
train
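A minimal runnable sketch of the cluster function above on random data.

import numpy as np
from hypertools.tools.cluster import cluster

# 100 random 3-D points, grouped into 3 clusters with the default KMeans model.
data = np.random.random((100, 3))
labels = cluster(data, cluster='KMeans', n_clusters=3)
print(len(labels), set(labels))  # 100 labels drawn from {0, 1, 2}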
jedie/django-cms-tools
django_cms_tools/fixture_helper/page_utils.py
https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/page_utils.py#L33-L41
def get_public_cms_page_urls(*, language_code): """ :param language_code: e.g.: "en" or "de" :return: Tuple with all public urls in the given language """ pages = Page.objects.public() urls = [page.get_absolute_url(language=language_code) for page in pages] urls.sort() return tuple(urls)
[ "def", "get_public_cms_page_urls", "(", "*", ",", "language_code", ")", ":", "pages", "=", "Page", ".", "objects", ".", "public", "(", ")", "urls", "=", "[", "page", ".", "get_absolute_url", "(", "language", "=", "language_code", ")", "for", "page", "in", "pages", "]", "urls", ".", "sort", "(", ")", "return", "tuple", "(", "urls", ")" ]
:param language_code: e.g.: "en" or "de" :return: Tuple with all public urls in the given language
[ ":", "param", "language_code", ":", "e", ".", "g", ".", ":", "en", "or", "de", ":", "return", ":", "Tuple", "with", "all", "public", "urls", "in", "the", "given", "language" ]
python
train
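A usage sketch, to be run inside a configured Django CMS project (e.g. from a test or fixture); the language code is a placeholder.

from django_cms_tools.fixture_helper.page_utils import get_public_cms_page_urls

urls = get_public_cms_page_urls(language_code='en')
print(urls)  # e.g. ('/en/', '/en/contact/'), depending on the page tree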
LabKey/labkey-api-python
labkey/security.py
https://github.com/LabKey/labkey-api-python/blob/3c8d393384d7cbb2785f8a7f5fe34007b17a76b8/labkey/security.py#L165-L176
def remove_from_role(server_context, role, user_id=None, email=None, container_path=None): """ Remove user/group from security role :param server_context: A LabKey server context. See utils.create_server_context. :param role: (from get_roles) to remove user from :param user_id: to remove permissions from (must supply this or email or both) :param email: to remove permissions from (must supply this or user_id or both) :param container_path: additional project path context :return: """ return __make_security_role_api_request(server_context, 'removeAssignment.api', role, user_id=user_id, email=email, container_path=container_path)
[ "def", "remove_from_role", "(", "server_context", ",", "role", ",", "user_id", "=", "None", ",", "email", "=", "None", ",", "container_path", "=", "None", ")", ":", "return", "__make_security_role_api_request", "(", "server_context", ",", "'removeAssignment.api'", ",", "role", ",", "user_id", "=", "user_id", ",", "email", "=", "email", ",", "container_path", "=", "container_path", ")" ]
Remove user/group from security role :param server_context: A LabKey server context. See utils.create_server_context. :param role: (from get_roles) to remove user from :param user_id: to remove permissions from (must supply this or email or both) :param email: to remove permissions from (must supply this or user_id or both) :param container_path: additional project path context :return:
[ "Remove", "user", "/", "group", "from", "security", "role", ":", "param", "server_context", ":", "A", "LabKey", "server", "context", ".", "See", "utils", ".", "create_server_context", ".", ":", "param", "role", ":", "(", "from", "get_roles", ")", "to", "remove", "user", "from", ":", "param", "user_id", ":", "to", "remove", "permissions", "from", "(", "must", "supply", "this", "or", "email", "or", "both", ")", ":", "param", "email", ":", "to", "remove", "permissions", "from", "(", "must", "supply", "this", "or", "user_id", "or", "both", ")", ":", "param", "container_path", ":", "additional", "project", "path", "context", ":", "return", ":" ]
python
train
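A hedged sketch of revoking a role assignment; the server address, project and user id are placeholders, and the role dict shape (a 'uniqueName' key, as returned by get_roles) is an assumption.

from labkey.utils import create_server_context
from labkey.security import remove_from_role

server_context = create_server_context('localhost:8080', 'MyProject', 'labkey', use_ssl=False)
role = {'uniqueName': 'org.labkey.api.security.roles.EditorRole'}  # assumed shape
remove_from_role(server_context, role, user_id=1001)  # placeholder user id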
renweizhukov/txt2mobi3
txt2mobi3/txt2html3.py
https://github.com/renweizhukov/txt2mobi3/blob/db78e5b57595b7ca87570eda2a00f9509b80b4c6/txt2mobi3/txt2html3.py#L240-L250
def _start_end_of_index(self, book_idx): """ 根据book_idx计算开始和结束的chapter id :param book_idx: :type int: :return: :rtype: """ start = (book_idx - 1) * self._max_chapters end = min(book_idx * self._max_chapters, len(self._chapters)) return (start, end)
[ "def", "_start_end_of_index", "(", "self", ",", "book_idx", ")", ":", "start", "=", "(", "book_idx", "-", "1", ")", "*", "self", ".", "_max_chapters", "end", "=", "min", "(", "book_idx", "*", "self", ".", "_max_chapters", ",", "len", "(", "self", ".", "_chapters", ")", ")", "return", "(", "start", ",", "end", ")" ]
Compute the start and end chapter ids from book_idx
:param book_idx:
:type int:
:return:
:rtype:
[ "Compute", "the", "start", "and", "end", "chapter", "ids", "from", "book_idx", ":", "param", "book_idx", ":", ":", "type", "int", ":", ":", "return", ":", ":", "rtype", ":" ]
python
train
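A worked example of the index arithmetic above, assuming a hypothetical _max_chapters of 1500 and 2000 chapters in total.

max_chapters, total = 1500, 2000  # assumed values
for book_idx in (1, 2):
    start = (book_idx - 1) * max_chapters
    end = min(book_idx * max_chapters, total)
    print(book_idx, (start, end))  # -> 1 (0, 1500), then 2 (1500, 2000)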
google/grr
grr/server/grr_response_server/databases/mem_users.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_users.py#L22-L37
def WriteGRRUser(self, username, password=None, ui_mode=None, canary_mode=None, user_type=None): """Writes user object for a user with a given name.""" u = self.users.setdefault(username, rdf_objects.GRRUser(username=username)) if password is not None: u.password = password if ui_mode is not None: u.ui_mode = ui_mode if canary_mode is not None: u.canary_mode = canary_mode if user_type is not None: u.user_type = user_type
[ "def", "WriteGRRUser", "(", "self", ",", "username", ",", "password", "=", "None", ",", "ui_mode", "=", "None", ",", "canary_mode", "=", "None", ",", "user_type", "=", "None", ")", ":", "u", "=", "self", ".", "users", ".", "setdefault", "(", "username", ",", "rdf_objects", ".", "GRRUser", "(", "username", "=", "username", ")", ")", "if", "password", "is", "not", "None", ":", "u", ".", "password", "=", "password", "if", "ui_mode", "is", "not", "None", ":", "u", ".", "ui_mode", "=", "ui_mode", "if", "canary_mode", "is", "not", "None", ":", "u", ".", "canary_mode", "=", "canary_mode", "if", "user_type", "is", "not", "None", ":", "u", ".", "user_type", "=", "user_type" ]
Writes user object for a user with a given name.
[ "Writes", "user", "object", "for", "a", "user", "with", "a", "given", "name", "." ]
python
train
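A hedged sketch, assuming db is an instance of the in-memory database class this method belongs to.

db.WriteGRRUser('alice')                    # creates the user entry
db.WriteGRRUser('alice', canary_mode=True)  # later calls only touch the fields given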
pypa/pipenv
pipenv/vendor/click/parser.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/parser.py#L25-L73
def _unpack_args(args, nargs_spec): """Given an iterable of arguments and an iterable of nargs specifications, it returns a tuple with all the unpacked arguments at the first index and all remaining arguments as the second. The nargs specification is the number of arguments that should be consumed or `-1` to indicate that this position should eat up all the remainders. Missing items are filled with `None`. """ args = deque(args) nargs_spec = deque(nargs_spec) rv = [] spos = None def _fetch(c): try: if spos is None: return c.popleft() else: return c.pop() except IndexError: return None while nargs_spec: nargs = _fetch(nargs_spec) if nargs == 1: rv.append(_fetch(args)) elif nargs > 1: x = [_fetch(args) for _ in range(nargs)] # If we're reversed, we're pulling in the arguments in reverse, # so we need to turn them around. if spos is not None: x.reverse() rv.append(tuple(x)) elif nargs < 0: if spos is not None: raise TypeError('Cannot have two nargs < 0') spos = len(rv) rv.append(None) # spos is the position of the wildcard (star). If it's not `None`, # we fill it with the remainder. if spos is not None: rv[spos] = tuple(args) args = [] rv[spos + 1:] = reversed(rv[spos + 1:]) return tuple(rv), list(args)
[ "def", "_unpack_args", "(", "args", ",", "nargs_spec", ")", ":", "args", "=", "deque", "(", "args", ")", "nargs_spec", "=", "deque", "(", "nargs_spec", ")", "rv", "=", "[", "]", "spos", "=", "None", "def", "_fetch", "(", "c", ")", ":", "try", ":", "if", "spos", "is", "None", ":", "return", "c", ".", "popleft", "(", ")", "else", ":", "return", "c", ".", "pop", "(", ")", "except", "IndexError", ":", "return", "None", "while", "nargs_spec", ":", "nargs", "=", "_fetch", "(", "nargs_spec", ")", "if", "nargs", "==", "1", ":", "rv", ".", "append", "(", "_fetch", "(", "args", ")", ")", "elif", "nargs", ">", "1", ":", "x", "=", "[", "_fetch", "(", "args", ")", "for", "_", "in", "range", "(", "nargs", ")", "]", "# If we're reversed, we're pulling in the arguments in reverse,", "# so we need to turn them around.", "if", "spos", "is", "not", "None", ":", "x", ".", "reverse", "(", ")", "rv", ".", "append", "(", "tuple", "(", "x", ")", ")", "elif", "nargs", "<", "0", ":", "if", "spos", "is", "not", "None", ":", "raise", "TypeError", "(", "'Cannot have two nargs < 0'", ")", "spos", "=", "len", "(", "rv", ")", "rv", ".", "append", "(", "None", ")", "# spos is the position of the wildcard (star). If it's not `None`,", "# we fill it with the remainder.", "if", "spos", "is", "not", "None", ":", "rv", "[", "spos", "]", "=", "tuple", "(", "args", ")", "args", "=", "[", "]", "rv", "[", "spos", "+", "1", ":", "]", "=", "reversed", "(", "rv", "[", "spos", "+", "1", ":", "]", ")", "return", "tuple", "(", "rv", ")", ",", "list", "(", "args", ")" ]
Given an iterable of arguments and an iterable of nargs specifications, it returns a tuple with all the unpacked arguments at the first index and all remaining arguments as the second. The nargs specification is the number of arguments that should be consumed or `-1` to indicate that this position should eat up all the remainders. Missing items are filled with `None`.
[ "Given", "an", "iterable", "of", "arguments", "and", "an", "iterable", "of", "nargs", "specifications", "it", "returns", "a", "tuple", "with", "all", "the", "unpacked", "arguments", "at", "the", "first", "index", "and", "all", "remaining", "arguments", "as", "the", "second", "." ]
python
train
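A worked example of the wildcard unpacking above: one leading fixed argument, a greedy middle (nargs=-1), then one trailing fixed argument.

unpacked, remaining = _unpack_args(['a', 'b', 'c', 'd'], [1, -1, 1])
print(unpacked)   # ('a', ('b', 'c'), 'd')
print(remaining)  # []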
watson-developer-cloud/python-sdk
ibm_watson/natural_language_understanding_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/natural_language_understanding_v1.py#L3107-L3114
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'tokens') and self.tokens is not None: _dict['tokens'] = self.tokens._to_dict() if hasattr(self, 'sentences') and self.sentences is not None: _dict['sentences'] = self.sentences return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'tokens'", ")", "and", "self", ".", "tokens", "is", "not", "None", ":", "_dict", "[", "'tokens'", "]", "=", "self", ".", "tokens", ".", "_to_dict", "(", ")", "if", "hasattr", "(", "self", ",", "'sentences'", ")", "and", "self", ".", "sentences", "is", "not", "None", ":", "_dict", "[", "'sentences'", "]", "=", "self", ".", "sentences", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
eruvanos/openbrokerapi
openbrokerapi/api.py
https://github.com/eruvanos/openbrokerapi/blob/29d514e5932f2eac27e03995dd41c8cecf40bb10/openbrokerapi/api.py#L49-L317
def get_blueprint(service_brokers: Union[List[ServiceBroker], ServiceBroker],
                  broker_credentials: Union[None, List[BrokerCredentials], BrokerCredentials],
                  logger: logging.Logger) -> Blueprint:
    """
    Returns the blueprint with service broker api.

    :param service_brokers: Services that this broker exposes
    :param broker_credentials: Optional Usernames and passwords that will be required to communicate with service broker
    :param logger: Used for api logs. This will not influence Flasks logging behavior.
    :return: Blueprint to register with Flask app instance
    """
    openbroker = Blueprint('open_broker', __name__)
    service_brokers = ensure_list(service_brokers)

    # Apply filters
    logger.debug("Apply print_request filter for debugging")
    openbroker.before_request(print_request)

    if DISABLE_VERSION_CHECK:
        logger.warning(
            "Minimum API version is not checked, this can cause illegal contracts between service broker and platform!"
        )
    else:
        logger.debug("Apply check_version filter for version %s" % str(MIN_VERSION))
        openbroker.before_request(check_version)

    logger.debug("Apply check_originating_identity filter")
    openbroker.before_request(check_originating_identity)

    if broker_credentials is not None:
        broker_credentials = ensure_list(broker_credentials)
        logger.debug("Apply check_auth filter with {} credentials".format(len(broker_credentials)))
        openbroker.before_request(get_auth_filter(broker_credentials))

    def get_broker_by_id(service_id: str):
        for service in service_brokers:
            if service.service_id() == service_id:
                return service
        raise KeyError('Service {} not found'.format(service_id))

    def add_service_id_to_async_response(response, service_id: str):
        if response.is_async:
            if response.operation is None:
                response.operation = service_id
            else:
                response.operation = ' '.join((service_id, response.operation))

    def extract_authorization_username(request: Request):
        if request.authorization is not None:
            return request.authorization.username
        else:
            return None

    @openbroker.errorhandler(Exception)
    def error_handler(e):
        logger.exception(e)
        return to_json_response(ErrorResponse(
            description=str(e)
        )), HTTPStatus.INTERNAL_SERVER_ERROR

    @openbroker.errorhandler(NotImplementedError)
    def error_handler(e):
        logger.exception(e)
        return to_json_response(ErrorResponse(
            description=str(e)
        )), HTTPStatus.NOT_IMPLEMENTED

    @openbroker.route("/v2/catalog", methods=['GET'])
    def catalog():
        """
        :return: Catalog of broker (List of services)
        """
        return to_json_response(CatalogResponse(list(s.catalog() for s in service_brokers)))

    @openbroker.route("/v2/service_instances/<instance_id>", methods=['PUT'])
    @requires_application_json
    def provision(instance_id):
        try:
            accepts_incomplete = 'true' == request.args.get("accepts_incomplete", 'false')

            provision_details = ProvisionDetails(**json.loads(request.data))
            provision_details.originating_identity = request.originating_identity
            provision_details.authorization_username = extract_authorization_username(request)

            broker = get_broker_by_id(provision_details.service_id)
            if not broker.check_plan_id(provision_details.plan_id):
                raise TypeError('plan_id not found in this service.')
        except (TypeError, KeyError, JSONDecodeError) as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        try:
            result = broker.provision(instance_id, provision_details, accepts_incomplete)
            add_service_id_to_async_response(result, broker.service_id())
        except errors.ErrInstanceAlreadyExists as e:
            logger.exception(e)
            return to_json_response(EmptyResponse()), HTTPStatus.CONFLICT
        except errors.ErrInvalidParameters as e:
            return to_json_response(ErrorResponse('InvalidParameters', str(e))), HTTPStatus.BAD_REQUEST
        except errors.ErrAsyncRequired as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(
                error="AsyncRequired",
                description="This service plan requires client support for asynchronous service operations."
            )), HTTPStatus.UNPROCESSABLE_ENTITY

        if result.state == ProvisionState.IS_ASYNC:
            return to_json_response(ProvisioningResponse(result.dashboard_url, result.operation)), HTTPStatus.ACCEPTED
        elif result.state == ProvisionState.IDENTICAL_ALREADY_EXISTS:
            return to_json_response(ProvisioningResponse(result.dashboard_url, result.operation)), HTTPStatus.OK
        elif result.state == ProvisionState.SUCCESSFUL_CREATED:
            return to_json_response(ProvisioningResponse(result.dashboard_url, result.operation)), HTTPStatus.CREATED
        else:
            raise errors.ServiceException('IllegalState, ProvisioningState unknown.')

    @openbroker.route("/v2/service_instances/<instance_id>", methods=['PATCH'])
    @requires_application_json
    def update(instance_id):
        try:
            accepts_incomplete = 'true' == request.args.get("accepts_incomplete", 'false')

            update_details = UpdateDetails(**json.loads(request.data))
            update_details.originating_identity = request.originating_identity
            update_details.authorization_username = extract_authorization_username(request)

            broker = get_broker_by_id(update_details.service_id)
            if not broker.check_plan_id(update_details.plan_id):
                raise TypeError('plan_id not found in this service.')
        except (TypeError, KeyError, JSONDecodeError) as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        try:
            result = broker.update(instance_id, update_details, accepts_incomplete)
            add_service_id_to_async_response(result, broker.service_id())
        except errors.ErrInvalidParameters as e:
            return to_json_response(ErrorResponse('InvalidParameters', str(e))), HTTPStatus.BAD_REQUEST
        except errors.ErrAsyncRequired as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(
                error="AsyncRequired",
                description="This service plan requires client support for asynchronous service operations."
            )), HTTPStatus.UNPROCESSABLE_ENTITY

        if result.is_async:
            return to_json_response(UpdateResponse(result.operation, result.dashboard_url)), HTTPStatus.ACCEPTED
        else:
            return to_json_response(UpdateResponse(None, result.dashboard_url)), HTTPStatus.OK

    @openbroker.route("/v2/service_instances/<instance_id>/service_bindings/<binding_id>", methods=['PUT'])
    @requires_application_json
    def bind(instance_id, binding_id):
        try:
            binding_details = BindDetails(**json.loads(request.data))
            binding_details.originating_identity = request.originating_identity
            binding_details.authorization_username = extract_authorization_username(request)

            broker = get_broker_by_id(binding_details.service_id)
            if not broker.check_plan_id(binding_details.plan_id):
                raise TypeError('plan_id not found in this service.')
        except (TypeError, KeyError, JSONDecodeError) as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        try:
            result = broker.bind(instance_id, binding_id, binding_details)
        except errors.ErrBindingAlreadyExists as e:
            logger.exception(e)
            return to_json_response(EmptyResponse()), HTTPStatus.CONFLICT
        except errors.ErrAppGuidNotProvided as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(
                error="RequiresApp",
                description="This service supports generation of credentials through binding an application only."
            )), HTTPStatus.UNPROCESSABLE_ENTITY

        response = BindResponse(
            credentials=result.credentials,
            syslog_drain_url=result.syslog_drain_url,
            route_service_url=result.route_service_url,
            volume_mounts=result.volume_mounts
        )
        if result.state == BindState.SUCCESSFUL_BOUND:
            return to_json_response(response), HTTPStatus.CREATED
        elif result.state == BindState.IDENTICAL_ALREADY_EXISTS:
            return to_json_response(response), HTTPStatus.OK
        else:
            raise errors.ServiceException('IllegalState, BindState unknown.')

    @openbroker.route("/v2/service_instances/<instance_id>/service_bindings/<binding_id>", methods=['DELETE'])
    def unbind(instance_id, binding_id):
        try:
            plan_id = request.args["plan_id"]
            service_id = request.args["service_id"]
            unbind_details = UnbindDetails(plan_id, service_id)
            unbind_details.originating_identity = request.originating_identity
            unbind_details.authorization_username = extract_authorization_username(request)

            broker = get_broker_by_id(unbind_details.service_id)
            if not broker.check_plan_id(unbind_details.plan_id):
                raise TypeError('plan_id not found in this service.')
        except (TypeError, KeyError) as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        try:
            broker.unbind(instance_id, binding_id, unbind_details)
        except errors.ErrBindingDoesNotExist as e:
            logger.exception(e)
            return to_json_response(EmptyResponse()), HTTPStatus.GONE

        return to_json_response(EmptyResponse()), HTTPStatus.OK

    @openbroker.route("/v2/service_instances/<instance_id>", methods=['DELETE'])
    def deprovision(instance_id):
        try:
            plan_id = request.args["plan_id"]
            service_id = request.args["service_id"]
            accepts_incomplete = 'true' == request.args.get("accepts_incomplete", 'false')

            deprovision_details = DeprovisionDetails(plan_id, service_id)
            deprovision_details.originating_identity = request.originating_identity
            deprovision_details.authorization_username = extract_authorization_username(request)

            broker = get_broker_by_id(deprovision_details.service_id)
            if not broker.check_plan_id(deprovision_details.plan_id):
                raise TypeError('plan_id not found in this service.')
        except (TypeError, KeyError) as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        try:
            result = broker.deprovision(instance_id, deprovision_details, accepts_incomplete)
            add_service_id_to_async_response(result, broker.service_id())
        except errors.ErrInstanceDoesNotExist as e:
            logger.exception(e)
            return to_json_response(EmptyResponse()), HTTPStatus.GONE
        except errors.ErrAsyncRequired as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(
                error="AsyncRequired",
                description="This service plan requires client support for asynchronous service operations."
            )), HTTPStatus.UNPROCESSABLE_ENTITY

        if result.is_async:
            return to_json_response(DeprovisionResponse(result.operation)), HTTPStatus.ACCEPTED
        else:
            return to_json_response(EmptyResponse()), HTTPStatus.OK

    @openbroker.route("/v2/service_instances/<instance_id>/last_operation", methods=['GET'])
    def last_operation(instance_id):
        # Not required
        # service_id = request.args.get("service_id", None)
        # plan_id = request.args.get("plan_id", None)

        operation_data = request.args.get("operation", None)
        data = operation_data.split(' ', maxsplit=1)
        service_id = data[0]
        if len(data) == 2:
            operation_data = data[1]
        else:
            operation_data = None

        try:
            broker = get_broker_by_id(service_id)
        except KeyError as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        result = broker.last_operation(instance_id, operation_data)

        return to_json_response(LastOperationResponse(result.state, result.description)), HTTPStatus.OK

    return openbroker
[ "def", "get_blueprint", "(", "service_brokers", ":", "Union", "[", "List", "[", "ServiceBroker", "]", ",", "ServiceBroker", "]", ",", "broker_credentials", ":", "Union", "[", "None", ",", "List", "[", "BrokerCredentials", "]", ",", "BrokerCredentials", "]", ",", "logger", ":", "logging", ".", "Logger", ")", "->", "Blueprint", ":", "openbroker", "=", "Blueprint", "(", "'open_broker'", ",", "__name__", ")", "service_brokers", "=", "ensure_list", "(", "service_brokers", ")", "# Apply filters", "logger", ".", "debug", "(", "\"Apply print_request filter for debugging\"", ")", "openbroker", ".", "before_request", "(", "print_request", ")", "if", "DISABLE_VERSION_CHECK", ":", "logger", ".", "warning", "(", "\"Minimum API version is not checked, this can cause illegal contracts between service broker and platform!\"", ")", "else", ":", "logger", ".", "debug", "(", "\"Apply check_version filter for version %s\"", "%", "str", "(", "MIN_VERSION", ")", ")", "openbroker", ".", "before_request", "(", "check_version", ")", "logger", ".", "debug", "(", "\"Apply check_originating_identity filter\"", ")", "openbroker", ".", "before_request", "(", "check_originating_identity", ")", "if", "broker_credentials", "is", "not", "None", ":", "broker_credentials", "=", "ensure_list", "(", "broker_credentials", ")", "logger", ".", "debug", "(", "\"Apply check_auth filter with {} credentials\"", ".", "format", "(", "len", "(", "broker_credentials", ")", ")", ")", "openbroker", ".", "before_request", "(", "get_auth_filter", "(", "broker_credentials", ")", ")", "def", "get_broker_by_id", "(", "service_id", ":", "str", ")", ":", "for", "service", "in", "service_brokers", ":", "if", "service", ".", "service_id", "(", ")", "==", "service_id", ":", "return", "service", "raise", "KeyError", "(", "'Service {} not found'", ".", "format", "(", "service_id", ")", ")", "def", "add_service_id_to_async_response", "(", "response", ",", "service_id", ":", "str", ")", ":", "if", "response", ".", "is_async", ":", "if", "response", ".", "operation", "is", "None", ":", "response", ".", "operation", "=", "service_id", "else", ":", "response", ".", "operation", "=", "' '", ".", "join", "(", "(", "service_id", ",", "response", ".", "operation", ")", ")", "def", "extract_authorization_username", "(", "request", ":", "Request", ")", ":", "if", "request", ".", "authorization", "is", "not", "None", ":", "return", "request", ".", "authorization", ".", "username", "else", ":", "return", "None", "@", "openbroker", ".", "errorhandler", "(", "Exception", ")", "def", "error_handler", "(", "e", ")", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "description", "=", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "INTERNAL_SERVER_ERROR", "@", "openbroker", ".", "errorhandler", "(", "NotImplementedError", ")", "def", "error_handler", "(", "e", ")", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "description", "=", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "NOT_IMPLEMENTED", "@", "openbroker", ".", "route", "(", "\"/v2/catalog\"", ",", "methods", "=", "[", "'GET'", "]", ")", "def", "catalog", "(", ")", ":", "\"\"\"\n :return: Catalog of broker (List of services)\n \"\"\"", "return", "to_json_response", "(", "CatalogResponse", "(", "list", "(", "s", ".", "catalog", "(", ")", "for", "s", "in", "service_brokers", ")", ")", ")", "@", "openbroker", ".", "route", "(", "\"/v2/service_instances/<instance_id>\"", ",", 
"methods", "=", "[", "'PUT'", "]", ")", "@", "requires_application_json", "def", "provision", "(", "instance_id", ")", ":", "try", ":", "accepts_incomplete", "=", "'true'", "==", "request", ".", "args", ".", "get", "(", "\"accepts_incomplete\"", ",", "'false'", ")", "provision_details", "=", "ProvisionDetails", "(", "*", "*", "json", ".", "loads", "(", "request", ".", "data", ")", ")", "provision_details", ".", "originating_identity", "=", "request", ".", "originating_identity", "provision_details", ".", "authorization_username", "=", "extract_authorization_username", "(", "request", ")", "broker", "=", "get_broker_by_id", "(", "provision_details", ".", "service_id", ")", "if", "not", "broker", ".", "check_plan_id", "(", "provision_details", ".", "plan_id", ")", ":", "raise", "TypeError", "(", "'plan_id not found in this service.'", ")", "except", "(", "TypeError", ",", "KeyError", ",", "JSONDecodeError", ")", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "description", "=", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "BAD_REQUEST", "try", ":", "result", "=", "broker", ".", "provision", "(", "instance_id", ",", "provision_details", ",", "accepts_incomplete", ")", "add_service_id_to_async_response", "(", "result", ",", "broker", ".", "service_id", "(", ")", ")", "except", "errors", ".", "ErrInstanceAlreadyExists", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "EmptyResponse", "(", ")", ")", ",", "HTTPStatus", ".", "CONFLICT", "except", "errors", ".", "ErrInvalidParameters", "as", "e", ":", "return", "to_json_response", "(", "ErrorResponse", "(", "'InvalidParameters'", ",", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "BAD_REQUEST", "except", "errors", ".", "ErrAsyncRequired", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "error", "=", "\"AsyncRequired\"", ",", "description", "=", "\"This service plan requires client support for asynchronous service operations.\"", ")", ")", ",", "HTTPStatus", ".", "UNPROCESSABLE_ENTITY", "if", "result", ".", "state", "==", "ProvisionState", ".", "IS_ASYNC", ":", "return", "to_json_response", "(", "ProvisioningResponse", "(", "result", ".", "dashboard_url", ",", "result", ".", "operation", ")", ")", ",", "HTTPStatus", ".", "ACCEPTED", "elif", "result", ".", "state", "==", "ProvisionState", ".", "IDENTICAL_ALREADY_EXISTS", ":", "return", "to_json_response", "(", "ProvisioningResponse", "(", "result", ".", "dashboard_url", ",", "result", ".", "operation", ")", ")", ",", "HTTPStatus", ".", "OK", "elif", "result", ".", "state", "==", "ProvisionState", ".", "SUCCESSFUL_CREATED", ":", "return", "to_json_response", "(", "ProvisioningResponse", "(", "result", ".", "dashboard_url", ",", "result", ".", "operation", ")", ")", ",", "HTTPStatus", ".", "CREATED", "else", ":", "raise", "errors", ".", "ServiceException", "(", "'IllegalState, ProvisioningState unknown.'", ")", "@", "openbroker", ".", "route", "(", "\"/v2/service_instances/<instance_id>\"", ",", "methods", "=", "[", "'PATCH'", "]", ")", "@", "requires_application_json", "def", "update", "(", "instance_id", ")", ":", "try", ":", "accepts_incomplete", "=", "'true'", "==", "request", ".", "args", ".", "get", "(", "\"accepts_incomplete\"", ",", "'false'", ")", "update_details", "=", "UpdateDetails", "(", "*", "*", "json", ".", "loads", "(", "request", ".", "data", ")", ")", "update_details", ".", 
"originating_identity", "=", "request", ".", "originating_identity", "update_details", ".", "authorization_username", "=", "extract_authorization_username", "(", "request", ")", "broker", "=", "get_broker_by_id", "(", "update_details", ".", "service_id", ")", "if", "not", "broker", ".", "check_plan_id", "(", "update_details", ".", "plan_id", ")", ":", "raise", "TypeError", "(", "'plan_id not found in this service.'", ")", "except", "(", "TypeError", ",", "KeyError", ",", "JSONDecodeError", ")", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "description", "=", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "BAD_REQUEST", "try", ":", "result", "=", "broker", ".", "update", "(", "instance_id", ",", "update_details", ",", "accepts_incomplete", ")", "add_service_id_to_async_response", "(", "result", ",", "broker", ".", "service_id", "(", ")", ")", "except", "errors", ".", "ErrInvalidParameters", "as", "e", ":", "return", "to_json_response", "(", "ErrorResponse", "(", "'InvalidParameters'", ",", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "BAD_REQUEST", "except", "errors", ".", "ErrAsyncRequired", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "error", "=", "\"AsyncRequired\"", ",", "description", "=", "\"This service plan requires client support for asynchronous service operations.\"", ")", ")", ",", "HTTPStatus", ".", "UNPROCESSABLE_ENTITY", "if", "result", ".", "is_async", ":", "return", "to_json_response", "(", "UpdateResponse", "(", "result", ".", "operation", ",", "result", ".", "dashboard_url", ")", ")", ",", "HTTPStatus", ".", "ACCEPTED", "else", ":", "return", "to_json_response", "(", "UpdateResponse", "(", "None", ",", "result", ".", "dashboard_url", ")", ")", ",", "HTTPStatus", ".", "OK", "@", "openbroker", ".", "route", "(", "\"/v2/service_instances/<instance_id>/service_bindings/<binding_id>\"", ",", "methods", "=", "[", "'PUT'", "]", ")", "@", "requires_application_json", "def", "bind", "(", "instance_id", ",", "binding_id", ")", ":", "try", ":", "binding_details", "=", "BindDetails", "(", "*", "*", "json", ".", "loads", "(", "request", ".", "data", ")", ")", "binding_details", ".", "originating_identity", "=", "request", ".", "originating_identity", "binding_details", ".", "authorization_username", "=", "extract_authorization_username", "(", "request", ")", "broker", "=", "get_broker_by_id", "(", "binding_details", ".", "service_id", ")", "if", "not", "broker", ".", "check_plan_id", "(", "binding_details", ".", "plan_id", ")", ":", "raise", "TypeError", "(", "'plan_id not found in this service.'", ")", "except", "(", "TypeError", ",", "KeyError", ",", "JSONDecodeError", ")", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "description", "=", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "BAD_REQUEST", "try", ":", "result", "=", "broker", ".", "bind", "(", "instance_id", ",", "binding_id", ",", "binding_details", ")", "except", "errors", ".", "ErrBindingAlreadyExists", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "EmptyResponse", "(", ")", ")", ",", "HTTPStatus", ".", "CONFLICT", "except", "errors", ".", "ErrAppGuidNotProvided", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "error", "=", "\"RequiresApp\"", ",", "description", "=", 
"\"This service supports generation of credentials through binding an application only.\"", ")", ")", ",", "HTTPStatus", ".", "UNPROCESSABLE_ENTITY", "response", "=", "BindResponse", "(", "credentials", "=", "result", ".", "credentials", ",", "syslog_drain_url", "=", "result", ".", "syslog_drain_url", ",", "route_service_url", "=", "result", ".", "route_service_url", ",", "volume_mounts", "=", "result", ".", "volume_mounts", ")", "if", "result", ".", "state", "==", "BindState", ".", "SUCCESSFUL_BOUND", ":", "return", "to_json_response", "(", "response", ")", ",", "HTTPStatus", ".", "CREATED", "elif", "result", ".", "state", "==", "BindState", ".", "IDENTICAL_ALREADY_EXISTS", ":", "return", "to_json_response", "(", "response", ")", ",", "HTTPStatus", ".", "OK", "else", ":", "raise", "errors", ".", "ServiceException", "(", "'IllegalState, BindState unknown.'", ")", "@", "openbroker", ".", "route", "(", "\"/v2/service_instances/<instance_id>/service_bindings/<binding_id>\"", ",", "methods", "=", "[", "'DELETE'", "]", ")", "def", "unbind", "(", "instance_id", ",", "binding_id", ")", ":", "try", ":", "plan_id", "=", "request", ".", "args", "[", "\"plan_id\"", "]", "service_id", "=", "request", ".", "args", "[", "\"service_id\"", "]", "unbind_details", "=", "UnbindDetails", "(", "plan_id", ",", "service_id", ")", "unbind_details", ".", "originating_identity", "=", "request", ".", "originating_identity", "unbind_details", ".", "authorization_username", "=", "extract_authorization_username", "(", "request", ")", "broker", "=", "get_broker_by_id", "(", "unbind_details", ".", "service_id", ")", "if", "not", "broker", ".", "check_plan_id", "(", "unbind_details", ".", "plan_id", ")", ":", "raise", "TypeError", "(", "'plan_id not found in this service.'", ")", "except", "(", "TypeError", ",", "KeyError", ")", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "description", "=", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "BAD_REQUEST", "try", ":", "broker", ".", "unbind", "(", "instance_id", ",", "binding_id", ",", "unbind_details", ")", "except", "errors", ".", "ErrBindingDoesNotExist", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "EmptyResponse", "(", ")", ")", ",", "HTTPStatus", ".", "GONE", "return", "to_json_response", "(", "EmptyResponse", "(", ")", ")", ",", "HTTPStatus", ".", "OK", "@", "openbroker", ".", "route", "(", "\"/v2/service_instances/<instance_id>\"", ",", "methods", "=", "[", "'DELETE'", "]", ")", "def", "deprovision", "(", "instance_id", ")", ":", "try", ":", "plan_id", "=", "request", ".", "args", "[", "\"plan_id\"", "]", "service_id", "=", "request", ".", "args", "[", "\"service_id\"", "]", "accepts_incomplete", "=", "'true'", "==", "request", ".", "args", ".", "get", "(", "\"accepts_incomplete\"", ",", "'false'", ")", "deprovision_details", "=", "DeprovisionDetails", "(", "plan_id", ",", "service_id", ")", "deprovision_details", ".", "originating_identity", "=", "request", ".", "originating_identity", "deprovision_details", ".", "authorization_username", "=", "extract_authorization_username", "(", "request", ")", "broker", "=", "get_broker_by_id", "(", "deprovision_details", ".", "service_id", ")", "if", "not", "broker", ".", "check_plan_id", "(", "deprovision_details", ".", "plan_id", ")", ":", "raise", "TypeError", "(", "'plan_id not found in this service.'", ")", "except", "(", "TypeError", ",", "KeyError", ")", "as", "e", ":", "logger", ".", 
"exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "description", "=", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "BAD_REQUEST", "try", ":", "result", "=", "broker", ".", "deprovision", "(", "instance_id", ",", "deprovision_details", ",", "accepts_incomplete", ")", "add_service_id_to_async_response", "(", "result", ",", "broker", ".", "service_id", "(", ")", ")", "except", "errors", ".", "ErrInstanceDoesNotExist", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "EmptyResponse", "(", ")", ")", ",", "HTTPStatus", ".", "GONE", "except", "errors", ".", "ErrAsyncRequired", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "error", "=", "\"AsyncRequired\"", ",", "description", "=", "\"This service plan requires client support for asynchronous service operations.\"", ")", ")", ",", "HTTPStatus", ".", "UNPROCESSABLE_ENTITY", "if", "result", ".", "is_async", ":", "return", "to_json_response", "(", "DeprovisionResponse", "(", "result", ".", "operation", ")", ")", ",", "HTTPStatus", ".", "ACCEPTED", "else", ":", "return", "to_json_response", "(", "EmptyResponse", "(", ")", ")", ",", "HTTPStatus", ".", "OK", "@", "openbroker", ".", "route", "(", "\"/v2/service_instances/<instance_id>/last_operation\"", ",", "methods", "=", "[", "'GET'", "]", ")", "def", "last_operation", "(", "instance_id", ")", ":", "# Not required", "# service_id = request.args.get(\"service_id\", None)", "# plan_id = request.args.get(\"plan_id\", None)", "operation_data", "=", "request", ".", "args", ".", "get", "(", "\"operation\"", ",", "None", ")", "data", "=", "operation_data", ".", "split", "(", "' '", ",", "maxsplit", "=", "1", ")", "service_id", "=", "data", "[", "0", "]", "if", "len", "(", "data", ")", "==", "2", ":", "operation_data", "=", "data", "[", "1", "]", "else", ":", "operation_data", "=", "None", "try", ":", "broker", "=", "get_broker_by_id", "(", "service_id", ")", "except", "KeyError", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "return", "to_json_response", "(", "ErrorResponse", "(", "description", "=", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "BAD_REQUEST", "result", "=", "broker", ".", "last_operation", "(", "instance_id", ",", "operation_data", ")", "return", "to_json_response", "(", "LastOperationResponse", "(", "result", ".", "state", ",", "result", ".", "description", ")", ")", ",", "HTTPStatus", ".", "OK", "return", "openbroker" ]
Returns the blueprint with the service broker API. :param service_brokers: Services that this broker exposes :param broker_credentials: Optional usernames and passwords that will be required to communicate with the service broker :param logger: Used for API logs. This will not influence Flask's logging behavior. :return: Blueprint to register with the Flask app instance
[ "Returns", "the", "blueprint", "with", "service", "broker", "api", "." ]
python
train
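The blueprint returned by `get_blueprint` in the record above is a plain Flask blueprint, so wiring it into an application is a single registration call. A minimal sketch follows; the import paths and `MyBroker` are assumptions, since only the blueprint factory itself appears in this record.

```python
# Hedged sketch: assumes openbrokerapi exposes get_blueprint and
# BrokerCredentials at these paths; MyBroker is a hypothetical
# ServiceBroker subclass implementing catalog/provision/bind/etc.
import logging

from flask import Flask
from openbrokerapi.api import BrokerCredentials, get_blueprint

from my_broker import MyBroker  # hypothetical module with the broker impl

logger = logging.getLogger("open_broker")
app = Flask(__name__)

# One set of basic-auth credentials that platforms must present.
credentials = BrokerCredentials("broker-user", "broker-pass")

app.register_blueprint(get_blueprint(MyBroker(), credentials, logger))
app.run(port=5000)
```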
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L3350-L3361
def cublasZtpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Solve complex triangular-packed system with one right-hand side.

    """

    status = _libcublas.cublasZtpsv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
[ "def", "cublasZtpsv", "(", "handle", ",", "uplo", ",", "trans", ",", "diag", ",", "n", ",", "AP", ",", "x", ",", "incx", ")", ":", "status", "=", "_libcublas", ".", "cublasZtpsv_v2", "(", "handle", ",", "_CUBLAS_FILL_MODE", "[", "uplo", "]", ",", "_CUBLAS_OP", "[", "trans", "]", ",", "_CUBLAS_DIAG", "[", "diag", "]", ",", "n", ",", "int", "(", "AP", ")", ",", "int", "(", "x", ")", ",", "incx", ")", "cublasCheckStatus", "(", "status", ")" ]
Solve complex triangular-packed system with one right-hand side.
[ "Solve", "complex", "triangular", "-", "packed", "system", "with", "one", "right", "-", "hand", "size", "." ]
python
train
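Since `cublasZtpsv` in the record above only forwards raw device pointers to `cublasZtpsv_v2`, a caller has to create the cuBLAS handle and allocate the packed storage itself. The sketch below follows the scikit-cuda-style conventions this wrapper mirrors; the `cublasCreate`/`cublasDestroy` helpers and the single-character mode keys are assumptions about the surrounding module.

```python
# Hedged sketch: assumes the module also wraps cublasCreate/cublasDestroy
# and that _CUBLAS_FILL_MODE/_CUBLAS_OP/_CUBLAS_DIAG accept 'u'/'n' keys,
# as in scikit-cuda, which this wrapper mirrors.
import numpy as np
import pycuda.autoinit  # noqa: F401  (creates a CUDA context)
import pycuda.gpuarray as gpuarray

from hebel.pycuda_ops import cublas

n = 3
# Upper-triangular A in column-major packed form: n*(n+1)/2 entries,
# ordered A(0,0), A(0,1), A(1,1), A(0,2), A(1,2), A(2,2).
ap = gpuarray.to_gpu(np.array([1, 2, 4, 3, 5, 6], dtype=np.complex128))
x = gpuarray.to_gpu(np.array([1, 1, 1], dtype=np.complex128))

handle = cublas.cublasCreate()
# Solves A @ x = b in place: x holds b on entry and the solution on exit.
cublas.cublasZtpsv(handle, 'u', 'n', 'n', n, ap.gpudata, x.gpudata, 1)
cublas.cublasDestroy(handle)
print(x.get())
```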
etalab/cada
cada/commands.py
https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L147-L175
def load(patterns, full_reindex): ''' Load one or more CADA CSV files matching patterns ''' header('Loading CSV files') for pattern in patterns: for filename in iglob(pattern): echo('Loading {}'.format(white(filename))) with open(filename) as f: reader = csv.reader(f) # Skip header reader.next() for idx, row in enumerate(reader, 1): try: advice = csv.from_row(row) skipped = False if not full_reindex: index(advice) echo('.' if idx % 50 else white(idx), nl=False) except Exception: echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False) skipped = True if skipped: echo(white('{}(s)'.format(idx)) if idx % 50 else '') else: echo(white(idx) if idx % 50 else '') success('Processed {0} rows'.format(idx)) if full_reindex: reindex()
[ "def", "load", "(", "patterns", ",", "full_reindex", ")", ":", "header", "(", "'Loading CSV files'", ")", "for", "pattern", "in", "patterns", ":", "for", "filename", "in", "iglob", "(", "pattern", ")", ":", "echo", "(", "'Loading {}'", ".", "format", "(", "white", "(", "filename", ")", ")", ")", "with", "open", "(", "filename", ")", "as", "f", ":", "reader", "=", "csv", ".", "reader", "(", "f", ")", "# Skip header", "reader", ".", "next", "(", ")", "for", "idx", ",", "row", "in", "enumerate", "(", "reader", ",", "1", ")", ":", "try", ":", "advice", "=", "csv", ".", "from_row", "(", "row", ")", "skipped", "=", "False", "if", "not", "full_reindex", ":", "index", "(", "advice", ")", "echo", "(", "'.'", "if", "idx", "%", "50", "else", "white", "(", "idx", ")", ",", "nl", "=", "False", ")", "except", "Exception", ":", "echo", "(", "cyan", "(", "'s'", ")", "if", "idx", "%", "50", "else", "white", "(", "'{0}(s)'", ".", "format", "(", "idx", ")", ")", ",", "nl", "=", "False", ")", "skipped", "=", "True", "if", "skipped", ":", "echo", "(", "white", "(", "'{}(s)'", ".", "format", "(", "idx", ")", ")", "if", "idx", "%", "50", "else", "''", ")", "else", ":", "echo", "(", "white", "(", "idx", ")", "if", "idx", "%", "50", "else", "''", ")", "success", "(", "'Processed {0} rows'", ".", "format", "(", "idx", ")", ")", "if", "full_reindex", ":", "reindex", "(", ")" ]
Load one or more CADA CSV files matching patterns
[ "Load", "one", "or", "more", "CADA", "CSV", "files", "matching", "patterns" ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/external_config.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/external_config.py#L250-L259
def columns(self): """List[:class:`~.external_config.BigtableColumn`]: Lists of columns that should be exposed as individual fields. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).bigtableOptions.columnFamilies.columns https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies.columns """ prop = self._properties.get("columns", []) return [BigtableColumn.from_api_repr(col) for col in prop]
[ "def", "columns", "(", "self", ")", ":", "prop", "=", "self", ".", "_properties", ".", "get", "(", "\"columns\"", ",", "[", "]", ")", "return", "[", "BigtableColumn", ".", "from_api_repr", "(", "col", ")", "for", "col", "in", "prop", "]" ]
List[:class:`~.external_config.BigtableColumn`]: Lists of columns that should be exposed as individual fields. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).bigtableOptions.columnFamilies.columns https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies.columns
[ "List", "[", ":", "class", ":", "~", ".", "external_config", ".", "BigtableColumn", "]", ":", "Lists", "of", "columns", "that", "should", "be", "exposed", "as", "individual", "fields", "." ]
python
train
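Because the `columns` property in the record above just maps `from_api_repr` over the raw `columns` list, it can be exercised by building a column family straight from a REST-style dict. A sketch, where the field names follow the REST docs linked in the docstring and the `from_api_repr` constructor on the family class is an assumption by symmetry with `BigtableColumn`:

```python
# Hedged sketch: dict keys follow the BigQuery REST docs; from_api_repr
# on BigtableColumnFamily is assumed to mirror BigtableColumn's.
from google.cloud.bigquery.external_config import BigtableColumnFamily

family = BigtableColumnFamily.from_api_repr({
    "familyId": "stats",
    "columns": [
        {"qualifierString": "clicks", "type": "INTEGER"},
        {"qualifierString": "views", "type": "INTEGER"},
    ],
})
for column in family.columns:  # each element is a BigtableColumn
    print(column.qualifier_string, column.type_)
```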
juju/charm-helpers
charmhelpers/contrib/openstack/ha/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/ha/utils.py#L130-L186
def generate_ha_relation_data(service, extra_settings=None): """ Generate relation data for ha relation Based on configuration options and unit interfaces, generate a json encoded dict of relation data items for the hacluster relation, providing configuration for DNS HA or VIP's + haproxy clone sets. Example of supplying additional settings:: COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' AGENT_CA_PARAMS = 'op monitor interval="5s"' ha_console_settings = { 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}) generate_ha_relation_data('nova', extra_settings=ha_console_settings) @param service: Name of the service being configured @param extra_settings: Dict of additional resource data @returns dict: json encoded data for use with relation_set """ _haproxy_res = 'res_{}_haproxy'.format(service) _relation_data = { 'resources': { _haproxy_res: 'lsb:haproxy', }, 'resource_params': { _haproxy_res: 'op monitor interval="5s"' }, 'init_services': { _haproxy_res: 'haproxy' }, 'clones': { 'cl_{}_haproxy'.format(service): _haproxy_res }, } if extra_settings: for k, v in extra_settings.items(): if _relation_data.get(k): _relation_data[k].update(v) else: _relation_data[k] = v if config('dns-ha'): update_hacluster_dns_ha(service, _relation_data) else: update_hacluster_vip(service, _relation_data) return { 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) for k, v in _relation_data.items() if v }
[ "def", "generate_ha_relation_data", "(", "service", ",", "extra_settings", "=", "None", ")", ":", "_haproxy_res", "=", "'res_{}_haproxy'", ".", "format", "(", "service", ")", "_relation_data", "=", "{", "'resources'", ":", "{", "_haproxy_res", ":", "'lsb:haproxy'", ",", "}", ",", "'resource_params'", ":", "{", "_haproxy_res", ":", "'op monitor interval=\"5s\"'", "}", ",", "'init_services'", ":", "{", "_haproxy_res", ":", "'haproxy'", "}", ",", "'clones'", ":", "{", "'cl_{}_haproxy'", ".", "format", "(", "service", ")", ":", "_haproxy_res", "}", ",", "}", "if", "extra_settings", ":", "for", "k", ",", "v", "in", "extra_settings", ".", "items", "(", ")", ":", "if", "_relation_data", ".", "get", "(", "k", ")", ":", "_relation_data", "[", "k", "]", ".", "update", "(", "v", ")", "else", ":", "_relation_data", "[", "k", "]", "=", "v", "if", "config", "(", "'dns-ha'", ")", ":", "update_hacluster_dns_ha", "(", "service", ",", "_relation_data", ")", "else", ":", "update_hacluster_vip", "(", "service", ",", "_relation_data", ")", "return", "{", "'json_{}'", ".", "format", "(", "k", ")", ":", "json", ".", "dumps", "(", "v", ",", "*", "*", "JSON_ENCODE_OPTIONS", ")", "for", "k", ",", "v", "in", "_relation_data", ".", "items", "(", ")", "if", "v", "}" ]
Generate relation data for ha relation Based on configuration options and unit interfaces, generate a json encoded dict of relation data items for the hacluster relation, providing configuration for DNS HA or VIP's + haproxy clone sets. Example of supplying additional settings:: COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' AGENT_CA_PARAMS = 'op monitor interval="5s"' ha_console_settings = { 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}) generate_ha_relation_data('nova', extra_settings=ha_console_settings) @param service: Name of the service being configured @param extra_settings: Dict of additional resource data @returns dict: json encoded data for use with relation_set
[ "Generate", "relation", "data", "for", "ha", "relation" ]
python
train
wonambi-python/wonambi
wonambi/ioeeg/egimff.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/egimff.py#L399-L424
def xml2dict(root):
    """Use functions instead of a class and remove namespaces, based on:
    http://stackoverflow.com/questions/2148119
    """
    output = {}
    if root.items():
        output.update(dict(root.items()))

    for element in root:
        if element:
            if len(element) == 1 or element[0].tag != element[1].tag:
                one_dict = xml2dict(element)
            else:
                one_dict = {ns(element[0].tag): xml2list(element)}

            if element.items():
                one_dict.update(dict(element.items()))
            output.update({ns(element.tag): one_dict})

        elif element.items():
            output.update({ns(element.tag): dict(element.items())})

        else:
            output.update({ns(element.tag): element.text})

    return output
[ "def", "xml2dict", "(", "root", ")", ":", "output", "=", "{", "}", "if", "root", ".", "items", "(", ")", ":", "output", ".", "update", "(", "dict", "(", "root", ".", "items", "(", ")", ")", ")", "for", "element", "in", "root", ":", "if", "element", ":", "if", "len", "(", "element", ")", "==", "1", "or", "element", "[", "0", "]", ".", "tag", "!=", "element", "[", "1", "]", ".", "tag", ":", "one_dict", "=", "xml2dict", "(", "element", ")", "else", ":", "one_dict", "=", "{", "ns", "(", "element", "[", "0", "]", ".", "tag", ")", ":", "xml2list", "(", "element", ")", "}", "if", "element", ".", "items", "(", ")", ":", "one_dict", ".", "update", "(", "dict", "(", "element", ".", "items", "(", ")", ")", ")", "output", ".", "update", "(", "{", "ns", "(", "element", ".", "tag", ")", ":", "one_dict", "}", ")", "elif", "element", ".", "items", "(", ")", ":", "output", ".", "update", "(", "{", "ns", "(", "element", ".", "tag", ")", ":", "dict", "(", "element", ".", "items", "(", ")", ")", "}", ")", "else", ":", "output", ".", "update", "(", "{", "ns", "(", "element", ".", "tag", ")", ":", "element", ".", "text", "}", ")", "return", "output" ]
Use functions instead of a class and remove namespaces, based on: http://stackoverflow.com/questions/2148119
[ "Use", "functions", "instead", "of", "Class", "and", "remove", "namespace", "based", "on", ":", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "2148119" ]
python
train
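The recursion in the record above merges attributes into the output dict and descends into child elements, so a small ElementTree document shows its shape directly. A sketch, assuming the module-level `ns()` namespace-stripping helper that `xml2dict` relies on ships in the same module:

```python
# Hedged sketch: xml2dict depends on a sibling ns() helper in the same
# module to strip namespaces; the import path follows the repo layout.
from xml.etree.ElementTree import fromstring

from wonambi.ioeeg.egimff import xml2dict

root = fromstring(
    '<dataInfo version="1">'
    '<calibration><channel n="1">0.02</channel></calibration>'
    '</dataInfo>'
)
info = xml2dict(root)
print(info["version"])       # attributes are merged into the dict: '1'
print(info["calibration"])   # child elements become nested dicts
```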
openstack/proliantutils
proliantutils/redfish/redfish.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/redfish.py#L1023-L1040
def inject_nmi(self): """Inject NMI, Non Maskable Interrupt. Inject NMI (Non Maskable Interrupt) for a node immediately. :raises: IloError, on an error from iLO """ sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID) if sushy_system.power_state != sushy.SYSTEM_POWER_STATE_ON: raise exception.IloError("Server is not in powered on state.") try: sushy_system.reset_system(sushy.RESET_NMI) except sushy.exceptions.SushyError as e: msg = (self._('The Redfish controller failed to inject nmi to ' 'server. Error %(error)s') % {'error': str(e)}) LOG.debug(msg) raise exception.IloError(msg)
[ "def", "inject_nmi", "(", "self", ")", ":", "sushy_system", "=", "self", ".", "_get_sushy_system", "(", "PROLIANT_SYSTEM_ID", ")", "if", "sushy_system", ".", "power_state", "!=", "sushy", ".", "SYSTEM_POWER_STATE_ON", ":", "raise", "exception", ".", "IloError", "(", "\"Server is not in powered on state.\"", ")", "try", ":", "sushy_system", ".", "reset_system", "(", "sushy", ".", "RESET_NMI", ")", "except", "sushy", ".", "exceptions", ".", "SushyError", "as", "e", ":", "msg", "=", "(", "self", ".", "_", "(", "'The Redfish controller failed to inject nmi to '", "'server. Error %(error)s'", ")", "%", "{", "'error'", ":", "str", "(", "e", ")", "}", ")", "LOG", ".", "debug", "(", "msg", ")", "raise", "exception", ".", "IloError", "(", "msg", ")" ]
Inject NMI, Non Maskable Interrupt. Inject NMI (Non Maskable Interrupt) for a node immediately. :raises: IloError, on an error from iLO
[ "Inject", "NMI", "Non", "Maskable", "Interrupt", "." ]
python
train
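A caller only needs a connected Redfish operations object, since the method in the record above checks the power state itself before sending the NMI reset. A sketch, where the constructor arguments are assumptions following proliantutils' usual (address, username, password) convention:

```python
# Hedged sketch: constructor signature assumed from proliantutils'
# (address, username, password) convention for its Redfish client.
from proliantutils import exception
from proliantutils.redfish.redfish import RedfishOperations

ilo = RedfishOperations('https://10.0.0.5', 'Administrator', 'password')
try:
    ilo.inject_nmi()
except exception.IloError as err:
    # Raised when the server is powered off or the controller refuses.
    print(err)
```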
yunojuno/elasticsearch-django
elasticsearch_django/index.py
https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/index.py#L31-L35
def delete_index(index): """Delete index entirely (removes all documents and mapping).""" logger.info("Deleting search index: '%s'", index) client = get_client() return client.indices.delete(index=index)
[ "def", "delete_index", "(", "index", ")", ":", "logger", ".", "info", "(", "\"Deleting search index: '%s'\"", ",", "index", ")", "client", "=", "get_client", "(", ")", "return", "client", ".", "indices", ".", "delete", "(", "index", "=", "index", ")" ]
Delete index entirely (removes all documents and mapping).
[ "Delete", "index", "entirely", "(", "removes", "all", "documents", "and", "mapping", ")", "." ]
python
train
zeromake/aiosqlite3
aiosqlite3/sa/transaction.py
https://github.com/zeromake/aiosqlite3/blob/1a74a062507e2df8f833a70885e69dca0ab3e7e7/aiosqlite3/sa/transaction.py#L50-L73
def close(self): """ Close this transaction. If this transaction is the base transaction in a begin/commit nesting, the transaction will rollback(). Otherwise, the method returns. This is used to cancel a Transaction without affecting the scope of an enclosing transaction. """ if not self._connection or not self._parent: return if not self._parent._is_active: # pragma: no cover self._connection = None # self._parent = None return if self._parent is self: yield from self.rollback() else: self._is_active = False self._connection = None self._parent = None
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "_connection", "or", "not", "self", ".", "_parent", ":", "return", "if", "not", "self", ".", "_parent", ".", "_is_active", ":", "# pragma: no cover", "self", ".", "_connection", "=", "None", "# self._parent = None", "return", "if", "self", ".", "_parent", "is", "self", ":", "yield", "from", "self", ".", "rollback", "(", ")", "else", ":", "self", ".", "_is_active", "=", "False", "self", ".", "_connection", "=", "None", "self", ".", "_parent", "=", "None" ]
Close this transaction. If this transaction is the base transaction in a begin/commit nesting, the transaction will rollback(). Otherwise, the method returns. This is used to cancel a Transaction without affecting the scope of an enclosing transaction.
[ "Close", "this", "transaction", "." ]
python
train
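Since `close()` in the record above is written with `yield from`, it is an old-style coroutine and must itself be awaited; on the base transaction it delegates to `rollback()`. A sketch of that flow, where the engine and connection API names are assumptions modeled on the aiopg-style `sa` layer this package mirrors:

```python
# Hedged sketch: create_engine/acquire names are assumptions based on the
# aiopg-style sa layer; close() on the outermost transaction rolls back.
import asyncio

from aiosqlite3.sa import create_engine  # module path is an assumption

@asyncio.coroutine
def main():
    engine = yield from create_engine('example.db')
    conn = yield from engine.acquire()
    trans = yield from conn.begin()
    yield from conn.execute("CREATE TABLE IF NOT EXISTS t (x INTEGER)")
    yield from trans.close()  # base transaction: rollback(), then detach
    engine.close()

asyncio.get_event_loop().run_until_complete(main())
```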
softlayer/softlayer-python
SoftLayer/managers/firewall.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/firewall.py#L46-L61
def get_standard_package(self, server_id, is_virt=True): """Retrieves the standard firewall package for the virtual server. :param int server_id: The ID of the server to create the firewall for :param bool is_virt: True if the ID provided is for a virtual server, False for a server :returns: A dictionary containing the standard virtual server firewall package """ firewall_port_speed = self._get_fwl_port_speed(server_id, is_virt) _value = "%s%s" % (firewall_port_speed, "Mbps Hardware Firewall") _filter = {'items': {'description': utils.query_filter(_value)}} return self.prod_pkg.getItems(id=0, filter=_filter)
[ "def", "get_standard_package", "(", "self", ",", "server_id", ",", "is_virt", "=", "True", ")", ":", "firewall_port_speed", "=", "self", ".", "_get_fwl_port_speed", "(", "server_id", ",", "is_virt", ")", "_value", "=", "\"%s%s\"", "%", "(", "firewall_port_speed", ",", "\"Mbps Hardware Firewall\"", ")", "_filter", "=", "{", "'items'", ":", "{", "'description'", ":", "utils", ".", "query_filter", "(", "_value", ")", "}", "}", "return", "self", ".", "prod_pkg", ".", "getItems", "(", "id", "=", "0", ",", "filter", "=", "_filter", ")" ]
Retrieves the standard firewall package for the virtual server. :param int server_id: The ID of the server to create the firewall for :param bool is_virt: True if the ID provided is for a virtual server, False for a server :returns: A dictionary containing the standard virtual server firewall package
[ "Retrieves", "the", "standard", "firewall", "package", "for", "the", "virtual", "server", "." ]
python
train
TC01/calcpkg
calcrepo/repo.py
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L185-L198
def downloadFileFromUrl(self, url): """Given a URL, download the specified file""" fullurl = self.baseUrl + url try: urlobj = urllib2.urlopen(fullurl) contents = urlobj.read() except urllib2.HTTPError, e: self.printd("HTTP error:", e.code, url) return None except urllib2.URLError, e: self.printd("URL error:", e.code, url) return None self.printd("Fetched '%s' (size %d bytes)" % (fullurl, len(contents))) return contents
[ "def", "downloadFileFromUrl", "(", "self", ",", "url", ")", ":", "fullurl", "=", "self", ".", "baseUrl", "+", "url", "try", ":", "urlobj", "=", "urllib2", ".", "urlopen", "(", "fullurl", ")", "contents", "=", "urlobj", ".", "read", "(", ")", "except", "urllib2", ".", "HTTPError", ",", "e", ":", "self", ".", "printd", "(", "\"HTTP error:\"", ",", "e", ".", "code", ",", "url", ")", "return", "None", "except", "urllib2", ".", "URLError", ",", "e", ":", "self", ".", "printd", "(", "\"URL error:\"", ",", "e", ".", "code", ",", "url", ")", "return", "None", "self", ".", "printd", "(", "\"Fetched '%s' (size %d bytes)\"", "%", "(", "fullurl", ",", "len", "(", "contents", ")", ")", ")", "return", "contents" ]
Given a URL, download the specified file
[ "Given", "a", "URL", "download", "the", "specified", "file" ]
python
train
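The method in the record above is Python 2 code (`urllib2`, comma-style `except`), and its `URLError` branch reads `e.code`, which that exception type does not define. A Python 3 sketch of the same logic, with `e.reason` in the URL-error branch and plain prints standing in for the repo's `printd()` logger:

```python
# Hedged sketch: a Python 3 rendering of the same download helper.
import urllib.error
import urllib.request

def download_file_from_url(base_url, url):
    full_url = base_url + url
    try:
        with urllib.request.urlopen(full_url) as response:
            contents = response.read()
    except urllib.error.HTTPError as e:      # must come before URLError
        print("HTTP error:", e.code, url)
        return None
    except urllib.error.URLError as e:
        print("URL error:", e.reason, url)   # URLError has .reason, not .code
        return None
    print("Fetched '%s' (size %d bytes)" % (full_url, len(contents)))
    return contents
```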
yvesalexandre/bandicoot
bandicoot/spatial.py
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/spatial.py#L115-L130
def frequent_antennas(positions, percentage=0.8):
    """
    The number of locations that account for 80% of the locations where
    the user was. Percentage can be supplied as a decimal (e.g., .8 for
    default 80%).
    """
    location_count = Counter(list(map(str, positions)))

    target = math.ceil(sum(location_count.values()) * percentage)
    location_sort = sorted(list(location_count.keys()),
                           key=lambda x: location_count[x])

    while target > 0 and len(location_sort) > 0:
        location_id = location_sort.pop()
        target -= location_count[location_id]

    return len(location_count) - len(location_sort)
[ "def", "frequent_antennas", "(", "positions", ",", "percentage", "=", "0.8", ")", ":", "location_count", "=", "Counter", "(", "list", "(", "map", "(", "str", ",", "positions", ")", ")", ")", "target", "=", "math", ".", "ceil", "(", "sum", "(", "location_count", ".", "values", "(", ")", ")", "*", "percentage", ")", "location_sort", "=", "sorted", "(", "list", "(", "location_count", ".", "keys", "(", ")", ")", ",", "key", "=", "lambda", "x", ":", "location_count", "[", "x", "]", ")", "while", "target", ">", "0", "and", "len", "(", "location_sort", ")", ">", "0", ":", "location_id", "=", "location_sort", ".", "pop", "(", ")", "target", "-=", "location_count", "[", "location_id", "]", "return", "len", "(", "location_count", ")", "-", "len", "(", "location_sort", ")" ]
The number of locations that account for 80% of the locations where the user was. Percentage can be supplied as a decimal (e.g., .8 for default 80%).
[ "The", "number", "of", "location", "that", "account", "for", "80%", "of", "the", "locations", "where", "the", "user", "was", ".", "Percentage", "can", "be", "supplied", "as", "a", "decimal", "(", "e", ".", "g", ".", ".", "8", "for", "default", "80%", ")", "." ]
python
train
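`frequent_antennas` in the record above is a pure function of its inputs (it only calls `str()` on each position), so it can be checked with plain strings even though bandicoot normally passes Position objects. A quick sketch:

```python
# Hedged sketch: real callers pass bandicoot Position objects; plain
# strings work here because the function only uses str() for counting.
from bandicoot.spatial import frequent_antennas

positions = ['a', 'a', 'a', 'a', 'b', 'c']   # 6 visits, 'a' dominates
# target = ceil(6 * 0.8) = 5 visits: 'a' (4 visits) plus one more antenna.
print(frequent_antennas(positions))          # -> 2
# target = ceil(6 * 0.5) = 3 visits: 'a' alone already covers it.
print(frequent_antennas(positions, percentage=0.5))  # -> 1
```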
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L3337-L3343
def listGetString(self, doc, inLine): """Build the string equivalent to the text contained in the Node list made of TEXTs and ENTITY_REFs """ if doc is None: doc__o = None else: doc__o = doc._o ret = libxml2mod.xmlNodeListGetString(doc__o, self._o, inLine) return ret
[ "def", "listGetString", "(", "self", ",", "doc", ",", "inLine", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlNodeListGetString", "(", "doc__o", ",", "self", ".", "_o", ",", "inLine", ")", "return", "ret" ]
Build the string equivalent to the text contained in the Node list made of TEXTs and ENTITY_REFs
[ "Build", "the", "string", "equivalent", "to", "the", "text", "contained", "in", "the", "Node", "list", "made", "of", "TEXTs", "and", "ENTITY_REFs" ]
python
train
brocade/pynos
pynos/versions/base/yang/brocade_fcoe.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_fcoe.py#L131-L144
def fcoe_fcoe_map_fcoe_map_fabric_map_fcoe_map_fabric_map_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe") fcoe_map = ET.SubElement(fcoe, "fcoe-map") fcoe_map_name_key = ET.SubElement(fcoe_map, "fcoe-map-name") fcoe_map_name_key.text = kwargs.pop('fcoe_map_name') fcoe_map_fabric_map = ET.SubElement(fcoe_map, "fcoe-map-fabric-map") fcoe_map_fabric_map_name = ET.SubElement(fcoe_map_fabric_map, "fcoe-map-fabric-map-name") fcoe_map_fabric_map_name.text = kwargs.pop('fcoe_map_fabric_map_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fcoe_fcoe_map_fcoe_map_fabric_map_fcoe_map_fabric_map_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe", "=", "ET", ".", "SubElement", "(", "config", ",", "\"fcoe\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-fcoe\"", ")", "fcoe_map", "=", "ET", ".", "SubElement", "(", "fcoe", ",", "\"fcoe-map\"", ")", "fcoe_map_name_key", "=", "ET", ".", "SubElement", "(", "fcoe_map", ",", "\"fcoe-map-name\"", ")", "fcoe_map_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_map_name'", ")", "fcoe_map_fabric_map", "=", "ET", ".", "SubElement", "(", "fcoe_map", ",", "\"fcoe-map-fabric-map\"", ")", "fcoe_map_fabric_map_name", "=", "ET", ".", "SubElement", "(", "fcoe_map_fabric_map", ",", "\"fcoe-map-fabric-map-name\"", ")", "fcoe_map_fabric_map_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_map_fabric_map_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1475-L1478
def p_namedblock(self, p): 'namedblock : BEGIN COLON ID namedblock_statements END' p[0] = Block(p[4], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_namedblock", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Block", "(", "p", "[", "4", "]", ",", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
namedblock : BEGIN COLON ID namedblock_statements END
[ "namedblock", ":", "BEGIN", "COLON", "ID", "namedblock_statements", "END" ]
python
train
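The grammar rule in the record above reduces a `begin : name ... end` region to a `Block` AST node carrying the label. A sketch of driving the full parser over a source file containing such a block; `parse()` consumes a list of file paths (not source strings) and returns the AST plus preprocessor directives:

```python
# Hedged sketch: writes a tiny module to a temp file because Pyverilog's
# parse() consumes file paths rather than source strings.
import os
import tempfile

from pyverilog.vparser.parser import parse

src = """module top;
  initial begin : init_block
    $display("hello");
  end
endmodule
"""
with tempfile.NamedTemporaryFile('w', suffix='.v', delete=False) as f:
    f.write(src)
    path = f.name

ast, directives = parse([path])
ast.show()          # the labeled region appears as Block(..., 'init_block')
os.remove(path)
```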
bitesofcode/projexui
projexui/widgets/xcolortreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcolortreewidget.py#L79-L86
def setName( self, name ):
    """
    Sets the name for this color item to the inputted name.

    :param      name | <str>
    """
    self._name = projex.text.nativestring(name)
    self.setText(0, ' '.join(projex.text.words(self._name)))
[ "def", "setName", "(", "self", ",", "name", ")", ":", "self", ".", "_name", "=", "projex", ".", "text", ".", "nativestring", "(", "name", ")", "self", ".", "setText", "(", "0", ",", "' '", ".", "join", "(", "projex", ".", "text", ".", "words", "(", "self", ".", "_name", ")", ")", ")" ]
Sets the name for this color item to the inputted name. :param name | <str>
[ "Sets", "the", "name", "for", "this", "color", "item", "to", "the", "inputed", "name", ".", ":", "param", "name", "|", "<str", ">" ]
python
train
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L267-L279
def get_logs(self, project_name, spider_name):
    """
    Get URLs of scrapyd log files by project name and spider name
    :param project_name: the project name
    :param spider_name: the spider name
    :return: two lists: the log file names and the log file URLs
    """
    url, method = self.command_set['logs'][0] + project_name + '/' + spider_name + '/', self.command_set['logs'][1]
    response = http_utils.request(url, method_type=method)
    html_parser = ScrapydLogsPageHTMLParser()
    html_parser.feed(response)
    html_parser.clean_enter_sign()
    return html_parser.result, [url + x for x in html_parser.result]
[ "def", "get_logs", "(", "self", ",", "project_name", ",", "spider_name", ")", ":", "url", ",", "method", "=", "self", ".", "command_set", "[", "'logs'", "]", "[", "0", "]", "+", "project_name", "+", "'/'", "+", "spider_name", "+", "'/'", ",", "self", ".", "command_set", "[", "'logs'", "]", "[", "1", "]", "response", "=", "http_utils", ".", "request", "(", "url", ",", "method_type", "=", "method", ")", "html_parser", "=", "ScrapydLogsPageHTMLParser", "(", ")", "html_parser", ".", "feed", "(", "response", ")", "html_parser", ".", "clean_enter_sign", "(", ")", "return", "html_parser", ".", "result", ",", "[", "url", "+", "x", "for", "x", "in", "html_parser", ".", "result", "]" ]
Get URLs of scrapyd log files by project name and spider name :param project_name: the project name :param spider_name: the spider name :return: two lists: the log file names and the log file URLs
[ "Get", "urls", "that", "scrapyd", "logs", "file", "by", "project", "name", "and", "spider", "name", ":", "param", "project_name", ":", "the", "project", "name", ":", "param", "spider_name", ":", "the", "spider", "name", ":", "return", ":", "two", "list", "of", "the", "logs", "file", "name", "and", "logs", "file", "url" ]
python
train
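Since `get_logs` in the record above just concatenates the configured base URL with `/logs/<project>/<spider>/` and scrapes the listing page, exercising it needs only a reachable scrapyd instance. A sketch, where the constructor argument is an assumption about how the agent's `command_set` URLs are configured:

```python
# Hedged sketch: assumes the agent is constructed from scrapyd's address,
# from which its command_set URLs are derived.
from fish_dashboard.scrapyd.scrapyd_agent import ScrapydAgent

agent = ScrapydAgent('http://localhost:6800')
names, urls = agent.get_logs('my_project', 'my_spider')
for name, url in zip(names, urls):
    print(name, '->', url)
```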
bkeating/python-payflowpro
payflowpro/classes.py
https://github.com/bkeating/python-payflowpro/blob/e74fc85135f171caa28277196fdcf7c7481ff298/payflowpro/classes.py#L359-L409
def parse_parameters(payflowpro_response_data): """ Parses a set of Payflow Pro response parameter name and value pairs into a list of PayflowProObjects, and returns a tuple containing the object list and a dictionary containing any unconsumed data. The first item in the object list will always be the Response object, and the RecurringPayments object (if any) will be last. The presence of any unconsumed data in the resulting dictionary probably indicates an error or oversight in the PayflowProObject definitions. """ def build_class(klass, unconsumed_data): known_att_names_set = set(klass.base_fields.keys()) available_atts_set = known_att_names_set.intersection(unconsumed_data) if available_atts_set: available_atts = dict() for name in available_atts_set: available_atts[name] = unconsumed_data[name] del unconsumed_data[name] return klass(**available_atts) return None unconsumed_data = payflowpro_response_data.copy() # Parse the response data first response = build_class(Response, unconsumed_data) result_objects = [response] # Parse the remaining data for klass in object.__class__.__subclasses__(PayflowProObject): obj = build_class(klass, unconsumed_data) if obj: result_objects.append(obj) # Special handling of RecurringPayments p_count = 1 payments = [] while ("p_result%d" % p_count) in unconsumed_data: payments.append(RecurringPayment( p_result = unconsumed_data.pop("p_result%d" % p_count, None), p_pnref = unconsumed_data.pop("p_pnref%d" % p_count, None), p_transtate = unconsumed_data.pop("p_transtate%d" % p_count, None), p_tender = unconsumed_data.pop("p_tender%d" % p_count, None), p_transtime = unconsumed_data.pop("p_transtime%d" % p_count, None), p_amt = unconsumed_data.pop("p_amt%d" % p_count, None))) p_count += 1 if payments: result_objects.append(RecurringPayments(payments=payments)) return (result_objects, unconsumed_data,)
[ "def", "parse_parameters", "(", "payflowpro_response_data", ")", ":", "def", "build_class", "(", "klass", ",", "unconsumed_data", ")", ":", "known_att_names_set", "=", "set", "(", "klass", ".", "base_fields", ".", "keys", "(", ")", ")", "available_atts_set", "=", "known_att_names_set", ".", "intersection", "(", "unconsumed_data", ")", "if", "available_atts_set", ":", "available_atts", "=", "dict", "(", ")", "for", "name", "in", "available_atts_set", ":", "available_atts", "[", "name", "]", "=", "unconsumed_data", "[", "name", "]", "del", "unconsumed_data", "[", "name", "]", "return", "klass", "(", "*", "*", "available_atts", ")", "return", "None", "unconsumed_data", "=", "payflowpro_response_data", ".", "copy", "(", ")", "# Parse the response data first", "response", "=", "build_class", "(", "Response", ",", "unconsumed_data", ")", "result_objects", "=", "[", "response", "]", "# Parse the remaining data", "for", "klass", "in", "object", ".", "__class__", ".", "__subclasses__", "(", "PayflowProObject", ")", ":", "obj", "=", "build_class", "(", "klass", ",", "unconsumed_data", ")", "if", "obj", ":", "result_objects", ".", "append", "(", "obj", ")", "# Special handling of RecurringPayments", "p_count", "=", "1", "payments", "=", "[", "]", "while", "(", "\"p_result%d\"", "%", "p_count", ")", "in", "unconsumed_data", ":", "payments", ".", "append", "(", "RecurringPayment", "(", "p_result", "=", "unconsumed_data", ".", "pop", "(", "\"p_result%d\"", "%", "p_count", ",", "None", ")", ",", "p_pnref", "=", "unconsumed_data", ".", "pop", "(", "\"p_pnref%d\"", "%", "p_count", ",", "None", ")", ",", "p_transtate", "=", "unconsumed_data", ".", "pop", "(", "\"p_transtate%d\"", "%", "p_count", ",", "None", ")", ",", "p_tender", "=", "unconsumed_data", ".", "pop", "(", "\"p_tender%d\"", "%", "p_count", ",", "None", ")", ",", "p_transtime", "=", "unconsumed_data", ".", "pop", "(", "\"p_transtime%d\"", "%", "p_count", ",", "None", ")", ",", "p_amt", "=", "unconsumed_data", ".", "pop", "(", "\"p_amt%d\"", "%", "p_count", ",", "None", ")", ")", ")", "p_count", "+=", "1", "if", "payments", ":", "result_objects", ".", "append", "(", "RecurringPayments", "(", "payments", "=", "payments", ")", ")", "return", "(", "result_objects", ",", "unconsumed_data", ",", ")" ]
Parses a set of Payflow Pro response parameter name and value pairs into a list of PayflowProObjects, and returns a tuple containing the object list and a dictionary containing any unconsumed data. The first item in the object list will always be the Response object, and the RecurringPayments object (if any) will be last. The presence of any unconsumed data in the resulting dictionary probably indicates an error or oversight in the PayflowProObject definitions.
[ "Parses", "a", "set", "of", "Payflow", "Pro", "response", "parameter", "name", "and", "value", "pairs", "into", "a", "list", "of", "PayflowProObjects", "and", "returns", "a", "tuple", "containing", "the", "object", "list", "and", "a", "dictionary", "containing", "any", "unconsumed", "data", ".", "The", "first", "item", "in", "the", "object", "list", "will", "always", "be", "the", "Response", "object", "and", "the", "RecurringPayments", "object", "(", "if", "any", ")", "will", "be", "last", "." ]
python
train
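Because `parse_parameters` in the record above consumes a plain dict of already-decoded name/value pairs, it can be demonstrated without touching the gateway. A sketch; the exact keys that `Response` and the payment objects consume are assumptions mirroring Payflow Pro's documented parameter names:

```python
# Hedged sketch: keys follow Payflow Pro's documented names and the
# p_*N recurring-payment convention handled in the loop above.
from payflowpro.classes import parse_parameters

data = {
    'result': '0',
    'pnref': 'V19A2C1B04E8',
    'respmsg': 'Approved',
    'p_result1': '0',
    'p_pnref1': 'V19A0B654321',
    'p_amt1': '10.00',
}
objects, unconsumed = parse_parameters(data)
print(objects[0])     # the Response object is always first in the list
print(unconsumed)     # leftovers here would point at a missing field def
```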
upsight/doctor
doctor/docs/base.py
https://github.com/upsight/doctor/blob/2cf1d433f6f1aa1355644b449a757c0660793cdd/doctor/docs/base.py#L642-L664
def run(self): # pragma: no cover """Called by Sphinx to generate documentation for this directive.""" if self.directive_name is None: raise NotImplementedError('directive_name must be implemented by ' 'subclasses of BaseDirective') env, state = self._prepare_env() state.doc_names.add(env.docname) directive_name = '<{}>'.format(self.directive_name) node = nodes.section() node.document = self.state.document result = ViewList() for line in self._render_rst(): if line.startswith(HEADING_TOKEN): # Remove heading token, then append 2 lines, one with # the heading text, and the other with the dashes to # underline the heading. heading = line[HEADING_TOKEN_LENGTH:] result.append(heading, directive_name) result.append('-' * len(heading), directive_name) else: result.append(line, directive_name) nested_parse_with_titles(self.state, result, node) return node.children
[ "def", "run", "(", "self", ")", ":", "# pragma: no cover", "if", "self", ".", "directive_name", "is", "None", ":", "raise", "NotImplementedError", "(", "'directive_name must be implemented by '", "'subclasses of BaseDirective'", ")", "env", ",", "state", "=", "self", ".", "_prepare_env", "(", ")", "state", ".", "doc_names", ".", "add", "(", "env", ".", "docname", ")", "directive_name", "=", "'<{}>'", ".", "format", "(", "self", ".", "directive_name", ")", "node", "=", "nodes", ".", "section", "(", ")", "node", ".", "document", "=", "self", ".", "state", ".", "document", "result", "=", "ViewList", "(", ")", "for", "line", "in", "self", ".", "_render_rst", "(", ")", ":", "if", "line", ".", "startswith", "(", "HEADING_TOKEN", ")", ":", "# Remove heading token, then append 2 lines, one with", "# the heading text, and the other with the dashes to", "# underline the heading.", "heading", "=", "line", "[", "HEADING_TOKEN_LENGTH", ":", "]", "result", ".", "append", "(", "heading", ",", "directive_name", ")", "result", ".", "append", "(", "'-'", "*", "len", "(", "heading", ")", ",", "directive_name", ")", "else", ":", "result", ".", "append", "(", "line", ",", "directive_name", ")", "nested_parse_with_titles", "(", "self", ".", "state", ",", "result", ",", "node", ")", "return", "node", ".", "children" ]
Called by Sphinx to generate documentation for this directive.
[ "Called", "by", "Sphinx", "to", "generate", "documentation", "for", "this", "directive", "." ]
python
train
SHTOOLS/SHTOOLS
pyshtools/shclasses/slepian.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/slepian.py#L444-L521
def spectra(self, alpha=None, nmax=None, convention='power', unit='per_l',
            base=10.):
    """
    Return the spectra of one or more Slepian functions.

    Usage
    -----
    spectra = x.spectra([alpha, nmax, convention, unit, base])

    Returns
    -------
    spectra : ndarray, shape (lmax+1, nmax)
        A matrix with each column containing the spectrum of a Slepian
        function, and where the functions are arranged with increasing
        concentration factors. If alpha is set, only a single vector is
        returned, whereas if nmax is set, the first nmax spectra are
        returned.

    Parameters
    ----------
    alpha : int, optional, default = None
        The function number of the output spectrum, where alpha=0
        corresponds to the best concentrated Slepian function.
    nmax : int, optional, default = None
        The number of best concentrated Slepian function power spectra
        to return.
    convention : str, optional, default = 'power'
        The type of spectrum to return: 'power' for power spectrum,
        'energy' for energy spectrum, and 'l2norm' for the l2 norm
        spectrum.
    unit : str, optional, default = 'per_l'
        If 'per_l', return the total contribution to the spectrum for each
        spherical harmonic degree l. If 'per_lm', return the average
        contribution to the spectrum for each coefficient at spherical
        harmonic degree l. If 'per_dlogl', return the spectrum per log
        interval dlog_a(l).
    base : float, optional, default = 10.
        The logarithm base when calculating the 'per_dlogl' spectrum.

    Description
    -----------
    This function returns either the power spectrum, energy spectrum,
    or l2-norm spectrum of one or more of the Slepian functions. Total
    power is defined as the integral of the function squared over all
    space, divided by the area the function spans. If the mean of the
    function is zero, this is equivalent to the variance of the function.
    The total energy is the integral of the function squared over all
    space and is 4pi times the total power. The l2-norm is the sum of the
    magnitude of the coefficients squared.

    The output spectrum can be expressed using one of three units. 'per_l'
    returns the contribution to the total spectrum from all angular orders
    at degree l. 'per_lm' returns the average contribution to the total
    spectrum from a single coefficient at degree l. The 'per_lm' spectrum
    is equal to the 'per_l' spectrum divided by (2l+1). 'per_dlogl'
    returns the contribution to the total spectrum from all angular orders
    over an infinitesimal logarithmic degree band. The contribution in the
    band dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the
    base, and where spectrum(l, 'per_dlogl') is equal to
    spectrum(l, 'per_l')*l*log(a).
    """
    if alpha is None:
        if nmax is None:
            nmax = self.nmax
        spectra = _np.zeros((self.lmax+1, nmax))

        for iwin in range(nmax):
            coeffs = self.to_array(iwin)
            spectra[:, iwin] = _spectrum(coeffs, normalization='4pi',
                                         convention=convention, unit=unit,
                                         base=base)
    else:
        coeffs = self.to_array(alpha)
        spectra = _spectrum(coeffs, normalization='4pi',
                            convention=convention, unit=unit, base=base)

    return spectra
[ "def", "spectra", "(", "self", ",", "alpha", "=", "None", ",", "nmax", "=", "None", ",", "convention", "=", "'power'", ",", "unit", "=", "'per_l'", ",", "base", "=", "10.", ")", ":", "if", "alpha", "is", "None", ":", "if", "nmax", "is", "None", ":", "nmax", "=", "self", ".", "nmax", "spectra", "=", "_np", ".", "zeros", "(", "(", "self", ".", "lmax", "+", "1", ",", "nmax", ")", ")", "for", "iwin", "in", "range", "(", "nmax", ")", ":", "coeffs", "=", "self", ".", "to_array", "(", "iwin", ")", "spectra", "[", ":", ",", "iwin", "]", "=", "_spectrum", "(", "coeffs", ",", "normalization", "=", "'4pi'", ",", "convention", "=", "convention", ",", "unit", "=", "unit", ",", "base", "=", "base", ")", "else", ":", "coeffs", "=", "self", ".", "to_array", "(", "alpha", ")", "spectra", "=", "_spectrum", "(", "coeffs", ",", "normalization", "=", "'4pi'", ",", "convention", "=", "convention", ",", "unit", "=", "unit", ",", "base", "=", "base", ")", "return", "spectra" ]
Return the spectra of one or more Slepian functions.

Usage
-----
spectra = x.spectra([alpha, nmax, convention, unit, base])

Returns
-------
spectra : ndarray, shape (lmax+1, nmax)
    A matrix with each column containing the spectrum of a Slepian
    function, and where the functions are arranged with increasing
    concentration factors. If alpha is set, only a single vector is
    returned, whereas if nmax is set, the first nmax spectra are
    returned.

Parameters
----------
alpha : int, optional, default = None
    The function number of the output spectrum, where alpha=0
    corresponds to the best concentrated Slepian function.
nmax : int, optional, default = None
    The number of best concentrated Slepian function power spectra
    to return.
convention : str, optional, default = 'power'
    The type of spectrum to return: 'power' for power spectrum,
    'energy' for energy spectrum, and 'l2norm' for the l2 norm
    spectrum.
unit : str, optional, default = 'per_l'
    If 'per_l', return the total contribution to the spectrum for each
    spherical harmonic degree l. If 'per_lm', return the average
    contribution to the spectrum for each coefficient at spherical
    harmonic degree l. If 'per_dlogl', return the spectrum per log
    interval dlog_a(l).
base : float, optional, default = 10.
    The logarithm base when calculating the 'per_dlogl' spectrum.

Description
-----------
This function returns either the power spectrum, energy spectrum, or
l2-norm spectrum of one or more of the Slepian functions. Total power is
defined as the integral of the function squared over all space, divided
by the area the function spans. If the mean of the function is zero,
this is equivalent to the variance of the function. The total energy is
the integral of the function squared over all space and is 4pi times the
total power. The l2-norm is the sum of the magnitude of the coefficients
squared.

The output spectrum can be expressed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l. The 'per_lm' spectrum
is equal to the 'per_l' spectrum divided by (2l+1). 'per_dlogl' returns
the contribution to the total spectrum from all angular orders over an
infinitesimal logarithmic degree band. The contribution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl') is equal to
spectrum(l, 'per_l')*l*log(a).
[ "Return", "the", "spectra", "of", "one", "or", "more", "Slepian", "functions", "." ]
python
train
lorien/grab
grab/base.py
https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L749-L768
def detect_request_method(self):
    """
    Analyze request config and find which request method will be used.

    Returns the request method in upper case.

    This method is sometimes needed when the `process_config`
    method has not been called yet.
    """

    method = self.config['method']
    if method:
        method = method.upper()
    else:
        if self.config['post'] or self.config['multipart_post']:
            method = 'POST'
        else:
            method = 'GET'
    return method
[ "def", "detect_request_method", "(", "self", ")", ":", "method", "=", "self", ".", "config", "[", "'method'", "]", "if", "method", ":", "method", "=", "method", ".", "upper", "(", ")", "else", ":", "if", "self", ".", "config", "[", "'post'", "]", "or", "self", ".", "config", "[", "'multipart_post'", "]", ":", "method", "=", "'POST'", "else", ":", "method", "=", "'GET'", "return", "method" ]
Analyze request config and find which request method will be used.

Returns the request method in upper case.

This method is sometimes needed when the `process_config`
method has not been called yet.
[ "Analyze", "request", "config", "and", "find", "which", "request", "method", "will", "be", "used", "." ]
python
train
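The decision tree in the record above looks only at `config['method']`, `config['post']`, and `config['multipart_post']`, which makes it easy to trace with a bare Grab object. A sketch, assuming the standard `setup()` configuration call and no network access:

```python
# Hedged sketch: setup() populates the same config keys the method
# inspects; nothing is sent over the network here.
from grab import Grab

g = Grab()
g.setup(post={'q': 'test'})
print(g.detect_request_method())   # 'POST': post data, no explicit method

g.setup(method='put', post=None)
print(g.detect_request_method())   # 'PUT': explicit method wins, uppercased
```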
skyfielders/python-skyfield
skyfield/positionlib.py
https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/positionlib.py#L245-L248
def galactic_xyz(self): """Compute galactic coordinates (x, y, z)""" vector = _GALACTIC.dot(self.position.au) return Distance(vector)
[ "def", "galactic_xyz", "(", "self", ")", ":", "vector", "=", "_GALACTIC", ".", "dot", "(", "self", ".", "position", ".", "au", ")", "return", "Distance", "(", "vector", ")" ]
Compute galactic coordinates (x, y, z)
[ "Compute", "galactic", "coordinates", "(", "x", "y", "z", ")" ]
python
train
decryptus/sonicprobe
sonicprobe/libs/anysql.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/anysql.py#L204-L220
def querymany(self, sql_query, columns, seq_of_parameters):
    """
    Same as .query() but calls the .executemany() method of the
    underlying DBAPI2.0 cursor instead of .execute()
    """
    tmp_query = self.__preparequery(sql_query, columns)

    if self.__methods[METHOD_MODULE].paramstyle == "qmark":
        raise NotImplementedError, "qmark isn't fully supported"

    try:
        self.__dbapi2_cursor.executemany(tmp_query, seq_of_parameters)
    except Exception, e:
        self.__connection.reconnect(tmp_query, self.__log_reconnect)
        self.__dbapi2_cursor = self.__connection._get_raw_cursor()
        self.__dbapi2_cursor.executemany(tmp_query, seq_of_parameters)
[ "def", "querymany", "(", "self", ",", "sql_query", ",", "columns", ",", "seq_of_parameters", ")", ":", "tmp_query", "=", "self", ".", "__preparequery", "(", "sql_query", ",", "columns", ")", "if", "self", ".", "__methods", "[", "METHOD_MODULE", "]", ".", "paramstyle", "==", "\"qmark\"", ":", "raise", "NotImplementedError", ",", "\"qmark isn't fully supported\"", "try", ":", "self", ".", "__dbapi2_cursor", ".", "executemany", "(", "tmp_query", ",", "seq_of_parameters", ")", "except", "Exception", ",", "e", ":", "self", ".", "__connection", ".", "reconnect", "(", "tmp_query", ",", "self", ".", "__log_reconnect", ")", "self", ".", "__dbapi2_cursor", "=", "self", ".", "__connection", ".", "_get_raw_cursor", "(", ")", "self", ".", "__dbapi2_cursor", ".", "executemany", "(", "tmp_query", ",", "seq_of_parameters", ")" ]
Same as .query() but calls the .executemany() method of the underlying DBAPI2.0 cursor instead of .execute()
[ "Same", "as", ".", "query", "()", "but", "eventually", "call", "the", ".", "executemany", "()", "method", "of", "the", "underlying", "DBAPI2", ".", "0", "cursor", "instead", "of", ".", "execute", "()" ]
python
train
scieloorg/articlemetaapi
articlemeta/client.py
https://github.com/scieloorg/articlemetaapi/blob/7ff87a615951bfdcc6fd535ce7f7c65065f64caa/articlemeta/client.py#L689-L702
def add_journal(self, data):
    """
    This method adds new journals to the ArticleMeta.

    data: legacy SciELO Documents JSON Type 3.
    """
    journal = self.dispatcher(
        'add_journal',
        data,
        self._admintoken
    )

    return json.loads(journal)
[ "def", "add_journal", "(", "self", ",", "data", ")", ":", "journal", "=", "self", ".", "dispatcher", "(", "'add_journal'", ",", "data", ",", "self", ".", "_admintoken", ")", "return", "json", ".", "loads", "(", "journal", ")" ]
This method adds new journals to the ArticleMeta.

data: legacy SciELO Documents JSON Type 3.
[ "This", "method", "include", "new", "journals", "to", "the", "ArticleMeta", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/internal/assert_util.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/assert_util.py#L76-L106
def assert_rank_at_most(x, rank, data=None, summarize=None, message=None, name=None): """Assert `x` has rank equal to `rank` or smaller. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.assert_rank_at_most(x, 2)]): output = tf.reduce_sum(x) ``` Args: x: Numeric `Tensor`. rank: Scalar `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_rank_at_most". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank or lower. If static checks determine `x` has correct rank, a `no_op` is returned. Raises: ValueError: If static checks determine `x` has wrong rank. """ with tf.compat.v2.name_scope(name or 'assert_rank_at_most'): return tf.compat.v1.assert_less_equal( tf.rank(x), rank, data=data, summarize=summarize, message=message)
[ "def", "assert_rank_at_most", "(", "x", ",", "rank", ",", "data", "=", "None", ",", "summarize", "=", "None", ",", "message", "=", "None", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v2", ".", "name_scope", "(", "name", "or", "'assert_rank_at_most'", ")", ":", "return", "tf", ".", "compat", ".", "v1", ".", "assert_less_equal", "(", "tf", ".", "rank", "(", "x", ")", ",", "rank", ",", "data", "=", "data", ",", "summarize", "=", "summarize", ",", "message", "=", "message", ")" ]
Assert `x` has rank equal to `rank` or smaller. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.assert_rank_at_most(x, 2)]): output = tf.reduce_sum(x) ``` Args: x: Numeric `Tensor`. rank: Scalar `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_rank_at_most". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank or lower. If static checks determine `x` has correct rank, a `no_op` is returned. Raises: ValueError: If static checks determine `x` has wrong rank.
[ "Assert", "x", "has", "rank", "equal", "to", "rank", "or", "smaller", "." ]
python
test
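A hedged usage sketch for the assertion above, following the graph-mode pattern from its own docstring; `assert_util` is an internal TFP module, so the import path below mirrors the file's location and may not be stable public API.

```python
import tensorflow as tf
from tensorflow_probability.python.internal import assert_util  # internal module

x = tf.constant([[1., 2.], [3., 4.]])  # statically known rank 2

# rank(x) == 2 <= 3, so the static check succeeds and the assert is trivial.
with tf.control_dependencies([assert_util.assert_rank_at_most(x, 3)]):
    total = tf.reduce_sum(x)

# assert_rank_at_most(x, 1) would fail instead: ValueError at graph
# construction, since the wrong rank is detectable statically.
```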
luckydonald/pytgbot
code_generation/output/pytgbot/bot.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/bot.py#L1469-L1502
def get_file(self, file_id): """ Use this method to get basic info about a file and prepare it for downloading. For the moment, bots can download files of up to 20MB in size. On success, a File object is returned. The file can then be downloaded via the link https://api.telegram.org/file/bot<token>/<file_path>, where <file_path> is taken from the response. It is guaranteed that the link will be valid for at least 1 hour. When the link expires, a new one can be requested by calling getFile again. Note: This function may not preserve the original file name and MIME type. You should save the file's MIME type and name (if available) when the File object is received. https://core.telegram.org/bots/api#getfile Parameters: :param file_id: File identifier to get info about :type file_id: str|unicode Returns: :return: On success, a File object is returned :rtype: pytgbot.api_types.receivable.media.File """ assert_type_or_raise(file_id, unicode_type, parameter_name="file_id") result = self.do("getFile", file_id=file_id) if self.return_python_objects: logger.debug("Trying to parse {data}".format(data=repr(result))) from pytgbot.api_types.receivable.media import File try: return File.from_array(result) except TgApiParseException: logger.debug("Failed parsing as api_type File", exc_info=True) # end try # no valid parsing so far raise TgApiParseException("Could not parse result.") # See debug log for details! # end if return_python_objects return result
[ "def", "get_file", "(", "self", ",", "file_id", ")", ":", "assert_type_or_raise", "(", "file_id", ",", "unicode_type", ",", "parameter_name", "=", "\"file_id\"", ")", "result", "=", "self", ".", "do", "(", "\"getFile\"", ",", "file_id", "=", "file_id", ")", "if", "self", ".", "return_python_objects", ":", "logger", ".", "debug", "(", "\"Trying to parse {data}\"", ".", "format", "(", "data", "=", "repr", "(", "result", ")", ")", ")", "from", "pytgbot", ".", "api_types", ".", "receivable", ".", "media", "import", "File", "try", ":", "return", "File", ".", "from_array", "(", "result", ")", "except", "TgApiParseException", ":", "logger", ".", "debug", "(", "\"Failed parsing as api_type File\"", ",", "exc_info", "=", "True", ")", "# end try", "# no valid parsing so far", "raise", "TgApiParseException", "(", "\"Could not parse result.\"", ")", "# See debug log for details!", "# end if return_python_objects", "return", "result" ]
Use this method to get basic info about a file and prepare it for downloading. For the moment, bots can download files of up to 20MB in size. On success, a File object is returned. The file can then be downloaded via the link https://api.telegram.org/file/bot<token>/<file_path>, where <file_path> is taken from the response. It is guaranteed that the link will be valid for at least 1 hour. When the link expires, a new one can be requested by calling getFile again. Note: This function may not preserve the original file name and MIME type. You should save the file's MIME type and name (if available) when the File object is received. https://core.telegram.org/bots/api#getfile Parameters: :param file_id: File identifier to get info about :type file_id: str|unicode Returns: :return: On success, a File object is returned :rtype: pytgbot.api_types.receivable.media.File
[ "Use", "this", "method", "to", "get", "basic", "info", "about", "a", "file", "and", "prepare", "it", "for", "downloading", ".", "For", "the", "moment", "bots", "can", "download", "files", "of", "up", "to", "20MB", "in", "size", ".", "On", "success", "a", "File", "object", "is", "returned", ".", "The", "file", "can", "then", "be", "downloaded", "via", "the", "link", "https", ":", "//", "api", ".", "telegram", ".", "org", "/", "file", "/", "bot<token", ">", "/", "<file_path", ">", "where", "<file_path", ">", "is", "taken", "from", "the", "response", ".", "It", "is", "guaranteed", "that", "the", "link", "will", "be", "valid", "for", "at", "least", "1", "hour", ".", "When", "the", "link", "expires", "a", "new", "one", "can", "be", "requested", "by", "calling", "getFile", "again", ".", "Note", ":", "This", "function", "may", "not", "preserve", "the", "original", "file", "name", "and", "MIME", "type", ".", "You", "should", "save", "the", "file", "s", "MIME", "type", "and", "name", "(", "if", "available", ")", "when", "the", "File", "object", "is", "received", "." ]
python
train
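A hedged usage sketch for the record above; the bot token and `file_id` are placeholders, and the download URL is assembled exactly as the docstring describes.

```python
from pytgbot import Bot  # assumed public import path

TOKEN = '123456:ABC-DEF...'   # placeholder bot token
bot = Bot(TOKEN)

file_id = 'AgACAgIAAxk...'    # placeholder, e.g. from a received photo
f = bot.get_file(file_id)     # pytgbot.api_types.receivable.media.File

# Per the docstring, this link is valid for at least 1 hour.
url = 'https://api.telegram.org/file/bot{token}/{path}'.format(
    token=TOKEN, path=f.file_path)
```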
sammchardy/python-kucoin
kucoin/client.py
https://github.com/sammchardy/python-kucoin/blob/a4cacde413804784bd313f27a0ad37234888be29/kucoin/client.py#L1769-L1835
def get_kline_data(self, symbol, kline_type='5min', start=None, end=None): """Get kline data For each query, the system would return at most 1500 pieces of data. To obtain more data, please page the data by time. :param symbol: Name of symbol e.g. KCS-BTC :type symbol: string :param kline_type: type of symbol, type of candlestick patterns: 1min, 3min, 5min, 15min, 30min, 1hour, 2hour, 4hour, 6hour, 8hour, 12hour, 1day, 1week :type kline_type: string :param start: Start time as unix timestamp (optional) default start of day in UTC :type start: int :param end: End time as unix timestamp (optional) default now in UTC :type end: int https://docs.kucoin.com/#get-historic-rates .. code:: python klines = client.get_kline_data('KCS-BTC', '5min', 1507479171, 1510278278) :returns: ApiResponse .. code:: python [ [ "1545904980", //Start time of the candle cycle "0.058", //opening price "0.049", //closing price "0.058", //highest price "0.049", //lowest price "0.018", //Transaction amount "0.000945" //Transaction volume ], [ "1545904920", "0.058", "0.072", "0.072", "0.058", "0.103", "0.006986" ] ] :raises: KucoinResponseException, KucoinAPIException """ data = { 'symbol': symbol } if kline_type is not None: data['type'] = kline_type if start is not None: data['startAt'] = start else: data['startAt'] = calendar.timegm(datetime.utcnow().date().timetuple()) if end is not None: data['endAt'] = end else: data['endAt'] = int(time.time()) return self._get('market/candles', False, data=data)
[ "def", "get_kline_data", "(", "self", ",", "symbol", ",", "kline_type", "=", "'5min'", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "data", "=", "{", "'symbol'", ":", "symbol", "}", "if", "kline_type", "is", "not", "None", ":", "data", "[", "'type'", "]", "=", "kline_type", "if", "start", "is", "not", "None", ":", "data", "[", "'startAt'", "]", "=", "start", "else", ":", "data", "[", "'startAt'", "]", "=", "calendar", ".", "timegm", "(", "datetime", ".", "utcnow", "(", ")", ".", "date", "(", ")", ".", "timetuple", "(", ")", ")", "if", "end", "is", "not", "None", ":", "data", "[", "'endAt'", "]", "=", "end", "else", ":", "data", "[", "'endAt'", "]", "=", "int", "(", "time", ".", "time", "(", ")", ")", "return", "self", ".", "_get", "(", "'market/candles'", ",", "False", ",", "data", "=", "data", ")" ]
Get kline data For each query, the system would return at most 1500 pieces of data. To obtain more data, please page the data by time. :param symbol: Name of symbol e.g. KCS-BTC :type symbol: string :param kline_type: type of symbol, type of candlestick patterns: 1min, 3min, 5min, 15min, 30min, 1hour, 2hour, 4hour, 6hour, 8hour, 12hour, 1day, 1week :type kline_type: string :param start: Start time as unix timestamp (optional) default start of day in UTC :type start: int :param end: End time as unix timestamp (optional) default now in UTC :type end: int https://docs.kucoin.com/#get-historic-rates .. code:: python klines = client.get_kline_data('KCS-BTC', '5min', 1507479171, 1510278278) :returns: ApiResponse .. code:: python [ [ "1545904980", //Start time of the candle cycle "0.058", //opening price "0.049", //closing price "0.058", //highest price "0.049", //lowest price "0.018", //Transaction amount "0.000945" //Transaction volume ], [ "1545904920", "0.058", "0.072", "0.072", "0.058", "0.103", "0.006986" ] ] :raises: KucoinResponseException, KucoinAPIException
[ "Get", "kline", "data" ]
python
train
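A hedged usage sketch for the record above; the import path and constructor arguments follow python-kucoin's usual conventions, but the credentials are placeholders.

```python
import time
from kucoin.client import Client  # assumed import path

client = Client('api_key', 'api_secret', 'api_passphrase')  # placeholders

end = int(time.time())
start = end - 6 * 60 * 60  # the last six hours

klines = client.get_kline_data('KCS-BTC', '15min', start, end)

# Each candle follows the field order documented above.
for ts, open_, close, high, low, amount, volume in klines:
    print(ts, close)
```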