Dataset columns:
repo — string (7–54 chars)
path — string (4–192 chars)
url — string (87–284 chars)
code — string (78–104k chars)
code_tokens — sequence
docstring — string (1–46.9k chars)
docstring_tokens — sequence
language — string (1 distinct value)
partition — string (3 distinct values)
dpgaspar/Flask-AppBuilder
flask_appbuilder/models/filters.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/models/filters.py#L220-L229
def get_filter_value(self, column_name):
    """
    Returns the filtered value for a certain column

    :param column_name: The name of the column that we want the value from
    :return: the filter value of the column
    """
    for flt, value in zip(self.filters, self.values):
        if flt.column_name == column_name:
            return value
[ "def", "get_filter_value", "(", "self", ",", "column_name", ")", ":", "for", "flt", ",", "value", "in", "zip", "(", "self", ".", "filters", ",", "self", ".", "values", ")", ":", "if", "flt", ".", "column_name", "==", "column_name", ":", "return", "value" ]
Returns the filtered value for a certain column :param column_name: The name of the column that we want the value from :return: the filter value of the column
[ "Returns", "the", "filtered", "value", "for", "a", "certain", "column" ]
python
train
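For readers skimming the record above: get_filter_value does a linear scan over two parallel lists. A minimal self-contained sketch of that layout follows; the Filters/_Flt classes and the add_filter helper are hypothetical stand-ins, since the record only shows the lookup method itself.

# Hypothetical container mirroring the parallel-list layout the method assumes.
class _Flt:
    def __init__(self, column_name):
        self.column_name = column_name

class Filters:
    def __init__(self):
        self.filters, self.values = [], []

    def add_filter(self, column_name, value):
        self.filters.append(_Flt(column_name))
        self.values.append(value)

    def get_filter_value(self, column_name):
        for flt, value in zip(self.filters, self.values):
            if flt.column_name == column_name:
                return value

flt = Filters()
flt.add_filter('name', 'Ada')
assert flt.get_filter_value('name') == 'Ada'
assert flt.get_filter_value('missing') is None  # no match falls through to None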
ucsb-cs/submit
submit/models.py
https://github.com/ucsb-cs/submit/blob/92810c81255a4fc6bbebac1ac8aae856fd576ffe/submit/models.py#L516-L560
def verify_submission(self, base_path, submission, update):
    """Return list of testables that can be built."""
    results = VerificationResults()
    valid_files = set()
    file_mapping = submission.file_mapping()
    # Create a list of in-use file verifiers
    file_verifiers = set(fv for testable in self.testables
                         for fv in testable.file_verifiers)
    for fv in file_verifiers:
        if fv.filename in file_mapping:
            errors, warnings = fv.verify(base_path,
                                         file_mapping[fv.filename])
            if errors:
                results.set_errors_for_filename(errors, fv.filename)
            else:
                valid_files.add(fv.filename)
            if warnings:
                results.set_warnings_for_filename(warnings, fv.filename)
            del file_mapping[fv.filename]
        elif not fv.optional:
            results.set_errors_for_filename(['missing'], fv.filename)
    if file_mapping:
        results.set_extra_filenames(frozenset(file_mapping.keys()))
    # Determine valid testables
    retval = []
    for testable in self.testables:
        missing = frozenset(x.filename for x in testable.file_verifiers
                            if not x.optional) - valid_files
        if missing:
            results._missing_to_testable_ids.setdefault(
                missing, set()).add(testable.id)
        elif testable.file_verifiers:
            retval.append(testable)
    if update:
        # Reset existing attributes
        submission.test_case_results = []
        submission.testable_results = []
        # Set new information
        submission.verification_results = results
        submission.verified_at = func.now()
    return retval
[ "def", "verify_submission", "(", "self", ",", "base_path", ",", "submission", ",", "update", ")", ":", "results", "=", "VerificationResults", "(", ")", "valid_files", "=", "set", "(", ")", "file_mapping", "=", "submission", ".", "file_mapping", "(", ")", "# Create a list of in-use file verifiers", "file_verifiers", "=", "set", "(", "fv", "for", "testable", "in", "self", ".", "testables", "for", "fv", "in", "testable", ".", "file_verifiers", ")", "for", "fv", "in", "file_verifiers", ":", "if", "fv", ".", "filename", "in", "file_mapping", ":", "errors", ",", "warnings", "=", "fv", ".", "verify", "(", "base_path", ",", "file_mapping", "[", "fv", ".", "filename", "]", ")", "if", "errors", ":", "results", ".", "set_errors_for_filename", "(", "errors", ",", "fv", ".", "filename", ")", "else", ":", "valid_files", ".", "add", "(", "fv", ".", "filename", ")", "if", "warnings", ":", "results", ".", "set_warnings_for_filename", "(", "warnings", ",", "fv", ".", "filename", ")", "del", "file_mapping", "[", "fv", ".", "filename", "]", "elif", "not", "fv", ".", "optional", ":", "results", ".", "set_errors_for_filename", "(", "[", "'missing'", "]", ",", "fv", ".", "filename", ")", "if", "file_mapping", ":", "results", ".", "set_extra_filenames", "(", "frozenset", "(", "file_mapping", ".", "keys", "(", ")", ")", ")", "# Determine valid testables", "retval", "=", "[", "]", "for", "testable", "in", "self", ".", "testables", ":", "missing", "=", "frozenset", "(", "x", ".", "filename", "for", "x", "in", "testable", ".", "file_verifiers", "if", "not", "x", ".", "optional", ")", "-", "valid_files", "if", "missing", ":", "results", ".", "_missing_to_testable_ids", ".", "setdefault", "(", "missing", ",", "set", "(", ")", ")", ".", "add", "(", "testable", ".", "id", ")", "elif", "testable", ".", "file_verifiers", ":", "retval", ".", "append", "(", "testable", ")", "if", "update", ":", "# Reset existing attributes", "submission", ".", "test_case_results", "=", "[", "]", "submission", ".", "testable_results", "=", "[", "]", "# Set new information", "submission", ".", "verification_results", "=", "results", "submission", ".", "verified_at", "=", "func", ".", "now", "(", ")", "return", "retval" ]
Return list of testables that can be built.
[ "Return", "list", "of", "testables", "that", "can", "be", "built", "." ]
python
train
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L9493-L9529
def power_on_vm(name, datacenter=None, service_instance=None):
    '''
    Powers on a virtual machine specified by its name.

    name
        Name of the virtual machine

    datacenter
        Datacenter of the virtual machine

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.power_on_vm name=my_vm
    '''
    log.trace('Powering on virtual machine %s', name)
    vm_properties = [
        'name',
        'summary.runtime.powerState'
    ]
    virtual_machine = salt.utils.vmware.get_vm_by_property(
        service_instance,
        name,
        datacenter=datacenter,
        vm_properties=vm_properties)
    if virtual_machine['summary.runtime.powerState'] == 'poweredOn':
        result = {'comment': 'Virtual machine is already powered on',
                  'changes': {'power_on': True}}
        return result
    salt.utils.vmware.power_cycle_vm(virtual_machine['object'], action='on')
    result = {'comment': 'Virtual machine power on action succeeded',
              'changes': {'power_on': True}}
    return result
[ "def", "power_on_vm", "(", "name", ",", "datacenter", "=", "None", ",", "service_instance", "=", "None", ")", ":", "log", ".", "trace", "(", "'Powering on virtual machine %s'", ",", "name", ")", "vm_properties", "=", "[", "'name'", ",", "'summary.runtime.powerState'", "]", "virtual_machine", "=", "salt", ".", "utils", ".", "vmware", ".", "get_vm_by_property", "(", "service_instance", ",", "name", ",", "datacenter", "=", "datacenter", ",", "vm_properties", "=", "vm_properties", ")", "if", "virtual_machine", "[", "'summary.runtime.powerState'", "]", "==", "'poweredOn'", ":", "result", "=", "{", "'comment'", ":", "'Virtual machine is already powered on'", ",", "'changes'", ":", "{", "'power_on'", ":", "True", "}", "}", "return", "result", "salt", ".", "utils", ".", "vmware", ".", "power_cycle_vm", "(", "virtual_machine", "[", "'object'", "]", ",", "action", "=", "'on'", ")", "result", "=", "{", "'comment'", ":", "'Virtual machine power on action succeeded'", ",", "'changes'", ":", "{", "'power_on'", ":", "True", "}", "}", "return", "result" ]
Powers on a virtual machine specified by its name. name Name of the virtual machine datacenter Datacenter of the virtual machine service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.power_on_vm name=my_vm
[ "Powers", "on", "a", "virtual", "machine", "specified", "by", "it", "s", "name", "." ]
python
train
penguinmenac3/starttf
starttf/estimators/tf_estimator.py
https://github.com/penguinmenac3/starttf/blob/f4086489d169757c0504e822165db2fea534b944/starttf/estimators/tf_estimator.py#L78-L201
def easy_train_and_evaluate(hyper_params, Model=None, create_loss=None,
                            training_data=None, validation_data=None,
                            inline_plotting=False, session_config=None, log_suffix=None,
                            continue_training=False, continue_with_specific_checkpointpath=None):
    """
    Train and evaluate your model without any boilerplate code.

    1) Write your data using the starttf.tfrecords.autorecords.write_data method.
    2) Create your hyper parameter file containing all required fields and then load it using
       starttf.utils.hyper_params.load_params method.
       Minimal Sample Hyperparams File:
       {"train": {
           "learning_rate": {
               "type": "const",
               "start_value": 0.001
           },
           "optimizer": {
               "type": "adam"
           },
           "batch_size": 1024,
           "iters": 10000,
           "summary_iters": 100,
           "checkpoint_path": "checkpoints/mnist",
           "tf_records_path": "data/.records/mnist"
       }}
    3) Pass everything required to this method and that's it.

    :param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
    :param Model: A keras model.
    :param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
    :param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss
        directly inside the notebook.
    :param continue_training: Bool, continue last training in the checkpoint path specified in the
        hyper parameters.
    :param session_config: A configuration for the session.
    :param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
    :return:
    """
    time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H.%M.%S')
    chkpt_path = hyper_params.train.checkpoint_path + "/" + time_stamp
    if log_suffix is not None:
        chkpt_path = chkpt_path + "_" + log_suffix
    if session_config is None:
        session_config = get_default_config()
    if continue_with_specific_checkpointpath:
        chkpt_path = hyper_params.train.checkpoint_path + "/" + continue_with_specific_checkpointpath
        print("Continue with checkpoint: {}".format(chkpt_path))
    elif continue_training:
        chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
        chkpt_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
        print("Latest found checkpoint: {}".format(chkpt_path))
    if not os.path.exists(chkpt_path):
        os.makedirs(chkpt_path)

    # If hyperparam config is used, back up and dynamically import the code
    if Model is None:
        model_backup = os.path.join(chkpt_path, "model.py")
        copyfile(hyper_params["arch"]["model"].replace(".", os.sep), model_backup)
        arch_model = __import__(hyper_params["arch"]["model"], fromlist=["Model"])
        Model = arch_model.Model
    if create_loss is None:
        loss_backup = os.path.join(chkpt_path, "loss.py")
        copyfile(hyper_params["arch"]["loss"].replace(".", os.sep), loss_backup)
        arch_loss = __import__(hyper_params["arch"]["loss"], fromlist=["create_loss"])
        create_loss = arch_loss.create_loss

    # Load training data
    print("Load data")
    if training_data is None:
        training_data = create_input_fn(
            os.path.join(hyper_params.train.tf_records_path, PHASE_TRAIN),
            hyper_params.train.batch_size)
    if validation_data is None:
        validation_data = create_input_fn(
            os.path.join(hyper_params.train.tf_records_path, PHASE_VALIDATION),
            hyper_params.train.batch_size)

    # Write hyper parameters to be able to track what config you had.
    with open(chkpt_path + "/hyperparameters.json", "w") as json_file:
        json_file.write(json.dumps(hyper_params.to_dict(), indent=4, sort_keys=True))

    estimator_spec = create_tf_estimator_spec(chkpt_path, Model, create_loss, inline_plotting)

    # Create a run configuration
    if hyper_params.train.get("distributed", False):
        distribution = tf.contrib.distribute.MirroredStrategy()
        config = tf.estimator.RunConfig(model_dir=chkpt_path,
                                        save_summary_steps=hyper_params.train.summary_steps,
                                        train_distribute=distribution,
                                        save_checkpoints_steps=hyper_params.train.save_checkpoint_steps,
                                        keep_checkpoint_max=hyper_params.train.keep_checkpoint_max,
                                        keep_checkpoint_every_n_hours=1)
    else:
        config = tf.estimator.RunConfig(session_config=session_config,
                                        model_dir=chkpt_path,
                                        save_summary_steps=hyper_params.train.summary_steps,
                                        save_checkpoints_steps=hyper_params.train.save_checkpoint_steps,
                                        keep_checkpoint_max=hyper_params.train.keep_checkpoint_max,
                                        keep_checkpoint_every_n_hours=1)

    # Create the estimator.
    if hyper_params.train.get("warm_start_checkpoint", None) is not None:
        warm_start_dir = hyper_params.train.warm_start_checkpoint
        estimator = tf.estimator.Estimator(estimator_spec,
                                           config=config,
                                           warm_start_from=warm_start_dir,
                                           params=hyper_params)
    else:
        estimator = tf.estimator.Estimator(estimator_spec, config=config, params=hyper_params)

    # Specify training and actually train.
    throttle_secs = hyper_params.train.get("throttle_secs", 120)
    train_spec = tf.estimator.TrainSpec(input_fn=training_data,
                                        max_steps=hyper_params.train.steps)
    eval_spec = tf.estimator.EvalSpec(input_fn=validation_data,
                                      throttle_secs=throttle_secs)
    print("Start training")
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    return estimator
[ "def", "easy_train_and_evaluate", "(", "hyper_params", ",", "Model", "=", "None", ",", "create_loss", "=", "None", ",", "training_data", "=", "None", ",", "validation_data", "=", "None", ",", "inline_plotting", "=", "False", ",", "session_config", "=", "None", ",", "log_suffix", "=", "None", ",", "continue_training", "=", "False", ",", "continue_with_specific_checkpointpath", "=", "None", ")", ":", "time_stamp", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "time", ".", "time", "(", ")", ")", ".", "strftime", "(", "'%Y-%m-%d_%H.%M.%S'", ")", "chkpt_path", "=", "hyper_params", ".", "train", ".", "checkpoint_path", "+", "\"/\"", "+", "time_stamp", "if", "log_suffix", "is", "not", "None", ":", "chkpt_path", "=", "chkpt_path", "+", "\"_\"", "+", "log_suffix", "if", "session_config", "is", "None", ":", "session_config", "=", "get_default_config", "(", ")", "if", "continue_with_specific_checkpointpath", ":", "chkpt_path", "=", "hyper_params", ".", "train", ".", "checkpoint_path", "+", "\"/\"", "+", "continue_with_specific_checkpointpath", "print", "(", "\"Continue with checkpoint: {}\"", ".", "format", "(", "chkpt_path", ")", ")", "elif", "continue_training", ":", "chkpts", "=", "sorted", "(", "[", "name", "for", "name", "in", "os", ".", "listdir", "(", "hyper_params", ".", "train", ".", "checkpoint_path", ")", "]", ")", "chkpt_path", "=", "hyper_params", ".", "train", ".", "checkpoint_path", "+", "\"/\"", "+", "chkpts", "[", "-", "1", "]", "print", "(", "\"Latest found checkpoint: {}\"", ".", "format", "(", "chkpt_path", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "chkpt_path", ")", ":", "os", ".", "makedirs", "(", "chkpt_path", ")", "# If hyperparam config is used, load and save code", "if", "Model", "is", "None", ":", "model_backup", "=", "os", ".", "path", ".", "join", "(", "chkpt_path", ",", "\"model.py\"", ")", "copyfile", "(", "hyperparams", "[", "\"arch\"", "]", "[", "\"model\"", "]", ".", "replace", "(", "\".\"", ",", "os", ".", "sep", ")", ",", "model_backup", ")", "arch_model", "=", "__import__", "(", "hyperparams", "[", "\"arch\"", "]", "[", "\"model\"", "]", ",", "fromlist", "=", "[", "\"Model\"", "]", ")", "Model", "=", "arch_model", ".", "Model", "if", "create_loss", "is", "None", ":", "loss_backup", "=", "os", ".", "path", ".", "join", "(", "chkpt_path", ",", "\"loss.py\"", ")", "copyfile", "(", "hyperparams", "[", "\"arch\"", "]", "[", "\"loss\"", "]", ".", "replace", "(", "\".\"", ",", "os", ".", "sep", ")", ",", "loss_backup", ")", "arch_loss", "=", "__import__", "(", "hyperparams", "[", "\"arch\"", "]", "[", "\"loss\"", "]", ",", "fromlist", "=", "[", "\"create_loss\"", "]", ")", "create_loss", "=", "arch_loss", ".", "create_loss", "# Load training data", "print", "(", "\"Load data\"", ")", "if", "training_data", "is", "None", ":", "training_data", "=", "create_input_fn", "(", "os", ".", "path", ".", "join", "(", "hyper_params", ".", "train", ".", "tf_records_path", ",", "PHASE_TRAIN", ")", ",", "hyper_params", ".", "train", ".", "batch_size", ")", "if", "validation_data", "is", "None", ":", "validation_data", "=", "create_input_fn", "(", "os", ".", "path", ".", "join", "(", "hyper_params", ".", "train", ".", "tf_records_path", ",", "PHASE_VALIDATION", ")", ",", "hyper_params", ".", "train", ".", "batch_size", ")", "# Write hyper parameters to be able to track what config you had.", "with", "open", "(", "chkpt_path", "+", "\"/hyperparameters.json\"", ",", "\"w\"", ")", "as", "json_file", ":", "json_file", ".", "write", "(", "json", ".", 
"dumps", "(", "hyper_params", ".", "to_dict", "(", ")", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", ")", "estimator_spec", "=", "create_tf_estimator_spec", "(", "chkpt_path", ",", "Model", ",", "create_loss", ",", "inline_plotting", ")", "# Create a run configuration", "config", "=", "None", "if", "hyper_params", ".", "train", ".", "get", "(", "\"distributed\"", ",", "False", ")", ":", "distribution", "=", "tf", ".", "contrib", ".", "distribute", ".", "MirroredStrategy", "(", ")", "config", "=", "tf", ".", "estimator", ".", "RunConfig", "(", "model_dir", "=", "chkpt_path", ",", "save_summary_steps", "=", "hyper_params", ".", "train", ".", "summary_steps", ",", "train_distribute", "=", "distribution", ",", "save_checkpoints_steps", "=", "hyper_params", ".", "train", ".", "save_checkpoint_steps", ",", "keep_checkpoint_max", "=", "hyper_params", ".", "train", ".", "keep_checkpoint_max", ",", "keep_checkpoint_every_n_hours", "=", "1", ")", "else", ":", "config", "=", "tf", ".", "estimator", ".", "RunConfig", "(", "session_config", "=", "session_config", ",", "model_dir", "=", "chkpt_path", ",", "save_summary_steps", "=", "hyper_params", ".", "train", ".", "summary_steps", ",", "save_checkpoints_steps", "=", "hyper_params", ".", "train", ".", "save_checkpoint_steps", ",", "keep_checkpoint_max", "=", "hyper_params", ".", "train", ".", "keep_checkpoint_max", ",", "keep_checkpoint_every_n_hours", "=", "1", ")", "# Create the estimator.", "estimator", "=", "None", "if", "hyper_params", ".", "train", ".", "get", "(", "\"warm_start_checkpoint\"", ",", "None", ")", "is", "not", "None", ":", "warm_start_dir", "=", "hyper_params", ".", "train", ".", "warm_start_checkpoint", "estimator", "=", "tf", ".", "estimator", ".", "Estimator", "(", "estimator_spec", ",", "config", "=", "config", ",", "warm_start_from", "=", "warm_start_dir", ",", "params", "=", "hyper_params", ")", "else", ":", "estimator", "=", "tf", ".", "estimator", ".", "Estimator", "(", "estimator_spec", ",", "config", "=", "config", ",", "params", "=", "hyper_params", ")", "# Specify training and actually train.", "throttle_secs", "=", "hyper_params", ".", "train", ".", "get", "(", "\"throttle_secs\"", ",", "120", ")", "train_spec", "=", "tf", ".", "estimator", ".", "TrainSpec", "(", "input_fn", "=", "training_data", ",", "max_steps", "=", "hyper_params", ".", "train", ".", "steps", ")", "eval_spec", "=", "tf", ".", "estimator", ".", "EvalSpec", "(", "input_fn", "=", "validation_data", ",", "throttle_secs", "=", "throttle_secs", ")", "print", "(", "\"Start training\"", ")", "tf", ".", "estimator", ".", "train_and_evaluate", "(", "estimator", ",", "train_spec", ",", "eval_spec", ")", "return", "estimator" ]
Train and evaluate your model without any boilerplate code. 1) Write your data using the starttf.tfrecords.autorecords.write_data method. 2) Create your hyper parameter file containing all required fields and then load it using starttf.utils.hyper_params.load_params method. Minimal Sample Hyperparams File: {"train": { "learning_rate": { "type": "const", "start_value": 0.001 }, "optimizer": { "type": "adam" }, "batch_size": 1024, "iters": 10000, "summary_iters": 100, "checkpoint_path": "checkpoints/mnist", "tf_records_path": "data/.records/mnist" } } 3) Pass everything required to this method and that's it. :param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params :param Model: A keras model. :param create_loss: A create_loss function like that in starttf.examples.mnist.loss. :param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook. :param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters. :param session_config: A configuration for the session. :param log_suffix: A suffix for the log folder, so you can remember what was special about the run. :return:
[ "Train", "and", "evaluate", "your", "model", "without", "any", "boilerplate", "code", "." ]
python
train
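A sketch of the workflow the docstring above describes: write tfrecords, load a hyperparameter file, then hand everything to easy_train_and_evaluate. The JSON path is hypothetical; leaving Model and create_loss as None exercises the branch that imports both from the config's "arch" section.

from starttf.utils.hyper_params import load_params
from starttf.estimators.tf_estimator import easy_train_and_evaluate

# Hypothetical config path; the file must contain the fields shown in the
# minimal sample above (plus an "arch" section when Model/create_loss are None).
hyper_params = load_params("hyperparams/mnist.json")
estimator = easy_train_and_evaluate(hyper_params, log_suffix="baseline")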
jjmontesl/python-clementine-remote
clementineremote/clementine.py
https://github.com/jjmontesl/python-clementine-remote/blob/af5198f8bb56a4845f4e081fd8a553f935c94cde/clementineremote/clementine.py#L147-L153
def play(self):
    """
    Sends a "play" command to the player.
    """
    msg = cr.Message()
    msg.type = cr.PLAY
    self.send_message(msg)
[ "def", "play", "(", "self", ")", ":", "msg", "=", "cr", ".", "Message", "(", ")", "msg", ".", "type", "=", "cr", ".", "PLAY", "self", ".", "send_message", "(", "msg", ")" ]
Sends a "play" command to the player.
[ "Sends", "a", "play", "command", "to", "the", "player", "." ]
python
train
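For context, play() would normally be reached through the library's client object. A hedged usage sketch, with host/port/auth placeholders (5500 is Clementine's usual remote port, but check your player's settings):

from clementineremote import ClementineRemote

# Placeholders: point these at your own Clementine instance.
client = ClementineRemote(host="192.168.1.10", port=5500, auth_code=None)
client.play()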
py3270/py3270
py3270/__init__.py
https://github.com/py3270/py3270/blob/c3e91b519f3a18b4be4799a00a96341957a8831f/py3270/__init__.py#L337-L350
def is_connected(self):
    """ Return bool indicating connection state """
    # need to wrap in try/except b/c of wc3270's socket connection dynamics
    try:
        # this is basically a no-op, but it results in the current status
        # getting updated
        self.exec_command(b"Query(ConnectionState)")
        # connected status is like 'C(192.168.1.1)', disconnected is 'N'
        return self.status.connection_state.startswith(b"C(")
    except NotConnectedException:
        return False
[ "def", "is_connected", "(", "self", ")", ":", "# need to wrap in try/except b/c of wc3270's socket connection dynamics", "try", ":", "# this is basically a no-op, but it results in the the current status", "# getting updated", "self", ".", "exec_command", "(", "b\"Query(ConnectionState)\"", ")", "# connected status is like 'C(192.168.1.1)', disconnected is 'N'", "return", "self", ".", "status", ".", "connection_state", ".", "startswith", "(", "b\"C(\"", ")", "except", "NotConnectedException", ":", "return", "False" ]
Return bool indicating connection state
[ "Return", "bool", "indicating", "connection", "state" ]
python
valid
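A short usage sketch for is_connected; Emulator, connect and terminate are py3270's documented entry points, while the hostname is a placeholder.

from py3270 import Emulator

em = Emulator()                        # drives an x3270/s3270 subprocess
em.connect('mainframe.example.com')    # placeholder host
print(em.is_connected())               # True while the session is up
em.terminate()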
ninuxorg/nodeshot
nodeshot/networking/links/models/link.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/networking/links/models/link.py#L112-L165
def save(self, *args, **kwargs):
    """
    Custom save does the following:
        * determine link type if not specified
        * automatically fill 'node_a' and 'node_b' fields if necessary
        * draw line between two nodes
        * fill shortcut properties node_a_name and node_b_name
    """
    if not self.type:
        if self.interface_a.type == INTERFACE_TYPES.get('wireless'):
            self.type = LINK_TYPES.get('radio')
        elif self.interface_a.type == INTERFACE_TYPES.get('ethernet'):
            self.type = LINK_TYPES.get('ethernet')
        else:
            self.type = LINK_TYPES.get('virtual')
    if self.interface_a_id:
        self.interface_a = Interface.objects.get(pk=self.interface_a_id)
    if self.interface_b_id:
        self.interface_b = Interface.objects.get(pk=self.interface_b_id)
    # fill in node_a and node_b
    if self.node_a is None and self.interface_a is not None:
        self.node_a = self.interface_a.node
    if self.node_b is None and self.interface_b is not None:
        self.node_b = self.interface_b.node
    # fill layer from node_a
    if self.layer is None:
        self.layer = self.node_a.layer
    # draw linestring
    if not self.line:
        self.line = LineString(self.node_a.point, self.node_b.point)
    # fill properties
    if self.data.get('node_a_name', None) is None:
        self.data['node_a_name'] = self.node_a.name
        self.data['node_b_name'] = self.node_b.name
    if self.data.get('node_a_slug', None) is None or self.data.get('node_b_slug', None) is None:
        self.data['node_a_slug'] = self.node_a.slug
        self.data['node_b_slug'] = self.node_b.slug
    if self.interface_a and self.data.get('interface_a_mac', None) is None:
        self.data['interface_a_mac'] = self.interface_a.mac
    if self.interface_b and self.data.get('interface_b_mac', None) is None:
        self.data['interface_b_mac'] = self.interface_b.mac
    if self.data.get('layer_slug') != self.layer.slug:
        self.data['layer_slug'] = self.layer.slug
    super(Link, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "type", ":", "if", "self", ".", "interface_a", ".", "type", "==", "INTERFACE_TYPES", ".", "get", "(", "'wireless'", ")", ":", "self", ".", "type", "=", "LINK_TYPES", ".", "get", "(", "'radio'", ")", "elif", "self", ".", "interface_a", ".", "type", "==", "INTERFACE_TYPES", ".", "get", "(", "'ethernet'", ")", ":", "self", ".", "type", "=", "LINK_TYPES", ".", "get", "(", "'ethernet'", ")", "else", ":", "self", ".", "type", "=", "LINK_TYPES", ".", "get", "(", "'virtual'", ")", "if", "self", ".", "interface_a_id", ":", "self", ".", "interface_a", "=", "Interface", ".", "objects", ".", "get", "(", "pk", "=", "self", ".", "interface_a_id", ")", "if", "self", ".", "interface_b_id", ":", "self", ".", "interface_b", "=", "Interface", ".", "objects", ".", "get", "(", "pk", "=", "self", ".", "interface_b_id", ")", "# fill in node_a and node_b", "if", "self", ".", "node_a", "is", "None", "and", "self", ".", "interface_a", "is", "not", "None", ":", "self", ".", "node_a", "=", "self", ".", "interface_a", ".", "node", "if", "self", ".", "node_b", "is", "None", "and", "self", ".", "interface_b", "is", "not", "None", ":", "self", ".", "node_b", "=", "self", ".", "interface_b", ".", "node", "# fill layer from node_a", "if", "self", ".", "layer", "is", "None", ":", "self", ".", "layer", "=", "self", ".", "node_a", ".", "layer", "# draw linestring", "if", "not", "self", ".", "line", ":", "self", ".", "line", "=", "LineString", "(", "self", ".", "node_a", ".", "point", ",", "self", ".", "node_b", ".", "point", ")", "# fill properties", "if", "self", ".", "data", ".", "get", "(", "'node_a_name'", ",", "None", ")", "is", "None", ":", "self", ".", "data", "[", "'node_a_name'", "]", "=", "self", ".", "node_a", ".", "name", "self", ".", "data", "[", "'node_b_name'", "]", "=", "self", ".", "node_b", ".", "name", "if", "self", ".", "data", ".", "get", "(", "'node_a_slug'", ",", "None", ")", "is", "None", "or", "self", ".", "data", ".", "get", "(", "'node_b_slug'", ",", "None", ")", "is", "None", ":", "self", ".", "data", "[", "'node_a_slug'", "]", "=", "self", ".", "node_a", ".", "slug", "self", ".", "data", "[", "'node_b_slug'", "]", "=", "self", ".", "node_b", ".", "slug", "if", "self", ".", "interface_a", "and", "self", ".", "data", ".", "get", "(", "'interface_a_mac'", ",", "None", ")", "is", "None", ":", "self", ".", "data", "[", "'interface_a_mac'", "]", "=", "self", ".", "interface_a", ".", "mac", "if", "self", ".", "interface_b", "and", "self", ".", "data", ".", "get", "(", "'interface_b_mac'", ",", "None", ")", "is", "None", ":", "self", ".", "data", "[", "'interface_b_mac'", "]", "=", "self", ".", "interface_b", ".", "mac", "if", "self", ".", "data", ".", "get", "(", "'layer_slug'", ")", "!=", "self", ".", "layer", ".", "slug", ":", "self", ".", "data", "[", "'layer_slug'", "]", "=", "self", ".", "layer", ".", "slug", "super", "(", "Link", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Custom save does the following: * determine link type if not specified * automatically fill 'node_a' and 'node_b' fields if necessary * draw line between two nodes * fill shortcut properties node_a_name and node_b_name
[ "Custom", "save", "does", "the", "following", ":", "*", "determine", "link", "type", "if", "not", "specified", "*", "automatically", "fill", "node_a", "and", "node_b", "fields", "if", "necessary", "*", "draw", "line", "between", "two", "nodes", "*", "fill", "shortcut", "properties", "node_a_name", "and", "node_b_name" ]
python
train
twosigma/marbles
marbles/mixins/marbles/mixins/mixins.py
https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1707-L1731
def assertTimeZoneNotEqual(self, dt, tz, msg=None):
    '''Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as
    determined by the '!=' operator.

    Parameters
    ----------
    dt : datetime
    tz : timezone
    msg : str
        If not provided, the :mod:`marbles.mixins` or
        :mod:`unittest` standard message will be used.

    Raises
    ------
    TypeError
        If ``dt`` is not a datetime object.
    TypeError
        If ``tz`` is not a timezone object.
    '''
    if not isinstance(dt, datetime):
        raise TypeError('First argument is not a datetime object')
    if not isinstance(tz, timezone):
        raise TypeError('Second argument is not a timezone object')

    self.assertNotEqual(dt.tzinfo, tz, msg=msg)
[ "def", "assertTimeZoneNotEqual", "(", "self", ",", "dt", ",", "tz", ",", "msg", "=", "None", ")", ":", "if", "not", "isinstance", "(", "dt", ",", "datetime", ")", ":", "raise", "TypeError", "(", "'First argument is not a datetime object'", ")", "if", "not", "isinstance", "(", "tz", ",", "timezone", ")", ":", "raise", "TypeError", "(", "'Second argument is not a timezone object'", ")", "self", ".", "assertNotEqual", "(", "dt", ".", "tzinfo", ",", "tz", ",", "msg", "=", "msg", ")" ]
Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as determined by the '!=' operator. Parameters ---------- dt : datetime tz : timezone msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``dt`` is not a datetime object. TypeError If ``tz`` is not a timezone object.
[ "Fail", "if", "dt", "s", "tzinfo", "attribute", "equals", "tz", "as", "determined", "by", "the", "!", "=", "operator", "." ]
python
train
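In a test case the assertion reads as below. The import style follows the record's package; the DateTimeMixins class name is my best guess at the mixin exposing this method and should be verified against marbles.

import unittest
from datetime import datetime, timezone, timedelta

from marbles.mixins import mixins  # assumed import style

class TZTest(unittest.TestCase, mixins.DateTimeMixins):  # mixin name assumed
    def test_not_utc(self):
        dt = datetime(2024, 1, 1, tzinfo=timezone(timedelta(hours=2)))
        self.assertTimeZoneNotEqual(dt, timezone.utc)  # passes: +02:00 != UTC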
gem/oq-engine
openquake/calculators/extract.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/extract.py#L742-L752
def extract_mfd(dstore, what):
    """
    Display num_ruptures by magnitude for event based calculations.
    Example: http://127.0.0.1:8800/v1/calc/30/extract/event_based_mfd
    """
    dd = collections.defaultdict(int)
    for rup in dstore['ruptures'].value:
        dd[rup['mag']] += 1
    dt = numpy.dtype([('mag', float), ('freq', int)])
    magfreq = numpy.array(sorted(dd.items(), key=operator.itemgetter(0)), dt)
    return magfreq
[ "def", "extract_mfd", "(", "dstore", ",", "what", ")", ":", "dd", "=", "collections", ".", "defaultdict", "(", "int", ")", "for", "rup", "in", "dstore", "[", "'ruptures'", "]", ".", "value", ":", "dd", "[", "rup", "[", "'mag'", "]", "]", "+=", "1", "dt", "=", "numpy", ".", "dtype", "(", "[", "(", "'mag'", ",", "float", ")", ",", "(", "'freq'", ",", "int", ")", "]", ")", "magfreq", "=", "numpy", ".", "array", "(", "sorted", "(", "dd", ".", "items", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "0", ")", ")", ",", "dt", ")", "return", "magfreq" ]
Display num_ruptures by magnitude for event based calculations. Example: http://127.0.0.1:8800/v1/calc/30/extract/event_based_mfd
[ "Display", "num_ruptures", "by", "magnitude", "for", "event", "based", "calculations", ".", "Example", ":", "http", ":", "//", "127", ".", "0", ".", "0", ".", "1", ":", "8800", "/", "v1", "/", "calc", "/", "30", "/", "extract", "/", "event_based_mfd" ]
python
train
tensorflow/datasets
tensorflow_datasets/core/features/image_feature.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L162-L170
def load_metadata(self, data_dir, feature_name=None):
    """See base class for details."""
    # Restore names if defined
    filepath = _get_metadata_filepath(data_dir, feature_name)
    if tf.io.gfile.exists(filepath):
        with tf.io.gfile.GFile(filepath, 'r') as f:
            info_data = json.load(f)
        self.set_encoding_format(info_data['encoding_format'])
        self.set_shape([None if d == -1 else d for d in info_data['shape']])
[ "def", "load_metadata", "(", "self", ",", "data_dir", ",", "feature_name", "=", "None", ")", ":", "# Restore names if defined", "filepath", "=", "_get_metadata_filepath", "(", "data_dir", ",", "feature_name", ")", "if", "tf", ".", "io", ".", "gfile", ".", "exists", "(", "filepath", ")", ":", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "filepath", ",", "'r'", ")", "as", "f", ":", "info_data", "=", "json", ".", "load", "(", "f", ")", "self", ".", "set_encoding_format", "(", "info_data", "[", "'encoding_format'", "]", ")", "self", ".", "set_shape", "(", "[", "None", "if", "d", "==", "-", "1", "else", "d", "for", "d", "in", "info_data", "[", "'shape'", "]", "]", ")" ]
See base class for details.
[ "See", "base", "class", "for", "details", "." ]
python
train
KnowledgeLinks/rdfframework
rdfframework/rdfclass/rdfproperty.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rdfclass/rdfproperty.py#L423-L463
def filter_prop_defs(prop_defs, hierarchy, cls_names):
    """ Reads through the prop_defs and returns a dictionary filtered by
    the current class

    args:
        prop_defs: the definitions from the rdf vocabulary definition
        cls_object: the class object to tie the property
        cls_names: the name of the classes
    """

    def _is_valid(test_list, valid_list):
        """ reads the list of classes in appliesToClass and returns whether
        the test_list matches

        args:
            test_list: the list of classes to test against
            valid_list: list of possible matches
        """
        for test in test_list:
            if test in valid_list:
                return True
        return False

    new_dict = {}
    valid_classes = [Uri('kdr_AllClasses')] + cls_names + hierarchy
    for def_name, value in prop_defs.items():
        new_dict[def_name] = []
        empty_def = []
        try:
            for item in value:
                if item.get('kds_appliesToClass'):
                    if _is_valid(item['kds_appliesToClass'], valid_classes):
                        new_dict[def_name].append(item)
                else:
                    empty_def.append(item)
            if not new_dict[def_name]:
                new_dict[def_name] = empty_def
        except AttributeError:
            new_dict[def_name] = value
    return new_dict
[ "def", "filter_prop_defs", "(", "prop_defs", ",", "hierarchy", ",", "cls_names", ")", ":", "def", "_is_valid", "(", "test_list", ",", "valid_list", ")", ":", "\"\"\" reads the list of classes in appliesToClass and returns whether\n the test_list matches\n\n args:\n test_list: the list of clasees to test against\n valid_list: list of possible matches\n \"\"\"", "for", "test", "in", "test_list", ":", "if", "test", "in", "valid_list", ":", "return", "True", "return", "False", "new_dict", "=", "{", "}", "valid_classes", "=", "[", "Uri", "(", "'kdr_AllClasses'", ")", "]", "+", "cls_names", "+", "hierarchy", "for", "def_name", ",", "value", "in", "prop_defs", ".", "items", "(", ")", ":", "new_dict", "[", "def_name", "]", "=", "[", "]", "empty_def", "=", "[", "]", "try", ":", "for", "item", "in", "value", ":", "if", "item", ".", "get", "(", "'kds_appliesToClass'", ")", ":", "if", "_is_valid", "(", "item", "[", "'kds_appliesToClass'", "]", ",", "valid_classes", ")", ":", "new_dict", "[", "def_name", "]", ".", "append", "(", "item", ")", "else", ":", "empty_def", ".", "append", "(", "item", ")", "if", "not", "new_dict", "[", "def_name", "]", ":", "new_dict", "[", "def_name", "]", "=", "empty_def", "except", "AttributeError", ":", "new_dict", "[", "def_name", "]", "=", "value", "return", "new_dict" ]
Reads through the prop_defs and returns a dictionary filtered by the current class args: prop_defs: the definitions from the rdf vocabulary definition cls_object: the class object to tie the property cls_names: the name of the classes
[ "Reads", "through", "the", "prop_defs", "and", "returns", "a", "dictionary", "filtered", "by", "the", "current", "class" ]
python
train
yamins81/tabular
tabular/colors.py
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/colors.py#L13-L28
def Point2HexColor(a, lfrac, tfrac):
    """
    Return web-safe hex triplets.
    """
    [H, S, V] = [math.floor(360 * a), lfrac, tfrac]
    RGB = hsvToRGB(H, S, V)
    H = [hex(int(math.floor(255 * x))) for x in RGB]
    HEX = [a[a.find('x') + 1:] for a in H]
    HEX = ['0' + h if len(h) == 1 else h for h in HEX]
    return '#' + ''.join(HEX)
[ "def", "Point2HexColor", "(", "a", ",", "lfrac", ",", "tfrac", ")", ":", "[", "H", ",", "S", ",", "V", "]", "=", "[", "math", ".", "floor", "(", "360", "*", "a", ")", ",", "lfrac", ",", "tfrac", "]", "RGB", "=", "hsvToRGB", "(", "H", ",", "S", ",", "V", ")", "H", "=", "[", "hex", "(", "int", "(", "math", ".", "floor", "(", "255", "*", "x", ")", ")", ")", "for", "x", "in", "RGB", "]", "HEX", "=", "[", "a", "[", "a", ".", "find", "(", "'x'", ")", "+", "1", ":", "]", "for", "a", "in", "H", "]", "HEX", "=", "[", "'0'", "+", "h", "if", "len", "(", "h", ")", "==", "1", "else", "h", "for", "h", "in", "HEX", "]", "return", "'#'", "+", "''", ".", "join", "(", "HEX", ")" ]
Return web-safe hex triplets.
[ "Return", "web", "-", "safe", "hex", "triplets", "." ]
python
train
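A quick sanity check of the mapping, assuming hsvToRGB (from the same module) returns channel values in [0, 1]: a hue fraction of 0.5 gives H=180, and full saturation/value should land on cyan.

from tabular.colors import Point2HexColor

print(Point2HexColor(0.5, 1, 1))  # expected '#00ffff' under the assumption above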
thiagopbueno/tf-rddlsim
tfrddlsim/simulation/policy_simulator.py
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L157-L161
def _tensors(cls, fluents: Sequence[FluentPair]) -> Iterable[tf.Tensor]:
    '''Yields the `fluents`' tensors.'''
    for _, fluent in fluents:
        tensor = cls._output_size(fluent.tensor)
        yield tensor
[ "def", "_tensors", "(", "cls", ",", "fluents", ":", "Sequence", "[", "FluentPair", "]", ")", "->", "Iterable", "[", "tf", ".", "Tensor", "]", ":", "for", "_", ",", "fluent", "in", "fluents", ":", "tensor", "=", "cls", ".", "_output_size", "(", "fluent", ".", "tensor", ")", "yield", "tensor" ]
Yields the `fluents`' tensors.
[ "Yields", "the", "fluents", "tensors", "." ]
python
train
frmdstryr/enamlx
enamlx/qt/qt_graphics_view.py
https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_graphics_view.py#L698-L709
def on_selection_changed(self):
    """ Callback invoked once the selection has changed. """
    d = self.declaration
    selection = self.scene.selectedItems()
    self._guards |= 0x01
    try:
        d.selected_items = [item.ref().declaration for item in selection
                            if item.ref()]
    finally:
        self._guards &= ~0x01
[ "def", "on_selection_changed", "(", "self", ")", ":", "d", "=", "self", ".", "declaration", "selection", "=", "self", ".", "scene", ".", "selectedItems", "(", ")", "self", ".", "_guards", "|=", "0x01", "try", ":", "d", ".", "selected_items", "=", "[", "item", ".", "ref", "(", ")", ".", "declaration", "for", "item", "in", "selection", "if", "item", ".", "ref", "(", ")", "]", "finally", ":", "self", ".", "_guards", "&=", "~", "0x01" ]
Callback invoked once the selection has changed.
[ "Callback", "invoked", "one", "the", "selection", "has", "changed", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_trilloam.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_trilloam.py#L12-L23
def l2traceroute_input_src_mac(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    l2traceroute = ET.Element("l2traceroute")
    config = l2traceroute
    input = ET.SubElement(l2traceroute, "input")
    src_mac = ET.SubElement(input, "src-mac")
    src_mac.text = kwargs.pop('src_mac')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "l2traceroute_input_src_mac", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "l2traceroute", "=", "ET", ".", "Element", "(", "\"l2traceroute\"", ")", "config", "=", "l2traceroute", "input", "=", "ET", ".", "SubElement", "(", "l2traceroute", ",", "\"input\"", ")", "src_mac", "=", "ET", ".", "SubElement", "(", "input", ",", "\"src-mac\"", ")", "src_mac", ".", "text", "=", "kwargs", ".", "pop", "(", "'src_mac'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
chakki-works/seqeval
seqeval/metrics/sequence_labeling.py
https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/metrics/sequence_labeling.py#L86-L113
def start_of_chunk(prev_tag, tag, prev_type, type_):
    """Checks if a chunk started between the previous and current word.

    Args:
        prev_tag: previous chunk tag.
        tag: current chunk tag.
        prev_type: previous type.
        type_: current type.

    Returns:
        chunk_start: boolean.
    """
    chunk_start = False

    if tag == 'B':
        chunk_start = True
    if tag == 'S':
        chunk_start = True

    if prev_tag == 'E' and tag == 'E':
        chunk_start = True
    if prev_tag == 'E' and tag == 'I':
        chunk_start = True
    if prev_tag == 'S' and tag == 'E':
        chunk_start = True
    if prev_tag == 'S' and tag == 'I':
        chunk_start = True
    if prev_tag == 'O' and tag == 'E':
        chunk_start = True
    if prev_tag == 'O' and tag == 'I':
        chunk_start = True

    if tag != 'O' and tag != '.' and prev_type != type_:
        chunk_start = True

    return chunk_start
[ "def", "start_of_chunk", "(", "prev_tag", ",", "tag", ",", "prev_type", ",", "type_", ")", ":", "chunk_start", "=", "False", "if", "tag", "==", "'B'", ":", "chunk_start", "=", "True", "if", "tag", "==", "'S'", ":", "chunk_start", "=", "True", "if", "prev_tag", "==", "'E'", "and", "tag", "==", "'E'", ":", "chunk_start", "=", "True", "if", "prev_tag", "==", "'E'", "and", "tag", "==", "'I'", ":", "chunk_start", "=", "True", "if", "prev_tag", "==", "'S'", "and", "tag", "==", "'E'", ":", "chunk_start", "=", "True", "if", "prev_tag", "==", "'S'", "and", "tag", "==", "'I'", ":", "chunk_start", "=", "True", "if", "prev_tag", "==", "'O'", "and", "tag", "==", "'E'", ":", "chunk_start", "=", "True", "if", "prev_tag", "==", "'O'", "and", "tag", "==", "'I'", ":", "chunk_start", "=", "True", "if", "tag", "!=", "'O'", "and", "tag", "!=", "'.'", "and", "prev_type", "!=", "type_", ":", "chunk_start", "=", "True", "return", "chunk_start" ]
Checks if a chunk started between the previous and current word. Args: prev_tag: previous chunk tag. tag: current chunk tag. prev_type: previous type. type_: current type. Returns: chunk_start: boolean.
[ "Checks", "if", "a", "chunk", "started", "between", "the", "previous", "and", "current", "word", "." ]
python
train
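A few direct checks of the boundary rules above, using the record's import path:

from seqeval.metrics.sequence_labeling import start_of_chunk

assert start_of_chunk('O', 'B', '', 'PER') is True      # 'B' always opens a chunk
assert start_of_chunk('B', 'I', 'PER', 'PER') is False  # 'I' continuing same type does not
assert start_of_chunk('I', 'I', 'PER', 'LOC') is True   # a type change opens a new chunk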
prompt-toolkit/pymux
pymux/options.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/options.py#L100-L112
def set_value(self, pymux, value):
    """
    Take a string, and return an integer. Raise SetOptionError when the
    given text does not parse to a positive integer.
    """
    try:
        value = int(value)
        if value < 0:
            raise ValueError
    except ValueError:
        raise SetOptionError('Expecting an integer.')
    else:
        setattr(pymux, self.attribute_name, value)
[ "def", "set_value", "(", "self", ",", "pymux", ",", "value", ")", ":", "try", ":", "value", "=", "int", "(", "value", ")", "if", "value", "<", "0", ":", "raise", "ValueError", "except", "ValueError", ":", "raise", "SetOptionError", "(", "'Expecting an integer.'", ")", "else", ":", "setattr", "(", "pymux", ",", "self", ".", "attribute_name", ",", "value", ")" ]
Take a string, and return an integer. Raise SetOptionError when the given text does not parse to a positive integer.
[ "Take", "a", "string", "and", "return", "an", "integer", ".", "Raise", "SetOptionError", "when", "the", "given", "text", "does", "not", "parse", "to", "a", "positive", "integer", "." ]
python
train
twisted/mantissa
xmantissa/liveform.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/liveform.py#L1315-L1321
def repeater(self, req, tag):
    """
    Render some UI for repeating our form.
    """
    repeater = inevow.IQ(self.docFactory).onePattern('repeater')
    return repeater.fillSlots(
        'object-description', self.parameter.modelObjectDescription)
[ "def", "repeater", "(", "self", ",", "req", ",", "tag", ")", ":", "repeater", "=", "inevow", ".", "IQ", "(", "self", ".", "docFactory", ")", ".", "onePattern", "(", "'repeater'", ")", "return", "repeater", ".", "fillSlots", "(", "'object-description'", ",", "self", ".", "parameter", ".", "modelObjectDescription", ")" ]
Render some UI for repeating our form.
[ "Render", "some", "UI", "for", "repeating", "our", "form", "." ]
python
train
Godley/MuseParse
MuseParse/classes/ObjectHierarchy/ItemClasses/Note.py
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/ItemClasses/Note.py#L246-L267
def GetNotation(self, id, type):
    '''
    method which searches for notation from <type> list at position <id>

    :param id: the number to look for - i.e. if you're looking for the
        first one in wrap notation, id will be 0
    :param type: post, pre or wrap
    :return: the notation class searched for, or None
    '''
    if type == "post":
        if (id == -1 and len(self.postnotation) > 0) or (id != -1 and len(self.postnotation) > id):
            return self.postnotation[id]
    if type == "pre":
        if (id == -1 and len(self.prenotation) > 0) or (id != -1 and len(self.prenotation) > id):
            return self.prenotation[id]
    if type == "wrap":
        if (id == -1 and len(self.wrap_notation) > 0) or (id != -1 and len(self.wrap_notation) > id):
            return self.wrap_notation[id]
[ "def", "GetNotation", "(", "self", ",", "id", ",", "type", ")", ":", "if", "type", "==", "\"post\"", ":", "if", "(", "id", "==", "-", "1", "and", "len", "(", "self", ".", "postnotation", ")", ">", "0", ")", "or", "(", "id", "!=", "-", "1", "and", "len", "(", "self", ".", "postnotation", ")", ">", "id", ")", ":", "return", "self", ".", "postnotation", "[", "id", "]", "if", "type", "==", "\"pre\"", ":", "if", "(", "id", "==", "-", "1", "and", "len", "(", "self", ".", "prenotation", ")", ">", "0", ")", "or", "(", "id", "!=", "-", "1", "and", "len", "(", "self", ".", "postnotation", ")", ">", "id", ")", ":", "return", "self", ".", "prenotation", "[", "id", "]", "if", "type", "==", "\"wrap\"", ":", "if", "(", "id", "==", "-", "1", "and", "len", "(", "self", ".", "wrap_notation", ")", ">", "0", ")", "or", "(", "id", "!=", "-", "1", "and", "len", "(", "self", ".", "postnotation", ")", ">", "id", ")", ":", "return", "self", ".", "wrap_notation", "[", "id", "]" ]
method which searches for notation from <type> list at position <id> :param id: the number to look for - i.e. if you're looking for the first one in wrap notation, id will be 0 :param type: post, pre or wrap :return: the notation class searched for, or None
[ "method", "which", "searches", "for", "notation", "from", "<type", ">", "list", "at", "position", "<id", ">", ":", "param", "id", ":", "the", "number", "to", "look", "for", "-", "i", ".", "e", "if", "you", "re", "looking", "for", "the", "first", "one", "in", "wrap", "notation", "id", "will", "be", "0", ":", "param", "type", ":", "post", "pre", "or", "wrap", ":", "return", ":", "the", "notation", "class", "searched", "for", "or", "none" ]
python
train
jashort/SmartFileSorter
smartfilesorter/ruleset.py
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/ruleset.py#L67-L82
def add_rule(self, config_name, value, plugins, destination):
    """
    Adds a rule. Use add_action_rule or add_match_rule instead
    :param config_name: config_name of the plugin to add
    :param value: configuration information for the rule
    :param plugins: list of all available plugins
    :param destination: list to append plugin to (self.action_rules or self.match_rules)
    :return:
    """
    if config_name in plugins:
        rule = plugins[config_name](value)
        destination.append(rule)
    else:
        self.logger.error("Plugin with config_name {0} not found".format(config_name))
        raise IndexError("Plugin with config_name {0} not found".format(config_name))
[ "def", "add_rule", "(", "self", ",", "config_name", ",", "value", ",", "plugins", ",", "destination", ")", ":", "if", "config_name", "in", "plugins", ":", "rule", "=", "plugins", "[", "config_name", "]", "(", "value", ")", "destination", ".", "append", "(", "rule", ")", "else", ":", "self", ".", "logger", ".", "error", "(", "\"Plugin with config_name {0} not found\"", ".", "format", "(", "config_name", ")", ")", "raise", "IndexError", "(", "\"Plugin with config_name {0} not found\"", ".", "format", "(", "config_name", ")", ")" ]
Adds a rule. Use add_action_rule or add_match_rule instead :param config_name: config_name of the plugin to add :param value: configuration information for the rule :param plugins: list of all available plugins :param destination: list to append plugin to (self.action_rules or self.match_rules) :return:
[ "Adds", "a", "rule", ".", "Use", "add_action_rule", "or", "add_match_rule", "instead", ":", "param", "rule_wrapper", ":", "Rule", "wrapper", "class", "(", "ActionRule", "or", "MatchRule", ")", ":", "param", "config_name", ":", "config_name", "of", "the", "plugin", "to", "add", ":", "param", "value", ":", "configuration", "information", "for", "the", "rule", ":", "param", "plugins", ":", "list", "of", "all", "available", "plugins", ":", "param", "destination", ":", "list", "to", "append", "plugin", "to", "(", "self", ".", "action_rules", "or", "self", ".", "match_rules", ")", ":", "return", ":" ]
python
train
stlehmann/pyads
pyads/pyads_ex.py
https://github.com/stlehmann/pyads/blob/44bd84394db2785332ac44b2948373916bea0f02/pyads/pyads_ex.py#L296-L335
def adsSyncWriteControlReqEx(port, address, ads_state, device_state,
                             data, plc_data_type):
    # type: (int, AmsAddr, int, int, Any, Type) -> None
    """Change the ADS state and the machine-state of the ADS-server.

    :param int port: local AMS port as returned by adsPortOpenEx()
    :param pyads.structs.AmsAddr adr: local or remote AmsAddr
    :param int ads_state: new ADS-state, according to ADSTATE constants
    :param int device_state: new machine-state
    :param data: additional data
    :param int plc_data_type: plc datatype, according to PLCTYPE constants
    """
    sync_write_control_request = _adsDLL.AdsSyncWriteControlReqEx

    ams_address_pointer = ctypes.pointer(address.amsAddrStruct())
    ads_state_c = ctypes.c_ulong(ads_state)
    device_state_c = ctypes.c_ulong(device_state)

    if plc_data_type == PLCTYPE_STRING:
        data = ctypes.c_char_p(data.encode("utf-8"))
        data_pointer = data
        data_length = len(data_pointer.value) + 1
    else:
        data = plc_data_type(data)
        data_pointer = ctypes.pointer(data)
        data_length = ctypes.sizeof(data)

    error_code = sync_write_control_request(
        port,
        ams_address_pointer,
        ads_state_c,
        device_state_c,
        data_length,
        data_pointer,
    )

    if error_code:
        raise ADSError(error_code)
[ "def", "adsSyncWriteControlReqEx", "(", "port", ",", "address", ",", "ads_state", ",", "device_state", ",", "data", ",", "plc_data_type", ")", ":", "# type: (int, AmsAddr, int, int, Any, Type) -> None", "sync_write_control_request", "=", "_adsDLL", ".", "AdsSyncWriteControlReqEx", "ams_address_pointer", "=", "ctypes", ".", "pointer", "(", "address", ".", "amsAddrStruct", "(", ")", ")", "ads_state_c", "=", "ctypes", ".", "c_ulong", "(", "ads_state", ")", "device_state_c", "=", "ctypes", ".", "c_ulong", "(", "device_state", ")", "if", "plc_data_type", "==", "PLCTYPE_STRING", ":", "data", "=", "ctypes", ".", "c_char_p", "(", "data", ".", "encode", "(", "\"utf-8\"", ")", ")", "data_pointer", "=", "data", "data_length", "=", "len", "(", "data_pointer", ".", "value", ")", "+", "1", "else", ":", "data", "=", "plc_data_type", "(", "data", ")", "data_pointer", "=", "ctypes", ".", "pointer", "(", "data", ")", "data_length", "=", "ctypes", ".", "sizeof", "(", "data", ")", "error_code", "=", "sync_write_control_request", "(", "port", ",", "ams_address_pointer", ",", "ads_state_c", ",", "device_state_c", ",", "data_length", ",", "data_pointer", ",", ")", "if", "error_code", ":", "raise", "ADSError", "(", "error_code", ")" ]
Change the ADS state and the machine-state of the ADS-server. :param int port: local AMS port as returned by adsPortOpenEx() :param pyads.structs.AmsAddr adr: local or remote AmsAddr :param int ads_state: new ADS-state, according to ADSTATE constants :param int device_state: new machine-state :param data: additional data :param int plc_data_type: plc datatype, according to PLCTYPE constants
[ "Change", "the", "ADS", "state", "and", "the", "machine", "-", "state", "of", "the", "ADS", "-", "server", "." ]
python
valid
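A hedged sketch of driving the call above. The port comes from adsPortOpenEx and the target from an AmsAddr, as the docstring says; the net id, PLC port, and the specific constant names are assumptions to verify against pyads.constants.

from pyads.structs import AmsAddr
from pyads.constants import ADSSTATE_RESET, PLCTYPE_INT  # names assumed
from pyads.pyads_ex import (adsPortOpenEx, adsPortCloseEx,
                            adsSyncWriteControlReqEx)

port = adsPortOpenEx()
address = AmsAddr('192.168.0.5.1.1', 851)  # placeholder net id / PLC port
adsSyncWriteControlReqEx(port, address, ADSSTATE_RESET, 0, 0, PLCTYPE_INT)
adsPortCloseEx(port)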
undertheseanlp/underthesea
underthesea/corpus/ws.py
https://github.com/undertheseanlp/underthesea/blob/3663427da65e2b449e9135e3812edecb938b2319/underthesea/corpus/ws.py#L73-L88
def save(self, folder, format):
    """save wscorpus to files

    :param str folder: path to directory
    :type folder: string
    :param str format: either TEXT or COLUMN
    :type format: str
    """
    try:
        mkdir(folder)
    except Exception:
        pass
    for document in self.documents:
        f = join(folder, document.id)
        content = u"\n".join(document.sentences)
        write(f, content)
[ "def", "save", "(", "self", ",", "folder", ",", "format", ")", ":", "try", ":", "mkdir", "(", "folder", ")", "except", "Exception", ":", "pass", "for", "document", "in", "self", ".", "documents", ":", "f", "=", "join", "(", "folder", ",", "document", ".", "id", ")", "content", "=", "u\"\\n\"", ".", "join", "(", "document", ".", "sentences", ")", "write", "(", "f", ",", "content", ")" ]
save wscorpus to files :param str folder: path to directory :type folder: string :param str format: either TEXT or COLUMN :type format: str
[ "save", "wscorpus", "to", "files" ]
python
train
Alir3z4/django-databrowse
django_databrowse/sites.py
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/sites.py#L119-L136
def register(self, *model_list, **options):
    """
    Registers the given model(s) with the given databrowse site.

    The model(s) should be Model classes, not instances.

    If a databrowse class isn't given, it will use
    DefaultModelDatabrowse (the default databrowse options).

    If a model is already registered, this will raise AlreadyRegistered.
    """
    databrowse_class = options.pop('databrowse_class',
                                   DefaultModelDatabrowse)
    for model in model_list:
        if model in self.registry:
            raise AlreadyRegistered('The model %s is already registered'
                                    % model.__name__)
        self.registry[model] = databrowse_class
[ "def", "register", "(", "self", ",", "*", "model_list", ",", "*", "*", "options", ")", ":", "databrowse_class", "=", "options", ".", "pop", "(", "'databrowse_class'", ",", "DefaultModelDatabrowse", ")", "for", "model", "in", "model_list", ":", "if", "model", "in", "self", ".", "registry", ":", "raise", "AlreadyRegistered", "(", "'The model %s is already registered'", "%", "model", ".", "__name__", ")", "self", ".", "registry", "[", "model", "]", "=", "databrowse_class" ]
Registers the given model(s) with the given databrowse site. The model(s) should be Model classes, not instances. If a databrowse class isn't given, it will use DefaultModelDatabrowse (the default databrowse options). If a model is already registered, this will raise AlreadyRegistered.
[ "Registers", "the", "given", "model", "(", "s", ")", "with", "the", "given", "databrowse", "site", "." ]
python
train
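Registration mirrors the admin-style pattern; this sketch assumes the package exposes a module-level site instance (as is conventional for databrowse), and the model is a placeholder.

import django_databrowse
from myapp.models import Entry  # placeholder model

django_databrowse.site.register(Entry)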
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L1321-L1336
def get_for_site(cls, handle, site, fall_back_to_default_site_menus=False):
    """Return a FlatMenu instance with a matching ``handle`` for the
    provided ``site``, or for the default site (if suitable). If no
    match is found, returns None."""
    queryset = cls.objects.filter(handle__exact=handle)
    site_q = Q(site=site)
    if fall_back_to_default_site_menus:
        site_q |= Q(site__is_default_site=True)
    queryset = queryset.filter(site_q)
    # return the best match or None
    return queryset.annotate(matched_provided_site=Case(
        When(site_id=site.id, then=1), default=0,
        output_field=BooleanField()
    )).order_by('-matched_provided_site').first()
[ "def", "get_for_site", "(", "cls", ",", "handle", ",", "site", ",", "fall_back_to_default_site_menus", "=", "False", ")", ":", "queryset", "=", "cls", ".", "objects", ".", "filter", "(", "handle__exact", "=", "handle", ")", "site_q", "=", "Q", "(", "site", "=", "site", ")", "if", "fall_back_to_default_site_menus", ":", "site_q", "|=", "Q", "(", "site__is_default_site", "=", "True", ")", "queryset", "=", "queryset", ".", "filter", "(", "site_q", ")", "# return the best match or None", "return", "queryset", ".", "annotate", "(", "matched_provided_site", "=", "Case", "(", "When", "(", "site_id", "=", "site", ".", "id", ",", "then", "=", "1", ")", ",", "default", "=", "0", ",", "output_field", "=", "BooleanField", "(", ")", ")", ")", ".", "order_by", "(", "'-matched_provided_site'", ")", ".", "first", "(", ")" ]
Return a FlatMenu instance with a matching ``handle`` for the provided ``site``, or for the default site (if suitable). If no match is found, returns None.
[ "Return", "a", "FlatMenu", "instance", "with", "a", "matching", "handle", "for", "the", "provided", "site", "or", "for", "the", "default", "site", "(", "if", "suitable", ")", ".", "If", "no", "match", "is", "found", "returns", "None", "." ]
python
train
fastai/fastai
fastai/vision/data.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/data.py#L149-L157
def from_name_re(cls, path:PathOrStr, fnames:FilePathList, pat:str, valid_pct:float=0.2, **kwargs):
    "Create from list of `fnames` in `path` with re expression `pat`."
    pat = re.compile(pat)
    def _get_label(fn):
        if isinstance(fn, Path): fn = fn.as_posix()
        res = pat.search(str(fn))
        assert res,f'Failed to find "{pat}" in "{fn}"'
        return res.group(1)
    return cls.from_name_func(path, fnames, _get_label, valid_pct=valid_pct, **kwargs)
[ "def", "from_name_re", "(", "cls", ",", "path", ":", "PathOrStr", ",", "fnames", ":", "FilePathList", ",", "pat", ":", "str", ",", "valid_pct", ":", "float", "=", "0.2", ",", "*", "*", "kwargs", ")", ":", "pat", "=", "re", ".", "compile", "(", "pat", ")", "def", "_get_label", "(", "fn", ")", ":", "if", "isinstance", "(", "fn", ",", "Path", ")", ":", "fn", "=", "fn", ".", "as_posix", "(", ")", "res", "=", "pat", ".", "search", "(", "str", "(", "fn", ")", ")", "assert", "res", ",", "f'Failed to find \"{pat}\" in \"{fn}\"'", "return", "res", ".", "group", "(", "1", ")", "return", "cls", ".", "from_name_func", "(", "path", ",", "fnames", ",", "_get_label", ",", "valid_pct", "=", "valid_pct", ",", "*", "*", "kwargs", ")" ]
Create from list of `fnames` in `path` with re expression `pat`.
[ "Create", "from", "list", "of", "fnames", "in", "path", "with", "re", "expression", "pat", "." ]
python
train
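The heart of `from_name_re` is extracting a label from each filename via the regex's first capture group; a pure-`re` sketch of just that step (the pattern and filenames below are invented):

import re
from pathlib import Path

pat = re.compile(r'/([^/]+)_\d+\.jpg$')  # label is the text before the trailing digits

def get_label(fn):
    fn = Path(fn).as_posix()
    res = pat.search(fn)
    assert res, f'Failed to find "{pat}" in "{fn}"'
    return res.group(1)

print(get_label('data/train/cat_001.jpg'))  # -> cat
print(get_label('data/train/dog_042.jpg'))  # -> dog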
edx/edx-enterprise
enterprise/views.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/views.py#L776-L805
def get_available_course_modes(self, request, course_run_id, enterprise_catalog):
    """
    Return the available course modes for the course run.

    The provided EnterpriseCustomerCatalog is used to filter and order
    the course modes returned using the EnterpriseCustomerCatalog's
    field "enabled_course_modes".
    """
    modes = EnrollmentApiClient().get_course_modes(course_run_id)
    if not modes:
        LOGGER.warning('Unable to get course modes for course run id {course_run_id}.'.format(
            course_run_id=course_run_id
        ))
        messages.add_generic_info_message_for_error(request)

    if enterprise_catalog:
        # filter and order course modes according to the enterprise catalog
        modes = [mode for mode in modes if mode['slug'] in enterprise_catalog.enabled_course_modes]
        modes.sort(key=lambda course_mode: enterprise_catalog.enabled_course_modes.index(course_mode['slug']))
        if not modes:
            LOGGER.info(
                'No matching course modes found for course run {course_run_id} in '
                'EnterpriseCustomerCatalog [{enterprise_catalog_uuid}]'.format(
                    course_run_id=course_run_id,
                    enterprise_catalog_uuid=enterprise_catalog,
                )
            )
            messages.add_generic_info_message_for_error(request)
    return modes
[ "def", "get_available_course_modes", "(", "self", ",", "request", ",", "course_run_id", ",", "enterprise_catalog", ")", ":", "modes", "=", "EnrollmentApiClient", "(", ")", ".", "get_course_modes", "(", "course_run_id", ")", "if", "not", "modes", ":", "LOGGER", ".", "warning", "(", "'Unable to get course modes for course run id {course_run_id}.'", ".", "format", "(", "course_run_id", "=", "course_run_id", ")", ")", "messages", ".", "add_generic_info_message_for_error", "(", "request", ")", "if", "enterprise_catalog", ":", "# filter and order course modes according to the enterprise catalog", "modes", "=", "[", "mode", "for", "mode", "in", "modes", "if", "mode", "[", "'slug'", "]", "in", "enterprise_catalog", ".", "enabled_course_modes", "]", "modes", ".", "sort", "(", "key", "=", "lambda", "course_mode", ":", "enterprise_catalog", ".", "enabled_course_modes", ".", "index", "(", "course_mode", "[", "'slug'", "]", ")", ")", "if", "not", "modes", ":", "LOGGER", ".", "info", "(", "'No matching course modes found for course run {course_run_id} in '", "'EnterpriseCustomerCatalog [{enterprise_catalog_uuid}]'", ".", "format", "(", "course_run_id", "=", "course_run_id", ",", "enterprise_catalog_uuid", "=", "enterprise_catalog", ",", ")", ")", "messages", ".", "add_generic_info_message_for_error", "(", "request", ")", "return", "modes" ]
Return the available course modes for the course run.

The provided EnterpriseCustomerCatalog is used to filter and order
the course modes returned using the EnterpriseCustomerCatalog's
field "enabled_course_modes".
[ "Return", "the", "available", "course", "modes", "for", "the", "course", "run", "." ]
python
valid
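The filter-then-order step above is plain list manipulation; a self-contained sketch with made-up mode data:

modes = [{'slug': 'audit'}, {'slug': 'verified'}, {'slug': 'professional'}]
enabled_course_modes = ['verified', 'audit']  # the catalog's preferred order

# keep only enabled modes, then sort by their position in the catalog list
modes = [m for m in modes if m['slug'] in enabled_course_modes]
modes.sort(key=lambda m: enabled_course_modes.index(m['slug']))
print(modes)  # -> [{'slug': 'verified'}, {'slug': 'audit'}]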
GNS3/gns3-server
gns3server/controller/topology.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/topology.py#L169-L194
def _convert_2_0_0(topo, topo_path):
    """
    Convert topologies from GNS3 2.0.0 to 2.1

    Changes:
     * Remove startup_script_path from VPCS and base config file for IOU and Dynamips
    """
    topo["revision"] = 8
    for node in topo.get("topology", {}).get("nodes", []):
        if "properties" in node:
            if node["node_type"] == "vpcs":
                if "startup_script_path" in node["properties"]:
                    del node["properties"]["startup_script_path"]
                if "startup_script" in node["properties"]:
                    del node["properties"]["startup_script"]
            elif node["node_type"] == "dynamips" or node["node_type"] == "iou":
                if "startup_config" in node["properties"]:
                    del node["properties"]["startup_config"]
                if "private_config" in node["properties"]:
                    del node["properties"]["private_config"]
                if "startup_config_content" in node["properties"]:
                    del node["properties"]["startup_config_content"]
                if "private_config_content" in node["properties"]:
                    del node["properties"]["private_config_content"]
    return topo
[ "def", "_convert_2_0_0", "(", "topo", ",", "topo_path", ")", ":", "topo", "[", "\"revision\"", "]", "=", "8", "for", "node", "in", "topo", ".", "get", "(", "\"topology\"", ",", "{", "}", ")", ".", "get", "(", "\"nodes\"", ",", "[", "]", ")", ":", "if", "\"properties\"", "in", "node", ":", "if", "node", "[", "\"node_type\"", "]", "==", "\"vpcs\"", ":", "if", "\"startup_script_path\"", "in", "node", "[", "\"properties\"", "]", ":", "del", "node", "[", "\"properties\"", "]", "[", "\"startup_script_path\"", "]", "if", "\"startup_script\"", "in", "node", "[", "\"properties\"", "]", ":", "del", "node", "[", "\"properties\"", "]", "[", "\"startup_script\"", "]", "elif", "node", "[", "\"node_type\"", "]", "==", "\"dynamips\"", "or", "node", "[", "\"node_type\"", "]", "==", "\"iou\"", ":", "if", "\"startup_config\"", "in", "node", "[", "\"properties\"", "]", ":", "del", "node", "[", "\"properties\"", "]", "[", "\"startup_config\"", "]", "if", "\"private_config\"", "in", "node", "[", "\"properties\"", "]", ":", "del", "node", "[", "\"properties\"", "]", "[", "\"private_config\"", "]", "if", "\"startup_config_content\"", "in", "node", "[", "\"properties\"", "]", ":", "del", "node", "[", "\"properties\"", "]", "[", "\"startup_config_content\"", "]", "if", "\"private_config_content\"", "in", "node", "[", "\"properties\"", "]", ":", "del", "node", "[", "\"properties\"", "]", "[", "\"private_config_content\"", "]", "return", "topo" ]
Convert topologies from GNS3 2.0.0 to 2.1

Changes:
 * Remove startup_script_path from VPCS and base config file for IOU and Dynamips
[ "Convert", "topologies", "from", "GNS3", "2", ".", "0", ".", "0", "to", "2", ".", "1" ]
python
train
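Since `_convert_2_0_0` is a pure dict transform, it can be exercised directly; a minimal sketch assuming the function above is in scope (the toy topology below is invented):

topo = {
    "revision": 7,
    "topology": {"nodes": [
        {"node_type": "vpcs",
         "properties": {"name": "PC1", "startup_script_path": "startup.vpc"}},
    ]},
}
converted = _convert_2_0_0(topo, topo_path=None)
print(converted["revision"])                            # -> 8
print(converted["topology"]["nodes"][0]["properties"])  # -> {'name': 'PC1'}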
TeamHG-Memex/eli5
eli5/sklearn_crfsuite/explain_weights.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/sklearn_crfsuite/explain_weights.py#L115-L127
def sorted_for_ner(crf_classes):
    """
    Return labels sorted in a default order suitable for NER tasks:

    >>> sorted_for_ner(['B-ORG', 'B-PER', 'O', 'I-PER'])
    ['O', 'B-ORG', 'B-PER', 'I-PER']
    """
    def key(cls):
        if len(cls) > 2 and cls[1] == '-':
            # group names like B-ORG and I-ORG together
            return cls.split('-', 1)[1], cls
        return '', cls
    return sorted(crf_classes, key=key)
[ "def", "sorted_for_ner", "(", "crf_classes", ")", ":", "def", "key", "(", "cls", ")", ":", "if", "len", "(", "cls", ")", ">", "2", "and", "cls", "[", "1", "]", "==", "'-'", ":", "# group names like B-ORG and I-ORG together", "return", "cls", ".", "split", "(", "'-'", ",", "1", ")", "[", "1", "]", ",", "cls", "return", "''", ",", "cls", "return", "sorted", "(", "crf_classes", ",", "key", "=", "key", ")" ]
Return labels sorted in a default order suitable for NER tasks:

>>> sorted_for_ner(['B-ORG', 'B-PER', 'O', 'I-PER'])
['O', 'B-ORG', 'B-PER', 'I-PER']
[ "Return", "labels", "sorted", "in", "a", "default", "order", "suitable", "for", "NER", "tasks", ":" ]
python
train
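The sort key maps each BIO label to an (entity-type, label) pair, so 'O' (empty entity type) sorts first and B-/I- tags of the same entity stay adjacent. Assuming `sorted_for_ner` above is in scope:

print(sorted_for_ner(['I-LOC', 'B-LOC', 'O', 'B-ORG']))
# -> ['O', 'B-LOC', 'I-LOC', 'B-ORG']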
materialsproject/pymatgen
pymatgen/io/babel.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/babel.py#L100-L111
def localopt(self, forcefield='mmff94', steps=500):
    """
    A wrapper to pybel's localopt method to optimize a Molecule.

    Args:
        forcefield: Default is mmff94. Options are 'gaff', 'ghemical',
            'mmff94', 'mmff94s', and 'uff'.
        steps: Default is 500.
    """
    pbmol = pb.Molecule(self._obmol)
    pbmol.localopt(forcefield=forcefield, steps=steps)
    self._obmol = pbmol.OBMol
[ "def", "localopt", "(", "self", ",", "forcefield", "=", "'mmff94'", ",", "steps", "=", "500", ")", ":", "pbmol", "=", "pb", ".", "Molecule", "(", "self", ".", "_obmol", ")", "pbmol", ".", "localopt", "(", "forcefield", "=", "forcefield", ",", "steps", "=", "steps", ")", "self", ".", "_obmol", "=", "pbmol", ".", "OBMol" ]
A wrapper to pybel's localopt method to optimize a Molecule.

Args:
    forcefield: Default is mmff94. Options are 'gaff', 'ghemical',
        'mmff94', 'mmff94s', and 'uff'.
    steps: Default is 500.
[ "A", "wrapper", "to", "pybel", "s", "localopt", "method", "to", "optimize", "a", "Molecule", "." ]
python
train
gbowerman/azurerm
azurerm/amsrp.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/amsrp.py#L61-L78
def delete_media_service_rg(access_token, subscription_id, rgname, msname):
    '''Delete a media service.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        msname (str): Media service name.

    Returns:
        HTTP response.
    '''
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', rgname,
                        '/providers/microsoft.media/mediaservices/', msname,
                        '?api-version=', MEDIA_API])
    return do_delete(endpoint, access_token)
[ "def", "delete_media_service_rg", "(", "access_token", ",", "subscription_id", ",", "rgname", ",", "msname", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourceGroups/'", ",", "rgname", ",", "'/providers/microsoft.media/mediaservices/'", ",", "msname", ",", "'?api-version='", ",", "MEDIA_API", "]", ")", "return", "do_delete", "(", "endpoint", ",", "access_token", ")" ]
Delete a media service.

Args:
    access_token (str): A valid Azure authentication token.
    subscription_id (str): Azure subscription id.
    rgname (str): Azure resource group name.
    msname (str): Media service name.

Returns:
    HTTP response.
[ "Delete", "a", "media", "service", "." ]
python
train
robotools/fontParts
Lib/fontParts/base/normalizers.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/normalizers.py#L366-L376
def normalizeGlyphRightMargin(value):
    """
    Normalizes glyph right margin.

    * **value** must be a :ref:`type-int-float` or `None`.
    * Returned value is the same type as the input value.
    """
    if not isinstance(value, (int, float)) and value is not None:
        raise TypeError("Glyph right margin must be an :ref:`type-int-float`, "
                        "not %s." % type(value).__name__)
    return value
[ "def", "normalizeGlyphRightMargin", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "(", "int", ",", "float", ")", ")", "and", "value", "is", "not", "None", ":", "raise", "TypeError", "(", "\"Glyph right margin must be an :ref:`type-int-float`, \"", "\"not %s.\"", "%", "type", "(", "value", ")", ".", "__name__", ")", "return", "value" ]
Normalizes glyph right margin.

* **value** must be a :ref:`type-int-float` or `None`.
* Returned value is the same type as the input value.
[ "Normalizes", "glyph", "right", "margin", "." ]
python
train
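Assuming the normalizer above is in scope, its accept/reject behaviour looks like:

print(normalizeGlyphRightMargin(12))    # -> 12
print(normalizeGlyphRightMargin(3.5))   # -> 3.5
print(normalizeGlyphRightMargin(None))  # -> None (explicitly allowed)
try:
    normalizeGlyphRightMargin("12")
except TypeError as exc:
    print(exc)  # strings are rejected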
wheeler-microfluidics/dmf-control-board-firmware
dmf_control_board_firmware/__init__.py
https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/__init__.py#L470-L533
def Z_device(self, filter_order=None, window_size=None, tol=0.05):
    '''
    Compute the impedance *(including resistive and capacitive load)* of
    the DMF device *(i.e., dielectric and droplet)*.

    See :func:`calibrate.compute_from_transfer_function` for details.
    '''
    ind = mlab.find(self.fb_resistor >= 0)
    Z1 = np.empty(self.fb_resistor.shape)
    Z1.fill(np.nan)
    # convert to masked array
    Z1 = np.ma.masked_invalid(Z1)
    R2 = self.calibration.R_fb[self.fb_resistor[ind]]
    C2 = self.calibration.C_fb[self.fb_resistor[ind]]
    Z1[ind] = compute_from_transfer_function(self.calibration.hw_version.major,
                                             'Z1', V1=self.V_total()[ind],
                                             V2=self.V_fb[ind], R2=R2, C2=C2,
                                             f=self.frequency)
    Z1 = np.ma.masked_invalid(pd.Series(Z1, pd.to_datetime(self.time, unit='s'))
                              .interpolate(method='time').values)
    Z1.fill_value = np.nan
    Z1.data[Z1.mask] = Z1.fill_value

    # if we're filtering and we don't have a window size specified,
    # automatically determine one
    if filter_order and window_size is None:
        window_size = self._get_window_size(tol)

    # if the filter_order or window size is None or if the window size is
    # smaller than filter_order + 2, don't filter
    if (filter_order is None or window_size is None or
            window_size < filter_order + 2):
        pass
    else:
        # if the window size is less than half the sample length
        if window_size and window_size < len(Z1) / 2:
            # suppress polyfit warnings
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                Z1 = savgol_filter(Z1, window_size, filter_order)
        else:
            # fit a line
            result = self.mean_velocity(tol=tol)
            if result['dt'] and \
                    result['dt'] > 0.1 * self.time[-1] and result['p'][0] > 0:
                if self.calibration._c_drop:
                    c_drop = self.calibration.c_drop(self.frequency)
                else:
                    c_drop = self.capacitance()[-1] / self.area
                if self.calibration._c_filler:
                    c_filler = self.calibration.c_filler(self.frequency)
                else:
                    c_filler = 0
                x = result['p'][0]*self.time + result['p'][1]
                C = self.area * (x * (c_drop - c_filler) / \
                    np.sqrt(self.area) + c_filler)
                Z1 = 1.0 / (2.0 * math.pi * self.frequency * C)
                Z1[mlab.find(self.time==result['t_end'])[0]+1:] = \
                    Z1[mlab.find(self.time==result['t_end'])[0]]
            else:
                Z1 = np.mean(Z1)*np.ones(Z1.shape)
    return Z1
[ "def", "Z_device", "(", "self", ",", "filter_order", "=", "None", ",", "window_size", "=", "None", ",", "tol", "=", "0.05", ")", ":", "ind", "=", "mlab", ".", "find", "(", "self", ".", "fb_resistor", ">=", "0", ")", "Z1", "=", "np", ".", "empty", "(", "self", ".", "fb_resistor", ".", "shape", ")", "Z1", ".", "fill", "(", "np", ".", "nan", ")", "# convert to masked array", "Z1", "=", "np", ".", "ma", ".", "masked_invalid", "(", "Z1", ")", "R2", "=", "self", ".", "calibration", ".", "R_fb", "[", "self", ".", "fb_resistor", "[", "ind", "]", "]", "C2", "=", "self", ".", "calibration", ".", "C_fb", "[", "self", ".", "fb_resistor", "[", "ind", "]", "]", "Z1", "[", "ind", "]", "=", "compute_from_transfer_function", "(", "self", ".", "calibration", ".", "hw_version", ".", "major", ",", "'Z1'", ",", "V1", "=", "self", ".", "V_total", "(", ")", "[", "ind", "]", ",", "V2", "=", "self", ".", "V_fb", "[", "ind", "]", ",", "R2", "=", "R2", ",", "C2", "=", "C2", ",", "f", "=", "self", ".", "frequency", ")", "Z1", "=", "np", ".", "ma", ".", "masked_invalid", "(", "pd", ".", "Series", "(", "Z1", ",", "pd", ".", "to_datetime", "(", "self", ".", "time", ",", "unit", "=", "'s'", ")", ")", ".", "interpolate", "(", "method", "=", "'time'", ")", ".", "values", ")", "Z1", ".", "fill_value", "=", "np", ".", "nan", "Z1", ".", "data", "[", "Z1", ".", "mask", "]", "=", "Z1", ".", "fill_value", "# if we're filtering and we don't have a window size specified,", "# automatically determine one", "if", "filter_order", "and", "window_size", "is", "None", ":", "window_size", "=", "self", ".", "_get_window_size", "(", "tol", ")", "# if the filter_order or window size is None or if the window size is", "# smaller than filter_order + 2, don't filter", "if", "(", "filter_order", "is", "None", "or", "window_size", "is", "None", "or", "window_size", "<", "filter_order", "+", "2", ")", ":", "pass", "else", ":", "# if the window size is less than half the sample length", "if", "window_size", "and", "window_size", "<", "len", "(", "Z1", ")", "/", "2", ":", "# suppress polyfit warnings", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "Z1", "=", "savgol_filter", "(", "Z1", ",", "window_size", ",", "filter_order", ")", "else", ":", "# fit a line", "result", "=", "self", ".", "mean_velocity", "(", "tol", "=", "tol", ")", "if", "result", "[", "'dt'", "]", "and", "result", "[", "'dt'", "]", ">", "0.1", "*", "self", ".", "time", "[", "-", "1", "]", "and", "result", "[", "'p'", "]", "[", "0", "]", ">", "0", ":", "if", "self", ".", "calibration", ".", "_c_drop", ":", "c_drop", "=", "self", ".", "calibration", ".", "c_drop", "(", "self", ".", "frequency", ")", "else", ":", "c_drop", "=", "self", ".", "capacitance", "(", ")", "[", "-", "1", "]", "/", "self", ".", "area", "if", "self", ".", "calibration", ".", "_c_filler", ":", "c_filler", "=", "self", ".", "calibration", ".", "c_filler", "(", "self", ".", "frequency", ")", "else", ":", "c_filler", "=", "0", "x", "=", "result", "[", "'p'", "]", "[", "0", "]", "*", "self", ".", "time", "+", "result", "[", "'p'", "]", "[", "1", "]", "C", "=", "self", ".", "area", "*", "(", "x", "*", "(", "c_drop", "-", "c_filler", ")", "/", "np", ".", "sqrt", "(", "self", ".", "area", ")", "+", "c_filler", ")", "Z1", "=", "1.0", "/", "(", "2.0", "*", "math", ".", "pi", "*", "self", ".", "frequency", "*", "C", ")", "Z1", "[", "mlab", ".", "find", "(", "self", ".", "time", "==", "result", "[", "'t_end'", "]", ")", "[", "0", "]", "+", "1", ":", 
"]", "=", "Z1", "[", "mlab", ".", "find", "(", "self", ".", "time", "==", "result", "[", "'t_end'", "]", ")", "[", "0", "]", "]", "else", ":", "Z1", "=", "np", ".", "mean", "(", "Z1", ")", "*", "np", ".", "ones", "(", "Z1", ".", "shape", ")", "return", "Z1" ]
Compute the impedance *(including resistive and capacitive load)* of the DMF device *(i.e., dielectric and droplet)*. See :func:`calibrate.compute_from_transfer_function` for details.
[ "Compute", "the", "impedance", "*", "(", "including", "resistive", "and", "capacitive", "load", ")", "*", "of", "the", "DMF", "device", "*", "(", "i", ".", "e", ".", "dielectric", "and", "droplet", ")", "*", "." ]
python
train
sosreport/sos
sos/plugins/__init__.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/plugins/__init__.py#L680-L693
def add_forbidden_path(self, forbidden):
    """Specify a path, or list of paths, to not copy, even if it's part of
    a copy_specs[] entry.
    """
    if isinstance(forbidden, six.string_types):
        forbidden = [forbidden]

    if self.use_sysroot():
        forbidden = [self.join_sysroot(f) for f in forbidden]

    for forbid in forbidden:
        self._log_info("adding forbidden path '%s'" % forbid)
        for path in glob.glob(forbid):
            self.forbidden_paths.append(path)
[ "def", "add_forbidden_path", "(", "self", ",", "forbidden", ")", ":", "if", "isinstance", "(", "forbidden", ",", "six", ".", "string_types", ")", ":", "forbidden", "=", "[", "forbidden", "]", "if", "self", ".", "use_sysroot", "(", ")", ":", "forbidden", "=", "[", "self", ".", "join_sysroot", "(", "f", ")", "for", "f", "in", "forbidden", "]", "for", "forbid", "in", "forbidden", ":", "self", ".", "_log_info", "(", "\"adding forbidden path '%s'\"", "%", "forbid", ")", "for", "path", "in", "glob", ".", "glob", "(", "forbid", ")", ":", "self", ".", "forbidden_paths", ".", "append", "(", "path", ")" ]
Specify a path, or list of paths, to not copy, even if it's part of a copy_specs[] entry.
[ "Specify", "a", "path", "or", "list", "of", "paths", "to", "not", "copy", "even", "if", "it", "s", "part", "of", "a", "copy_specs", "[]", "entry", "." ]
python
train
fred49/linshare-api
linshareapi/user/threadentries.py
https://github.com/fred49/linshare-api/blob/be646c25aa8ba3718abb6869c620b157d53d6e41/linshareapi/user/threadentries.py#L321-L333
def update(self, data):
    """ Update meta of one document."""
    self.debug(data)
    self._check(data)
    wg_uuid = data.get('workGroup')
    self.log.debug("wg_uuid : %s ", wg_uuid)
    uuid = data.get('uuid')
    url = "%(base)s/%(wg_uuid)s/nodes/%(uuid)s" % {
        'base': self.local_base_url,
        'wg_uuid': wg_uuid,
        'uuid': uuid
    }
    return self.core.update(url, data)
[ "def", "update", "(", "self", ",", "data", ")", ":", "self", ".", "debug", "(", "data", ")", "self", ".", "_check", "(", "data", ")", "wg_uuid", "=", "data", ".", "get", "(", "'workGroup'", ")", "self", ".", "log", ".", "debug", "(", "\"wg_uuid : %s \"", ",", "wg_uuid", ")", "uuid", "=", "data", ".", "get", "(", "'uuid'", ")", "url", "=", "\"%(base)s/%(wg_uuid)s/nodes/%(uuid)s\"", "%", "{", "'base'", ":", "self", ".", "local_base_url", ",", "'wg_uuid'", ":", "wg_uuid", ",", "'uuid'", ":", "uuid", "}", "return", "self", ".", "core", ".", "update", "(", "url", ",", "data", ")" ]
Update meta of one document.
[ "Update", "meta", "of", "one", "document", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/graph/graph_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/graph/graph_client.py#L338-L355
def create_user(self, creation_context, group_descriptors=None):
    """CreateUser.
    [Preview API] Materialize an existing AAD or MSA user into the VSTS account.
    :param :class:`<GraphUserCreationContext> <azure.devops.v5_1.graph.models.GraphUserCreationContext>` creation_context: The subset of the full graph user used to uniquely find the graph subject in an external provider.
    :param [str] group_descriptors: A comma separated list of descriptors of groups you want the graph user to join
    :rtype: :class:`<GraphUser> <azure.devops.v5_1.graph.models.GraphUser>`
    """
    query_parameters = {}
    if group_descriptors is not None:
        group_descriptors = ",".join(group_descriptors)
        query_parameters['groupDescriptors'] = self._serialize.query('group_descriptors', group_descriptors, 'str')
    content = self._serialize.body(creation_context, 'GraphUserCreationContext')
    response = self._send(http_method='POST',
                          location_id='005e26ec-6b77-4e4f-a986-b3827bf241f5',
                          version='5.1-preview.1',
                          query_parameters=query_parameters,
                          content=content)
    return self._deserialize('GraphUser', response)
[ "def", "create_user", "(", "self", ",", "creation_context", ",", "group_descriptors", "=", "None", ")", ":", "query_parameters", "=", "{", "}", "if", "group_descriptors", "is", "not", "None", ":", "group_descriptors", "=", "\",\"", ".", "join", "(", "group_descriptors", ")", "query_parameters", "[", "'groupDescriptors'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'group_descriptors'", ",", "group_descriptors", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "creation_context", ",", "'GraphUserCreationContext'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'005e26ec-6b77-4e4f-a986-b3827bf241f5'", ",", "version", "=", "'5.1-preview.1'", ",", "query_parameters", "=", "query_parameters", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'GraphUser'", ",", "response", ")" ]
CreateUser.
[Preview API] Materialize an existing AAD or MSA user into the VSTS account.

:param :class:`<GraphUserCreationContext> <azure.devops.v5_1.graph.models.GraphUserCreationContext>` creation_context: The subset of the full graph user used to uniquely find the graph subject in an external provider.
:param [str] group_descriptors: A comma separated list of descriptors of groups you want the graph user to join
:rtype: :class:`<GraphUser> <azure.devops.v5_1.graph.models.GraphUser>`
[ "CreateUser", ".", "[", "Preview", "API", "]", "Materialize", "an", "existing", "AAD", "or", "MSA", "user", "into", "the", "VSTS", "account", ".", ":", "param", ":", "class", ":", "<GraphUserCreationContext", ">", "<azure", ".", "devops", ".", "v5_1", ".", "graph", ".", "models", ".", "GraphUserCreationContext", ">", "creation_context", ":", "The", "subset", "of", "the", "full", "graph", "user", "used", "to", "uniquely", "find", "the", "graph", "subject", "in", "an", "external", "provider", ".", ":", "param", "[", "str", "]", "group_descriptors", ":", "A", "comma", "separated", "list", "of", "descriptors", "of", "groups", "you", "want", "the", "graph", "user", "to", "join", ":", "rtype", ":", ":", "class", ":", "<GraphUser", ">", "<azure", ".", "devops", ".", "v5_1", ".", "graph", ".", "models", ".", "GraphUser", ">" ]
python
train
sryza/spark-timeseries
python/sparkts/utils.py
https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/utils.py#L37-L50
def datetime_to_nanos(dt):
    """
    Accepts a string, Pandas Timestamp, or long, and returns nanos since the epoch.
    """
    if isinstance(dt, pd.Timestamp):
        return dt.value
    elif isinstance(dt, str):
        return pd.Timestamp(dt).value
    elif isinstance(dt, long):
        return dt
    elif isinstance(dt, datetime):
        return long(dt.strftime("%s%f")) * 1000
    raise ValueError
[ "def", "datetime_to_nanos", "(", "dt", ")", ":", "if", "isinstance", "(", "dt", ",", "pd", ".", "Timestamp", ")", ":", "return", "dt", ".", "value", "elif", "isinstance", "(", "dt", ",", "str", ")", ":", "return", "pd", ".", "Timestamp", "(", "dt", ")", ".", "value", "elif", "isinstance", "(", "dt", ",", "long", ")", ":", "return", "dt", "elif", "isinstance", "(", "dt", ",", "datetime", ")", ":", "return", "long", "(", "dt", ".", "strftime", "(", "\"%s%f\"", ")", ")", "*", "1000", "raise", "ValueError" ]
Accepts a string, Pandas Timestamp, or long, and returns nanos since the epoch.
[ "Accepts", "a", "string", "Pandas", "Timestamp", "or", "long", "and", "returns", "nanos", "since", "the", "epoch", "." ]
python
train
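Note the `long` branches above are Python 2 only. A self-contained Python 3 sketch of the same dispatch (requires pandas; the helper name is illustrative):

from datetime import datetime
import pandas as pd

def datetime_to_nanos_py3(dt):
    """Nanoseconds since the epoch; int replaces Python 2's long."""
    if isinstance(dt, pd.Timestamp):
        return dt.value
    elif isinstance(dt, str):
        return pd.Timestamp(dt).value
    elif isinstance(dt, int):
        return dt
    elif isinstance(dt, datetime):
        # naive datetimes are interpreted in local time here
        return int(dt.timestamp() * 1e9)
    raise ValueError(dt)

print(datetime_to_nanos_py3('1970-01-01T00:00:01'))  # -> 1000000000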
estnltk/estnltk
estnltk/syntax/utils.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/utils.py#L596-L604
def get_root( self, **kwargs ):
    ''' Returns this tree if it has no parents, or, alternatively, moves
        up via the parent links of this tree until reaching the tree with
        no parents, and returns the parentless tree as the root.
    '''
    if self.parent == None:
        return self
    else:
        return self.parent.get_root( **kwargs )
[ "def", "get_root", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "parent", "==", "None", ":", "return", "self", "else", ":", "return", "self", ".", "parent", ".", "get_root", "(", "*", "*", "kwargs", ")" ]
Returns this tree if it has no parents, or, alternatively, moves up via the parent links of this tree until reaching the tree with no parents, and returns the parentless tree as the root.
[ "Returns", "this", "tree", "if", "it", "has", "no", "parents", "or", "alternatively", "moves", "up", "via", "the", "parent", "links", "of", "this", "tree", "until", "reaching", "the", "tree", "with", "no", "parents", "and", "returnes", "the", "parentless", "tree", "as", "the", "root", "." ]
python
train
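A tiny self-contained tree shows the same walk; `Node` here is an illustrative stand-in for the syntax Tree, using an iterative loop equivalent to the recursion above:

class Node:
    def __init__(self, parent=None):
        self.parent = parent

    def get_root(self, **kwargs):
        # follow parent links until a parentless node is reached
        node = self
        while node.parent is not None:
            node = node.parent
        return node

root = Node()
leaf = Node(parent=Node(parent=root))
print(leaf.get_root() is root)  # -> True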
avihad/twistes
twistes/client.py
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/client.py#L666-L692
def close(self):
    """
    close all http connections.
    returns a deferred that fires once they're all closed.
    """
    def validate_client(client):
        """
        Validate that the connection is for the current client
        :param client:
        :return:
        """
        host, port = client.addr
        parsed_url = urlparse(self._hostname)
        return host == parsed_url.hostname and port == parsed_url.port

    # read https://github.com/twisted/treq/issues/86
    # to understand the following...
    def _check_fds(_):
        # collect both readers and writers (the original source called
        # reactor.getReaders() twice, which looks like a copy-paste bug)
        fds = set(reactor.getReaders() + reactor.getWriters())
        if not [fd for fd in fds
                if isinstance(fd, Client) and validate_client(fd)]:
            return
        return deferLater(reactor, 0, _check_fds, None)

    pool = self._async_http_client_params["pool"]
    return pool.closeCachedConnections().addBoth(_check_fds)
[ "def", "close", "(", "self", ")", ":", "def", "validate_client", "(", "client", ")", ":", "\"\"\"\n Validate that the connection is for the current client\n :param client:\n :return:\n \"\"\"", "host", ",", "port", "=", "client", ".", "addr", "parsed_url", "=", "urlparse", "(", "self", ".", "_hostname", ")", "return", "host", "==", "parsed_url", ".", "hostname", "and", "port", "==", "parsed_url", ".", "port", "# read https://github.com/twisted/treq/issues/86", "# to understand the following...", "def", "_check_fds", "(", "_", ")", ":", "fds", "=", "set", "(", "reactor", ".", "getReaders", "(", ")", "+", "reactor", ".", "getReaders", "(", ")", ")", "if", "not", "[", "fd", "for", "fd", "in", "fds", "if", "isinstance", "(", "fd", ",", "Client", ")", "and", "validate_client", "(", "fd", ")", "]", ":", "return", "return", "deferLater", "(", "reactor", ",", "0", ",", "_check_fds", ",", "None", ")", "pool", "=", "self", ".", "_async_http_client_params", "[", "\"pool\"", "]", "return", "pool", ".", "closeCachedConnections", "(", ")", ".", "addBoth", "(", "_check_fds", ")" ]
close all http connections. returns a deferred that fires once they're all closed.
[ "close", "all", "http", "connections", ".", "returns", "a", "deferred", "that", "fires", "once", "they", "re", "all", "closed", "." ]
python
train
senaite/senaite.core
bika/lims/content/analysisrequest.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisrequest.py#L1722-L1727
def printInvoice(self, REQUEST=None, RESPONSE=None):
    """Print invoice
    """
    invoice = self.getInvoice()
    invoice_url = invoice.absolute_url()
    RESPONSE.redirect('{}/invoice_print'.format(invoice_url))
[ "def", "printInvoice", "(", "self", ",", "REQUEST", "=", "None", ",", "RESPONSE", "=", "None", ")", ":", "invoice", "=", "self", ".", "getInvoice", "(", ")", "invoice_url", "=", "invoice", ".", "absolute_url", "(", ")", "RESPONSE", ".", "redirect", "(", "'{}/invoice_print'", ".", "format", "(", "invoice_url", ")", ")" ]
Print invoice
[ "Print", "invoice" ]
python
train
cmap/cmapPy
cmapPy/pandasGEXpress/parse_gctx.py
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L129-L148
def check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta_df, col_meta_df):
    """
    Makes sure that (if entered) id inputs entered are of one type (string id or index)
    Input:
        - rid (list or None): if not None, a list of rids
        - ridx (list or None): if not None, a list of indexes
        - cid (list or None): if not None, a list of cids
        - cidx (list or None): if not None, a list of indexes
    Output:
        - a tuple of the ordered ridx and cidx
    """
    (row_type, row_ids) = check_id_idx_exclusivity(rid, ridx)
    (col_type, col_ids) = check_id_idx_exclusivity(cid, cidx)

    row_ids = check_and_convert_ids(row_type, row_ids, row_meta_df)
    ordered_ridx = get_ordered_idx(row_type, row_ids, row_meta_df)

    col_ids = check_and_convert_ids(col_type, col_ids, col_meta_df)
    ordered_cidx = get_ordered_idx(col_type, col_ids, col_meta_df)
    return (ordered_ridx, ordered_cidx)
[ "def", "check_and_order_id_inputs", "(", "rid", ",", "ridx", ",", "cid", ",", "cidx", ",", "row_meta_df", ",", "col_meta_df", ")", ":", "(", "row_type", ",", "row_ids", ")", "=", "check_id_idx_exclusivity", "(", "rid", ",", "ridx", ")", "(", "col_type", ",", "col_ids", ")", "=", "check_id_idx_exclusivity", "(", "cid", ",", "cidx", ")", "row_ids", "=", "check_and_convert_ids", "(", "row_type", ",", "row_ids", ",", "row_meta_df", ")", "ordered_ridx", "=", "get_ordered_idx", "(", "row_type", ",", "row_ids", ",", "row_meta_df", ")", "col_ids", "=", "check_and_convert_ids", "(", "col_type", ",", "col_ids", ",", "col_meta_df", ")", "ordered_cidx", "=", "get_ordered_idx", "(", "col_type", ",", "col_ids", ",", "col_meta_df", ")", "return", "(", "ordered_ridx", ",", "ordered_cidx", ")" ]
Makes sure that (if entered) id inputs entered are of one type (string id or index)

Input:
    - rid (list or None): if not None, a list of rids
    - ridx (list or None): if not None, a list of indexes
    - cid (list or None): if not None, a list of cids
    - cidx (list or None): if not None, a list of indexes

Output:
    - a tuple of the ordered ridx and cidx
[ "Makes", "sure", "that", "(", "if", "entered", ")", "id", "inputs", "entered", "are", "of", "one", "type", "(", "string", "id", "or", "index", ")", "Input", ":", "-", "rid", "(", "list", "or", "None", ")", ":", "if", "not", "None", "a", "list", "of", "rids", "-", "ridx", "(", "list", "or", "None", ")", ":", "if", "not", "None", "a", "list", "of", "indexes", "-", "cid", "(", "list", "or", "None", ")", ":", "if", "not", "None", "a", "list", "of", "cids", "-", "cidx", "(", "list", "or", "None", ")", ":", "if", "not", "None", "a", "list", "of", "indexes", "Output", ":", "-", "a", "tuple", "of", "the", "ordered", "ridx", "and", "cidx" ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L1818-L1841
def shot_add_asset(self, *args, **kwargs):
    """Add more assets to the shot.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_shot:
        return
    dialog = AssetAdderDialog(shot=self.cur_shot)
    dialog.exec_()
    assets = dialog.assets
    atypes = {}
    for c in self.shot_asset_model.root.childItems:
        atypes[c.internal_data()] = c
    for asset in assets:
        atypeitem = atypes.get(asset.atype)
        if not atypeitem:
            atypedata = djitemdata.AtypeItemData(asset.atype)
            atypeitem = treemodel.TreeItem(atypedata, self.shot_asset_model.root)
            atypes[asset.atype] = atypeitem
        assetdata = djitemdata.AssetItemData(asset)
        treemodel.TreeItem(assetdata, atypeitem)
    self.cur_shot.save()
[ "def", "shot_add_asset", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "cur_shot", ":", "return", "dialog", "=", "AssetAdderDialog", "(", "shot", "=", "self", ".", "cur_shot", ")", "dialog", ".", "exec_", "(", ")", "assets", "=", "dialog", ".", "assets", "atypes", "=", "{", "}", "for", "c", "in", "self", ".", "shot_asset_model", ".", "root", ".", "childItems", ":", "atypes", "[", "c", ".", "internal_data", "(", ")", "]", "=", "c", "for", "asset", "in", "assets", ":", "atypeitem", "=", "atypes", ".", "get", "(", "asset", ".", "atype", ")", "if", "not", "atypeitem", ":", "atypedata", "=", "djitemdata", ".", "AtypeItemData", "(", "asset", ".", "atype", ")", "atypeitem", "=", "treemodel", ".", "TreeItem", "(", "atypedata", ",", "self", ".", "shot_asset_model", ".", "root", ")", "atypes", "[", "asset", ".", "atype", "]", "=", "atypeitem", "assetdata", "=", "djitemdata", ".", "AssetItemData", "(", "asset", ")", "treemodel", ".", "TreeItem", "(", "assetdata", ",", "atypeitem", ")", "self", ".", "cur_shot", ".", "save", "(", ")" ]
Add more assets to the shot.

:returns: None
:rtype: None
:raises: None
[ "Add", "more", "assets", "to", "the", "shot", "." ]
python
train
galaxyproject/gravity
gravity/process_manager/supervisor_manager.py
https://github.com/galaxyproject/gravity/blob/2f792497fc60874f881c9ef74a5905a286a9ce3e/gravity/process_manager/supervisor_manager.py#L148-L155
def __get_supervisor(self):
    """ Return the supervisor proxy object

    Should probably use this more rather than supervisorctl directly
    """
    options = supervisorctl.ClientOptions()
    options.realize(args=['-c', self.supervisord_conf_path])
    return supervisorctl.Controller(options).get_supervisor()
[ "def", "__get_supervisor", "(", "self", ")", ":", "options", "=", "supervisorctl", ".", "ClientOptions", "(", ")", "options", ".", "realize", "(", "args", "=", "[", "'-c'", ",", "self", ".", "supervisord_conf_path", "]", ")", "return", "supervisorctl", ".", "Controller", "(", "options", ")", ".", "get_supervisor", "(", ")" ]
Return the supervisor proxy object Should probably use this more rather than supervisorctl directly
[ "Return", "the", "supervisor", "proxy", "object" ]
python
train
Unidata/siphon
siphon/simplewebservice/igra2.py
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/igra2.py#L409-L439
def _clean_header_df(self, df):
    """Format the header dataframe and add units."""
    if self.suffix == '-drvd.txt':
        df.units = {'release_time': 'second',
                    'precipitable_water': 'millimeter',
                    'inv_pressure': 'hPa',
                    'inv_height': 'meter',
                    'inv_strength': 'Kelvin',
                    'mixed_layer_pressure': 'hPa',
                    'mixed_layer_height': 'meter',
                    'freezing_point_pressure': 'hPa',
                    'freezing_point_height': 'meter',
                    'lcl_pressure': 'hPa',
                    'lcl_height': 'meter',
                    'lfc_pressure': 'hPa',
                    'lfc_height': 'meter',
                    'lnb_pressure': 'hPa',
                    'lnb_height': 'meter',
                    'lifted_index': 'degC',
                    'showalter_index': 'degC',
                    'k_index': 'degC',
                    'total_totals_index': 'degC',
                    'cape': 'Joule / kilogram',
                    'convective_inhibition': 'Joule / kilogram'}
    else:
        df.units = {'release_time': 'second',
                    'latitude': 'degrees',
                    'longitude': 'degrees'}
    return df
[ "def", "_clean_header_df", "(", "self", ",", "df", ")", ":", "if", "self", ".", "suffix", "==", "'-drvd.txt'", ":", "df", ".", "units", "=", "{", "'release_time'", ":", "'second'", ",", "'precipitable_water'", ":", "'millimeter'", ",", "'inv_pressure'", ":", "'hPa'", ",", "'inv_height'", ":", "'meter'", ",", "'inv_strength'", ":", "'Kelvin'", ",", "'mixed_layer_pressure'", ":", "'hPa'", ",", "'mixed_layer_height'", ":", "'meter'", ",", "'freezing_point_pressure'", ":", "'hPa'", ",", "'freezing_point_height'", ":", "'meter'", ",", "'lcl_pressure'", ":", "'hPa'", ",", "'lcl_height'", ":", "'meter'", ",", "'lfc_pressure'", ":", "'hPa'", ",", "'lfc_height'", ":", "'meter'", ",", "'lnb_pressure'", ":", "'hPa'", ",", "'lnb_height'", ":", "'meter'", ",", "'lifted_index'", ":", "'degC'", ",", "'showalter_index'", ":", "'degC'", ",", "'k_index'", ":", "'degC'", ",", "'total_totals_index'", ":", "'degC'", ",", "'cape'", ":", "'Joule / kilogram'", ",", "'convective_inhibition'", ":", "'Joule / kilogram'", "}", "else", ":", "df", ".", "units", "=", "{", "'release_time'", ":", "'second'", ",", "'latitude'", ":", "'degrees'", ",", "'longitude'", ":", "'degrees'", "}", "return", "df" ]
Format the header dataframe and add units.
[ "Format", "the", "header", "dataframe", "and", "add", "units", "." ]
python
train
ipfs/py-ipfs-api
ipfsapi/client.py
https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L2156-L2173
def add_json(self, json_obj, **kwargs):
    """Adds a json-serializable Python dict as a json file to IPFS.

    .. code-block:: python

        >>> c.add_json({'one': 1, 'two': 2, 'three': 3})
        'QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob'

    Parameters
    ----------
    json_obj : dict
        A json-serializable Python dictionary

    Returns
    -------
    str : Hash of the added IPFS object
    """
    return self.add_bytes(encoding.Json().encode(json_obj), **kwargs)
[ "def", "add_json", "(", "self", ",", "json_obj", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "add_bytes", "(", "encoding", ".", "Json", "(", ")", ".", "encode", "(", "json_obj", ")", ",", "*", "*", "kwargs", ")" ]
Adds a json-serializable Python dict as a json file to IPFS.

.. code-block:: python

    >>> c.add_json({'one': 1, 'two': 2, 'three': 3})
    'QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob'

Parameters
----------
json_obj : dict
    A json-serializable Python dictionary

Returns
-------
str : Hash of the added IPFS object
[ "Adds", "a", "json", "-", "serializable", "Python", "dict", "as", "a", "json", "file", "to", "IPFS", "." ]
python
train
rm-hull/luma.emulator
luma/emulator/render.py
https://github.com/rm-hull/luma.emulator/blob/ca3db028b33d17cda9247ea5189873ff0408d013/luma/emulator/render.py#L31-L37
def scale2x(self, surface):
    """
    Scales using the AdvanceMAME Scale2X algorithm which does a
    'jaggie-less' scale of bitmap graphics.
    """
    assert(self._scale == 2)
    return self._pygame.transform.scale2x(surface)
[ "def", "scale2x", "(", "self", ",", "surface", ")", ":", "assert", "(", "self", ".", "_scale", "==", "2", ")", "return", "self", ".", "_pygame", ".", "transform", ".", "scale2x", "(", "surface", ")" ]
Scales using the AdvanceMAME Scale2X algorithm which does a 'jaggie-less' scale of bitmap graphics.
[ "Scales", "using", "the", "AdvanceMAME", "Scale2X", "algorithm", "which", "does", "a", "jaggie", "-", "less", "scale", "of", "bitmap", "graphics", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/networking/ethernet_networks.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/networking/ethernet_networks.py#L116-L151
def dissociate_values_or_ranges(self, vlan_id_range):
    """
    Build a list of vlan ids given a combination of ranges and/or values

    Examples:
        >>> enet.dissociate_values_or_ranges('1-2,5')
        [1, 2, 5]
        >>> enet.dissociate_values_or_ranges('5')
        [1, 2, 3, 4, 5]
        >>> enet.dissociate_values_or_ranges('4-5,7-8')
        [4, 5, 7, 8]

    Args:
        vlan_id_range: A combination of values or ranges. For example, '1-10,50,51,500-700'.

    Returns:
        list: vlan ids
    """
    values_or_ranges = vlan_id_range.split(',')
    vlan_ids = []
    # The expected result is different if the vlan_id_range contains only one value
    if len(values_or_ranges) == 1 and '-' not in values_or_ranges[0]:
        vlan_ids = list(range(1, int(values_or_ranges[0]) + 1))
    else:
        for value_or_range in values_or_ranges:
            # str.strip() returns a new string; the original discarded the result
            value_or_range = value_or_range.strip()
            if '-' not in value_or_range:
                vlan_ids.append(int(value_or_range))
            else:
                start, end = value_or_range.split('-')
                range_ids = range(int(start), int(end) + 1)
                vlan_ids.extend(range_ids)
    return vlan_ids
[ "def", "dissociate_values_or_ranges", "(", "self", ",", "vlan_id_range", ")", ":", "values_or_ranges", "=", "vlan_id_range", ".", "split", "(", "','", ")", "vlan_ids", "=", "[", "]", "# The expected result is different if the vlan_id_range contains only one value", "if", "len", "(", "values_or_ranges", ")", "==", "1", "and", "'-'", "not", "in", "values_or_ranges", "[", "0", "]", ":", "vlan_ids", "=", "list", "(", "range", "(", "1", ",", "int", "(", "values_or_ranges", "[", "0", "]", ")", "+", "1", ")", ")", "else", ":", "for", "value_or_range", "in", "values_or_ranges", ":", "value_or_range", ".", "strip", "(", ")", "if", "'-'", "not", "in", "value_or_range", ":", "vlan_ids", ".", "append", "(", "int", "(", "value_or_range", ")", ")", "else", ":", "start", ",", "end", "=", "value_or_range", ".", "split", "(", "'-'", ")", "range_ids", "=", "range", "(", "int", "(", "start", ")", ",", "int", "(", "end", ")", "+", "1", ")", "vlan_ids", ".", "extend", "(", "range_ids", ")", "return", "vlan_ids" ]
Build a list of vlan ids given a combination of ranges and/or values

Examples:
    >>> enet.dissociate_values_or_ranges('1-2,5')
    [1, 2, 5]
    >>> enet.dissociate_values_or_ranges('5')
    [1, 2, 3, 4, 5]
    >>> enet.dissociate_values_or_ranges('4-5,7-8')
    [4, 5, 7, 8]

Args:
    vlan_id_range: A combination of values or ranges. For example, '1-10,50,51,500-700'.

Returns:
    list: vlan ids
[ "Build", "a", "list", "of", "vlan", "ids", "given", "a", "combination", "of", "ranges", "and", "/", "or", "values" ]
python
train
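The range-expansion logic stands alone; a module-level re-implementation sketch (no OneView client needed) that reproduces the documented examples:

def dissociate_values_or_ranges(vlan_id_range):
    values_or_ranges = vlan_id_range.split(',')
    # a single plain value N means "1..N"; anything else is parsed piecewise
    if len(values_or_ranges) == 1 and '-' not in values_or_ranges[0]:
        return list(range(1, int(values_or_ranges[0]) + 1))
    vlan_ids = []
    for part in values_or_ranges:
        part = part.strip()
        if '-' in part:
            start, end = part.split('-')
            vlan_ids.extend(range(int(start), int(end) + 1))
        else:
            vlan_ids.append(int(part))
    return vlan_ids

print(dissociate_values_or_ranges('1-2,5'))  # -> [1, 2, 5]
print(dissociate_values_or_ranges('5'))      # -> [1, 2, 3, 4, 5]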
python-security/pyt
pyt/vulnerabilities/vulnerabilities.py
https://github.com/python-security/pyt/blob/efc0cfb716e40e0c8df4098f1cc8cf43723cd31f/pyt/vulnerabilities/vulnerabilities.py#L272-L297
def get_vulnerability_chains(
    current_node,
    sink,
    def_use,
    chain=[]
):
    """Traverses the def-use graph to find all paths from source to sink
    that cause a vulnerability.

    Args:
        current_node()
        sink()
        def_use(dict):
        chain(list(Node)): A path of nodes between source and sink.
    """
    for use in def_use[current_node]:
        if use == sink:
            yield chain
        else:
            vuln_chain = list(chain)
            vuln_chain.append(use)
            yield from get_vulnerability_chains(
                use,
                sink,
                def_use,
                vuln_chain
            )
[ "def", "get_vulnerability_chains", "(", "current_node", ",", "sink", ",", "def_use", ",", "chain", "=", "[", "]", ")", ":", "for", "use", "in", "def_use", "[", "current_node", "]", ":", "if", "use", "==", "sink", ":", "yield", "chain", "else", ":", "vuln_chain", "=", "list", "(", "chain", ")", "vuln_chain", ".", "append", "(", "use", ")", "yield", "from", "get_vulnerability_chains", "(", "use", ",", "sink", ",", "def_use", ",", "vuln_chain", ")" ]
Traverses the def-use graph to find all paths from source to sink
that cause a vulnerability.

Args:
    current_node()
    sink()
    def_use(dict):
    chain(list(Node)): A path of nodes between source and sink.
[ "Traverses", "the", "def", "-", "use", "graph", "to", "find", "all", "paths", "from", "source", "to", "sink", "that", "cause", "a", "vulnerability", "." ]
python
train
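With the generator above in scope, a toy def-use mapping (node names invented) shows how every source-to-sink path is enumerated; each yielded chain lists the nodes strictly between source and sink:

def_use = {
    'source': ['a', 'b'],
    'a': ['sink'],
    'b': ['a'],
}
for chain in get_vulnerability_chains('source', 'sink', def_use):
    print(chain)
# -> ['a']
# -> ['b', 'a']

The mutable default `chain=[]` is safe here only because the function copies it (`list(chain)`) rather than mutating it in place.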
numenta/nupic
src/nupic/algorithms/anomaly_likelihood.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/anomaly_likelihood.py#L312-L354
def write(self, proto):
    """ capnp serialization method for the anomaly likelihood object

    :param proto: (Object) capnp proto object specified in
                  nupic.regions.anomaly_likelihood.capnp
    """
    proto.iteration = self._iteration

    pHistScores = proto.init('historicalScores', len(self._historicalScores))
    for i, score in enumerate(list(self._historicalScores)):
        _, value, anomalyScore = score
        record = pHistScores[i]
        record.value = float(value)
        record.anomalyScore = float(anomalyScore)

    if self._distribution:
        proto.distribution.name = self._distribution["distribution"]["name"]
        proto.distribution.mean = float(self._distribution["distribution"]["mean"])
        proto.distribution.variance = float(self._distribution["distribution"]["variance"])
        proto.distribution.stdev = float(self._distribution["distribution"]["stdev"])

        proto.distribution.movingAverage.windowSize = float(self._distribution["movingAverage"]["windowSize"])

        historicalValues = self._distribution["movingAverage"]["historicalValues"]
        pHistValues = proto.distribution.movingAverage.init(
            "historicalValues", len(historicalValues))
        for i, value in enumerate(historicalValues):
            pHistValues[i] = float(value)

        #proto.distribution.movingAverage.historicalValues = self._distribution["movingAverage"]["historicalValues"]
        proto.distribution.movingAverage.total = float(self._distribution["movingAverage"]["total"])

        historicalLikelihoods = self._distribution["historicalLikelihoods"]
        pHistLikelihoods = proto.distribution.init("historicalLikelihoods",
                                                   len(historicalLikelihoods))
        for i, likelihood in enumerate(historicalLikelihoods):
            pHistLikelihoods[i] = float(likelihood)

    proto.probationaryPeriod = self._probationaryPeriod
    proto.learningPeriod = self._learningPeriod
    proto.reestimationPeriod = self._reestimationPeriod
    proto.historicWindowSize = self._historicalScores.maxlen
[ "def", "write", "(", "self", ",", "proto", ")", ":", "proto", ".", "iteration", "=", "self", ".", "_iteration", "pHistScores", "=", "proto", ".", "init", "(", "'historicalScores'", ",", "len", "(", "self", ".", "_historicalScores", ")", ")", "for", "i", ",", "score", "in", "enumerate", "(", "list", "(", "self", ".", "_historicalScores", ")", ")", ":", "_", ",", "value", ",", "anomalyScore", "=", "score", "record", "=", "pHistScores", "[", "i", "]", "record", ".", "value", "=", "float", "(", "value", ")", "record", ".", "anomalyScore", "=", "float", "(", "anomalyScore", ")", "if", "self", ".", "_distribution", ":", "proto", ".", "distribution", ".", "name", "=", "self", ".", "_distribution", "[", "\"distribution\"", "]", "[", "\"name\"", "]", "proto", ".", "distribution", ".", "mean", "=", "float", "(", "self", ".", "_distribution", "[", "\"distribution\"", "]", "[", "\"mean\"", "]", ")", "proto", ".", "distribution", ".", "variance", "=", "float", "(", "self", ".", "_distribution", "[", "\"distribution\"", "]", "[", "\"variance\"", "]", ")", "proto", ".", "distribution", ".", "stdev", "=", "float", "(", "self", ".", "_distribution", "[", "\"distribution\"", "]", "[", "\"stdev\"", "]", ")", "proto", ".", "distribution", ".", "movingAverage", ".", "windowSize", "=", "float", "(", "self", ".", "_distribution", "[", "\"movingAverage\"", "]", "[", "\"windowSize\"", "]", ")", "historicalValues", "=", "self", ".", "_distribution", "[", "\"movingAverage\"", "]", "[", "\"historicalValues\"", "]", "pHistValues", "=", "proto", ".", "distribution", ".", "movingAverage", ".", "init", "(", "\"historicalValues\"", ",", "len", "(", "historicalValues", ")", ")", "for", "i", ",", "value", "in", "enumerate", "(", "historicalValues", ")", ":", "pHistValues", "[", "i", "]", "=", "float", "(", "value", ")", "#proto.distribution.movingAverage.historicalValues = self._distribution[\"movingAverage\"][\"historicalValues\"]", "proto", ".", "distribution", ".", "movingAverage", ".", "total", "=", "float", "(", "self", ".", "_distribution", "[", "\"movingAverage\"", "]", "[", "\"total\"", "]", ")", "historicalLikelihoods", "=", "self", ".", "_distribution", "[", "\"historicalLikelihoods\"", "]", "pHistLikelihoods", "=", "proto", ".", "distribution", ".", "init", "(", "\"historicalLikelihoods\"", ",", "len", "(", "historicalLikelihoods", ")", ")", "for", "i", ",", "likelihood", "in", "enumerate", "(", "historicalLikelihoods", ")", ":", "pHistLikelihoods", "[", "i", "]", "=", "float", "(", "likelihood", ")", "proto", ".", "probationaryPeriod", "=", "self", ".", "_probationaryPeriod", "proto", ".", "learningPeriod", "=", "self", ".", "_learningPeriod", "proto", ".", "reestimationPeriod", "=", "self", ".", "_reestimationPeriod", "proto", ".", "historicWindowSize", "=", "self", ".", "_historicalScores", ".", "maxlen" ]
capnp serialization method for the anomaly likelihood object

:param proto: (Object) capnp proto object specified in
              nupic.regions.anomaly_likelihood.capnp
[ "capnp", "serialization", "method", "for", "the", "anomaly", "likelihood", "object" ]
python
valid
titusjan/argos
argos/qt/registry.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/qt/registry.py#L308-L326
def loadOrInitSettings(self, groupName=None):
    """ Reads the registry items from the persistent settings store, falls back
        on the default plugins if there are no settings in the store for this registry.
    """
    groupName = groupName if groupName else self.settingsGroupName
    settings = QtCore.QSettings()

    #for key in sorted(settings.allKeys()):
    #    print(key)

    if containsSettingsGroup(groupName, settings):
        self.loadSettings(groupName)
    else:
        logger.info("Group {!r} not found, falling back on default settings".format(groupName))
        for item in self.getDefaultItems():
            self.registerItem(item)
        self.saveSettings(groupName)

    assert containsSettingsGroup(groupName, settings), \
        "Sanity check failed. {} not found".format(groupName)
[ "def", "loadOrInitSettings", "(", "self", ",", "groupName", "=", "None", ")", ":", "groupName", "=", "groupName", "if", "groupName", "else", "self", ".", "settingsGroupName", "settings", "=", "QtCore", ".", "QSettings", "(", ")", "#for key in sorted(settings.allKeys()):", "# print(key)", "if", "containsSettingsGroup", "(", "groupName", ",", "settings", ")", ":", "self", ".", "loadSettings", "(", "groupName", ")", "else", ":", "logger", ".", "info", "(", "\"Group {!r} not found, falling back on default settings\"", ".", "format", "(", "groupName", ")", ")", "for", "item", "in", "self", ".", "getDefaultItems", "(", ")", ":", "self", ".", "registerItem", "(", "item", ")", "self", ".", "saveSettings", "(", "groupName", ")", "assert", "containsSettingsGroup", "(", "groupName", ",", "settings", ")", ",", "\"Sanity check failed. {} not found\"", ".", "format", "(", "groupName", ")" ]
Reads the registry items from the persistent settings store, falls back on the default plugins if there are no settings in the store for this registry.
[ "Reads", "the", "registry", "items", "from", "the", "persistent", "settings", "store", "falls", "back", "on", "the", "default", "plugins", "if", "there", "are", "no", "settings", "in", "the", "store", "for", "this", "registry", "." ]
python
train
rosenbrockc/acorn
acorn/logging/database.py
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L48-L77
def list_tasks(target=None):
    """Returns a list of all the projects and tasks available in the `acorn`
    database directory.

    Args:
        target (str): directory to list the projects for. Defaults to the configured
          database directory.

    Returns:
        dict: keys are project names; values are lists of tasks associated with the
          project.
    """
    from os import getcwd, chdir
    from glob import glob
    original = getcwd()
    if target is None:# pragma: no cover
        target = _dbdir()

    chdir(target)
    result = {}
    for filename in glob("*.*.json"):
        project, task = filename.split('.')[0:2]
        if project not in result:
            result[project] = []
        result[project].append(task)

    #Set the working directory back to what it was.
    chdir(original)
    return result
[ "def", "list_tasks", "(", "target", "=", "None", ")", ":", "from", "os", "import", "getcwd", ",", "chdir", "from", "glob", "import", "glob", "original", "=", "getcwd", "(", ")", "if", "target", "is", "None", ":", "# pragma: no cover", "target", "=", "_dbdir", "(", ")", "chdir", "(", "target", ")", "result", "=", "{", "}", "for", "filename", "in", "glob", "(", "\"*.*.json\"", ")", ":", "project", ",", "task", "=", "filename", ".", "split", "(", "'.'", ")", "[", "0", ":", "2", "]", "if", "project", "not", "in", "result", ":", "result", "[", "project", "]", "=", "[", "]", "result", "[", "project", "]", ".", "append", "(", "task", ")", "#Set the working directory back to what it was.", "chdir", "(", "original", ")", "return", "result" ]
Returns a list of all the projects and tasks available in the `acorn`
database directory.

Args:
    target (str): directory to list the projects for. Defaults to the configured
      database directory.

Returns:
    dict: keys are project names; values are lists of tasks associated with the
      project.
[ "Returns", "a", "list", "of", "all", "the", "projects", "and", "tasks", "available", "in", "the", "acorn", "database", "directory", "." ]
python
train
dereneaton/ipyrad
ipyrad/assemble/cluster_across.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L762-L766
def get_nloci(data):
    """ return nloci from the tmp h5 arr"""
    bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
    with h5py.File(bseeds) as io5:
        return io5["seedsarr"].shape[0]
[ "def", "get_nloci", "(", "data", ")", ":", "bseeds", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "across", ",", "data", ".", "name", "+", "\".tmparrs.h5\"", ")", "with", "h5py", ".", "File", "(", "bseeds", ")", "as", "io5", ":", "return", "io5", "[", "\"seedsarr\"", "]", ".", "shape", "[", "0", "]" ]
return nloci from the tmp h5 arr
[ "return", "nloci", "from", "the", "tmp", "h5", "arr" ]
python
valid
soerenwolfers/swutil
swutil/config.py
https://github.com/soerenwolfers/swutil/blob/2d598f2deac8b7e20df95dbc68017e5ab5d6180c/swutil/config.py#L21-L29
def lower(option,value):
    '''
    Enforces lower case options and option values where appropriate
    '''
    if type(option) is str:
        option=option.lower()
    if type(value) is str:
        value=value.lower()
    return (option,value)
[ "def", "lower", "(", "option", ",", "value", ")", ":", "if", "type", "(", "option", ")", "is", "str", ":", "option", "=", "option", ".", "lower", "(", ")", "if", "type", "(", "value", ")", "is", "str", ":", "value", "=", "value", ".", "lower", "(", ")", "return", "(", "option", ",", "value", ")" ]
Enforces lower case options and option values where appropriate
[ "Enforces", "lower", "case", "options", "and", "option", "values", "where", "appropriate" ]
python
valid
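Behaviour sketch, assuming `lower` above is in scope; non-string values pass through untouched:

print(lower('TOL', 'High'))  # -> ('tol', 'high')
print(lower('Steps', 500))   # -> ('steps', 500)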
vingd/vingd-api-python
vingd/client.py
https://github.com/vingd/vingd-api-python/blob/7548a49973a472f7277c8ef847563faa7b6f3706/vingd/client.py#L564-L593
def authorized_create_user(self, identities=None, primary=None, permissions=None):
    """Creates Vingd user (profile & account), links it with the provided
    identities (to be verified later), and sets the delegate-user
    permissions (creator being the delegate).

    Returns Vingd user's `huid` (hashed user id).

    Example::

        vingd.authorized_create_user(
            identities={"facebook": "12312312", "mail": "[email protected]"},
            primary="facebook",
            permissions=["get.account.balance", "purchase.object"]
        )

    If `identities` and `primary` are unspecified, a "zombie" ("headless")
    account is created (i.e. account with no identities associated,
    user-unreachable).

    :rtype: ``dict``
    :returns: ``{'huid': <huid>}``
    :raises GeneralException:

    :resource: ``id/objects/<oid>/purchases``
    :access: authorized users with ACL flag ``user.create``
    """
    return self.request('post', 'id/users/', json.dumps({
        'identities': identities,
        'primary_identity': primary,
        'delegate_permissions': permissions
    }))
[ "def", "authorized_create_user", "(", "self", ",", "identities", "=", "None", ",", "primary", "=", "None", ",", "permissions", "=", "None", ")", ":", "return", "self", ".", "request", "(", "'post'", ",", "'id/users/'", ",", "json", ".", "dumps", "(", "{", "'identities'", ":", "identities", ",", "'primary_identity'", ":", "primary", ",", "'delegate_permissions'", ":", "permissions", "}", ")", ")" ]
Creates Vingd user (profile & account), links it with the provided identities (to be verified later), and sets the delegate-user permissions (creator being the delegate). Returns Vingd user's `huid` (hashed user id). Example:: vingd.authorized_create_user( identities={"facebook": "12312312", "mail": "[email protected]"}, primary="facebook", permissions=["get.account.balance", "purchase.object"] ) If `identities` and `primary` are unspecified, a "zombie" ("headless") account is created (i.e. account with no identities associated, user-unreachable). :rtype: ``dict`` :returns: ``{'huid': <huid>}`` :raises GeneralException: :resource: ``id/users/`` :access: authorized users with ACL flag ``user.create``
[ "Creates", "Vingd", "user", "(", "profile", "&", "account", ")", "links", "it", "with", "the", "provided", "identities", "(", "to", "be", "verified", "later", ")", "and", "sets", "the", "delegate", "-", "user", "permissions", "(", "creator", "being", "the", "delegate", ")", ".", "Returns", "Vingd", "user", "s", "huid", "(", "hashed", "user", "id", ")", ".", "Example", "::", "vingd", ".", "authorized_create_user", "(", "identities", "=", "{", "facebook", ":", "12312312", "mail", ":", "user" ]
python
train
authomatic/authomatic
authomatic/core.py
https://github.com/authomatic/authomatic/blob/90a9ce60cc405ae8a2bf5c3713acd5d78579a04e/authomatic/core.py#L727-L735
def expire_in(self, value): """ Computes :attr:`.expiration_time` when the value is set. """ # pylint:disable=attribute-defined-outside-init if value: self._expiration_time = int(time.time()) + int(value) self._expire_in = value
[ "def", "expire_in", "(", "self", ",", "value", ")", ":", "# pylint:disable=attribute-defined-outside-init", "if", "value", ":", "self", ".", "_expiration_time", "=", "int", "(", "time", ".", "time", "(", ")", ")", "+", "int", "(", "value", ")", "self", ".", "_expire_in", "=", "value" ]
Computes :attr:`.expiration_time` when the value is set.
[ "Computes", ":", "attr", ":", ".", "expiration_time", "when", "the", "value", "is", "set", "." ]
python
test
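The setter above relies on the usual property idiom; a minimal standalone sketch of the same pattern (the `Credentials` class name is hypothetical):

import time

class Credentials(object):
    """Stores a token lifetime and derives its absolute expiry time."""

    _expire_in = 0
    _expiration_time = None

    @property
    def expire_in(self):
        return self._expire_in

    @expire_in.setter
    def expire_in(self, value):
        # Mirror the behaviour above: only compute expiry for truthy values.
        if value:
            self._expiration_time = int(time.time()) + int(value)
        self._expire_in = value

creds = Credentials()
creds.expire_in = 3600
print(creds.expire_in, creds._expiration_time)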
celery/cell
cell/actors.py
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L460-L489
def _on_message(self, body, message): """What to do when a message is received. This is a kombu consumer callback taking the standard ``body`` and ``message`` arguments. Note that if the properties of the message contain a value for ``reply_to`` then a proper implementation is expected to send a reply. """ if message.properties.get('reply_to'): handler = self.handle_call else: handler = self.handle_cast def handle(): # Do not ack the message if an exceptional error occurs, # but do ack the message if SystemExit or KeyboardInterrupt # is raised, as this is probably intended. try: handler(body, message) except Exception: raise except BaseException: message.ack() raise else: message.ack() handle()
[ "def", "_on_message", "(", "self", ",", "body", ",", "message", ")", ":", "if", "message", ".", "properties", ".", "get", "(", "'reply_to'", ")", ":", "handler", "=", "self", ".", "handle_call", "else", ":", "handler", "=", "self", ".", "handle_cast", "def", "handle", "(", ")", ":", "# Do not ack the message if an exceptional error occurs,", "# but do ack the message if SystemExit or KeyboardInterrupt", "# is raised, as this is probably intended.", "try", ":", "handler", "(", "body", ",", "message", ")", "except", "Exception", ":", "raise", "except", "BaseException", ":", "message", ".", "ack", "(", ")", "raise", "else", ":", "message", ".", "ack", "(", ")", "handle", "(", ")" ]
What to do when a message is received. This is a kombu consumer callback taking the standard ``body`` and ``message`` arguments. Note that if the properties of the message contain a value for ``reply_to`` then a proper implementation is expected to send a reply.
[ "What", "to", "do", "when", "a", "message", "is", "received", "." ]
python
train
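The ack logic above is easy to misread; here is a stub-driven sketch of the same control flow (the `FakeMessage` class is invented for illustration): ordinary exceptions leave the message un-acked so it can be redelivered, while SystemExit/KeyboardInterrupt ack first and then re-raise.

class FakeMessage(object):
    """Stand-in for a kombu message; records whether ack() was called."""
    acked = False

    def ack(self):
        self.acked = True

def dispatch(handler, body, message):
    # Same semantics as the method above.
    try:
        handler(body, message)
    except Exception:
        raise
    except BaseException:
        message.ack()
        raise
    else:
        message.ack()

msg = FakeMessage()
dispatch(lambda body, message: None, {"hello": 1}, msg)
print(msg.acked)  # True

msg = FakeMessage()
try:
    dispatch(lambda body, message: 1 / 0, {}, msg)
except ZeroDivisionError:
    print(msg.acked)  # False -- failed handlers do not ack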
Dentosal/python-sc2
sc2/bot_ai.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/bot_ai.py#L177-L241
async def distribute_workers(self): """ Distributes workers across all the bases taken. WARNING: This is quite slow when there are lots of workers or multiple bases. """ # TODO: # OPTIMIZE: Assign idle workers smarter # OPTIMIZE: Never use same worker multiple times owned_expansions = self.owned_expansions worker_pool = [] actions = [] for idle_worker in self.workers.idle: mf = self.state.mineral_field.closest_to(idle_worker) actions.append(idle_worker.gather(mf)) for location, townhall in owned_expansions.items(): workers = self.workers.closer_than(20, location) actual = townhall.assigned_harvesters ideal = townhall.ideal_harvesters excess = actual - ideal if actual > ideal: worker_pool.extend(workers.random_group_of(min(excess, len(workers)))) continue for g in self.geysers: workers = self.workers.closer_than(5, g) actual = g.assigned_harvesters ideal = g.ideal_harvesters excess = actual - ideal if actual > ideal: worker_pool.extend(workers.random_group_of(min(excess, len(workers)))) continue for g in self.geysers: actual = g.assigned_harvesters ideal = g.ideal_harvesters deficit = ideal - actual for _ in range(deficit): if worker_pool: w = worker_pool.pop() if len(w.orders) == 1 and w.orders[0].ability.id is AbilityId.HARVEST_RETURN: actions.append(w.move(g)) actions.append(w.return_resource(queue=True)) else: actions.append(w.gather(g)) for location, townhall in owned_expansions.items(): actual = townhall.assigned_harvesters ideal = townhall.ideal_harvesters deficit = ideal - actual for x in range(0, deficit): if worker_pool: w = worker_pool.pop() mf = self.state.mineral_field.closest_to(townhall) if len(w.orders) == 1 and w.orders[0].ability.id is AbilityId.HARVEST_RETURN: actions.append(w.move(townhall)) actions.append(w.return_resource(queue=True)) actions.append(w.gather(mf, queue=True)) else: actions.append(w.gather(mf)) await self.do_actions(actions)
[ "async", "def", "distribute_workers", "(", "self", ")", ":", "# TODO:", "# OPTIMIZE: Assign idle workers smarter", "# OPTIMIZE: Never use same worker mutltiple times", "owned_expansions", "=", "self", ".", "owned_expansions", "worker_pool", "=", "[", "]", "actions", "=", "[", "]", "for", "idle_worker", "in", "self", ".", "workers", ".", "idle", ":", "mf", "=", "self", ".", "state", ".", "mineral_field", ".", "closest_to", "(", "idle_worker", ")", "actions", ".", "append", "(", "idle_worker", ".", "gather", "(", "mf", ")", ")", "for", "location", ",", "townhall", "in", "owned_expansions", ".", "items", "(", ")", ":", "workers", "=", "self", ".", "workers", ".", "closer_than", "(", "20", ",", "location", ")", "actual", "=", "townhall", ".", "assigned_harvesters", "ideal", "=", "townhall", ".", "ideal_harvesters", "excess", "=", "actual", "-", "ideal", "if", "actual", ">", "ideal", ":", "worker_pool", ".", "extend", "(", "workers", ".", "random_group_of", "(", "min", "(", "excess", ",", "len", "(", "workers", ")", ")", ")", ")", "continue", "for", "g", "in", "self", ".", "geysers", ":", "workers", "=", "self", ".", "workers", ".", "closer_than", "(", "5", ",", "g", ")", "actual", "=", "g", ".", "assigned_harvesters", "ideal", "=", "g", ".", "ideal_harvesters", "excess", "=", "actual", "-", "ideal", "if", "actual", ">", "ideal", ":", "worker_pool", ".", "extend", "(", "workers", ".", "random_group_of", "(", "min", "(", "excess", ",", "len", "(", "workers", ")", ")", ")", ")", "continue", "for", "g", "in", "self", ".", "geysers", ":", "actual", "=", "g", ".", "assigned_harvesters", "ideal", "=", "g", ".", "ideal_harvesters", "deficit", "=", "ideal", "-", "actual", "for", "_", "in", "range", "(", "deficit", ")", ":", "if", "worker_pool", ":", "w", "=", "worker_pool", ".", "pop", "(", ")", "if", "len", "(", "w", ".", "orders", ")", "==", "1", "and", "w", ".", "orders", "[", "0", "]", ".", "ability", ".", "id", "is", "AbilityId", ".", "HARVEST_RETURN", ":", "actions", ".", "append", "(", "w", ".", "move", "(", "g", ")", ")", "actions", ".", "append", "(", "w", ".", "return_resource", "(", "queue", "=", "True", ")", ")", "else", ":", "actions", ".", "append", "(", "w", ".", "gather", "(", "g", ")", ")", "for", "location", ",", "townhall", "in", "owned_expansions", ".", "items", "(", ")", ":", "actual", "=", "townhall", ".", "assigned_harvesters", "ideal", "=", "townhall", ".", "ideal_harvesters", "deficit", "=", "ideal", "-", "actual", "for", "x", "in", "range", "(", "0", ",", "deficit", ")", ":", "if", "worker_pool", ":", "w", "=", "worker_pool", ".", "pop", "(", ")", "mf", "=", "self", ".", "state", ".", "mineral_field", ".", "closest_to", "(", "townhall", ")", "if", "len", "(", "w", ".", "orders", ")", "==", "1", "and", "w", ".", "orders", "[", "0", "]", ".", "ability", ".", "id", "is", "AbilityId", ".", "HARVEST_RETURN", ":", "actions", ".", "append", "(", "w", ".", "move", "(", "townhall", ")", ")", "actions", ".", "append", "(", "w", ".", "return_resource", "(", "queue", "=", "True", ")", ")", "actions", ".", "append", "(", "w", ".", "gather", "(", "mf", ",", "queue", "=", "True", ")", ")", "else", ":", "actions", ".", "append", "(", "w", ".", "gather", "(", "mf", ")", ")", "await", "self", ".", "do_actions", "(", "actions", ")" ]
Distributes workers across all the bases taken. WARNING: This is quite slow when there are lots of workers or multiple bases.
[ "Distributes", "workers", "across", "all", "the", "bases", "taken", ".", "WARNING", ":", "This", "is", "quite", "slow", "when", "there", "are", "lots", "of", "workers", "or", "multiple", "bases", "." ]
python
train
spyder-ide/spyder
spyder/plugins/editor/utils/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/editor.py#L492-L508
def selection_range(self): """ Returns the selected lines boundaries (start line, end line) :return: tuple(int, int) """ editor = self._editor doc = editor.document() start = doc.findBlock( editor.textCursor().selectionStart()).blockNumber() end = doc.findBlock( editor.textCursor().selectionEnd()).blockNumber() text_cursor = QTextCursor(editor.textCursor()) text_cursor.setPosition(editor.textCursor().selectionEnd()) if text_cursor.columnNumber() == 0 and start != end: end -= 1 return start, end
[ "def", "selection_range", "(", "self", ")", ":", "editor", "=", "self", ".", "_editor", "doc", "=", "editor", ".", "document", "(", ")", "start", "=", "doc", ".", "findBlock", "(", "editor", ".", "textCursor", "(", ")", ".", "selectionStart", "(", ")", ")", ".", "blockNumber", "(", ")", "end", "=", "doc", ".", "findBlock", "(", "editor", ".", "textCursor", "(", ")", ".", "selectionEnd", "(", ")", ")", ".", "blockNumber", "(", ")", "text_cursor", "=", "QTextCursor", "(", "editor", ".", "textCursor", "(", ")", ")", "text_cursor", ".", "setPosition", "(", "editor", ".", "textCursor", "(", ")", ".", "selectionEnd", "(", ")", ")", "if", "text_cursor", ".", "columnNumber", "(", ")", "==", "0", "and", "start", "!=", "end", ":", "end", "-=", "1", "return", "start", ",", "end" ]
Returns the selected lines boundaries (start line, end line) :return: tuple(int, int)
[ "Returns", "the", "selected", "lines", "boundaries", "(", "start", "line", "end", "line", ")" ]
python
train
saltstack/salt
salt/modules/k8s.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L237-L292
def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is an old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} updated".format(name) else: # This is an old label and it already has the wanted value ret['comment'] = "Label {0} already set".format(name) return ret
[ "def", "label_present", "(", "name", ",", "value", ",", "node", "=", "None", ",", "apiserver_url", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "# Get salt minion ID", "node", "=", "_guess_node_id", "(", "node", ")", "# Try to get kubernetes master", "apiserver_url", "=", "_guess_apiserver", "(", "apiserver_url", ")", "if", "apiserver_url", "is", "None", ":", "return", "False", "# Get all labels", "labels", "=", "_get_labels", "(", "node", ",", "apiserver_url", ")", "if", "name", "not", "in", "labels", ":", "# This is a new label", "ret", "[", "'changes'", "]", "=", "{", "name", ":", "value", "}", "labels", "[", "name", "]", "=", "str", "(", "value", ")", "res", "=", "_set_labels", "(", "node", ",", "apiserver_url", ",", "labels", ")", "if", "res", ".", "get", "(", "'status'", ")", "==", "409", ":", "# there is an update during operation, need to retry", "log", ".", "debug", "(", "\"Got 409, will try later\"", ")", "ret", "[", "'changes'", "]", "=", "{", "}", "ret", "[", "'comment'", "]", "=", "\"Could not create label {0}, please retry\"", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "\"Label {0} created\"", ".", "format", "(", "name", ")", "elif", "labels", ".", "get", "(", "name", ")", "!=", "str", "(", "value", ")", ":", "# This is a old label and we are going to edit it", "ret", "[", "'changes'", "]", "=", "{", "name", ":", "str", "(", "value", ")", "}", "labels", "[", "name", "]", "=", "str", "(", "value", ")", "res", "=", "_set_labels", "(", "node", ",", "apiserver_url", ",", "labels", ")", "if", "res", ".", "get", "(", "'status'", ")", "==", "409", ":", "# there is an update during operation, need to retry", "log", ".", "debug", "(", "\"Got 409, will try later\"", ")", "ret", "[", "'changes'", "]", "=", "{", "}", "ret", "[", "'comment'", "]", "=", "\"Could not update label {0}, please retry\"", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "\"Label {0} updated\"", ".", "format", "(", "name", ")", "else", ":", "# This is a old label and it has already the wanted value", "ret", "[", "'comment'", "]", "=", "\"Label {0} already set\"", ".", "format", "(", "name", ")", "return", "ret" ]
.. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local
[ "..", "versionadded", "::", "2016", ".", "3", ".", "0" ]
python
train
apache/incubator-heron
heron/tools/common/src/python/access/heron_api.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/common/src/python/access/heron_api.py#L648-L673
def get_container_file_download_url(cluster, environ, topology, container, path, role=None): ''' :param cluster: :param environ: :param topology: :param container: :param path: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, container=container, path=path) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(FILE_DOWNLOAD_URL_FMT), params) return request_url
[ "def", "get_container_file_download_url", "(", "cluster", ",", "environ", ",", "topology", ",", "container", ",", "path", ",", "role", "=", "None", ")", ":", "params", "=", "dict", "(", "cluster", "=", "cluster", ",", "environ", "=", "environ", ",", "topology", "=", "topology", ",", "container", "=", "container", ",", "path", "=", "path", ")", "if", "role", "is", "not", "None", ":", "params", "[", "'role'", "]", "=", "role", "request_url", "=", "tornado", ".", "httputil", ".", "url_concat", "(", "create_url", "(", "FILE_DOWNLOAD_URL_FMT", ")", ",", "params", ")", "if", "role", "is", "not", "None", ":", "request_url", "=", "tornado", ".", "httputil", ".", "url_concat", "(", "request_url", ",", "dict", "(", "role", "=", "role", ")", ")", "return", "request_url" ]
:param cluster: :param environ: :param topology: :param container: :param path: :param role: :return:
[ ":", "param", "cluster", ":", ":", "param", "environ", ":", ":", "param", "topology", ":", ":", "param", "container", ":", ":", "param", "path", ":", ":", "param", "role", ":", ":", "return", ":" ]
python
valid
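For reference, `tornado.httputil.url_concat` simply appends query parameters, so concatenating the same key twice produces a duplicated parameter in the query string; that is why the function above adds `role` to `params` only once. A quick demonstration (the URL is hypothetical):

from tornado.httputil import url_concat

base = "http://tracker.example.com/topologies/filedownload"
params = dict(cluster="c1", topology="t1", role="ops")

url = url_concat(base, params)
print(url)
# Concatenating role again would duplicate it in the query string:
print(url_concat(url, dict(role="ops")))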
pyroscope/pyrocore
src/pyrocore/util/load_config.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/util/load_config.py#L85-L89
def _update_config(self, namespace): # pylint: disable=no-self-use """ Inject the items from the given dict into the configuration. """ for key, val in namespace.items(): setattr(config, key, val)
[ "def", "_update_config", "(", "self", ",", "namespace", ")", ":", "# pylint: disable=no-self-use", "for", "key", ",", "val", "in", "namespace", ".", "items", "(", ")", ":", "setattr", "(", "config", ",", "key", ",", "val", ")" ]
Inject the items from the given dict into the configuration.
[ "Inject", "the", "items", "from", "the", "given", "dict", "into", "the", "configuration", "." ]
python
train
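The same injection pattern in isolation, using a plain namespace object in place of the real `config` module (attribute names are hypothetical):

import types

config = types.SimpleNamespace()

def update_config(namespace):
    """Inject the items from the given dict as attributes on config."""
    for key, val in namespace.items():
        setattr(config, key, val)

update_config({"rtorrent_rc": "~/.rtorrent.rc", "fast_query": 1})
print(config.rtorrent_rc, config.fast_query)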
git-afsantos/bonsai
bonsai/model.py
https://github.com/git-afsantos/bonsai/blob/aa5af3f535b3b506bfc95c107c501fc9c4bcd072/bonsai/model.py#L642-L650
def pretty_str(self, indent=0): """Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation. """ if self.parenthesis: return '{}({})'.format(' ' * indent, pretty_str(self.value)) return pretty_str(self.value, indent=indent)
[ "def", "pretty_str", "(", "self", ",", "indent", "=", "0", ")", ":", "if", "self", ".", "parenthesis", ":", "return", "'{}({})'", ".", "format", "(", "' '", "*", "indent", ",", "pretty_str", "(", "self", ".", "value", ")", ")", "return", "pretty_str", "(", "self", ".", "value", ",", "indent", "=", "indent", ")" ]
Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation.
[ "Return", "a", "human", "-", "readable", "string", "representation", "of", "this", "object", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/work/work_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work/work_client.py#L722-L758
def update_board_chart(self, chart, team_context, board, name): """UpdateBoardChart. Update a board chart :param :class:`<BoardChart> <azure.devops.v5_0.work.models.BoardChart>` chart: :param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation :param str board: Identifier for board, either board's backlog level name (Eg:"Stories") or Id :param str name: The chart name :rtype: :class:`<BoardChart> <azure.devops.v5_0.work.models.BoardChart>` """ project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if board is not None: route_values['board'] = self._serialize.url('board', board, 'str') if name is not None: route_values['name'] = self._serialize.url('name', name, 'str') content = self._serialize.body(chart, 'BoardChart') response = self._send(http_method='PATCH', location_id='45fe888c-239e-49fd-958c-df1a1ab21d97', version='5.0', route_values=route_values, content=content) return self._deserialize('BoardChart', response)
[ "def", "update_board_chart", "(", "self", ",", "chart", ",", "team_context", ",", "board", ",", "name", ")", ":", "project", "=", "None", "team", "=", "None", "if", "team_context", "is", "not", "None", ":", "if", "team_context", ".", "project_id", ":", "project", "=", "team_context", ".", "project_id", "else", ":", "project", "=", "team_context", ".", "project", "if", "team_context", ".", "team_id", ":", "team", "=", "team_context", ".", "team_id", "else", ":", "team", "=", "team_context", ".", "team", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'string'", ")", "if", "team", "is", "not", "None", ":", "route_values", "[", "'team'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'team'", ",", "team", ",", "'string'", ")", "if", "board", "is", "not", "None", ":", "route_values", "[", "'board'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'board'", ",", "board", ",", "'str'", ")", "if", "name", "is", "not", "None", ":", "route_values", "[", "'name'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'name'", ",", "name", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "chart", ",", "'BoardChart'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'PATCH'", ",", "location_id", "=", "'45fe888c-239e-49fd-958c-df1a1ab21d97'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'BoardChart'", ",", "response", ")" ]
UpdateBoardChart. Update a board chart :param :class:`<BoardChart> <azure.devops.v5_0.work.models.BoardChart>` chart: :param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation :param str board: Identifier for board, either board's backlog level name (Eg:"Stories") or Id :param str name: The chart name :rtype: :class:`<BoardChart> <azure.devops.v5_0.work.models.BoardChart>`
[ "UpdateBoardChart", ".", "Update", "a", "board", "chart", ":", "param", ":", "class", ":", "<BoardChart", ">", "<azure", ".", "devops", ".", "v5_0", ".", "work", ".", "models", ".", "BoardChart", ">", "chart", ":", ":", "param", ":", "class", ":", "<TeamContext", ">", "<azure", ".", "devops", ".", "v5_0", ".", "work", ".", "models", ".", "TeamContext", ">", "team_context", ":", "The", "team", "context", "for", "the", "operation", ":", "param", "str", "board", ":", "Identifier", "for", "board", "either", "board", "s", "backlog", "level", "name", "(", "Eg", ":", "Stories", ")", "or", "Id", ":", "param", "str", "name", ":", "The", "chart", "name", ":", "rtype", ":", ":", "class", ":", "<BoardChart", ">", "<azure", ".", "devops", ".", "v5_0", ".", "work", ".", "models", ".", "BoardChart", ">" ]
python
train
prthkms/alex
alex/handler.py
https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/handler.py#L131-L153
def system_info(query): """system_info(query) -- print system specific information like OS, kernel, architecture etc. """ proc = subprocess.Popen(["uname -o"], stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print "operating system : "+str(out), proc = subprocess.Popen(["uname"], stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print "kernel : "+str(out), proc = subprocess.Popen(["uname -r"], stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print "kernel release : "+str(out), proc = subprocess.Popen(["uname -m"], stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print "architecture : "+str(out), proc = subprocess.Popen(["uname -n"], stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print "network node name : "+str(out),
[ "def", "system_info", "(", "query", ")", ":", "proc", "=", "subprocess", ".", "Popen", "(", "[", "\"uname -o\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "True", ")", "(", "out", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "print", "\"operating system : \"", "+", "str", "(", "out", ")", ",", "proc", "=", "subprocess", ".", "Popen", "(", "[", "\"uname\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "True", ")", "(", "out", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "print", "\"kernel : \"", "+", "str", "(", "out", ")", ",", "proc", "=", "subprocess", ".", "Popen", "(", "[", "\"uname -r\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "True", ")", "(", "out", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "print", "\"kernel release : \"", "+", "str", "(", "out", ")", ",", "proc", "=", "subprocess", ".", "Popen", "(", "[", "\"uname -m\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "True", ")", "(", "out", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "print", "\"architecture : \"", "+", "str", "(", "out", ")", ",", "proc", "=", "subprocess", ".", "Popen", "(", "[", "\"uname -n\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "True", ")", "(", "out", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "print", "\"network node name : \"", "+", "str", "(", "out", ")", "," ]
system_info(query) -- print system specific information like OS, kernel, architecture etc.
[ "system_info", "(", "query", ")", "--", "print", "system", "specific", "information", "like", "OS", "kernel", "architecture", "etc", "." ]
python
train
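The snippet above is Python 2 (`print` statements with trailing commas). A rough Python 3 equivalent of the same idea, kept as a sketch since `uname` and its flags are Unix-only and vary across platforms:

import subprocess

def system_info():
    """Print OS, kernel, architecture and hostname via uname."""
    fields = [
        ("operating system", "uname -o"),
        ("kernel", "uname"),
        ("kernel release", "uname -r"),
        ("architecture", "uname -m"),
        ("network node name", "uname -n"),
    ]
    for label, cmd in fields:
        out = subprocess.run(cmd.split(), capture_output=True, text=True).stdout
        print("{} : {}".format(label, out.strip()))

system_info()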
mushkevych/scheduler
synergy/conf/__init__.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/conf/__init__.py#L157-L171
def _setup(self): """ Load the context module pointed to by the environment variable. This is used the first time we need the context at all, if the user has not previously configured the context manually. """ context_module = os.environ.get(ENVIRONMENT_CONTEXT_VARIABLE, 'context') if not context_module: raise ImproperlyConfigured( 'Requested context points to an empty variable. ' 'You must either define the environment variable {0} ' 'or call context.configure() before accessing the context.' .format(ENVIRONMENT_CONTEXT_VARIABLE)) self._wrapped = Settings(context_module, default_settings=global_context)
[ "def", "_setup", "(", "self", ")", ":", "context_module", "=", "os", ".", "environ", ".", "get", "(", "ENVIRONMENT_CONTEXT_VARIABLE", ",", "'context'", ")", "if", "not", "context_module", ":", "raise", "ImproperlyConfigured", "(", "'Requested context points to an empty variable. '", "'You must either define the environment variable {0} '", "'or call context.configure() before accessing the context.'", ".", "format", "(", "ENVIRONMENT_CONTEXT_VARIABLE", ")", ")", "self", ".", "_wrapped", "=", "Settings", "(", "context_module", ",", "default_settings", "=", "global_context", ")" ]
Load the context module pointed to by the environment variable. This is used the first time we need the context at all, if the user has not previously configured the context manually.
[ "Load", "the", "context", "module", "pointed", "to", "by", "the", "environment", "variable", ".", "This", "is", "used", "the", "first", "time", "we", "need", "the", "context", "at", "all", "if", "the", "user", "has", "not", "previously", "configured", "the", "context", "manually", "." ]
python
train
pantsbuild/pants
pants-plugins/src/python/internal_backend/utilities/register.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/pants-plugins/src/python/internal_backend/utilities/register.py#L67-L86
def contrib_setup_py(name, description, additional_classifiers=None, **kwargs): """Creates the setup_py for a pants contrib plugin artifact. :param str name: The name of the package; must start with 'pantsbuild.pants.contrib.'. :param str description: A brief description of what the plugin provides. :param list additional_classifiers: Any additional trove classifiers that apply to the plugin, see: https://pypi.org/pypi?%3Aaction=list_classifiers :param kwargs: Any additional keyword arguments to be passed to `setuptools.setup <https://pythonhosted.org/setuptools/setuptools.html>`_. :returns: A setup_py suitable for building and publishing pants components. """ if not name.startswith('pantsbuild.pants.contrib.'): raise ValueError("Contrib plugin package names must start with 'pantsbuild.pants.contrib.', " "given {}".format(name)) return pants_setup_py(name, description, additional_classifiers=additional_classifiers, namespace_packages=['pants', 'pants.contrib'], **kwargs)
[ "def", "contrib_setup_py", "(", "name", ",", "description", ",", "additional_classifiers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "name", ".", "startswith", "(", "'pantsbuild.pants.contrib.'", ")", ":", "raise", "ValueError", "(", "\"Contrib plugin package names must start with 'pantsbuild.pants.contrib.', \"", "\"given {}\"", ".", "format", "(", "name", ")", ")", "return", "pants_setup_py", "(", "name", ",", "description", ",", "additional_classifiers", "=", "additional_classifiers", ",", "namespace_packages", "=", "[", "'pants'", ",", "'pants.contrib'", "]", ",", "*", "*", "kwargs", ")" ]
Creates the setup_py for a pants contrib plugin artifact. :param str name: The name of the package; must start with 'pantsbuild.pants.contrib.'. :param str description: A brief description of what the plugin provides. :param list additional_classifiers: Any additional trove classifiers that apply to the plugin, see: https://pypi.org/pypi?%3Aaction=list_classifiers :param kwargs: Any additional keyword arguments to be passed to `setuptools.setup <https://pythonhosted.org/setuptools/setuptools.html>`_. :returns: A setup_py suitable for building and publishing pants components.
[ "Creates", "the", "setup_py", "for", "a", "pants", "contrib", "plugin", "artifact", "." ]
python
train
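A quick illustration of the name guard above; `pants_setup_py` is not shown in this record, so only the failing validation path is exercised (the package name is invented):

try:
    contrib_setup_py("pantsbuild.pants.spindle", "Not a contrib package")
except ValueError as exc:
    print(exc)
    # Contrib plugin package names must start with
    # 'pantsbuild.pants.contrib.', given pantsbuild.pants.spindle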
RLBot/RLBot
src/main/python/rlbot/gui/qt_root.py
https://github.com/RLBot/RLBot/blob/3f9b6bec8b9baf4dcfff0f6cf3103c8744ac6234/src/main/python/rlbot/gui/qt_root.py#L510-L535
def update_teams_listwidgets(self): """ Clears all items from the listwidgets and then adds everything from the self.agents list again to the right team :return: """ self.bot_names_to_agent_dict.clear() self.blue_listwidget.clear() self.orange_listwidget.clear() for agent in self.agents: name = self.validate_name(agent.ingame_name, agent) if not agent.get_team(): self.blue_listwidget.addItem(name) else: self.orange_listwidget.addItem(name) self.bot_names_to_agent_dict[name] = agent self.enable_disable_on_bot_select_deselect() # if max bot count reached: disable + button if not self.index_manager.has_free_slots(): self.blue_plus_toolbutton.setDisabled(True) self.orange_plus_toolbutton.setDisabled(True) else: self.blue_plus_toolbutton.setDisabled(False) self.orange_plus_toolbutton.setDisabled(False)
[ "def", "update_teams_listwidgets", "(", "self", ")", ":", "self", ".", "bot_names_to_agent_dict", ".", "clear", "(", ")", "self", ".", "blue_listwidget", ".", "clear", "(", ")", "self", ".", "orange_listwidget", ".", "clear", "(", ")", "for", "agent", "in", "self", ".", "agents", ":", "name", "=", "self", ".", "validate_name", "(", "agent", ".", "ingame_name", ",", "agent", ")", "if", "not", "agent", ".", "get_team", "(", ")", ":", "self", ".", "blue_listwidget", ".", "addItem", "(", "name", ")", "else", ":", "self", ".", "orange_listwidget", ".", "addItem", "(", "name", ")", "self", ".", "bot_names_to_agent_dict", "[", "name", "]", "=", "agent", "self", ".", "enable_disable_on_bot_select_deselect", "(", ")", "# if max bot count reached: disable + button", "if", "not", "self", ".", "index_manager", ".", "has_free_slots", "(", ")", ":", "self", ".", "blue_plus_toolbutton", ".", "setDisabled", "(", "True", ")", "self", ".", "orange_plus_toolbutton", ".", "setDisabled", "(", "True", ")", "else", ":", "self", ".", "blue_plus_toolbutton", ".", "setDisabled", "(", "False", ")", "self", ".", "orange_plus_toolbutton", ".", "setDisabled", "(", "False", ")" ]
Clears all items from the listwidgets and then adds everything from the self.agents list again to the right team :return:
[ "Clears", "all", "items", "from", "the", "listwidgets", "and", "then", "adds", "everything", "from", "the", "self", ".", "agents", "list", "again", "to", "the", "right", "team", ":", "return", ":" ]
python
train
Parsl/parsl
parsl/dataflow/dflow.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/dataflow/dflow.py#L492-L527
def _gather_all_deps(self, args, kwargs): """Count the number of unresolved futures on which a task depends. Args: - args (List[args]) : The list of positional args passed to the fn - kwargs (Dict{kwargs}) : The dict of all kwargs passed to the fn Returns: - count, [list of dependencies] """ # Check the positional args depends = [] count = 0 for dep in args: if isinstance(dep, Future): if self.tasks[dep.tid]['status'] not in FINAL_STATES: count += 1 depends.extend([dep]) # Check for explicit kwargs ex, fu_1=<fut> for key in kwargs: dep = kwargs[key] if isinstance(dep, Future): if self.tasks[dep.tid]['status'] not in FINAL_STATES: count += 1 depends.extend([dep]) # Check for futures in inputs=[<fut>...] for dep in kwargs.get('inputs', []): if isinstance(dep, Future): if self.tasks[dep.tid]['status'] not in FINAL_STATES: count += 1 depends.extend([dep]) return count, depends
[ "def", "_gather_all_deps", "(", "self", ",", "args", ",", "kwargs", ")", ":", "# Check the positional args", "depends", "=", "[", "]", "count", "=", "0", "for", "dep", "in", "args", ":", "if", "isinstance", "(", "dep", ",", "Future", ")", ":", "if", "self", ".", "tasks", "[", "dep", ".", "tid", "]", "[", "'status'", "]", "not", "in", "FINAL_STATES", ":", "count", "+=", "1", "depends", ".", "extend", "(", "[", "dep", "]", ")", "# Check for explicit kwargs ex, fu_1=<fut>", "for", "key", "in", "kwargs", ":", "dep", "=", "kwargs", "[", "key", "]", "if", "isinstance", "(", "dep", ",", "Future", ")", ":", "if", "self", ".", "tasks", "[", "dep", ".", "tid", "]", "[", "'status'", "]", "not", "in", "FINAL_STATES", ":", "count", "+=", "1", "depends", ".", "extend", "(", "[", "dep", "]", ")", "# Check for futures in inputs=[<fut>...]", "for", "dep", "in", "kwargs", ".", "get", "(", "'inputs'", ",", "[", "]", ")", ":", "if", "isinstance", "(", "dep", ",", "Future", ")", ":", "if", "self", ".", "tasks", "[", "dep", ".", "tid", "]", "[", "'status'", "]", "not", "in", "FINAL_STATES", ":", "count", "+=", "1", "depends", ".", "extend", "(", "[", "dep", "]", ")", "return", "count", ",", "depends" ]
Count the number of unresolved futures on which a task depends. Args: - args (List[args]) : The list of positional args passed to the fn - kwargs (Dict{kwargs}) : The dict of all kwargs passed to the fn Returns: - count, [list of dependencies]
[ "Count", "the", "number", "of", "unresolved", "futures", "on", "which", "a", "task", "depends", "." ]
python
valid
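The dependency scan above, reduced to a standalone sketch over plain `concurrent.futures` objects (the real method also consults task status via `self.tasks`, which is elided here):

from concurrent.futures import Future

def gather_deps(args, kwargs):
    """Return (count, deps) of unresolved futures among the arguments."""
    depends = [d for d in args if isinstance(d, Future) and not d.done()]
    depends += [v for v in kwargs.values()
                if isinstance(v, Future) and not v.done()]
    depends += [d for d in kwargs.get("inputs", [])
                if isinstance(d, Future) and not d.done()]
    return len(depends), depends

pending, finished = Future(), Future()
finished.set_result(42)
count, deps = gather_deps((pending, finished), {"inputs": [pending]})
print(count)  # 2 -- the pending future is counted once per appearance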
thomasleese/mo
mo/project.py
https://github.com/thomasleese/mo/blob/b757f52b42e51ad19c14724ceb7c5db5d52abaea/mo/project.py#L230-L263
def find_task(self, name): """ Find a task by name. If a task with the exact name cannot be found, then tasks with similar names are searched for. Returns ------- Task If the task is found. Raises ------ NoSuchTaskError If the task cannot be found. """ try: return self.tasks[name] except KeyError: pass similarities = [] for task_name, task in self.tasks.items(): ratio = SequenceMatcher(None, name, task_name).ratio() if ratio >= 0.75: similarities.append(task) if len(similarities) == 1: return similarities[0] else: raise NoSuchTaskError(similarities)
[ "def", "find_task", "(", "self", ",", "name", ")", ":", "try", ":", "return", "self", ".", "tasks", "[", "name", "]", "except", "KeyError", ":", "pass", "similarities", "=", "[", "]", "for", "task_name", ",", "task", "in", "self", ".", "tasks", ".", "items", "(", ")", ":", "ratio", "=", "SequenceMatcher", "(", "None", ",", "name", ",", "task_name", ")", ".", "ratio", "(", ")", "if", "ratio", ">=", "0.75", ":", "similarities", ".", "append", "(", "task", ")", "if", "len", "(", "similarities", ")", "==", "1", ":", "return", "similarities", "[", "0", "]", "else", ":", "raise", "NoSuchTaskError", "(", "similarities", ")" ]
Find a task by name. If a task with the exact name cannot be found, then tasks with similar names are searched for. Returns ------- Task If the task is found. Raises ------ NoSuchTaskError If the task cannot be found.
[ "Find", "a", "task", "by", "name", "." ]
python
train
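The fuzzy fallback uses `difflib.SequenceMatcher`; a standalone look at the 0.75 ratio threshold with invented task names:

from difflib import SequenceMatcher

tasks = ["build", "test", "deploy"]
query = "deplyo"  # typo for "deploy"

for name in tasks:
    ratio = SequenceMatcher(None, query, name).ratio()
    print(name, round(ratio, 2))
# Only "deploy" clears the 0.75 bar, so it would be returned
# as the single similar match.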
databio/pypiper
pypiper/manager.py
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L776-L807
def checkprint(self, cmd, shell=None, nofail=False): """ Just like callprint, but checks output -- so you can get a variable in python corresponding to the return value of the command you call. This is equivalent to running subprocess.check_output() instead of subprocess.call(). :param str | Iterable[str] cmd: Bash command(s) to be run. :param bool | str shell: Whether the command should be run in its own shell. Optional. Default: "guess" -- `run()` will try to guess if the command should be run in a shell (based on the presence of a pipe (|) or redirect (>)). To force a process to run as a direct subprocess, set `shell` to False; to force a shell, set True. :param bool nofail: Should the pipeline bail on a nonzero return from a process? Default: False. Nofail can be used to implement non-essential parts of the pipeline; if these processes fail, they will not cause the pipeline to bail out. """ self._report_command(cmd) likely_shell = check_shell(cmd, shell) if shell is None: shell = likely_shell if not shell: if likely_shell: print("Should this command run in a shell instead of directly in a subprocess?") cmd = shlex.split(cmd) try: return subprocess.check_output(cmd, shell=shell) except Exception as e: self._triage_error(e, nofail)
[ "def", "checkprint", "(", "self", ",", "cmd", ",", "shell", "=", "None", ",", "nofail", "=", "False", ")", ":", "self", ".", "_report_command", "(", "cmd", ")", "likely_shell", "=", "check_shell", "(", "cmd", ",", "shell", ")", "if", "shell", "is", "None", ":", "shell", "=", "likely_shell", "if", "not", "shell", ":", "if", "likely_shell", ":", "print", "(", "\"Should this command run in a shell instead of directly in a subprocess?\"", ")", "cmd", "=", "shlex", ".", "split", "(", "cmd", ")", "try", ":", "return", "subprocess", ".", "check_output", "(", "cmd", ",", "shell", "=", "shell", ")", "except", "Exception", "as", "e", ":", "self", ".", "_triage_error", "(", "e", ",", "nofail", ")" ]
Just like callprint, but checks output -- so you can get a variable in python corresponding to the return value of the command you call. This is equivalent to running subprocess.check_output() instead of subprocess.call(). :param str | Iterable[str] cmd: Bash command(s) to be run. :param bool | str shell: Whether the command should be run in its own shell. Optional. Default: "guess" -- `run()` will try to guess if the command should be run in a shell (based on the presence of a pipe (|) or redirect (>)). To force a process to run as a direct subprocess, set `shell` to False; to force a shell, set True. :param bool nofail: Should the pipeline bail on a nonzero return from a process? Default: False. Nofail can be used to implement non-essential parts of the pipeline; if these processes fail, they will not cause the pipeline to bail out.
[ "Just", "like", "callprint", "but", "checks", "output", "--", "so", "you", "can", "get", "a", "variable", "in", "python", "corresponding", "to", "the", "return", "value", "of", "the", "command", "you", "call", ".", "This", "is", "equivalent", "to", "running", "subprocess", ".", "check_output", "()", "instead", "of", "subprocess", ".", "call", "()", ".", ":", "param", "str", "|", "Iterable", "[", "str", "]", "cmd", ":", "Bash", "command", "(", "s", ")", "to", "be", "run", ".", ":", "param", "bool", "|", "str", "shell", ":", "If", "command", "requires", "should", "be", "run", "in", "its", "own", "shell", ".", "Optional", ".", "Default", ":", "guess", "--", "run", "()", "will", "try", "to", "guess", "if", "the", "command", "should", "be", "run", "in", "a", "shell", "(", "based", "on", "the", "presence", "of", "a", "pipe", "(", "|", ")", "or", "redirect", "(", ">", ")", "To", "force", "a", "process", "to", "run", "as", "a", "direct", "subprocess", "set", "shell", "to", "False", ";", "to", "force", "a", "shell", "set", "True", ".", ":", "param", "bool", "nofail", ":", "Should", "the", "pipeline", "bail", "on", "a", "nonzero", "return", "from", "a", "process?", "Default", ":", "False", "Nofail", "can", "be", "used", "to", "implement", "non", "-", "essential", "parts", "of", "the", "pipeline", ";", "if", "these", "processes", "fail", "they", "will", "not", "cause", "the", "pipeline", "to", "bail", "out", "." ]
python
train
codeinn/vcs
vcs/backends/__init__.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/backends/__init__.py#L37-L47
def get_backend(alias): """ Returns ``Repository`` class identified by the given alias or raises VCSError if alias is not recognized or backend class cannot be imported. """ if alias not in settings.BACKENDS: raise VCSError("Given alias '%s' is not recognized! Allowed aliases:\n" "%s" % (alias, pformat(settings.BACKENDS.keys()))) backend_path = settings.BACKENDS[alias] klass = import_class(backend_path) return klass
[ "def", "get_backend", "(", "alias", ")", ":", "if", "alias", "not", "in", "settings", ".", "BACKENDS", ":", "raise", "VCSError", "(", "\"Given alias '%s' is not recognized! Allowed aliases:\\n\"", "\"%s\"", "%", "(", "alias", ",", "pformat", "(", "settings", ".", "BACKENDS", ".", "keys", "(", ")", ")", ")", ")", "backend_path", "=", "settings", ".", "BACKENDS", "[", "alias", "]", "klass", "=", "import_class", "(", "backend_path", ")", "return", "klass" ]
Returns ``Repository`` class identified by the given alias or raises VCSError if alias is not recognized or backend class cannot be imported.
[ "Returns", "Repository", "class", "identified", "by", "the", "given", "alias", "or", "raises", "VCSError", "if", "alias", "is", "not", "recognized", "or", "backend", "class", "cannot", "be", "imported", "." ]
python
train
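The alias lookup amounts to a guarded dynamic import; a self-contained sketch using `importlib` (the alias table mirrors the vcs backends but is illustrative, and the demo imports a standard-library class so it runs anywhere):

import importlib

BACKENDS = {
    "hg": "vcs.backends.hg.MercurialRepository",
    "git": "vcs.backends.git.GitRepository",
}

def import_class(path):
    """Import 'pkg.module.Class' and return the class object."""
    module_path, class_name = path.rsplit(".", 1)
    return getattr(importlib.import_module(module_path), class_name)

def get_backend(alias):
    if alias not in BACKENDS:
        raise KeyError("alias {!r} not recognized".format(alias))
    return import_class(BACKENDS[alias])

# Demo with a standard-library class:
print(import_class("collections.OrderedDict"))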
NatLibFi/Skosify
skosify/skosify.py
https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L451-L468
def transform_deprecated_concepts(rdf, cs): """Transform deprecated concepts so they are in their own concept scheme.""" deprecated_concepts = [] for conc in rdf.subjects(RDF.type, SKOSEXT.DeprecatedConcept): rdf.add((conc, RDF.type, SKOS.Concept)) rdf.add((conc, OWL.deprecated, Literal("true", datatype=XSD.boolean))) deprecated_concepts.append(conc) if len(deprecated_concepts) > 0: ns = cs.replace(localname(cs), '') dcs = create_concept_scheme( rdf, ns, 'deprecatedconceptscheme') logging.debug("creating deprecated concept scheme %s", dcs) for conc in deprecated_concepts: rdf.add((conc, SKOS.inScheme, dcs))
[ "def", "transform_deprecated_concepts", "(", "rdf", ",", "cs", ")", ":", "deprecated_concepts", "=", "[", "]", "for", "conc", "in", "rdf", ".", "subjects", "(", "RDF", ".", "type", ",", "SKOSEXT", ".", "DeprecatedConcept", ")", ":", "rdf", ".", "add", "(", "(", "conc", ",", "RDF", ".", "type", ",", "SKOS", ".", "Concept", ")", ")", "rdf", ".", "add", "(", "(", "conc", ",", "OWL", ".", "deprecated", ",", "Literal", "(", "\"true\"", ",", "datatype", "=", "XSD", ".", "boolean", ")", ")", ")", "deprecated_concepts", ".", "append", "(", "conc", ")", "if", "len", "(", "deprecated_concepts", ")", ">", "0", ":", "ns", "=", "cs", ".", "replace", "(", "localname", "(", "cs", ")", ",", "''", ")", "dcs", "=", "create_concept_scheme", "(", "rdf", ",", "ns", ",", "'deprecatedconceptscheme'", ")", "logging", ".", "debug", "(", "\"creating deprecated concept scheme %s\"", ",", "dcs", ")", "for", "conc", "in", "deprecated_concepts", ":", "rdf", ".", "add", "(", "(", "conc", ",", "SKOS", ".", "inScheme", ",", "dcs", ")", ")" ]
Transform deprecated concepts so they are in their own concept scheme.
[ "Transform", "deprecated", "concepts", "so", "they", "are", "in", "their", "own", "concept", "scheme", "." ]
python
train
Spinmob/spinmob
egg/_gui.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_gui.py#L132-L137
def block_events(self): """ Prevents the widget from sending signals. """ self._widget.blockSignals(True) self._widget.setUpdatesEnabled(False)
[ "def", "block_events", "(", "self", ")", ":", "self", ".", "_widget", ".", "blockSignals", "(", "True", ")", "self", ".", "_widget", ".", "setUpdatesEnabled", "(", "False", ")" ]
Prevents the widget from sending signals.
[ "Prevents", "the", "widget", "from", "sending", "signals", "." ]
python
train
chrislim2888/IP2Location-Python
IP2Location.py
https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L129-L132
def get_country_short(self, ip): ''' Get country_short ''' rec = self.get_all(ip) return rec and rec.country_short
[ "def", "get_country_short", "(", "self", ",", "ip", ")", ":", "rec", "=", "self", ".", "get_all", "(", "ip", ")", "return", "rec", "and", "rec", ".", "country_short" ]
Get country_short
[ "Get", "country_short" ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/imageencode.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/imageencode.py#L8-L35
def encode(input, output_filename): """Encodes the input data with reed-solomon error correction in 223 byte blocks, and outputs each block along with 32 parity bytes to a new file by the given filename. input is a file-like object The outputted image will be in png format, and will be 255 by x pixels with one color channel. X is the number of 255 byte blocks from the input. Each block of data will be one row, therefore, the data can be recovered if no more than 16 pixels per row are altered. """ coder = rs.RSCoder(255,223) output = [] while True: block = input.read(223) if not block: break code = coder.encode_fast(block) output.append(code) sys.stderr.write(".") sys.stderr.write("\n") out = Image.new("L", (rowstride,len(output))) out.putdata("".join(output)) out.save(output_filename)
[ "def", "encode", "(", "input", ",", "output_filename", ")", ":", "coder", "=", "rs", ".", "RSCoder", "(", "255", ",", "223", ")", "output", "=", "[", "]", "while", "True", ":", "block", "=", "input", ".", "read", "(", "223", ")", "if", "not", "block", ":", "break", "code", "=", "coder", ".", "encode_fast", "(", "block", ")", "output", ".", "append", "(", "code", ")", "sys", ".", "stderr", ".", "write", "(", "\".\"", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\n\"", ")", "out", "=", "Image", ".", "new", "(", "\"L\"", ",", "(", "rowstride", ",", "len", "(", "output", ")", ")", ")", "out", ".", "putdata", "(", "\"\"", ".", "join", "(", "output", ")", ")", "out", ".", "save", "(", "output_filename", ")" ]
Encodes the input data with reed-solomon error correction in 223 byte blocks, and outputs each block along with 32 parity bytes to a new file by the given filename. input is a file-like object The outputted image will be in png format, and will be 255 by x pixels with one color channel. X is the number of 255 byte blocks from the input. Each block of data will be one row, therefore, the data can be recovered if no more than 16 pixels per row are altered.
[ "Encodes", "the", "input", "data", "with", "reed", "-", "solomon", "error", "correction", "in", "223", "byte", "blocks", "and", "outputs", "each", "block", "along", "with", "32", "parity", "bytes", "to", "a", "new", "file", "by", "the", "given", "filename", "." ]
python
train
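A hedged sketch of the 223/255 chunking logic alone, with a stub coder so the example runs without the `rs` module (real parity bytes would come from `rs.RSCoder(255, 223).encode_fast`):

import io

class StubCoder(object):
    """Placeholder: pads each block to 255 bytes instead of real RS parity."""
    def encode_fast(self, block):
        return block.ljust(255, b"\x00")

coder = StubCoder()
source = io.BytesIO(b"x" * 500)  # 500 bytes -> 3 blocks (223 + 223 + 54)

blocks = []
while True:
    block = source.read(223)
    if not block:
        break
    blocks.append(coder.encode_fast(block))

print(len(blocks), [len(b) for b in blocks])  # 3 [255, 255, 255]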
onecodex/onecodex
onecodex/utils.py
https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/utils.py#L274-L280
def collapse_user(fp): """ Converts a path back to ~/ from expanduser() """ home_dir = os.path.expanduser("~") abs_path = os.path.abspath(fp) return abs_path.replace(home_dir, "~")
[ "def", "collapse_user", "(", "fp", ")", ":", "home_dir", "=", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", "abs_path", "=", "os", ".", "path", ".", "abspath", "(", "fp", ")", "return", "abs_path", ".", "replace", "(", "home_dir", ",", "\"~\"", ")" ]
Converts a path back to ~/ from expanduser()
[ "Converts", "a", "path", "back", "to", "~", "/", "from", "expanduser", "()" ]
python
train
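Round-trip check for `collapse_user`, assuming the function above is in scope; the exact output depends on the current user's home directory:

import os.path

expanded = os.path.expanduser("~/projects/data.csv")
print(expanded)                 # e.g. /home/alice/projects/data.csv
print(collapse_user(expanded))  # ~/projects/data.csv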
materialsvirtuallab/monty
monty/dev.py
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/dev.py#L198-L228
def install_excepthook(hook_type="color", **kwargs): """ This function replaces the original python traceback with an improved version from IPython. Use `color` for colourful traceback formatting, `verbose` for Ka-Ping Yee's "cgitb.py" version. kwargs are the keyword arguments passed to the constructor. See IPython.core.ultratb.py for more info. Return: 0 if hook is installed successfully. """ try: from IPython.core import ultratb except ImportError: import warnings warnings.warn( "Cannot install excepthook, IPython.core.ultratb not available") return 1 # Select the hook. hook = dict( color=ultratb.ColorTB, verbose=ultratb.VerboseTB, ).get(hook_type.lower(), None) if hook is None: return 2 import sys sys.excepthook = hook(**kwargs) return 0
[ "def", "install_excepthook", "(", "hook_type", "=", "\"color\"", ",", "*", "*", "kwargs", ")", ":", "try", ":", "from", "IPython", ".", "core", "import", "ultratb", "except", "ImportError", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"Cannot install excepthook, IPyhon.core.ultratb not available\"", ")", "return", "1", "# Select the hook.", "hook", "=", "dict", "(", "color", "=", "ultratb", ".", "ColorTB", ",", "verbose", "=", "ultratb", ".", "VerboseTB", ",", ")", ".", "get", "(", "hook_type", ".", "lower", "(", ")", ",", "None", ")", "if", "hook", "is", "None", ":", "return", "2", "import", "sys", "sys", ".", "excepthook", "=", "hook", "(", "*", "*", "kwargs", ")", "return", "0" ]
This function replaces the original python traceback with an improved version from IPython. Use `color` for colourful traceback formatting, `verbose` for Ka-Ping Yee's "cgitb.py" version. kwargs are the keyword arguments passed to the constructor. See IPython.core.ultratb.py for more info. Return: 0 if hook is installed successfully.
[ "This", "function", "replaces", "the", "original", "python", "traceback", "with", "an", "improved", "version", "from", "Ipython", ".", "Use", "color", "for", "colourful", "traceback", "formatting", "verbose", "for", "Ka", "-", "Ping", "Yee", "s", "cgitb", ".", "py", "version", "kwargs", "are", "the", "keyword", "arguments", "passed", "to", "the", "constructor", ".", "See", "IPython", ".", "core", ".", "ultratb", ".", "py", "for", "more", "info", "." ]
python
train
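Typical call sites for `install_excepthook`, assuming the function above; the return codes distinguish a missing IPython (1) from an unknown hook type (2):

status = install_excepthook("color")
if status == 1:
    print("IPython not installed; keeping the default traceback")
elif status == 2:
    print("unknown hook type; choose 'color' or 'verbose'")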
log2timeline/plaso
plaso/engine/profilers.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/profilers.py#L240-L252
def Sample(self, operation, description, data_size, compressed_data_size): """Takes a sample of data read or written for profiling. Args: operation (str): operation, either 'read' or 'write'. description (str): description of the data read. data_size (int): size of the data read in bytes. compressed_data_size (int): size of the compressed data read in bytes. """ sample_time = time.time() sample = '{0:f}\t{1:s}\t{2:s}\t{3:d}\t{4:d}\n'.format( sample_time, operation, description, data_size, compressed_data_size) self._WritesString(sample)
[ "def", "Sample", "(", "self", ",", "operation", ",", "description", ",", "data_size", ",", "compressed_data_size", ")", ":", "sample_time", "=", "time", ".", "time", "(", ")", "sample", "=", "'{0:f}\\t{1:s}\\t{2:s}\\t{3:d}\\t{4:d}\\n'", ".", "format", "(", "sample_time", ",", "operation", ",", "description", ",", "data_size", ",", "compressed_data_size", ")", "self", ".", "_WritesString", "(", "sample", ")" ]
Takes a sample of data read or written for profiling. Args: operation (str): operation, either 'read' or 'write'. description (str): description of the data read. data_size (int): size of the data read in bytes. compressed_data_size (int): size of the compressed data read in bytes.
[ "Takes", "a", "sample", "of", "data", "read", "or", "written", "for", "profiling", "." ]
python
train
StanfordVL/robosuite
robosuite/environments/baxter_peg_in_hole.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/baxter_peg_in_hole.py#L60-L87
def _load_model(self): """ Loads the peg and the hole models. """ super()._load_model() self.mujoco_robot.set_base_xpos([0, 0, 0]) # Add arena and robot self.model = MujocoWorldBase() self.arena = EmptyArena() if self.use_indicator_object: self.arena.add_pos_indicator() self.model.merge(self.arena) self.model.merge(self.mujoco_robot) # Load hole object self.hole_obj = self.hole.get_collision(name="hole", site=True) self.hole_obj.set("quat", "0 0 0.707 0.707") self.hole_obj.set("pos", "0.11 0 0.18") self.model.merge_asset(self.hole) self.model.worldbody.find(".//body[@name='left_hand']").append(self.hole_obj) # Load cylinder object self.cyl_obj = self.cylinder.get_collision(name="cylinder", site=True) self.cyl_obj.set("pos", "0 0 0.15") self.model.merge_asset(self.cylinder) self.model.worldbody.find(".//body[@name='right_hand']").append(self.cyl_obj) self.model.worldbody.find(".//geom[@name='cylinder']").set("rgba", "0 1 0 1")
[ "def", "_load_model", "(", "self", ")", ":", "super", "(", ")", ".", "_load_model", "(", ")", "self", ".", "mujoco_robot", ".", "set_base_xpos", "(", "[", "0", ",", "0", ",", "0", "]", ")", "# Add arena and robot", "self", ".", "model", "=", "MujocoWorldBase", "(", ")", "self", ".", "arena", "=", "EmptyArena", "(", ")", "if", "self", ".", "use_indicator_object", ":", "self", ".", "arena", ".", "add_pos_indicator", "(", ")", "self", ".", "model", ".", "merge", "(", "self", ".", "arena", ")", "self", ".", "model", ".", "merge", "(", "self", ".", "mujoco_robot", ")", "# Load hole object", "self", ".", "hole_obj", "=", "self", ".", "hole", ".", "get_collision", "(", "name", "=", "\"hole\"", ",", "site", "=", "True", ")", "self", ".", "hole_obj", ".", "set", "(", "\"quat\"", ",", "\"0 0 0.707 0.707\"", ")", "self", ".", "hole_obj", ".", "set", "(", "\"pos\"", ",", "\"0.11 0 0.18\"", ")", "self", ".", "model", ".", "merge_asset", "(", "self", ".", "hole", ")", "self", ".", "model", ".", "worldbody", ".", "find", "(", "\".//body[@name='left_hand']\"", ")", ".", "append", "(", "self", ".", "hole_obj", ")", "# Load cylinder object", "self", ".", "cyl_obj", "=", "self", ".", "cylinder", ".", "get_collision", "(", "name", "=", "\"cylinder\"", ",", "site", "=", "True", ")", "self", ".", "cyl_obj", ".", "set", "(", "\"pos\"", ",", "\"0 0 0.15\"", ")", "self", ".", "model", ".", "merge_asset", "(", "self", ".", "cylinder", ")", "self", ".", "model", ".", "worldbody", ".", "find", "(", "\".//body[@name='right_hand']\"", ")", ".", "append", "(", "self", ".", "cyl_obj", ")", "self", ".", "model", ".", "worldbody", ".", "find", "(", "\".//geom[@name='cylinder']\"", ")", ".", "set", "(", "\"rgba\"", ",", "\"0 1 0 1\"", ")" ]
Loads the peg and the hole models.
[ "Loads", "the", "peg", "and", "the", "hole", "models", "." ]
python
train
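The XML surgery in _load_model above (find a body by name, set pos/quat attributes, append a child) can be reproduced with the standard-library ElementTree; the MJCF fragment below is a toy stand-in for robosuite's full model, under that assumption.

import xml.etree.ElementTree as ET

# Toy worldbody; robosuite builds this from full MJCF assets instead.
worldbody = ET.fromstring("<worldbody><body name='left_hand'/></worldbody>")
hole = ET.Element('body', name='hole')
hole.set('quat', '0 0 0.707 0.707')   # same attribute edits as above
hole.set('pos', '0.11 0 0.18')
worldbody.find(".//body[@name='left_hand']").append(hole)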
mlperf/training
reinforcement/tensorflow/minigo/rl_loop/train_and_validate.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/rl_loop/train_and_validate.py#L123-L131
def validate_pro(): """Validate on professional data.""" cmd = ['python3', 'validate.py', FLAGS.pro_dataset, '--use_tpu', '--tpu_name={}'.format(TPU_NAME), '--work_dir={}'.format(fsdb.working_dir()), '--flagfile=rl_loop/distributed_flags', '--validate_name=pro'] mask_flags.run(cmd)
[ "def", "validate_pro", "(", ")", ":", "cmd", "=", "[", "'python3'", ",", "'validate.py'", ",", "FLAGS", ".", "pro_dataset", ",", "'--use_tpu'", ",", "'--tpu_name={}'", ".", "format", "(", "TPU_NAME", ")", ",", "'--work_dir={}'", ".", "format", "(", "fsdb", ".", "working_dir", "(", ")", ")", ",", "'--flagfile=rl_loop/distributed_flags'", ",", "'--validate_name=pro'", "]", "mask_flags", ".", "run", "(", "cmd", ")" ]
Validate on professional data.
[ "Validate", "on", "professional", "data", "." ]
python
train
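validate_pro above is essentially argv assembly plus a masked-flags runner; a hedged sketch of the same pattern with plain subprocess, where the flag names and paths are placeholders rather than minigo's real interface, looks like this.

import subprocess

def run_validation(dataset_path, work_dir, tpu_name):
    # Build argv as a list so no shell quoting is needed.
    cmd = ['python3', 'validate.py', dataset_path,
           '--use_tpu',
           '--tpu_name={}'.format(tpu_name),
           '--work_dir={}'.format(work_dir),
           '--validate_name=pro']
    return subprocess.run(cmd, check=True)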
fermiPy/fermipy
fermipy/jobs/link.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/link.py#L579-L587
def _register_self(self, logfile, key=JobDetails.topkey, status=JobStatus.unknown): """Runs this link, captures output to logfile, and records the job in self.jobs""" fullkey = JobDetails.make_fullkey(self.full_linkname, key) if fullkey in self.jobs: job_details = self.jobs[fullkey] job_details.status = status else: job_details = self._register_job(key, self.args, logfile, status)
[ "def", "_register_self", "(", "self", ",", "logfile", ",", "key", "=", "JobDetails", ".", "topkey", ",", "status", "=", "JobStatus", ".", "unknown", ")", ":", "fullkey", "=", "JobDetails", ".", "make_fullkey", "(", "self", ".", "full_linkname", ",", "key", ")", "if", "fullkey", "in", "self", ".", "jobs", ":", "job_details", "=", "self", ".", "jobs", "[", "fullkey", "]", "job_details", ".", "status", "=", "status", "else", ":", "job_details", "=", "self", ".", "_register_job", "(", "key", ",", "self", ".", "args", ",", "logfile", ",", "status", ")" ]
Runs this link, captures output to logfile, and records the job in self.jobs
[ "Runs", "this", "link", "captures", "output", "to", "logfile", "and", "records", "the", "job", "in", "self", ".", "jobs" ]
python
train
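The register-or-update branching in _register_self reduces to a keyed registry; a self-contained sketch with plain dicts (fermipy's JobDetails objects are modeled as dicts here, which is an assumption) follows.

def register_job(jobs, fullkey, logfile, status):
    # Update an existing job's status, or create a fresh record.
    if fullkey in jobs:
        jobs[fullkey]['status'] = status
    else:
        jobs[fullkey] = {'logfile': logfile, 'status': status}
    return jobs[fullkey]

jobs = {}
register_job(jobs, 'link.top', 'link.log', 'unknown')
register_job(jobs, 'link.top', 'link.log', 'done')  # updates in place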
djgagne/hagelslag
hagelslag/processing/EnsembleProducts.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/EnsembleProducts.py#L793-L818
def init_file(self, filename, time_units="seconds since 1970-01-01T00:00"): """ Initializes netCDF file for writing Args: filename: Name of the netCDF file time_units: Units for the time variable in format "<time> since <date string>" Returns: Dataset object """ if os.access(filename, os.R_OK): out_data = Dataset(filename, "r+") else: out_data = Dataset(filename, "w") if len(self.data.shape) == 2: for d, dim in enumerate(["y", "x"]): out_data.createDimension(dim, self.data.shape[d]) else: for d, dim in enumerate(["y", "x"]): out_data.createDimension(dim, self.data.shape[d+1]) out_data.createDimension("time", len(self.times)) time_var = out_data.createVariable("time", "i8", ("time",)) time_var[:] = date2num(self.times.to_pydatetime(), time_units) time_var.units = time_units out_data.Conventions = "CF-1.6" return out_data
[ "def", "init_file", "(", "self", ",", "filename", ",", "time_units", "=", "\"seconds since 1970-01-01T00:00\"", ")", ":", "if", "os", ".", "access", "(", "filename", ",", "os", ".", "R_OK", ")", ":", "out_data", "=", "Dataset", "(", "filename", ",", "\"r+\"", ")", "else", ":", "out_data", "=", "Dataset", "(", "filename", ",", "\"w\"", ")", "if", "len", "(", "self", ".", "data", ".", "shape", ")", "==", "2", ":", "for", "d", ",", "dim", "in", "enumerate", "(", "[", "\"y\"", ",", "\"x\"", "]", ")", ":", "out_data", ".", "createDimension", "(", "dim", ",", "self", ".", "data", ".", "shape", "[", "d", "]", ")", "else", ":", "for", "d", ",", "dim", "in", "enumerate", "(", "[", "\"y\"", ",", "\"x\"", "]", ")", ":", "out_data", ".", "createDimension", "(", "dim", ",", "self", ".", "data", ".", "shape", "[", "d", "+", "1", "]", ")", "out_data", ".", "createDimension", "(", "\"time\"", ",", "len", "(", "self", ".", "times", ")", ")", "time_var", "=", "out_data", ".", "createVariable", "(", "\"time\"", ",", "\"i8\"", ",", "(", "\"time\"", ",", ")", ")", "time_var", "[", ":", "]", "=", "date2num", "(", "self", ".", "times", ".", "to_pydatetime", "(", ")", ",", "time_units", ")", "time_var", ".", "units", "=", "time_units", "out_data", ".", "Conventions", "=", "\"CF-1.6\"", "return", "out_data" ]
Initializes netCDF file for writing Args: filename: Name of the netCDF file time_units: Units for the time variable in format "<time> since <date string>" Returns: Dataset object
[ "Initializes", "netCDF", "file", "for", "writing" ]
python
train
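A standalone version of the file initialization above, assuming the netCDF4 package; dimension sizes are passed in directly and the read-or-create branch is dropped to keep the sketch short.

from datetime import datetime, timedelta
from netCDF4 import Dataset, date2num

def init_file(filename, times, ny, nx,
              time_units="seconds since 1970-01-01T00:00"):
    out_data = Dataset(filename, "w")
    out_data.createDimension("y", ny)
    out_data.createDimension("x", nx)
    out_data.createDimension("time", len(times))
    time_var = out_data.createVariable("time", "i8", ("time",))
    time_var[:] = date2num(times, time_units)   # encode datetimes as ints
    time_var.units = time_units
    out_data.Conventions = "CF-1.6"
    return out_data

times = [datetime(2024, 1, 1) + timedelta(hours=h) for h in range(3)]
init_file("example.nc", times, ny=10, nx=10).close()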
saltstack/salt
salt/cloud/clouds/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vmware.py#L3278-L3323
def rescan_hba(kwargs=None, call=None): ''' To rescan a specified HBA or all the HBAs on the Host System CLI Example: .. code-block:: bash salt-cloud -f rescan_hba my-vmware-config host="hostSystemName" salt-cloud -f rescan_hba my-vmware-config hba="hbaDeviceName" host="hostSystemName" ''' if call != 'function': raise SaltCloudSystemExit( 'The rescan_hba function must be called with ' '-f or --function.' ) hba = kwargs.get('hba') if kwargs and 'hba' in kwargs else None host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None if not host_name: raise SaltCloudSystemExit( 'You must specify name of the host system.' ) host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) try: if hba: log.info('Rescanning HBA %s on host %s', hba, host_name) host_ref.configManager.storageSystem.RescanHba(hba) ret = 'rescanned HBA {0}'.format(hba) else: log.info('Rescanning all HBAs on host %s', host_name) host_ref.configManager.storageSystem.RescanAllHba() ret = 'rescanned all HBAs' except Exception as exc: log.error( 'Error while rescanning HBA on host %s: %s', host_name, exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return {host_name: 'failed to rescan HBA'} return {host_name: ret}
[ "def", "rescan_hba", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The rescan_hba function must be called with '", "'-f or --function.'", ")", "hba", "=", "kwargs", ".", "get", "(", "'hba'", ")", "if", "kwargs", "and", "'hba'", "in", "kwargs", "else", "None", "host_name", "=", "kwargs", ".", "get", "(", "'host'", ")", "if", "kwargs", "and", "'host'", "in", "kwargs", "else", "None", "if", "not", "host_name", ":", "raise", "SaltCloudSystemExit", "(", "'You must specify name of the host system.'", ")", "host_ref", "=", "salt", ".", "utils", ".", "vmware", ".", "get_mor_by_property", "(", "_get_si", "(", ")", ",", "vim", ".", "HostSystem", ",", "host_name", ")", "try", ":", "if", "hba", ":", "log", ".", "info", "(", "'Rescanning HBA %s on host %s'", ",", "hba", ",", "host_name", ")", "host_ref", ".", "configManager", ".", "storageSystem", ".", "RescanHba", "(", "hba", ")", "ret", "=", "'rescanned HBA {0}'", ".", "format", "(", "hba", ")", "else", ":", "log", ".", "info", "(", "'Rescanning all HBAs on host %s'", ",", "host_name", ")", "host_ref", ".", "configManager", ".", "storageSystem", ".", "RescanAllHba", "(", ")", "ret", "=", "'rescanned all HBAs'", "except", "Exception", "as", "exc", ":", "log", ".", "error", "(", "'Error while rescanning HBA on host %s: %s'", ",", "host_name", ",", "exc", ",", "# Show the traceback if the debug logging level is enabled", "exc_info_on_loglevel", "=", "logging", ".", "DEBUG", ")", "return", "{", "host_name", ":", "'failed to rescan HBA'", "}", "return", "{", "host_name", ":", "ret", "}" ]
To rescan a specified HBA or all the HBAs on the Host System CLI Example: .. code-block:: bash salt-cloud -f rescan_hba my-vmware-config host="hostSystemName" salt-cloud -f rescan_hba my-vmware-config hba="hbaDeviceName" host="hostSystemName"
[ "To", "rescan", "a", "specified", "HBA", "or", "all", "the", "HBAs", "on", "the", "Host", "System" ]
python
train
openego/ding0
ding0/grid/mv_grid/util/data_input.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/util/data_input.py#L81-L105
def _parse_depot_section(f): """Parse TSPLIB DEPOT_SECTION data part from file descriptor f Args ---- f : str File descriptor Returns ------- int the single depot id """ depots = [] for line in f: line = strip(line) if line == '-1' or line == 'EOF': # End of section break else: depots.append(line) if len(depots) != 1: raise ParseException('One and only one depot is supported') return int(depots[0])
[ "def", "_parse_depot_section", "(", "f", ")", ":", "depots", "=", "[", "]", "for", "line", "in", "f", ":", "line", "=", "strip", "(", "line", ")", "if", "line", "==", "'-1'", "or", "line", "==", "'EOF'", ":", "# End of section", "break", "else", ":", "depots", ".", "append", "(", "line", ")", "if", "len", "(", "depots", ")", "!=", "1", ":", "raise", "ParseException", "(", "'One and only one depot is supported'", ")", "return", "int", "(", "depots", "[", "0", "]", ")" ]
Parse TSPLIB DEPOT_SECTION data part from file descriptor f Args ---- f : str File descriptor Returns ------- int the single depot id
[ "Parse", "TSPLIB", "DEPOT_SECTION", "data", "part", "from", "file", "descriptor", "f", "Args", "----", "f", ":", "str", "File", "descriptor", "Returns", "-------", "int", "the", "single", "depot", "id" ]
python
train
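The DEPOT_SECTION grammar above (entries until a -1 or EOF terminator, exactly one depot allowed) is easy to exercise in isolation; here is a sketch with the section supplied as a list of lines.

def parse_depot_section(lines):
    depots = []
    for line in lines:
        line = line.strip()
        if line in ('-1', 'EOF'):   # end of section
            break
        depots.append(line)
    if len(depots) != 1:
        raise ValueError('One and only one depot is supported')
    return int(depots[0])

assert parse_depot_section(['  7  ', '-1', 'EOF']) == 7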
qualisys/qualisys_python_sdk
qtm/discovery.py
https://github.com/qualisys/qualisys_python_sdk/blob/127d7eeebc2b38b5cafdfa5d1d0198437fedd274/qtm/discovery.py#L29-L34
def connection_made(self, transport): """ On socket creation """ self.transport = transport sock = transport.get_extra_info("socket") self.port = sock.getsockname()[1]
[ "def", "connection_made", "(", "self", ",", "transport", ")", ":", "self", ".", "transport", "=", "transport", "sock", "=", "transport", ".", "get_extra_info", "(", "\"socket\"", ")", "self", ".", "port", "=", "sock", ".", "getsockname", "(", ")", "[", "1", "]" ]
On socket creation
[ "On", "socket", "creation" ]
python
valid
andreikop/qutepart
qutepart/__init__.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/__init__.py#L1468-L1483
def _onShortcutPasteLine(self): """Paste lines from the clipboard """ lines = self.lines[self._selectedLinesSlice()] text = QApplication.clipboard().text() if text: with self: if self.textCursor().hasSelection(): startBlockNumber, endBlockNumber = self._selectedBlockNumbers() del self.lines[self._selectedLinesSlice()] self.lines.insert(startBlockNumber, text) else: line, col = self.cursorPosition if col > 0: line = line + 1 self.lines.insert(line, text)
[ "def", "_onShortcutPasteLine", "(", "self", ")", ":", "lines", "=", "self", ".", "lines", "[", "self", ".", "_selectedLinesSlice", "(", ")", "]", "text", "=", "QApplication", ".", "clipboard", "(", ")", ".", "text", "(", ")", "if", "text", ":", "with", "self", ":", "if", "self", ".", "textCursor", "(", ")", ".", "hasSelection", "(", ")", ":", "startBlockNumber", ",", "endBlockNumber", "=", "self", ".", "_selectedBlockNumbers", "(", ")", "del", "self", ".", "lines", "[", "self", ".", "_selectedLinesSlice", "(", ")", "]", "self", ".", "lines", ".", "insert", "(", "startBlockNumber", ",", "text", ")", "else", ":", "line", ",", "col", "=", "self", ".", "cursorPosition", "if", "col", ">", "0", ":", "line", "=", "line", "+", "1", "self", ".", "lines", ".", "insert", "(", "line", ",", "text", ")" ]
Paste lines from the clipboard
[ "Paste", "lines", "from", "the", "clipboard" ]
python
train
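The branching in _onShortcutPasteLine (replace the selected slice, else insert below the cursor when the column is past 0) can be modeled on a plain list of lines; the editor machinery is stripped out, which is a simplification.

def paste_lines(lines, text, selection=None, cursor_line=0, cursor_col=0):
    if selection is not None:
        start, end = selection
        del lines[start:end + 1]          # drop the selected lines
        lines.insert(start, text)
    else:
        target = cursor_line + 1 if cursor_col > 0 else cursor_line
        lines.insert(target, text)
    return lines

assert paste_lines(['a', 'b', 'c'], 'X', selection=(1, 2)) == ['a', 'X']
assert paste_lines(['a', 'b'], 'X', cursor_col=3) == ['a', 'X', 'b']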
inveniosoftware-contrib/invenio-groups
invenio_groups/models.py
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/models.py#L294-L301
def query_by_names(cls, names): """Query group by a list of group names. :param list names: List of the group names. :returns: Query object. """ assert isinstance(names, list) return cls.query.filter(cls.name.in_(names))
[ "def", "query_by_names", "(", "cls", ",", "names", ")", ":", "assert", "isinstance", "(", "names", ",", "list", ")", "return", "cls", ".", "query", ".", "filter", "(", "cls", ".", "name", ".", "in_", "(", "names", ")", ")" ]
Query group by a list of group names. :param list names: List of the group names. :returns: Query object.
[ "Query", "group", "by", "a", "list", "of", "group", "names", "." ]
python
valid
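query_by_names is a thin wrapper over SQLAlchemy's IN-list operator; a self-contained demonstration of that operator follows (SQLAlchemy 1.4+ assumed; the Group model here is a toy, not invenio's).

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Group(Base):
    __tablename__ = 'groups'
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([Group(name='admins'), Group(name='curators')])
    # The IN-list filter at the heart of query_by_names.
    rows = session.query(Group).filter(Group.name.in_(['admins'])).all()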
sdispater/cleo
cleo/commands/command.py
https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L148-L155
def ask(self, question, default=None): """ Prompt the user for input. """ if isinstance(question, Question): return self._io.ask_question(question) return self._io.ask(question, default)
[ "def", "ask", "(", "self", ",", "question", ",", "default", "=", "None", ")", ":", "if", "isinstance", "(", "question", ",", "Question", ")", ":", "return", "self", ".", "_io", ".", "ask_question", "(", "question", ")", "return", "self", ".", "_io", ".", "ask", "(", "question", ",", "default", ")" ]
Prompt the user for input.
[ "Prompt", "the", "user", "for", "input", "." ]
python
train
cgrok/clashroyale
clashroyale/official_api/client.py
https://github.com/cgrok/clashroyale/blob/2618f4da22a84ad3e36d2446e23436d87c423163/clashroyale/official_api/client.py#L417-L431
def search_tournaments(self, name: str, **params: keys): """Search for a tournament by its name Parameters ---------- name: str The name of a tournament \*\*limit: Optional[int] = None Limit the number of items returned in the response \*\*timeout: Optional[int] = None Custom timeout that overwrites Client.timeout """ url = self.api.TOURNAMENT params['name'] = name return self._get_model(url, PartialTournament, **params)
[ "def", "search_tournaments", "(", "self", ",", "name", ":", "str", ",", "*", "*", "params", ":", "keys", ")", ":", "url", "=", "self", ".", "api", ".", "TOURNAMENT", "params", "[", "'name'", "]", "=", "name", "return", "self", ".", "_get_model", "(", "url", ",", "PartialTournament", ",", "*", "*", "params", ")" ]
Search for a tournament by its name Parameters ---------- name: str The name of a tournament \*\*limit: Optional[int] = None Limit the number of items returned in the response \*\*timeout: Optional[int] = None Custom timeout that overwrites Client.timeout
[ "Search", "for", "a", "tournament", "by", "its", "name" ]
python
valid
cloud-custodian/cloud-custodian
c7n/log.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/log.py#L123-L129
def flush(self): """Ensure all logging output has been flushed.""" if self.shutdown: return self.flush_buffers(force=True) self.queue.put(FLUSH_MARKER) self.queue.join()
[ "def", "flush", "(", "self", ")", ":", "if", "self", ".", "shutdown", ":", "return", "self", ".", "flush_buffers", "(", "force", "=", "True", ")", "self", ".", "queue", ".", "put", "(", "FLUSH_MARKER", ")", "self", ".", "queue", ".", "join", "(", ")" ]
Ensure all logging output has been flushed.
[ "Ensure", "all", "logging", "output", "has", "been", "flushed", "." ]
python
train
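The flush above relies on a sentinel pushed through the worker queue plus queue.join(); the same handshake in miniature, with hypothetical names, looks like this.

import queue
import threading

log_queue = queue.Queue()
FLUSH_MARKER = object()   # sentinel; real records are shipped, it is not

def worker():
    while True:
        item = log_queue.get()
        if item is not FLUSH_MARKER:
            pass   # deliver the record to the backend here
        log_queue.task_done()

threading.Thread(target=worker, daemon=True).start()
log_queue.put('a log record')
log_queue.put(FLUSH_MARKER)
log_queue.join()   # returns once every queued item was processed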
dgraph-io/pydgraph
pydgraph/client.py
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/client.py#L71-L87
def alter(self, operation, timeout=None, metadata=None, credentials=None): """Runs a modification via this client.""" new_metadata = self.add_login_metadata(metadata) try: return self.any_client().alter(operation, timeout=timeout, metadata=new_metadata, credentials=credentials) except Exception as error: if util.is_jwt_expired(error): self.retry_login() new_metadata = self.add_login_metadata(metadata) return self.any_client().alter(operation, timeout=timeout, metadata=new_metadata, credentials=credentials) else: raise error
[ "def", "alter", "(", "self", ",", "operation", ",", "timeout", "=", "None", ",", "metadata", "=", "None", ",", "credentials", "=", "None", ")", ":", "new_metadata", "=", "self", ".", "add_login_metadata", "(", "metadata", ")", "try", ":", "return", "self", ".", "any_client", "(", ")", ".", "alter", "(", "operation", ",", "timeout", "=", "timeout", ",", "metadata", "=", "new_metadata", ",", "credentials", "=", "credentials", ")", "except", "Exception", "as", "error", ":", "if", "util", ".", "is_jwt_expired", "(", "error", ")", ":", "self", ".", "retry_login", "(", ")", "new_metadata", "=", "self", ".", "add_login_metadata", "(", "metadata", ")", "return", "self", ".", "any_client", "(", ")", ".", "alter", "(", "operation", ",", "timeout", "=", "timeout", ",", "metadata", "=", "new_metadata", ",", "credentials", "=", "credentials", ")", "else", ":", "raise", "error" ]
Runs a modification via this client.
[ "Runs", "a", "modification", "via", "this", "client", "." ]
python
train
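alter above wraps one retry around credential expiry; the generic shape of that pattern, with the Dgraph-specific pieces abstracted into callables (an assumption, not pydgraph's API), is sketched below.

def call_with_relogin(call, is_expired, relogin):
    try:
        return call()
    except Exception as error:
        if is_expired(error):
            relogin()        # refresh credentials once
            return call()    # then retry the original operation
        raise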
aboSamoor/polyglot
polyglot/base.py
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/base.py#L159-L166
def read(self, size=None): """ Read `size` bytes.""" if size is None: return self.buf.read() + self.open_file.read() contents = self.buf.read(size) if len(contents) < size: contents += self.open_file.read(size - len(contents)) return contents
[ "def", "read", "(", "self", ",", "size", "=", "None", ")", ":", "if", "size", "is", "None", ":", "return", "self", ".", "buf", ".", "read", "(", ")", "+", "self", ".", "open_file", ".", "read", "(", ")", "contents", "=", "self", ".", "buf", ".", "read", "(", "size", ")", "if", "len", "(", "contents", ")", "<", "size", ":", "contents", "+=", "self", ".", "open_file", ".", "read", "(", "size", "-", "len", "(", "contents", ")", ")", "return", "contents" ]
Read `size` bytes.
[ "Read", "size", "bytes", "." ]
python
train
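The read above drains an in-memory buffer before touching the underlying file; a self-contained version over two BytesIO objects shows the splice.

import io

class ChainedReader(object):
    def __init__(self, buf, open_file):
        self.buf = buf
        self.open_file = open_file

    def read(self, size=None):
        if size is None:
            return self.buf.read() + self.open_file.read()
        contents = self.buf.read(size)
        if len(contents) < size:   # buffer exhausted, top up from the file
            contents += self.open_file.read(size - len(contents))
        return contents

reader = ChainedReader(io.BytesIO(b'head-'), io.BytesIO(b'tail'))
assert reader.read(7) == b'head-ta'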
saltstack/salt
salt/pillar/vmware_pillar.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/vmware_pillar.py#L520-L529
def _serializer(obj): ''' helper function to serialize some objects for prettier return ''' import datetime if isinstance(obj, datetime.datetime): if obj.utcoffset() is not None: obj = obj - obj.utcoffset() return obj.__str__() return obj
[ "def", "_serializer", "(", "obj", ")", ":", "import", "datetime", "if", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", ":", "if", "obj", ".", "utcoffset", "(", ")", "is", "not", "None", ":", "obj", "=", "obj", "-", "obj", ".", "utcoffset", "(", ")", "return", "obj", ".", "__str__", "(", ")", "return", "obj" ]
helper function to serialize some objects for prettier return
[ "helper", "function", "to", "serialize", "some", "objects", "for", "prettier", "return" ]
python
train
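_serializer normalizes timezone-aware datetimes to UTC before stringifying; wired into json.dumps as a default hook (the hook name is illustrative), that looks like this.

import datetime
import json

def serializer(obj):
    if isinstance(obj, datetime.datetime):
        if obj.utcoffset() is not None:
            obj = obj - obj.utcoffset()   # shift to UTC
        return str(obj)
    raise TypeError('not serializable: {!r}'.format(obj))

moment = datetime.datetime(2024, 1, 1, 12, 0,
                           tzinfo=datetime.timezone.utc)
print(json.dumps({'when': moment}, default=serializer))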
peeringdb/peeringdb-py
peeringdb/commands.py
https://github.com/peeringdb/peeringdb-py/blob/cf2060a1d5ef879a01cf849e54b7756909ab2661/peeringdb/commands.py#L16-L24
def _handler(func): "Decorate a command handler" def _wrapped(*a, **k): r = func(*a, **k) if r is None: r = 0 return r return staticmethod(_wrapped)
[ "def", "_handler", "(", "func", ")", ":", "def", "_wrapped", "(", "*", "a", ",", "*", "*", "k", ")", ":", "r", "=", "func", "(", "*", "a", ",", "*", "*", "k", ")", "if", "r", "is", "None", ":", "r", "=", "0", "return", "r", "return", "staticmethod", "(", "_wrapped", ")" ]
Decorate a command handler
[ "Decorate", "a", "command", "handler" ]
python
train
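_handler turns command functions into staticmethods whose None return reads as exit code 0; the same decorator exercised on a toy command class.

def handler(func):
    def wrapped(*args, **kwargs):
        result = func(*args, **kwargs)
        return 0 if result is None else result
    return staticmethod(wrapped)

class Commands(object):
    @handler
    def sync(**kwargs):
        pass   # no explicit return, surfaced as exit code 0

assert Commands.sync() == 0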
learningequality/ricecooker
ricecooker/config.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/config.py#L194-L203
def get_restore_path(filename): """ get_restore_path: returns path to file in restoration points directory Args: filename (str): Name of file to store Returns: string path to file """ path = os.path.join(RESTORE_DIRECTORY, FILE_STORE_LOCATION) if not os.path.exists(path): os.makedirs(path) return os.path.join(path, filename + '.pickle')
[ "def", "get_restore_path", "(", "filename", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "RESTORE_DIRECTORY", ",", "FILE_STORE_LOCATION", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "return", "os", ".", "path", ".", "join", "(", "path", ",", "filename", "+", "'.pickle'", ")" ]
get_restore_path: returns path to file in restoration points directory Args: filename (str): Name of file to store Returns: string path to file
[ "get_restore_path", ":", "returns", "path", "to", "file", "in", "restoration", "points", "directory", "Args", ":", "filename", "(", "str", ")", ":", "Name", "of", "file", "to", "store", "Returns", ":", "string", "path", "to", "file" ]
python
train
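get_restore_path's makedirs-then-join dance can be compressed with exist_ok on Python 3; here is a sketch with the directory passed in rather than read from module constants.

import os

def get_restore_path(base_dir, filename):
    os.makedirs(base_dir, exist_ok=True)   # idempotent on Python 3
    return os.path.join(base_dir, filename + '.pickle')

assert get_restore_path('/tmp/restore', 'tree').endswith('tree.pickle')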
jborean93/ntlm-auth
ntlm_auth/compute_response.py
https://github.com/jborean93/ntlm-auth/blob/2c7cd81516d9bfd42e8ff473a534d876b21ebb38/ntlm_auth/compute_response.py#L382-L430
def _get_NTLMv2_temp(timestamp, client_challenge, target_info): """ [MS-NLMP] v28.0 2016-07-14 2.2.2.7 NTLMv2_CLIENT_CHALLENGE - variable length The NTLMv2_CLIENT_CHALLENGE structure defines the client challenge in the AUTHENTICATE_MESSAGE. This structure is used only when NTLM v2 authentication is configured and is transported in the NTLMv2_RESPONSE structure. The method to create this structure is defined in 3.3.2 NTLMv2 Authentication. In this method this variable is known as the temp value. The target_info variable corresponds to the ServerName variable used in that documentation. This is in reality a lot more than just the ServerName and contains the AV_PAIRS structure we need to transport with the message like Channel Binding tokens and others. By default this will be the target_info returned from the CHALLENGE_MESSAGE plus MSV_AV_CHANNEL_BINDINGS if specified otherwise it is a new target_info set with MSV_AV_TIMESTAMP to the current time. :param timestamp: An 8-byte timestamp in windows format, 100 nanoseconds since 1601-01-01 :param client_challenge: A random 8-byte response generated by the `client for the AUTHENTICATE_MESSAGE :param target_info: The target_info structure from the CHALLENGE_MESSAGE with the CBT attached if required :return temp: The CLIENT_CHALLENGE structure that will be added to the NtChallengeResponse structure """ resp_type = b'\x01' hi_resp_type = b'\x01' reserved1 = b'\x00' * 2 reserved2 = b'\x00' * 4 reserved3 = b'\x00' * 4 # This byte is not in the structure defined in 2.2.2.7 but is in the # computation guide, works with it present reserved4 = b'\x00' * 4 temp = resp_type temp += hi_resp_type temp += reserved1 temp += reserved2 temp += timestamp temp += client_challenge temp += reserved3 temp += target_info.pack() temp += reserved4 return temp
[ "def", "_get_NTLMv2_temp", "(", "timestamp", ",", "client_challenge", ",", "target_info", ")", ":", "resp_type", "=", "b'\\x01'", "hi_resp_type", "=", "b'\\x01'", "reserved1", "=", "b'\\x00'", "*", "2", "reserved2", "=", "b'\\x00'", "*", "4", "reserved3", "=", "b'\\x00'", "*", "4", "# This byte is not in the structure defined in 2.2.2.7 but is in the", "# computation guide, works with it present", "reserved4", "=", "b'\\x00'", "*", "4", "temp", "=", "resp_type", "temp", "+=", "hi_resp_type", "temp", "+=", "reserved1", "temp", "+=", "reserved2", "temp", "+=", "timestamp", "temp", "+=", "client_challenge", "temp", "+=", "reserved3", "temp", "+=", "target_info", ".", "pack", "(", ")", "temp", "+=", "reserved4", "return", "temp" ]
[MS-NLMP] v28.0 2016-07-14 2.2.2.7 NTLMv2_CLIENT_CHALLENGE - variable length The NTLMv2_CLIENT_CHALLENGE structure defines the client challenge in the AUTHENTICATE_MESSAGE. This structure is used only when NTLM v2 authentication is configured and is transported in the NTLMv2_RESPONSE structure. The method to create this structure is defined in 3.3.2 NTLMv2 Authentication. In this method this variable is known as the temp value. The target_info variable corresponds to the ServerName variable used in that documentation. This is in reality a lot more than just the ServerName and contains the AV_PAIRS structure we need to transport with the message like Channel Binding tokens and others. By default this will be the target_info returned from the CHALLENGE_MESSAGE plus MSV_AV_CHANNEL_BINDINGS if specified otherwise it is a new target_info set with MSV_AV_TIMESTAMP to the current time. :param timestamp: An 8-byte timestamp in windows format, 100 nanoseconds since 1601-01-01 :param client_challenge: A random 8-byte response generated by the `client for the AUTHENTICATE_MESSAGE :param target_info: The target_info structure from the CHALLENGE_MESSAGE with the CBT attached if required :return temp: The CLIENT_CHALLENGE structure that will be added to the NtChallengeResponse structure
[ "[", "MS", "-", "NLMP", "]", "v28", ".", "0", "2016", "-", "07", "-", "14" ]
python
train
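The byte layout spelled out in _get_NTLMv2_temp can be checked with a tiny standalone builder; the FILETIME conversion below (100 ns ticks since 1601-01-01) is standard, while the packed target_info is simplified to raw bytes.

import struct
import time

def ntlmv2_temp(timestamp, client_challenge, target_info_bytes):
    return (b'\x01'              # Resp type
            + b'\x01'            # Hi resp type
            + b'\x00' * 2        # Reserved1
            + b'\x00' * 4        # Reserved2
            + timestamp          # 8-byte Windows FILETIME
            + client_challenge   # 8 random client bytes
            + b'\x00' * 4        # Reserved3
            + target_info_bytes  # packed AV_PAIRS, simplified here
            + b'\x00' * 4)       # Reserved4

# Seconds between 1601-01-01 and 1970-01-01, then into 100 ns ticks.
filetime = struct.pack('<q', int((time.time() + 11644473600) * 10000000))
temp = ntlmv2_temp(filetime, b'\x11' * 8, b'\x00' * 4)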
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1378-L1393
def delete_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False): """Delete the DCNM OUT partition and update the result. """ res = fw_const.DCNM_OUT_PART_DEL_SUCCESS tenant_name = fw_dict.get('tenant_name') ret = True try: self._delete_partition(tenant_id, tenant_name) except Exception as exc: LOG.error("deletion of Out Partition failed for tenant " "%(tenant)s, Exception %(exc)s", {'tenant': tenant_id, 'exc': str(exc)}) res = fw_const.DCNM_OUT_PART_DEL_FAIL ret = False self.update_fw_db_result(tenant_id, dcnm_status=res) LOG.info("Out partition deleted") return ret
[ "def", "delete_dcnm_out_part", "(", "self", ",", "tenant_id", ",", "fw_dict", ",", "is_fw_virt", "=", "False", ")", ":", "res", "=", "fw_const", ".", "DCNM_OUT_PART_DEL_SUCCESS", "tenant_name", "=", "fw_dict", ".", "get", "(", "'tenant_name'", ")", "ret", "=", "True", "try", ":", "self", ".", "_delete_partition", "(", "tenant_id", ",", "tenant_name", ")", "except", "Exception", "as", "exc", ":", "LOG", ".", "error", "(", "\"deletion of Out Partition failed for tenant \"", "\"%(tenant)s, Exception %(exc)s\"", ",", "{", "'tenant'", ":", "tenant_id", ",", "'exc'", ":", "str", "(", "exc", ")", "}", ")", "res", "=", "fw_const", ".", "DCNM_OUT_PART_DEL_FAIL", "ret", "=", "False", "self", ".", "update_fw_db_result", "(", "tenant_id", ",", "dcnm_status", "=", "res", ")", "LOG", ".", "info", "(", "\"Out partition deleted\"", ")", "return", "ret" ]
Delete the DCNM OUT partition and update the result.
[ "Delete", "the", "DCNM", "OUT", "partition", "and", "update", "the", "result", "." ]
python
train
tmr232/Sark
sark/structure.py
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/structure.py#L84-L100
def get_struct(name): """Get a struct by its name. Args: name: The name of the struct Returns: The struct's id Raises: exceptions.SarkStructNotFound: if the struct does not exist. """ sid = idc.GetStrucIdByName(name) if sid == idaapi.BADADDR: raise exceptions.SarkStructNotFound() return sid
[ "def", "get_struct", "(", "name", ")", ":", "sid", "=", "idc", ".", "GetStrucIdByName", "(", "name", ")", "if", "sid", "==", "idaapi", ".", "BADADDR", ":", "raise", "exceptions", ".", "SarkStructNotFound", "(", ")", "return", "sid" ]
Get a struct by its name. Args: name: The name of the struct Returns: The struct's id Raises: exceptions.SarkStructNotFound: if the struct does not exist.
[ "Get", "a", "struct", "by", "its", "name", "." ]
python
train
secdev/scapy
scapy/contrib/pnio_rpc.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/pnio_rpc.py#L445-L453
def get_response(self): """Generate the response block of this request. Careful: it only sets the fields which can be set from the request """ res = IODWriteRes() for field in ["seqNum", "ARUUID", "API", "slotNumber", "subslotNumber", "index"]: res.setfieldval(field, self.getfieldval(field)) return res
[ "def", "get_response", "(", "self", ")", ":", "res", "=", "IODWriteRes", "(", ")", "for", "field", "in", "[", "\"seqNum\"", ",", "\"ARUUID\"", ",", "\"API\"", ",", "\"slotNumber\"", ",", "\"subslotNumber\"", ",", "\"index\"", "]", ":", "res", ".", "setfieldval", "(", "field", ",", "self", ".", "getfieldval", "(", "field", ")", ")", "return", "res" ]
Generate the response block of this request. Careful: it only sets the fields which can be set from the request
[ "Generate", "the", "response", "block", "of", "this", "request", ".", "Careful", ":", "it", "only", "sets", "the", "fields", "which", "can", "be", "set", "from", "the", "request" ]
python
train
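get_response copies a fixed set of fields from request to response; with scapy's getfieldval/setfieldval modeled as dict access (an approximation of the real packet API), the pattern is just a filtered copy.

def make_response(request, fields):
    # Echo only the request-derived fields onto a fresh response.
    return {field: request[field] for field in fields}

req = {'seqNum': 3, 'ARUUID': 'uuid', 'index': 0x8000, 'payload': 'ignored'}
resp = make_response(req, ['seqNum', 'ARUUID', 'index'])
assert 'payload' not in resp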
shapiromatron/bmds
bmds/batch.py
https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/batch.py#L166-L215
def to_docx( self, filename=None, input_dataset=True, summary_table=True, recommendation_details=True, recommended_model=True, all_models=False, ): """ Write batch sessions to a Word file. Parameters ---------- filename : str or None If provided, the file is saved to this location, otherwise this method returns a docx.Document input_dataset : bool Include input dataset data table summary_table : bool Include model summary table recommendation_details : bool Include model recommendation details table recommended_model : bool Include the recommended model output and dose-response plot, if one exists all_models : bool Include all models output and dose-response plots Returns ------- bmds.Reporter The bmds.Reporter object. """ rep = Reporter() for model in self: rep.add_session( model, input_dataset, summary_table, recommendation_details, recommended_model, all_models, ) if filename: rep.save(filename) return rep
[ "def", "to_docx", "(", "self", ",", "filename", "=", "None", ",", "input_dataset", "=", "True", ",", "summary_table", "=", "True", ",", "recommendation_details", "=", "True", ",", "recommended_model", "=", "True", ",", "all_models", "=", "False", ",", ")", ":", "rep", "=", "Reporter", "(", ")", "for", "model", "in", "self", ":", "rep", ".", "add_session", "(", "model", ",", "input_dataset", ",", "summary_table", ",", "recommendation_details", ",", "recommended_model", ",", "all_models", ",", ")", "if", "filename", ":", "rep", ".", "save", "(", "filename", ")", "return", "rep" ]
Write batch sessions to a Word file. Parameters ---------- filename : str or None If provided, the file is saved to this location, otherwise this method returns a docx.Document input_dataset : bool Include input dataset data table summary_table : bool Include model summary table recommendation_details : bool Include model recommendation details table recommended_model : bool Include the recommended model output and dose-response plot, if one exists all_models : bool Include all models output and dose-response plots Returns ------- bmds.Reporter The bmds.Reporter object.
[ "Write", "batch", "sessions", "to", "a", "Word", "file", "." ]
python
train
RockFeng0/rtsf
rtsf/p_common.py
https://github.com/RockFeng0/rtsf/blob/fbc0d57edaeca86418af3942472fcc6d3e9ce591/rtsf/p_common.py#L154-L167
def until(method, timeout = 30, message=''): """Calls the method until the return value is not False.""" end_time = time.time() + timeout while True: try: value = method() if value: return value except: pass time.sleep(1) if time.time() > end_time: break raise Exception(message)
[ "def", "until", "(", "method", ",", "timeout", "=", "30", ",", "message", "=", "''", ")", ":", "end_time", "=", "time", ".", "time", "(", ")", "+", "timeout", "while", "True", ":", "try", ":", "value", "=", "method", "(", ")", "if", "value", ":", "return", "value", "except", ":", "pass", "time", ".", "sleep", "(", "1", ")", "if", "time", ".", "time", "(", ")", ">", "end_time", ":", "break", "raise", "Exception", "(", "message", ")" ]
Calls the method until the return value is not False.
[ "Calls", "the", "method", "until", "the", "return", "value", "is", "not", "False", "." ]
python
train
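until above is a classic poll-with-timeout helper; the variant below keeps the same contract but narrows the bare except to Exception, which is a small liberty with the original.

import time

def until(method, timeout=30, message=''):
    end_time = time.time() + timeout
    while True:
        try:
            value = method()
            if value:
                return value
        except Exception:
            pass             # swallow and keep polling
        time.sleep(1)
        if time.time() > end_time:
            break
    raise Exception(message)

assert until(lambda: 42, timeout=1) == 42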