Unnamed: 0 (int64, 0 to 389k) | code (string, 26 to 79.6k chars) | docstring (string, 1 to 46.9k chars)
---|---|---|
2,400 | def put_object(self, cont, obj, local_file):
try:
with salt.utils.files.fopen(local_file, 'r') as fp_:
self.conn.put_object(cont, obj, fp_)
return True
except Exception as exc:
log.error('There was an error uploading the object to Swift')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error('Code: %s: %s', exc.code, exc.msg)
log.error('Content: %s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False | Upload a file to Swift |
2,401 | def _match_serializers_by_query_arg(self, serializers):
arg_name = current_app.config.get('REST_MIMETYPE_QUERY_ARG_NAME')
if arg_name:
arg_value = request.args.get(arg_name, None)
if arg_value is None:
return None
try:
return serializers[
self.serializers_query_aliases[arg_value]]
except KeyError:
return None
return None | Match serializer by query arg. |
2,402 | def reset_to_flows(self, force=False, _meta=None):
strwarn = None
for df in self.__basic__:
if (getattr(self, df)) is None:
if force:
strwarn = ("Reset system warning - Recalculation after "
"reset not possible "
"because {} missing".format(df))
warnings.warn(strwarn, ResetWarning)
else:
raise ResetError("To few tables to recalculate the "
"system after reset ({} missing) "
"- reset can be forced by passing "
")".format(df))
if _meta:
_meta._add_modify("Reset to absolute flows")
if strwarn:
_meta._add_modify(strwarn)
[setattr(self, key, None) for key in self.__non_agg_attributes__]
return self | Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal |
2,403 | def import_all(path):
plist = []
fid = 0
while True:
try:
p = PolygonFilter(filename=path, fileid=fid)
plist.append(p)
fid += 1
except IndexError:
break
return plist | Import all polygons from a .poly file.
Returns a list of the imported polygon filters |
2,404 | def dispatch_event(self, event):
if self.config["debug"]:
self.py3_wrapper.log("received event {}".format(event))
event["index"] = event.get("index", "")
instance = event.get("instance", "")
name = event.get("name", "")
if " " in instance:
instance, index = instance.split(" ", 1)
try:
index = int(index)
except ValueError:
pass
event["index"] = index
event["instance"] = instance
if self.config["debug"]:
self.py3_wrapper.log(
"trying to dispatch event to module {}".format(
"{} {}".format(name, instance).strip()
)
)
module_name = "{} {}".format(name, instance).strip()
default_event = False
module_info = self.output_modules.get(module_name)
module = module_info["module"]
if module.allow_config_clicks:
button = event.get("button", 0)
on_click = self.on_click.get(module_name, {}).get(str(button))
if on_click:
task = EventClickTask(module_name, event, self, on_click)
self.py3_wrapper.timeout_queue_add(task)
elif button == 2:
default_event = True
task = EventTask(module_name, event, default_event, self)
self.py3_wrapper.timeout_queue_add(task) | Takes an event dict. Logs the event if needed and cleans up the dict
such as setting the index needed for composits. |
def GetKernelParams(time, flux, errors, kernel='Basic', mask=[],
giter=3, gmaxf=200, guess=None):
log.info("Optimizing the GP...")
time_copy = np.array(time)
errors_copy = np.array(errors)
time = np.delete(time, mask)
flux = np.delete(flux, mask)
errors = np.delete(errors, mask)
f = flux - savgol_filter(flux, 49, 2) + np.nanmedian(flux)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
mask = np.where((f > med + 5 * MAD) | (f < med - 5 * MAD))[0]
time = np.delete(time, mask)
flux = np.delete(flux, mask)
errors = np.delete(errors, mask)
white = np.nanmedian([np.nanstd(c) for c in Chunks(flux, 13)])
amp = np.nanstd(flux)
tau = 30.0
if kernel == 'Basic':
if guess is None:
guess = [white, amp, tau]
bounds = [[0.1 * white, 10. * white],
[1., 10000. * amp],
[0.5, 100.]]
elif kernel == 'QuasiPeriodic':
if guess is None:
guess = [white, amp, tau, 1., 20.]
bounds = [[0.1 * white, 10. * white],
[1., 10000. * amp],
[1e-5, 1e2],
[0.02, 100.]]
else:
raise ValueError('Invalid value for `kernel`.')
llbest = -np.inf
xbest = np.array(guess)
for i in range(giter):
iguess = [np.inf for g in guess]
for j, b in enumerate(bounds):
tries = 0
while (iguess[j] < b[0]) or (iguess[j] > b[1]):
iguess[j] = (1 + 0.5 * np.random.randn()) * guess[j]
tries += 1
if tries > 100:
iguess[j] = b[0] + np.random.random() * (b[1] - b[0])
break
x = fmin_l_bfgs_b(NegLnLike, iguess, approx_grad=False,
bounds=bounds, args=(time, flux, errors, kernel),
maxfun=gmaxf)
log.info('Iteration #%d/%d:' % (i + 1, giter))
log.info('   ' + x[2]['task'].decode())
log.info('   ' + 'Function calls: %d' % x[2]['funcalls'])
log.info('   ' + 'Log-likelihood: %.3e' % -x[1])
if kernel == 'Basic':
log.info('   ' + 'White noise amplitude: %.3e (%.1f x median error)' %
(x[0][0], x[0][0] / np.nanmedian(errors)))
log.info('   ' + 'Red noise amplitude: %.3e (%.1f x flux std)' %
(x[0][1], x[0][1] / np.nanstd(flux)))
log.info('   ' + 'Red noise timescale: %.2f days' % x[0][2])
elif kernel == 'QuasiPeriodic':
log.info('   ' + 'White noise amplitude: %.3e (%.1f x median error)' %
(x[0][0], x[0][0] / np.nanmedian(errors)))
log.info('   ' + 'Red noise amplitude: %.3e (%.1f x flux std)' %
(x[0][1], x[0][1] / np.nanstd(flux)))
log.info('   ' + 'Red noise timescale: %.2f days' % x[0][2])
log.info('   ' + 'Period: %.2f days' % x[0][3])
if -x[1] > llbest:
llbest = -x[1]
xbest = np.array(x[0])
return xbest | Optimizes the GP by training it on the current de-trended light curve.
Returns the white noise amplitude, red noise amplitude,
and red noise timescale.
:param array_like time: The time array
:param array_like flux: The flux array
:param array_like errors: The flux errors array
:param array_like mask: The indices to be masked when training the GP. \
Default `[]`
:param int giter: The number of iterations. Default 3
:param int gmaxf: The maximum number of function evaluations. Default 200
:param tuple guess: The guess to initialize the minimization with. \
Default :py:obj:`None` |
2,406 | def _is_attribute_property(name, klass):
try:
attributes = klass.getattr(name)
except astroid.NotFoundError:
return False
property_name = "{}.property".format(BUILTINS)
for attr in attributes:
if attr is astroid.Uninferable:
continue
try:
infered = next(attr.infer())
except astroid.InferenceError:
continue
if isinstance(infered, astroid.FunctionDef) and decorated_with_property(
infered
):
return True
if infered.pytype() == property_name:
return True
return False | Check if the given attribute *name* is a property
in the given *klass*.
It will look for `property` calls or for functions
with the given name, decorated by `property` or `property`
subclasses.
Returns ``True`` if the name is a property in the given klass,
``False`` otherwise. |
2,407 | def make_input(self):
all_files = {"ddkfile_" + str(n + 1): ddk for n, ddk in enumerate(self.ddk_filepaths)}
all_files.update({"wfkfile": self.wfk_filepath})
files_nml = {"FILES": all_files}
files = nmltostring(files_nml)
user_file = nmltostring(self.input.as_dict())
return files + user_file | Construct and write the input file of the calculation. |
2,408 | def _GetNormalizedTimestamp(self):
if self._normalized_timestamp is None:
if self._timestamp is not None:
self._normalized_timestamp = (
decimal.Decimal(self._timestamp) /
definitions.NANOSECONDS_PER_SECOND)
return self._normalized_timestamp | Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined. |
2,409 | def trace_walker(module):
for name, function in inspect.getmembers(module, inspect.isfunction):
yield None, function
for name, cls in inspect.getmembers(module, inspect.isclass):
yield cls, None
for name, method in inspect.getmembers(cls, inspect.ismethod):
yield cls, method
for name, function in inspect.getmembers(cls, inspect.isfunction):
yield cls, function
for name, accessor in inspect.getmembers(cls, lambda x: type(x) is property):
yield cls, accessor.fget
yield cls, accessor.fset
yield cls, accessor.fdel | Defines a generator used to walk into modules.
:param module: Module to walk.
:type module: ModuleType
:return: Class / Function / Method.
:rtype: object or object |
2,410 | def get_activations(self):
res = (self.added, self.removed)
self.added = set()
self.removed = set()
return res | Return a list of activations. |
2,411 | def groups(self):
if not self._groups:
self._groups = ComponentGroups(self.api_client)
return self._groups | Component groups
Special property which points to a :class:`~pylls.cachet.ComponentGroups`
instance for convenience. This instance is initialized on first call. |
2,412 | def read_requirements(path,
strict_bounds,
conda_format=False,
filter_names=None):
real_path = join(dirname(abspath(__file__)), path)
with open(real_path) as f:
reqs = _filter_requirements(f.readlines(), filter_names=filter_names,
filter_sys_version=not conda_format)
if not strict_bounds:
reqs = map(_with_bounds, reqs)
if conda_format:
reqs = map(_conda_format, reqs)
return list(reqs) | Read a requirements.txt file, expressed as a path relative to Zipline root.
Returns requirements with the pinned versions as lower bounds
if `strict_bounds` is falsey. |
def write(self, data, waitForResponse=True, timeout=5, parseError=True, writeTerm='\r\n', expectedResponseTermSeq=None):
self.log.debug('write: %s', data)
responseLines = super(GsmModem, self).write(data + writeTerm, waitForResponse=waitForResponse, timeout=timeout, expectedResponseTermSeq=expectedResponseTermSeq)
if self._writeWait > 0:
time.sleep(self._writeWait)
if waitForResponse:
cmdStatusLine = responseLines[-1]
if parseError:
if 'ERROR' in cmdStatusLine:
cmErrorMatch = self.CM_ERROR_REGEX.match(cmdStatusLine)
if cmErrorMatch:
errorType = cmErrorMatch.group(1)
errorCode = int(cmErrorMatch.group(2))
if errorCode == 515 or errorCode == 14:
self._writeWait += 0.2
self.log.debug('Device/SIM busy error; increasing write delay to %fs', self._writeWait)
time.sleep(self._writeWait)
result = self.write(data, waitForResponse, timeout, parseError, writeTerm, expectedResponseTermSeq)
self.log.debug('Command retried successfully; resetting write delay')
if errorCode == 515:
self._writeWait = 0.1
else:
self._writeWait = 0
return result
if errorType == 'CME':
raise CmeError(data, int(errorCode))
else:
raise CmsError(data, int(errorCode))
else:
raise CommandError(data)
elif cmdStatusLine == 'COMMAND NOT SUPPORT':
raise CommandError(data + ' ({0})'.format(cmdStatusLine))
return responseLines | Write data to the modem.
This method adds the ``\\r\\n`` end-of-line sequence to the data parameter, and
writes it to the modem.
:param data: Command/data to be written to the modem
:type data: str
:param waitForResponse: Whether this method should block and return the response from the modem or not
:type waitForResponse: bool
:param timeout: Maximum amount of time in seconds to wait for a response from the modem
:type timeout: int
:param parseError: If True, a CommandError is raised if the modem responds with an error (otherwise the response is returned as-is)
:type parseError: bool
:param writeTerm: The terminating sequence to append to the written data
:type writeTerm: str
:param expectedResponseTermSeq: The expected terminating sequence that marks the end of the modem's response (defaults to ``\\r\\n``)
:type expectedResponseTermSeq: str
:raise CommandError: if the command returns an error (only if parseError parameter is True)
:raise TimeoutException: if no response to the command was received from the modem
:return: A list containing the response lines from the modem, or None if waitForResponse is False
:rtype: list |
2,414 | def count(self):
return LazyOpResult(
grizzly_impl.count(
self.expr,
self.weld_type
),
WeldInt(),
0
) | Summary
Returns:
TYPE: Description |
2,415 | def _connect_control(self, event, param, arg):
log.debug("Event: %s, Param: %s" % (event, param))
if event == CbEvent.EVENT_FATALDISCON:
self.fatal_disconnect_event(param)
elif event == CbEvent.EVENT_CONNECT:
self.connect_event()
elif event == CbEvent.EVENT_DISCONNECT:
self.disconnect_event() | Is the actual callback function for :meth:`init_hw_connect_control_ex`.
:param event:
Event (:data:`CbEvent.EVENT_CONNECT`, :data:`CbEvent.EVENT_DISCONNECT` or
:data:`CbEvent.EVENT_FATALDISCON`).
:param param: Additional parameter depending on the event.
- CbEvent.EVENT_CONNECT: always 0
- CbEvent.EVENT_DISCONNECT: always 0
- CbEvent.EVENT_FATALDISCON: USB-CAN-Handle of the disconnected module
:param arg: Additional parameter defined with :meth:`init_hardware_ex` (not used in this wrapper class). |
2,416 | def get_path_regex(self, path):
for regex, func in self._regex_map:
match = re.match(regex, path)
if match:
return func(match)
return None, None | Evaluate the registered path-alias regular expressions |
2,417 | def get_previous_request(rid):
request = None
broker_req = relation_get(attribute='broker_req', rid=rid,
unit=local_unit())
if broker_req:
request_data = json.loads(broker_req)
request = CephBrokerRq(api_version=request_data['api-version'],
request_id=request_data['request-id'])
request.set_ops(request_data['ops'])
return request | Return the last ceph broker request sent on a given relation
@param rid: Relation id to query for request |
2,418 | def execute(self, arg_list):
arg_map = self.parser.parse_args(arg_list).__dict__
command = arg_map.pop(self._COMMAND_FLAG)
return command(**arg_map) | Main function to parse and dispatch commands by given ``arg_list``
:param arg_list: all arguments provided by the command line
:param type: list |
2,419 | def class_config_section(cls):
def c(s):
s = '\n\n'.join(wrap_paragraphs(s, 78))
return '# ' + s.replace('\n', '\n# ')
breaker = '#' + '-' * 78
s = "# %s configuration" % cls.__name__
lines = [breaker, s, breaker, '']
desc = cls.class_traits().get('description')
if desc:
desc = desc.default_value
else:
desc = getattr(cls, '__doc__', '')
if desc:
lines.append(c(desc))
lines.append('')
parents = []
for parent in cls.mro():
if parent is not cls and issubclass(parent, Configurable) and \
parent.class_traits(config=True):
parents.append(parent)
if parents:
pstr = ', '.join([p.__name__ for p in parents])
lines.append(c('%s will inherit config from: %s' % (cls.__name__, pstr)))
lines.append('')
for name, trait in cls.class_traits(config=True).iteritems():
help = trait.get_metadata('help') or ''
lines.append(c(help))
lines.append('# c.%s.%s = %r' % (cls.__name__, name, trait.get_default_value()))
lines.append('')
return '\n'.join(lines) | Get the config class config section |
2,420 | def get_all_entity_type_saved_searches(self, entitytype, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_entity_type_saved_searches_with_http_info(entitytype, **kwargs)
else:
(data) = self.get_all_entity_type_saved_searches_with_http_info(entitytype, **kwargs)
return data | Get all saved searches for a specific entity type for a user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_entity_type_saved_searches(entitytype, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entitytype: (required)
:param int offset:
:param int limit:
:return: ResponseContainerPagedSavedSearch
If the method is called asynchronously,
returns the request thread. |
2,421 | def descendants(self, node):
self._ensure_parameters()
return CTEQuerySet(self.model, using=self._db, offset=node).exclude(pk=node.pk) | Returns a :class:`QuerySet` with all descendants for a given
:class:`CTENode` `node`.
:param node: the :class:`CTENode` whose descendants are required.
:returns: A :class:`QuerySet` with all descendants of the given
`node`. |
2,422 | def patch_object(obj, attr, value):
orig = getattr(obj, attr, no_value)
try:
setattr(obj, attr, value)
yield obj
finally:
try:
if orig is no_value:
delattr(obj, attr)
else:
setattr(obj, attr, orig)
except:
pass | Context manager that temporarily patches an object *obj* by replacing its attribute *attr* with
*value*. The original value is set again when the context is closed. |
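Row 2,422 is evidently the body of a context manager (it yields and restores in a `finally` block), although the `@contextlib.contextmanager` decorator is not shown in the extract. A minimal self-contained re-sketch of the same idea, with a made-up `Config` class for the demo:

```python
import contextlib

_no_value = object()  # sentinel distinguishing "attribute absent" from "attribute is None"

@contextlib.contextmanager
def patch_object(obj, attr, value):
    """Temporarily set obj.attr = value; restore (or delete) it on exit."""
    orig = getattr(obj, attr, _no_value)
    try:
        setattr(obj, attr, value)
        yield obj
    finally:
        if orig is _no_value:
            delattr(obj, attr)
        else:
            setattr(obj, attr, orig)

class Config:
    debug = False

with patch_object(Config, "debug", True):
    print(Config.debug)  # True inside the context
print(Config.debug)      # False again afterwards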
def load_mnist_dataset(mode='supervised', one_hot=True):
mnist = input_data.read_data_sets("MNIST_data/", one_hot=one_hot)
trX = mnist.train.images
trY = mnist.train.labels
vlX = mnist.validation.images
vlY = mnist.validation.labels
teX = mnist.test.images
teY = mnist.test.labels
if mode == 'supervised':
return trX, trY, vlX, vlY, teX, teY
elif mode == 'unsupervised':
return trX, vlX, teX | Load the MNIST handwritten digits dataset.
:param mode: 'supervised' or 'unsupervised' mode
:param one_hot: whether to get one hot encoded labels
:return: train, validation, test data:
for (X, y) if 'supervised',
for (X) if 'unsupervised' |
2,424 | def open(self, using=None, **kwargs):
return self._get_connection(using).indices.open(index=self._name, **kwargs) | Opens the index in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.open`` unchanged. |
2,425 | def x10_all_lights_off(self, housecode):
msg = X10Send.command_msg(housecode, X10_COMMAND_ALL_LIGHTS_OFF)
self.send_msg(msg)
self._x10_command_to_device(housecode, X10_COMMAND_ALL_LIGHTS_OFF, msg) | Send the X10 All Lights Off command. |
2,426 | def seek_end(fileobj, offset):
if offset < 0:
raise ValueError
if get_size(fileobj) < offset:
fileobj.seek(0, 0)
else:
fileobj.seek(-offset, 2) | Like fileobj.seek(-offset, 2), but will not try to go beyond the start
Needed since file objects from BytesIO will not raise IOError and
file objects from open() will raise IOError if going to a negative offset.
To make things easier for custom implementations, instead of allowing
both behaviors, we just don't do it.
Args:
fileobj (fileobj)
offset (int): how many bytes away from the end backwards to seek to
Raises:
IOError |
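Row 2,426's `seek_end` depends on a `get_size` helper that is not part of the extract. A self-contained sketch, with an assumed `get_size` implementation, shows the clamping behaviour described in the docstring:

```python
import io

def get_size(fileobj):
    # Assumed helper: size in bytes, restoring the current position afterwards.
    old_pos = fileobj.tell()
    fileobj.seek(0, 2)
    size = fileobj.tell()
    fileobj.seek(old_pos, 0)
    return size

def seek_end(fileobj, offset):
    if offset < 0:
        raise ValueError
    if get_size(fileobj) < offset:
        fileobj.seek(0, 0)   # offset is past the start: clamp to the beginning
    else:
        fileobj.seek(-offset, 2)

f = io.BytesIO(b"abcdef")
seek_end(f, 2)
print(f.read())   # b'ef'
seek_end(f, 100)  # more than the file size: seeks to the start instead of failing
print(f.read())   # b'abcdef'
```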
2,427 | def set_querier_mode(self, dpid, server_port):
self.dpid = dpid
self.server_port = server_port
if self._querier_thread:
hub.kill(self._querier_thread)
self._querier_thread = None | set the datapath to work as a querier. note that you can set
up only the one querier. when you called this method several
times, only the last one becomes effective. |
2,428 | def file_mtime(file_path):
if not os.path.isfile(file_path):
raise IOError('File "%s" does not exist.' % file_path)
ut = subprocess.check_output(['git', 'log', '-1', '--format=%ct',
file_path]).strip()
return datetime.fromtimestamp(int(ut)) | Returns the file modified time. This is with regards to the last
modification the file has had in the droopescan repo, rather than actual
file modification time in the filesystem.
@param file_path: file path relative to the executable.
@return datetime.datetime object. |
2,429 | def async_batch_annotate_files(
self,
requests,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "async_batch_annotate_files" not in self._inner_api_calls:
self._inner_api_calls[
"async_batch_annotate_files"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.async_batch_annotate_files,
default_retry=self._method_configs["AsyncBatchAnnotateFiles"].retry,
default_timeout=self._method_configs["AsyncBatchAnnotateFiles"].timeout,
client_info=self._client_info,
)
request = image_annotator_pb2.AsyncBatchAnnotateFilesRequest(requests=requests)
operation = self._inner_api_calls["async_batch_annotate_files"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
image_annotator_pb2.AsyncBatchAnnotateFilesResponse,
metadata_type=image_annotator_pb2.OperationMetadata,
) | Run asynchronous image detection and annotation for a list of generic
files, such as PDF files, which may contain multiple pages and multiple
images per page. Progress and results can be retrieved through the
``google.longrunning.Operations`` interface. ``Operation.metadata``
contains ``OperationMetadata`` (metadata). ``Operation.response``
contains ``AsyncBatchAnnotateFilesResponse`` (results).
Example:
>>> from google.cloud import vision_v1p4beta1
>>>
>>> client = vision_v1p4beta1.ImageAnnotatorClient()
>>>
>>> # TODO: Initialize `requests`:
>>> requests = []
>>>
>>> response = client.async_batch_annotate_files(requests)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
requests (list[Union[dict, ~google.cloud.vision_v1p4beta1.types.AsyncAnnotateFileRequest]]): Individual async file annotation requests for this batch.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.vision_v1p4beta1.types.AsyncAnnotateFileRequest`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.vision_v1p4beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
2,430 | def set_index(self, index):
item = index.internalPointer()
note = item.internal_data()
self.content_lb.setText(note.content)
self.created_dte.setDateTime(dt_to_qdatetime(note.date_created))
self.updated_dte.setDateTime(dt_to_qdatetime(note.date_updated))
self.username_lb.setText(note.user.username) | Display the data of the given index
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None |
2,431 | def addField(self, field) :
if field.lower() in self.legend :
raise ValueError("%s is already in the legend" % field.lower())
self.legend[field.lower()] = len(self.legend)
if len(self.strLegend) > 0 :
self.strLegend += self.separator + field
else :
self.strLegend += field | add a field to the legend |
2,432 | def get_log_level(args):
index = -1
log_level = None
if '<command>' in args and args['<command>']:
index = sys.argv.index(args['<command>'])
if args.get('--debug'):
log_level = 'DEBUG'
if '--debug' in sys.argv and sys.argv.index('--debug') < index:
sys.argv.remove('--debug')
elif '-d' in sys.argv and sys.argv.index('-d') < index:
sys.argv.remove('-d')
elif args.get('--verbose'):
log_level = 'INFO'
if '--verbose' in sys.argv and sys.argv.index('--verbose') < index:
sys.argv.remove('--verbose')
elif '-v' in sys.argv and sys.argv.index('-v') < index:
sys.argv.remove('-v')
elif args.get('--log-level'):
log_level = args['--log-level']
sys.argv.remove('--log-level')
sys.argv.remove(log_level)
if log_level not in (None, 'DEBUG', 'INFO', 'WARN', 'ERROR'):
raise exceptions.InvalidLogLevelError(log_level)
return getattr(logging, log_level) if log_level else None | Get the log level from the CLI arguments.
Removes logging arguments from sys.argv.
Args:
args: The parsed docopt arguments to be used to determine the logging
level.
Returns:
The correct log level based on the three CLI arguments given.
Raises:
ValueError: Raised if the given log level is not in the acceptable
list of values. |
2,433 | def _init_metadata(self):
TextAnswerFormRecord._init_metadata(self)
FilesAnswerFormRecord._init_metadata(self)
super(AnswerTextAndFilesMixin, self)._init_metadata() | stub |
2,434 | def register_class(self, class_type, component_scope=scope.InstancePerDependency, register_as=None):
registration = _ConstructorRegistration(class_type, component_scope())
self._register(class_type, registration, register_as) | Registers the given class for creation via its constructor.
:param class_type: The class type.
:param component_scope: The scope of the component, defaults to instance per dependency.
:param register_as: The types to register the class as, defaults to the given class_type. |
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=AuthenticationForm,
current_app=None, extra_context=None):
redirect_to = request.POST.get(redirect_field_name,
request.GET.get(redirect_field_name, ''))
if request.method == "POST":
form = authentication_form(data=request.POST, request=request)
if form.is_valid():
netloc = urlparse(redirect_to)[1]
if not redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
elif netloc and netloc != request.get_host():
redirect_to = settings.LOGIN_REDIRECT_URL
auth_login(request, form.get_user())
return HttpResponseRedirect(redirect_to)
else:
form = authentication_form(request)
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}
if extra_context is not None:
context.update(extra_context)
request.current_app = current_app
return TemplateResponse(request, template_name, context) | Displays the login form and handles the login action. |
2,436 | def prepare_static_data(self, data):
d = data.copy()
for f in self.get_fields():
if f['static'] and f['name'] in d:
d[f['name']] = make_view_field(f, None, self.types_convert_map, self.fields_convert_map, d[f['name']])['display']
return d | If the user defined static fields, process them into their visible (display) value |
2,437 | def GET_namespace_info( self, path_info, namespace_id ):
if not check_namespace(namespace_id):
return self._reply_json({'error': 'Invalid namespace'}, status_code=400)
blockstackd_url = get_blockstackd_url()
namespace_rec = blockstackd_client.get_namespace_record(namespace_id, hostport=blockstackd_url)
if json_is_error(namespace_rec):
status_code = namespace_rec.get('http_status', 502)
return self._reply_json({'error': namespace_rec['error']}, status_code=status_code)
self._reply_json(namespace_rec)
return | Look up a namespace's info
Reply information about a namespace
Reply 404 if the namespace doesn't exist
Reply 502 for any error in talking to the blocksatck server |
2,438 | def _parse_bool(value):
if isinstance(value, bool):
return value
elif isinstance(value, str):
if value == 'true':
return True
elif value == 'false':
return False
raise Exception("Value %s is not boolean." % value) | Convert ``string`` or ``bool`` to ``bool``. |
2,439 | def remove(self):
url = self.getUrl()
for retry in range(3):
try:
self.logger.debug("Making DELETE request to slick at url %s", url)
r = requests.delete(url)
self.logger.debug("Request returned status code %d", r.status_code)
if r.status_code == 200:
return None
else:
self.logger.debug("Body of what slick returned: %s", r.text)
except BaseException as error:
self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info())
raise SlickCommunicationError(
"Tried 3 times to request data from slick at url %s without a successful status code.", url) | Remove or delete the specified object from slick. You specify which one you want by providing the id as
a parameter to the parent object, using it as a function. Example:
slick.projects("4fd8cd95e4b0ee7ba54b9885").remove() |
2,440 | def text_remove_empty_lines(text):
lines = [ line.rstrip() for line in text.splitlines() if line.strip() ]
return "\n".join(lines) | Whitespace normalization:
- Strip empty lines
- Strip trailing whitespace |
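A quick usage demonstration of the whitespace normalization in row 2,440 (the sample text is arbitrary):

```python
def text_remove_empty_lines(text):
    lines = [line.rstrip() for line in text.splitlines() if line.strip()]
    return "\n".join(lines)

raw = "first line   \n\n   \nsecond line\t\n"
print(repr(text_remove_empty_lines(raw)))  # 'first line\nsecond line'
```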
2,441 | def can_infect(self, event):
if event.from_stop_I != self.stop_I:
return False
if not self.has_been_visited():
return False
else:
time_sep = event.dep_time_ut-self.get_min_visit_time()
if (time_sep >= self.min_transfer_time) or (event.trip_I == -1 and time_sep >= 0):
return True
else:
for visit in self.visit_events:
if (event.trip_I == visit.trip_I) and (time_sep >= 0):
return True
return False | Whether the spreading stop can infect using this event. |
def rank2d(X, y=None, ax=None, algorithm='pearson', features=None,
show_feature_names=True, colormap='RdBu_r', **kwargs):
visualizer = Rank2D(ax, algorithm, features, colormap, show_feature_names,
**kwargs)
visualizer.fit(X, y, **kwargs)
visualizer.transform(X)
return visualizer.ax | Displays pairwise comparisons of features with the algorithm and ranks
them in a lower-left triangle heatmap plot.
This helper function is a quick wrapper to utilize the Rank2D Visualizer
(Transformer) for one-off analysis.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : matplotlib axes
the axis to plot the figure on.
algorithm : one of {pearson, covariance, spearman, kendalltau}
the ranking algorithm to use, default is Pearson correlation.
features : list
A list of feature names to use.
If a DataFrame is passed to fit and features is None, feature
names are selected as the columns of the DataFrame.
show_feature_names : boolean, default: True
If True, the feature names are used to label the axis ticks in the
plot.
colormap : string or cmap
optional string or matplotlib cmap to colorize lines
Use either color to colorize the lines on a per class basis or
colormap to color them on a continuous scale.
Returns
-------
ax : matplotlib axes
Returns the axes that the parallel coordinates were drawn on. |
2,443 | def is_ternary(self, keyword):
return keyword in {
'if': set(['else', 'elif']),
'try': set(['except', 'finally']),
'for': set(['else'])
}.get(self.keyword, []) | return true if the given keyword is a ternary keyword
for this ControlLine |
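The keyword mapping in row 2,443 mirrors Mako's control-line rules: the keyword that opened a block determines which continuation ("ternary") keywords are allowed. A standalone sketch of the same lookup, detached from the ControlLine class:

```python
# Continuation keywords allowed for each opening control keyword.
_TERNARY = {
    "if": {"else", "elif"},
    "try": {"except", "finally"},
    "for": {"else"},
}

def is_ternary(opening_keyword, keyword):
    return keyword in _TERNARY.get(opening_keyword, ())

print(is_ternary("if", "elif"))     # True
print(is_ternary("for", "else"))    # True
print(is_ternary("while", "else"))  # False: 'while' has no entry, falls back to ()
```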
2,444 | def get_cust_cols(path):
required_keys = ["title", "id", "sType", "visible"]
with open(path, 'r') as f:
try:
cust_cols = ast.literal_eval(f.read())
except Exception as err:
sys.stderr.write("Invalid custom columns file: {}\n".format(path))
sys.stderr.write("{}\n".format(err))
sys.exit(1)
for col in cust_cols:
for required_key in required_keys:
if required_key not in col:
sys.stderr.write("Missing required key in custom "
"column {}\n".format(required_key, col))
sys.exit(1)
if "jsonxs" not in col and "tpl" not in col:
sys.stderr.write("You need to specify or "
"for custom column {}\n".format(col))
sys.exit(1)
return cust_cols | Load custom column definitions. |
2,445 | def preprocess(self, image, image_format):
save_kwargs = {'format': image_format}
if hasattr(image, '_getexif'):
exif_datadict = image._getexif()
if exif_datadict is not None:
exif = dict(exif_datadict.items())
orientation = exif.get(EXIF_ORIENTATION_KEY, None)
if orientation == 3:
image = image.transpose(Image.ROTATE_180)
elif orientation == 6:
image = image.transpose(Image.ROTATE_270)
elif orientation == 8:
image = image.transpose(Image.ROTATE_90)
save_kwargs['icc_profile'] = image.info.get('icc_profile')
if hasattr(self, 'preprocess_%s' % image_format):
image, addl_save_kwargs = getattr(
self,
'preprocess_%s' % image_format
)(image=image)
save_kwargs.update(addl_save_kwargs)
return image, save_kwargs | Preprocess an image.
An API hook for image pre-processing. Calls any image format specific
pre-processors (if defined). I.E. If `image_format` is 'JPEG', this
method will look for a method named `preprocess_JPEG`, if found
`image` will be passed to it.
Arguments:
* `image`: a PIL Image instance
* `image_format`: str, a valid PIL format (i.e. 'JPEG' or 'GIF')
Subclasses should return a 2-tuple:
* [0]: A PIL Image instance.
* [1]: A dictionary of additional keyword arguments to be used
when the instance is saved. If no additional keyword
arguments, return an empty dict ({}). |
2,446 | def debug_shell(user_ns, user_global_ns, traceback=None, execWrapper=None):
ipshell = None
try:
import IPython
have_ipython = True
except ImportError:
have_ipython = False
if not ipshell and traceback and have_ipython:
try:
from IPython.core.debugger import Pdb
from IPython.terminal.debugger import TerminalPdb
from IPython.terminal.ipapp import TerminalIPythonApp
ipapp = TerminalIPythonApp.instance()
ipapp.interact = False
ipapp.initialize(argv=[])
def_colors = ipapp.shell.colors
pdb_obj = TerminalPdb(def_colors)
pdb_obj.botframe = None
def ipshell():
pdb_obj.interaction(None, traceback=traceback)
except Exception:
print("IPython Pdb exception:")
better_exchook(*sys.exc_info(), autodebugshell=False)
if not ipshell and have_ipython:
try:
import IPython
import IPython.terminal.embed
class DummyMod(object):
pass
module = DummyMod()
module.__dict__ = user_global_ns
module.__name__ = "_DummyMod"
if "__name__" not in user_ns:
user_ns = user_ns.copy()
user_ns["__name__"] = "_DummyUserNsMod"
ipshell = IPython.terminal.embed.InteractiveShellEmbed.instance(
user_ns=user_ns, user_module=module)
except Exception:
print("IPython not available:")
better_exchook(*sys.exc_info(), autodebugshell=False)
else:
if execWrapper:
old = ipshell.run_code
ipshell.run_code = lambda code: execWrapper(lambda: old(code))
if ipshell:
ipshell()
else:
print("Use simple debug shell:")
if traceback:
import pdb
pdb.post_mortem(traceback)
else:
simple_debug_shell(user_global_ns, user_ns) | Spawns some interactive shell. Tries to use IPython if available.
Falls back to :func:`pdb.post_mortem` or :func:`simple_debug_shell`.
:param dict[str] user_ns:
:param dict[str] user_global_ns:
:param traceback:
:param execWrapper:
:return: nothing |
2,447 | def from_gaussian_draw(cls,pst,cov,num_reals=1,use_homegrown=True,group_chunks=False,
fill_fixed=True,enforce_bounds=False):
real_names = np.arange(num_reals,dtype=np.int64)
li = pst.parameter_data.partrans == "log"
vals = pst.parameter_data.parval1.copy()
vals[li] = vals.loc[li].apply(np.log10)
if list(vals.index.values) != cov.row_names:
common_names = get_common_elements(vals.index.values,
cov.row_names)
if len(common_names) == 0:
raise Exception("ParameterEnsemble::from_gaussian_draw() error: cov and pst share no common names")
vals = vals.loc[common_names]
cov = cov.get(common_names)
else:
common_names = cov.row_names
li = pst.parameter_data.partrans.loc[common_names] == "log"
if cov.isdiagonal:
arr = np.zeros((num_reals,len(vals)))
stds = {pname:std for pname,std in zip(common_names,np.sqrt(cov.x.flatten()))}
means = {pname:val for pname,val in zip(common_names,vals)}
arr = np.random.randn(num_reals,len(common_names))
adj_pars = set(pst.adj_par_names)
for i,pname in enumerate(common_names):
if pname in adj_pars:
arr[:,i] = (arr[:,i] * stds[pname]) + means[pname]
else:
arr[:,i] = means[pname]
df = pd.DataFrame(data=arr,columns=common_names,index=real_names)
else:
if use_homegrown:
print("making full cov draws with home-grown goodness")
if group_chunks:
par_cov = pst.parameter_data.loc[cov.names,:]
par_cov.loc[:,"idxs"] = np.arange(cov.shape[0])
pargps = par_cov.pargp.unique()
reals = np.zeros((num_reals,cov.shape[0]))
for ipg,pargp in enumerate(pargps):
pnames = list(par_cov.loc[par_cov.pargp==pargp,"parnme"])
idxs = par_cov.loc[par_cov.pargp == pargp, "idxs"]
s,e = idxs[0],idxs[-1]
snv = np.random.randn(num_reals, len(pnames))
cov_pg = cov.get(pnames)
if len(pnames) == 1:
std = np.sqrt(cov_pg.x)
reals[:,idxs] = vals[pnames].values[0] + (snv * std)
else:
try:
cov_pg.inv
except:
covname = "trouble_{0}.cov".format(pargp)
cov_pg.to_ascii(covname)
raise Exception("error inverting cov for par group ,"+\
"saved trouble cov to {1}".
format(pargp,covname))
v, w = np.linalg.eigh(cov_pg.as_2d)
for i in range(v.shape[0]):
if v[i] > 1.0e-10:
pass
else:
print("near zero eigen value found",v[i],\
"at index",i," of ",v.shape[0])
v[i] = 0.0
vsqrt = np.sqrt(v)
vsqrt[i:] = 0.0
v = np.diag(vsqrt)
a = np.dot(w, v)
pg_vals = vals[pnames]
for i in range(num_reals):
reals[i,idxs] = pg_vals + np.dot(a,snv[i,:])
else:
snv = np.random.randn(num_reals, cov.shape[0])
v, w = np.linalg.eigh(cov.as_2d)
for i in range(v.shape[0]):
if v[i] > 1.0e-10:
pass
else:
print("near zero eigen value found", v[i], \
"at index", i, " of ", v.shape[0])
v[i] = 0.0
a = np.dot(w, np.sqrt(np.diag(v)))
reals = []
for vec in snv:
real = vals + np.dot(a, vec)
reals.append(real)
df = pd.DataFrame(reals, columns=common_names, index=real_names)
else:
df = pd.DataFrame(data=np.random.multivariate_normal(vals, cov.as_2d,num_reals),
columns = common_names,index=real_names)
df.loc[:,li] = 10.0**df.loc[:,li]
if fill_fixed:
par = pst.parameter_data
fixed_vals = par.loc[par.partrans.apply(lambda x: x in ["fixed","tied"]),"parval1"]
for fname,fval in zip(fixed_vals.index,fixed_vals.values):
df.loc[:,fname] = fval
new_pe = cls.from_dataframe(pst=pst,df=df)
if enforce_bounds:
new_pe.enforce()
return new_pe | instantiate a parameter ensemble from a covariance matrix
Parameters
----------
pst : pyemu.Pst
a control file instance
cov : (pyemu.Cov)
covariance matrix to use for drawing
num_reals : int
number of realizations to generate
use_homegrown : bool
flag to use home-grown full cov draws...much faster
than numpy...
group_chunks : bool
flag to break up draws by par groups. Only applies
to homegrown, full cov case. Default is False
fill_fixed : bool
flag to fill in fixed parameters from the pst into the
ensemble using the parval1 from the pst. Default is True
enforce_bounds : bool
flag to enforce parameter bounds from the pst. realized
parameter values that violate bounds are simply changed to the
value of the violated bound. Default is False
Returns
-------
ParameterEnsemble : ParameterEnsemble |
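The "home-grown" full-covariance draw in row 2,447 relies on a standard technique: factor the covariance with an eigendecomposition and use it to transform standard-normal deviates. A minimal numpy sketch of that technique alone (not pyemu's API; names are illustrative):

```python
import numpy as np

def gaussian_draws(mean, cov, num_reals, seed=0):
    """Draw correlated multivariate-normal samples via an eigendecomposition of cov."""
    rng = np.random.default_rng(seed)
    vals, vecs = np.linalg.eigh(cov)           # cov = V diag(vals) V^T
    vals = np.clip(vals, 0.0, None)            # zero out tiny negative eigenvalues
    transform = vecs @ np.diag(np.sqrt(vals))  # maps N(0, I) deviates to N(0, cov)
    snv = rng.standard_normal((num_reals, len(mean)))
    return mean + snv @ transform.T

mean = np.array([1.0, -2.0])
cov = np.array([[2.0, 0.8],
                [0.8, 1.0]])
reals = gaussian_draws(mean, cov, num_reals=5000)
print(np.cov(reals.T).round(2))  # should be close to cov
```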
2,448 | def limits(self,x1,x2,y1,y2):
import math
self.x1=x1
self.x2=x2
self.y1=y1
self.y2=y2
self.xscale=(self.cx2-self.cx1)/(self.x2-self.x1)
self.yscale=(self.cy2-self.cy1)/(self.y2-self.y1)
ra1=self.x1
ra2=self.x2
dec1=self.y1
dec2=self.y2
(sx1,sy2)=self.p2c((ra1,dec1))
(sx2,sy1)=self.p2c((ra2,dec2))
self.config(scrollregion=(sx1-self.lgutter,sy1+self.bgutter,sx2+self.rgutter,sy2-self.tgutter)) | Set the coordinate boundaries of plot |
2,449 | def neighbors(self, subid, params=None):
params = update_params(params, {'SUBID': subid})
return self.request('/v1/server/neighbors', params, 'GET') | v1/server/neighbors
GET - account
Determine what other subscriptions are hosted on the same physical
host as a given subscription.
Link: https://www.vultr.com/api/#server_neighbors |
2,450 | def column_lists_equal(a: List[Column], b: List[Column]) -> bool:
n = len(a)
if len(b) != n:
return False
for i in range(n):
if not columns_equal(a[i], b[i]):
log.debug("Mismatch: {!r} != {!r}", a[i], b[i])
return False
return True | Are all columns in list ``a`` equal to their counterparts in list ``b``,
as per :func:`columns_equal`? |
2,451 | def parse(self, node):
self._attrs = {}
vals = []
yielded = False
for x in self._read_parts(node):
if isinstance(x, Field):
yielded = True
x.attrs = self._attrs
yield x
else:
vals.append(ustr(x).strip())
joined = ' '.join([x for x in vals if x])
if joined:
yielded = True
yield Field(node, guess_type(joined), self._attrs)
if not yielded:
yield Field(node, "", self._attrs) | Return generator yielding Field objects for a given node |
2,452 | def activatewindow(self, window_name):
window_handle = self._get_window_handle(window_name)
self._grabfocus(window_handle)
return 1 | Activate window.
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@return: 1 on success.
@rtype: integer |
2,453 | def _listen_inbox_messages(self):
inbox_queue = Queue(maxsize=self._n_jobs * 4)
threads = []
try:
for i in range(self._n_jobs):
t = BotQueueWorker(name='InboxThread-{}'.format(i),
jobs=inbox_queue,
target=self._process_inbox_message)
t.start()
self._threads.append(t)
for message in self._reddit.inbox.stream():
if self._stop:
self._do_stop(inbox_queue, threads)
break
inbox_queue.put(message)
self.log.debug('Stopped listening to inbox messages')
except Exception as e:
self._do_stop(inbox_queue, threads)
self.log.error('Exception while listening to inbox messages:')
self.log.error(str(e))
self.log.error('Waiting 10 minutes and restarting the inbox stream')
time.sleep(10 * 60)
self._listen_inbox_messages() | Start listening to messages, using a separate thread. |
2,454 | def parse(self, text, noprefix=False):
res = self.match(text, noprefix)
if res:
r = res['values']
p = res['pattern']
d = {'hour': 0, 'minute': 0, 'second': 0}
if 'noyear' in p and p['noyear'] == True:
d['year'] = datetime.datetime.now().year
for k, v in list(r.items()):
d[k] = int(v)
dt = datetime.datetime(**d)
return dt
return None | Parse date and time from given date string.
:param text:
Any human readable string
:type date_string: str|unicode
:param noprefix:
If set True than doesn't use prefix based date patterns filtering settings
:type noprefix: bool
:return: Returns :class:`datetime <datetime.datetime>` representing parsed date if successful, else returns None
:rtype: :class:`datetime <datetime.datetime>`. |
2,455 | def vlan_classifier_group_groupid(self, **kwargs):
config = ET.Element("config")
vlan = ET.SubElement(config, "vlan", xmlns="urn:brocade.com:mgmt:brocade-vlan")
classifier = ET.SubElement(vlan, "classifier")
group = ET.SubElement(classifier, "group")
oper_key = ET.SubElement(group, "oper")
oper_key.text = kwargs.pop('oper')
rule_name_key = ET.SubElement(group, "rule-name")
rule_name_key.text = kwargs.pop('rule_name')
ruleid_key = ET.SubElement(group, "ruleid")
ruleid_key.text = kwargs.pop('ruleid')
groupid = ET.SubElement(group, "groupid")
groupid.text = kwargs.pop('groupid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
2,456 | def get_message(self, id):
url = self._base_url + "/3/message/{0}".format(id)
resp = self._send_request(url)
return Message(resp, self) | Return a Message object for given id.
:param id: The id of the message object to return. |
2,457 | def select_from_array(cls, array, identifier):
base_array = np.zeros(array.shape)
array_coords = np.where(array == identifier)
base_array[array_coords] = 1
return cls(base_array) | Return a region from a numpy array.
:param array: :class:`numpy.ndarray`
:param identifier: value representing the region to select in the array
:returns: :class:`jicimagelib.region.Region` |
2,458 | def online(note, github_repository, github_username):
callbacks.git_installed()
try:
repo = git.Repo()
except git.InvalidGitRepositoryError:
LOGGER.critical(
" requires a git repository in order to follow "
"the current branch.travis.yml.travis.ymlgitcommit-mgitpush--set-upstreamorigin', repo.active_branch.name]
) | Upload the repository to GitHub and enable testing on Travis CI. |
2,459 | def until_state(self, state, timeout=None):
if state not in self._valid_states:
raise ValueError('State must be one of {0}, not {1}'
.format(self._valid_states, state))
if state != self._state:
if timeout:
return with_timeout(self._ioloop.time() + timeout,
self._waiting_futures[state],
self._ioloop)
else:
return self._waiting_futures[state]
else:
f = tornado_Future()
f.set_result(True)
return f | Return a tornado Future that will resolve when the requested state is set |
2,460 | def search(context, keywords, module, raw, kind):
logging.info(_())
sense = context.obj[]
func = sense.query_names if module else sense.query_info
none = True
for keyword in keywords:
output = func(keyword, raw, kind)
if output:
none = False
print(output)
else:
logging.warning(_(), keyword)
sys.exit(1 if none else 0) | Query Windows identifiers and locations.
Windows database must be prepared before using this. |
2,461 | def run_executable(repo, args, includes):
mgr = plugins_get_mgr()
repomgr = mgr.get(what=, name=)
platform_metadata = repomgr.get_metadata()
print("Obtaining Commit Information")
(executable, commiturl) = \
find_executable_commitpath(repo, args)
tmpdir = tempfile.mkdtemp()
print("Running the command")
strace_filename = os.path.join(tmpdir, 'strace.out')
cmd = ["strace.py", "-f", "-o", strace_filename,
"-s", "1024", "-q", "--"] + args
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
stdout = os.path.join(tmpdir, 'stdout.log')
with open(stdout, 'w') as fd:
fd.write(out.decode())
stderr = os.path.join(tmpdir, 'stderr.log')
with open(stderr, 'w') as fd:
fd.write(err.decode())
files = extract_files(strace_filename, includes)
execution_metadata = {
: executable,
: commiturl,
: args,
}
execution_metadata.update(platform_metadata)
for i in range(len(files)):
files[i][] = execution_metadata
return files | Run the executable and capture the input and output... |
2,462 | def _get_video_id(self, url=None):
if url:
html_data = self.http.request("get", url).text
else:
html_data = self.get_urldata()
html_data = self.get_urldata()
match = re.search(r, html_data)
if match:
return match.group(1)
match = re.search(r, html_data)
if match:
return match.group(1)
match = re.search(r, html_data)
if match:
return match.group(1)
clips = False
slug = None
match = re.search(, self.get_urldata())
if match:
jansson = json.loads(match.group(1))
if "seasonNumberOrVideoId" in jansson:
season = jansson["seasonNumberOrVideoId"]
match = re.search(r"\w-(\d+)$", season)
if match:
season = match.group(1)
else:
match = self._conentpage(self.get_urldata())
if match:
janson2 = json.loads(match.group(1))
if janson2["formatPage"]["format"]:
season = janson2["formatPage"]["format"]["seasonNumber"]
return janson2["formatPage"]["format"]["videos"][str(season)]["program"][0]["id"]
return None
if "videoIdOrEpisodeNumber" in jansson:
videp = jansson["videoIdOrEpisodeNumber"]
match = re.search(r, videp)
if match:
episodenr = match.group(2)
else:
episodenr = videp
clips = True
match = re.search(r, season)
if match:
season = match.group(2)
else:
match = re.search(r, self.url)
if match:
episodenr = match.group(2)
else:
episodenr = season
if "slug" in jansson:
slug = jansson["slug"]
if clips:
return episodenr
else:
match = self._conentpage(self.get_urldata())
if match:
janson = json.loads(match.group(1))
for i in janson["formatPage"]["format"]["videos"].keys():
if "program" in janson["formatPage"]["format"]["videos"][str(i)]:
for n in janson["formatPage"]["format"]["videos"][i]["program"]:
if str(n["episodeNumber"]) and int(episodenr) == n["episodeNumber"] and int(season) == n["seasonNumber"]:
if slug is None or slug == n["formatSlug"]:
return n["id"]
elif n["id"] == episodenr:
return episodenr
parse = urlparse(self.url)
match = re.search(r, parse.path)
if match:
return match.group(1)
match = re.search(r, html_data)
if match:
return match.group(1)
match = re.search(r, html_data)
if match:
return match.group(1).split("/")[-2]
return None | Extract video id. It will try to avoid making an HTTP request
if it can find the ID in the URL, but otherwise it will try
to scrape it from the HTML document. Returns None in case it's
unable to extract the ID at all. |
2,463 | def _apply_decorator_to_methods(cls, decorator):
for method in cls.methods:
method_name = method.lower()
decorated_method_func = decorator(getattr(cls, method_name))
setattr(cls, method_name, decorated_method_func) | This helper can apply a given decorator to all methods on the current
Resource.
NOTE: In contrast to ``Resource.method_decorators``, which has a
similar use-case, this method applies decorators directly and override
methods in-place, while the decorators listed in
``Resource.method_decorators`` are applied on every request which is
quite a waste of resources. |
2,464 | def _get_handler_set(cls, request, fail_enum, header_proto=None):
added = set()
handlers = []
for controls in request.sorting:
control_bytes = controls.SerializeToString()
if control_bytes not in added:
added.add(control_bytes)
handlers.append(
cls._ValueHandler(controls, fail_enum, header_proto))
return handlers | Goes through the list of ClientSortControls and returns a list of
unique _ValueHandlers. Maintains order, but drops ClientSortControls
that have already appeared to help prevent spamming. |
2,465 | def save_to_cache(dxobject):
if dxpy.JOB_ID is None:
raise DXError('save_to_cache() must be called from within a running job')
if 'DX_PROJECT_CACHE_ID' not in os.environ:
raise DXError('DX_PROJECT_CACHE_ID not found in the environment variables')
dxobject.clone(os.environ.get('DX_PROJECT_CACHE_ID'))
:raises: :exc:`~dxpy.exceptions.DXError` if this is called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found in the environment variables
Clones the given object to the project cache.
Example::
@dxpy.entry_point('main')
def main(*args, **kwargs):
x = load_from_cache(name="Indexed genome", classname='file')
if x is None:
x = compute_result(*args)
save_to_cache(x) |
2,466 | def get_submission(self, submissionid, user_check=True):
sub = self._database.submissions.find_one({'_id': ObjectId(submissionid)})
if user_check and not self.user_is_submission_owner(sub):
return None
return sub | Get a submission from the database |
2,467 | def before(self, callback: Union[Callable, str]) -> "Control":
if isinstance(callback, Control):
callback = callback._before
self._before = callback
return self | Register a control method that reacts before the trigger method is called.
Parameters:
callback:
The control method. If given as a callable, then that function will be
used as the callback. If given as a string, then the control will look
up a method with that name when reacting (useful when subclassing). |
2,468 | def print_graph(self, format=None, output=sys.stdout, depth=0, **kwargs):
graph = self.as_graph(depth=depth)
graph.print(format=format, output=output, **kwargs) | Print the graph for self's nodes.
Args:
format (str): output format (csv, json or text).
output (file): file descriptor on which to write.
depth (int): depth of the graph. |
2,469 | def i2c_write(self, address, *args):
data = [address, self.I2C_WRITE]
for item in args:
data.append(item & 0x7f)
data.append((item >> 7) & 0x7f)
self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data) | Write data to an i2c device.
:param address: i2c device address
:param args: A variable number of bytes to be sent to the device |
2,470 | def list_namespaced_config_map(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_config_map_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_config_map_with_http_info(namespace, **kwargs)
return data | list or watch objects of kind ConfigMap
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_config_map(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ConfigMapList
If the method is called asynchronously,
returns the request thread. |
2,471 | def on_epoch_end(self, epoch, **kwargs:Any)->None:
"Compare the value monitored to its best and maybe reduce lr."
current = self.get_monitor_value()
if current is None: return
if self.operator(current - self.min_delta, self.best): self.best,self.wait = current,0
else:
self.wait += 1
if self.wait > self.patience:
self.opt.lr *= self.factor
self.wait = 0
print(f'Epoch {epoch}: reducing lr to {self.opt.lr}') | Compare the value monitored to its best and maybe reduce lr. |
2,472 | def has_device_info(self, key):
if _debug: DeviceInfoCache._debug("has_device_info %r", key)
return key in self.cache | Return true iff cache has information about the device. |
2,473 | def _maybe_limit_chromosomes(data):
std_chroms = []
prob_chroms = []
noalt_calling = "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data)
for contig in ref.file_contigs(dd.get_ref_file(data)):
if contig.name.find(":") > 0 or (noalt_calling and not chromhacks.is_nonalt(contig.name)):
prob_chroms.append(contig.name)
else:
std_chroms.append(contig.name)
if len(prob_chroms) > 0:
return std_chroms
else:
return [] | Potentially limit chromosomes to avoid problematically named HLA contigs.
HLAs have ':' characters in them which confuse downstream processing. If
we have no problematic chromosomes we don't limit anything. |
2,474 | def SoS_exec(script: str, _dict: dict = None,
return_result: bool = True) -> None:
if _dict is None:
_dict = env.sos_dict.dict()
if not return_result:
exec(
compile(script, filename=stmtHash.hash(script), mode='exec'), _dict)
return None
try:
stmts = list(ast.iter_child_nodes(ast.parse(script)))
if not stmts:
return
if isinstance(stmts[-1], ast.Expr):
if len(stmts) > 1:
exec(
compile(
ast.Module(body=stmts[:-1]),
filename=stmtHash.hash(script),
mode="exec"), _dict)
res = eval(
compile(
ast.Expression(body=stmts[-1].value),
filename=stmtHash.hash(script),
mode="eval"), _dict)
else:
exec(
compile(script, filename=stmtHash.hash(script), mode='exec'),
_dict)
res = None
except SyntaxError as e:
raise SyntaxError(f"Invalid code {script}: {e}")
return res | Execute a statement. |
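The interesting part of row 2,474 is the interpreter-like behaviour: execute every statement, but if the last node is an expression, evaluate it and return its value. A self-contained sketch of that technique, without the SoS dictionary or statement-hash filenames:

```python
import ast

def exec_with_result(script, env=None):
    """exec all statements; if the last one is an expression, eval and return its value."""
    env = {} if env is None else env
    module = ast.parse(script)
    if module.body and isinstance(module.body[-1], ast.Expr):
        last = module.body.pop()
        exec(compile(module, "<script>", "exec"), env)
        return eval(compile(ast.Expression(body=last.value), "<script>", "eval"), env)
    exec(compile(module, "<script>", "exec"), env)
    return None

print(exec_with_result("x = 2\ny = 3\nx * y"))  # 6
print(exec_with_result("x = 2"))                # None
```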
2,475 | def extract_arguments(frame):
arguments = ([], None, None)
try:
source = textwrap.dedent("".join(inspect.getsourcelines(frame)[0]).replace("\\\n", ""))
except (IOError, TypeError) as error:
return arguments
try:
node = ast.parse(source)
except:
return arguments
if not node.body:
return arguments
node = node.body[0]
if not isinstance(node, ast.FunctionDef):
return arguments
return [arg.id for arg in node.args.args], node.args.vararg, node.args.kwarg | Extracts the arguments from given frame.
:param frame: Frame.
:type frame: object
:return: Arguments.
:rtype: tuple |
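Row 2,475 targets the Python 2 ast (string `vararg`, `arg.id`); a Python 3 equivalent of the same source-parsing idea, working from a function object instead of a frame, might look like this (names are illustrative):

```python
import ast
import inspect
import textwrap

def argument_names(func):
    """Return (positional args, *vararg name, **kwarg name) parsed from the source."""
    source = textwrap.dedent(inspect.getsource(func))
    node = ast.parse(source).body[0]
    if not isinstance(node, ast.FunctionDef):
        return [], None, None
    args = node.args
    return ([a.arg for a in args.args],
            args.vararg.arg if args.vararg else None,
            args.kwarg.arg if args.kwarg else None)

def example(a, b, *rest, **options):
    pass

print(argument_names(example))  # (['a', 'b'], 'rest', 'options')
```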
2,476 | def save_retinotopy_cache(sdir, sid, hemi, props, alignment=, overwrite=False):
s retinotopy cache from the given properties. The first argument is the
subject directory).
_retinotopy_cacheretinotopy_cacheretinotopy', v % (h, alignment))
for (k,v) in six.iteritems(_retinotopy_cache_tr[htype])}
for (p,fl) in six.iteritems(files):
if p not in props or (not overwrite and os.path.exists(fl)): continue
p = np.asarray(props[p])
if np.issubdtype(p.dtype, np.floating): p = np.asarray(p, np.float32)
dr = os.path.split(os.path.abspath(fl))[0]
if not os.path.isdir(dr): os.makedirs(os.path.abspath(dr), 0o755)
nyio.save(fl, p) | Saves the subject's retinotopy cache from the given properties. The first argument is the
subject's directory (not the subjects' directory). |
2,477 | def delete_event(self, id, **data):
return self.delete("/events/{0}/".format(id), data=data) | DELETE /events/:id/
Deletes an event if the delete is permitted. In order for a delete to be permitted, there must be no pending or
completed orders. Returns a boolean indicating success or failure of the delete. |
2,478 | def minimum_required(version):
def _minimum_required(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if list(self.version) < list(version):
raise errors.JLinkException('Version %s required.' % version)
return func(self, *args, **kwargs)
return wrapper
return _minimum_required | Decorator to specify the minimum SDK version required.
Args:
version (str): valid version string
Returns:
A decorator function. |
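Row 2,478's decorator gates a method on `self.version`; a self-contained sketch of the same version-gating pattern, with a stand-in `Client` class and `ValueError` in place of the pylink-specific exception:

```python
import functools

def minimum_required(version):
    """Decorator: refuse to run the method unless self.version >= version."""
    required = tuple(int(p) for p in version.split("."))
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.version < required:
                raise ValueError("Incompatible SDK version, %s or newer required" % version)
            return func(self, *args, **kwargs)
        return wrapper
    return decorator

class Client:
    version = (6, 0, 0)

    @minimum_required("6.14")
    def new_feature(self):
        return "ok"

try:
    Client().new_feature()
except ValueError as err:
    print(err)  # Incompatible SDK version, 6.14 or newer required
```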
2,479 | def _copy_to_configdir(items, out_dir, args):
    out = []
    for item in items:
        ped_file = tz.get_in(["metadata", "ped"], item)
        if ped_file and os.path.exists(ped_file):
            ped_config_file = os.path.join(out_dir, "config", os.path.basename(ped_file))
            if not os.path.exists(ped_config_file):
                shutil.copy(ped_file, ped_config_file)
            item["metadata"]["ped"] = ped_config_file
        out.append(item)
    if hasattr(args, "systemconfig") and args.systemconfig:
        shutil.copy(args.systemconfig, os.path.join(out_dir, "config", os.path.basename(args.systemconfig)))
    return out | Copy configuration files like PED inputs to working config directory.
2,480 | def write_dict_to_yaml(dictionary, path, **kwargs):
    with open(path, 'w') as f:
        yaml.dump(dictionary, f, indent=4, **kwargs) | Writes a dictionary to a yaml file
:param dictionary: the dictionary to be written
:param path: the absolute path of the target yaml file
:param kwargs: optional additional parameters for dumper |
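A short usage sketch, assuming only PyYAML and the helper above; the path and contents are placeholders:

config = {'name': 'demo', 'retries': 3, 'paths': ['/tmp/a', '/tmp/b']}
write_dict_to_yaml(config, '/tmp/config.yaml', default_flow_style=False)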
2,481 | def _restore_constructor(self, cls):
    cls.__init__ = self._observers[cls].init
    del self._observers[cls] | Restore the original constructor, lose track of class. |
2,482 | def _compute_forearc_backarc_term(self, C, sites, dists):
    f_faba = np.zeros_like(dists.rhypo)
    max_dist = dists.rhypo[sites.backarc]
    max_dist[max_dist < 85.0] = 85.0
    f_faba[sites.backarc] = C['theta7'] +\
        (C['theta8'] * np.log(max_dist / 40.0))
    return f_faba | Computes the forearc/backarc scaling term given by equation (4). |
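For illustration, the same backarc scaling can be reproduced with plain NumPy outside the GMPE class; the theta7/theta8 values below are placeholders rather than published coefficients:

import numpy as np

rhypo = np.array([50.0, 120.0, 300.0])          # hypocentral distances in km
backarc = np.array([True, False, True])         # toy backarc site mask
C = {'theta7': 1.1, 'theta8': -1.4}             # illustrative coefficients only
f_faba = np.zeros_like(rhypo)
max_dist = np.clip(rhypo[backarc], 85.0, None)  # distances floored at 85 km
f_faba[backarc] = C['theta7'] + C['theta8'] * np.log(max_dist / 40.0)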
2,483 | def as_xml(self,parent):
    n=parent.newChild(None,"N",None)
    n.newTextChild(None,"FAMILY",to_utf8(self.family))
    n.newTextChild(None,"GIVEN",to_utf8(self.given))
    n.newTextChild(None,"MIDDLE",to_utf8(self.middle))
    n.newTextChild(None,"PREFIX",to_utf8(self.prefix))
    n.newTextChild(None,"SUFFIX",to_utf8(self.suffix))
    return n | Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode` |
2,484 | def clip_box(dataset, bounds=None, invert=True, factor=0.35):
    if bounds is None:
        def _get_quarter(dmin, dmax):
            return dmax - ((dmax - dmin) * factor)
        xmin, xmax, ymin, ymax, zmin, zmax = dataset.bounds
        xmin = _get_quarter(xmin, xmax)
        ymin = _get_quarter(ymin, ymax)
        zmin = _get_quarter(zmin, zmax)
        bounds = [xmin, xmax, ymin, ymax, zmin, zmax]
    if isinstance(bounds, (float, int)):
        bounds = [bounds, bounds, bounds]
    if len(bounds) == 3:
        xmin, xmax, ymin, ymax, zmin, zmax = dataset.bounds
        bounds = (xmin,xmin+bounds[0], ymin,ymin+bounds[1], zmin,zmin+bounds[2])
    if not isinstance(bounds, collections.Iterable) or len(bounds) != 6:
        raise AssertionError('Bounds must be a length 6 iterable of floats')
    xmin, xmax, ymin, ymax, zmin, zmax = bounds
    alg = vtk.vtkBoxClipDataSet()
    alg.SetInputDataObject(dataset)
    alg.SetBoxClip(xmin, xmax, ymin, ymax, zmin, zmax)
    port = 0
    if invert:
        port = 1
        alg.GenerateClippedOutputOn()
    alg.Update()
    return _get_output(alg, oport=port) | Clips a dataset by a bounding box defined by the bounds. If no bounds
are given, a corner of the dataset bounds will be removed.
Parameters
----------
bounds : tuple(float)
Length 6 iterable of floats: (xmin, xmax, ymin, ymax, zmin, zmax)
invert : bool
Flag on whether to flip/invert the clip
factor : float, optional
If bounds are not given this is the factor along each axis to
extract the default box. |
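A hedged usage sketch with PyVista, treating clip_box as the standalone filter shown above (in the library it is also exposed as a dataset method); the sphere is just a convenient test mesh:

import pyvista as pv

mesh = pv.Sphere()
clipped = clip_box(mesh, bounds=None, invert=True)   # removes a corner sized by `factor`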
2,485 | def checkout_git_repo(git_url, target_dir=None, commit=None, retry_times=GIT_MAX_RETRIES,
                       branch=None, depth=None):
    tmpdir = tempfile.mkdtemp()
    target_dir = target_dir or os.path.join(tmpdir, "repo")
    try:
        yield clone_git_repo(git_url, target_dir, commit, retry_times, branch, depth)
    finally:
        shutil.rmtree(tmpdir) | clone provided git repo to target_dir, optionally checkout provided commit
yield the ClonedRepoData and delete the repo when finished
:param git_url: str, git repo to clone
:param target_dir: str, filesystem path where the repo should be cloned
:param commit: str, commit to checkout, SHA-1 or ref
:param retry_times: int, number of retries for git clone
:param branch: str, optional branch of the commit, required if depth is provided
:param depth: int, optional expected depth
:return: str, int, commit ID of HEAD |
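Because the helper yields the clone and then removes the temporary directory, it is presumably wrapped as a context manager outside this excerpt; a typical call would look like this sketch with placeholder values:

with checkout_git_repo('https://example.com/project.git', commit='abc123',
                       branch='master', depth=1) as repo_data:
    print(repo_data)   # ClonedRepoData for the temporary clone
# the temporary directory is removed on exit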
2,486 | def union(self, other):
    if not self.is_valid_range(other):
        msg = "Unsupported type to test for union '{}'"
        raise TypeError(msg.format(other))
    if not self:
        return other
    elif not other:
        return self
    if self < other:
        a, b = self, other
    else:
        a, b = other, self
    if (a.upper < b.lower or a.upper == b.lower and not
            a.upper_inc and not b.lower_inc) and not a.adjacent(b):
        raise ValueError("Ranges must be either adjacent or overlapping")
    if a.upper == b.upper:
        upper = a.upper
        upper_inc = a.upper_inc or b.upper_inc
    elif a.upper < b.upper:
        upper = b.upper
        upper_inc = b.upper_inc
    else:
        upper = a.upper
        upper_inc = a.upper_inc
    return self.__class__(a.lower, upper, a.lower_inc, upper_inc) | Merges this range with a given range.
>>> intrange(1, 5).union(intrange(5, 10))
intrange([1,10))
>>> intrange(1, 10).union(intrange(5, 15))
intrange([1,15))
Two ranges can not be merged if the resulting range would be split in
two. This happens when the two sets are neither adjacent nor overlapping.
>>> intrange(1, 5).union(intrange(10, 15))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: Ranges must be either adjacent or overlapping
This does not modify the range in place.
This is the same as the ``+`` operator for two ranges in PostgreSQL.
:param other: Range to merge with.
:return: A new range that is the union of this and `other`.
:raises ValueError: If `other` can not be merged with this range. |
2,487 | def notify(request):
    response = HttpResponse(json.dumps([
        {'src': resource.src, 'mtime': resource.mtime}
        for resource in updates
    ]))
    return response | This view gets a POST request from the Javascript part of the
AutoreloadPanel that contains a body that looks like::
template=/full/path/to/template.html&template=/another/template.eml:123456789&
media=/static/url/to/a/file:133456780&media=http://media.localhost.local/base.css
It is a list of template paths and a list of URLs that are part of the
static/media directories of the project. The filename might be followed by
a unix-epoch timestamp of the last modified date, separated by a colon.
The view then blocks the response as long until one of the specified files
has a modified-time that is newer than the specified timestamp. It will
return a line seperated list of those changed files.
The view might also return with an empty response and status 204 (No
Content) if the source code that the development server runs was modified.
This is needed to free the current thread and allow django's runserver
command to reload the source code, to take those changes into account. |
2,488 | def insert(conn, qualified_name: str, column_names, records):
    query = create_insert_statement(qualified_name, column_names)
    with conn:
        with conn.cursor(cursor_factory=NamedTupleCursor) as cursor:
            for record in records:
                cursor.execute(query, record) | Insert a collection of namedtuple records. |
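A hedged usage sketch for the bulk insert helper; create_insert_statement is defined elsewhere in the same module, and the DSN and table name are placeholders:

import psycopg2
from collections import namedtuple

Row = namedtuple('Row', ['id', 'name'])
conn = psycopg2.connect('dbname=example')   # placeholder connection string
insert(conn, 'public.users', Row._fields, [Row(1, 'ada'), Row(2, 'alan')])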
2,489 | def post(self, *messages):
    url = "queues/%s/messages" % self.name
    msgs = [{'body': msg} if isinstance(msg, basestring) else msg
            for msg in messages]
    data = json.dumps({'messages': msgs})
    result = self.client.post(url=url, body=data,
                              headers={'Content-Type': 'application/json'})
    return result['body'] | Executes an HTTP request to create messages on the queue.
Creates the queue if it does not already exist.
Arguments:
messages -- An array of messages to be added to the queue. |
2,490 | def withSize(cls, minimum, maximum):
    class X(cls):
        subtypeSpec = cls.subtypeSpec + constraint.ValueSizeConstraint(
            minimum, maximum)
    X.__name__ = cls.__name__
    return X | Creates a subclass with value size constraint. |
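A usage sketch with pyasn1, calling withSize exactly as shown in the excerpt and passing the target type explicitly (in the library it is likely bound as a classmethod on the type itself):

from pyasn1.type import univ

ShortOctetString = withSize(univ.OctetString, 1, 8)   # subtype limited to 1-8 octets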
2,491 | def create_prj_model(self, ):
    prjs = djadapter.projects.all()
    rootdata = treemodel.ListItemData([, , ])
    prjroot = treemodel.TreeItem(rootdata)
    for prj in prjs:
        prjdata = djitemdata.ProjectItemData(prj)
        treemodel.TreeItem(prjdata, prjroot)
    prjmodel = treemodel.TreeModel(prjroot)
    return prjmodel | Create and return a tree model that represents a list of projects
:returns: the created model
:rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
:raises: None |
2,492 | def neo(graph: BELGraph, connection: str, password: str):
    import py2neo
    neo_graph = py2neo.Graph(connection, password=password)
    to_neo4j(graph, neo_graph) | Upload to neo4j. |
2,493 | def configure (command = None, condition = None, options = None):
    rc_type = feature.get_values('<rc-type>', options)
    if rc_type:
        assert(len(rc_type) == 1)
        rc_type = rc_type[0]
    if command and condition and rc_type:
        flags('rc.compile.resource', '.RC', condition, command)
        flags('rc.compile.resource', '.RC_TYPE', condition, [rc_type.lower()])
        flags('rc.compile.resource', 'DEFINES', [], ['<define>'])
        flags('rc.compile.resource', 'INCLUDES', [], ['<include>'])
        if debug():
            print 'notice: using rc compiler ::', condition, '::', command | Configures a new resource compilation command specific to a condition,
usually a toolset selection condition. The possible options are:
* <rc-type>(rc|windres) - Indicates the type of options the command
accepts.
Even though the arguments are all optional, only when a command, condition,
and at minimum the rc-type option are given will the command be configured.
This is so that callers don't have to check auto-configuration values
before calling this, and still get the functionality of build failures when
the resource compiler can't be found. |
2,494 | def COOKIES(self):
    depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.')
    if not self._cookies:
        self._cookies = SimpleCookie()
    return self._cookies | A dict-like SimpleCookie instance. This should not be used directly.
See :meth:`set_cookie`. |
2,495 | def process(self, candidates):
    return sorted(candidates, key=attrgetter('score'), reverse=self.reverse) | :arg list candidates: list of Candidates
:returns: score-sorted list of Candidates |
2,496 | def convert_iou(pinyin):
    return IU_RE.sub(lambda m: m.group(1) + IU_MAP[m.group(2)], pinyin) | iou conversion: restore the original final.
When iou, uei, uen are preceded by an initial they are written as iu, ui, un;
for example niu (牛), gui (归), lun (论). |
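A small illustration; IU_RE and IU_MAP are module-level tables defined outside this excerpt, so the exact output depends on them, but the intent is to expand the abbreviated iu back to iou:

print(convert_iou('niu'))   # expected 'niou' given a mapping of 'iu' -> 'iou'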
2,497 | def handle_user_post_save(sender, **kwargs):
    created = kwargs.get("created", False)
    user_instance = kwargs.get("instance", None)
    if user_instance is None:
        return
    try:
        pending_ecu = PendingEnterpriseCustomerUser.objects.get(user_email=user_instance.email)
    except PendingEnterpriseCustomerUser.DoesNotExist:
        return
    if not created:
        try:
            existing_record = EnterpriseCustomerUser.objects.get(user_id=user_instance.id)
            message_template = "User {user} have changed email to match pending Enterprise Customer link, " \
                               "but was already linked to Enterprise Customer {enterprise_customer} - " \
                               "deleting pending link record"
            logger.info(message_template.format(
                user=user_instance, enterprise_customer=existing_record.enterprise_customer
            ))
            pending_ecu.delete()
            return
        except EnterpriseCustomerUser.DoesNotExist:
            pass
    enterprise_customer_user = EnterpriseCustomerUser.objects.create(
        enterprise_customer=pending_ecu.enterprise_customer,
        user_id=user_instance.id
    )
    pending_enrollments = list(pending_ecu.pendingenrollment_set.all())
    if pending_enrollments:
        def _complete_user_enrollment():
            for enrollment in pending_enrollments:
                enterprise_customer_user.enroll(
                    enrollment.course_id, enrollment.course_mode, cohort=enrollment.cohort_name)
                track_enrollment('pending-admin-enrollment', user_instance.id, enrollment.course_id)
            pending_ecu.delete()
        transaction.on_commit(_complete_user_enrollment)
    else:
        pending_ecu.delete() | Handle User model changes - checks if a pending enterprise customer user record exists and upgrades it to an actual link.
If there are pending enrollments attached to the PendingEnterpriseCustomerUser, then this signal also takes the
newly-created users and enrolls them in the relevant courses. |
2,498 | def where(self, predicate):
    if predicate is None:
        raise NullArgumentError(u"No predicate given for where clause")
    return Enumerable3(filter(predicate, self)) | Returns a new Enumerable where elements matching predicate are selected
:param predicate: predicate as a lambda expression
:return: new Enumerable object |
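A quick usage sketch in the py-enumerable style, assuming Enumerable3 accepts any iterable:

numbers = Enumerable3([1, 2, 3, 4, 5])
evens = numbers.where(lambda x: x % 2 == 0)
print(list(evens))   # [2, 4]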
2,499 | def pipeline(
    ctx,
    input_fn,
    db_save,
    db_delete,
    output_fn,
    rules,
    species,
    namespace_targets,
    version,
    api,
    config_fn,
):
    if config_fn:
        config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn)
    else:
        config = ctx.config
    if namespace_targets:
        namespace_targets = json.loads(namespace_targets)
    if rules:
        rules = rules.replace(" ", "").split(",")
    namespace_targets = utils.first_true(
        [namespace_targets, config["bel"]["lang"].get("canonical")], None
    )
    rules = utils.first_true(
        [rules, config["bel"]["nanopub"].get("pipeline_edge_rules", False)], False
    )
    api = utils.first_true(
        [api, config["bel_api"]["servers"].get("api_url", None)], None
    )
    version = utils.first_true(
        [version, config["bel"]["lang"].get("default_bel_version", None)], None
    )
    n = bnn.Nanopub()
    try:
        json_flag, jsonl_flag, yaml_flag, jgf_flag = False, False, False, False
        all_bel_edges = []
        fout = None
        if db_save or db_delete:
            if db_delete:
                arango_client = bel.db.arangodb.get_client()
                bel.db.arangodb.delete_database(arango_client, "edgestore")
            else:
                arango_client = bel.db.arangodb.get_client()
            edgestore_handle = bel.db.arangodb.get_edgestore_handle(arango_client)
        elif re.search("ya?ml", output_fn):
            yaml_flag = True
        elif "jsonl" in output_fn:
            jsonl_flag = True
        elif "json" in output_fn:
            json_flag = True
        elif "jgf" in output_fn:
            jgf_flag = True
        if db_save:
            pass
        elif "gz" in output_fn:
            fout = gzip.open(output_fn, "wt")
        else:
            fout = open(output_fn, "wt")
        nanopub_cnt = 0
        with timy.Timer() as timer:
            for np in bnf.read_nanopubs(input_fn):
                nanopub_cnt += 1
                if nanopub_cnt % 100 == 0:
                    timer.track(f"{nanopub_cnt} Nanopubs processed into Edges")
                bel_edges = n.bel_edges(
                    np,
                    namespace_targets=namespace_targets,
                    orthologize_target=species,
                    rules=rules,
                )
                if db_save:
                    bel.edge.edges.load_edges_into_db(edgestore_handle, edges=bel_edges)
                elif jsonl_flag:
                    fout.write("{}\n".format(json.dumps(bel_edges)))
                else:
                    all_bel_edges.extend(bel_edges)
        if db_save:
            pass
        elif yaml_flag:
            fout.write("{}\n".format(yaml.dumps(all_bel_edges)))
        elif json_flag:
            fout.write("{}\n".format(json.dumps(all_bel_edges)))
        elif jgf_flag:
            bnf.edges_to_jgf(output_fn, all_bel_edges)
    finally:
        if fout:
            fout.close() | BEL Pipeline - BEL Nanopubs into BEL Edges
This will process BEL Nanopubs into BEL Edges by validating, orthologizing (if requested),
canonicalizing, and then computing the BEL Edges based on the given rule_set.
\b
input_fn:
If input fn has *.gz, will read as a gzip file
If input fn has *.jsonl*, will parsed as a JSONLines file
IF input fn has *.json*, will be parsed as a JSON file
If input fn has *.yaml* or *.yml*, will be parsed as a YAML file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
If output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
If output fn has *.jgf, will be written as JSON Graph Formatted file |