Unnamed: 0 | code | docstring
---|---|---|
21,400 | def PluginTagToContent(self, plugin_name):
if plugin_name not in self._plugin_to_tag_to_content:
raise KeyError('Plugin %r could not be found.' % plugin_name)
return self._plugin_to_tag_to_content[plugin_name] | Returns a dict mapping tags to content specific to that plugin.
Args:
plugin_name: The name of the plugin for which to fetch plugin-specific
content.
Raises:
KeyError: if the plugin name is not found.
Returns:
A dict mapping tags to plugin-specific content (which are always strings).
Those strings are often serialized protos. |
21,401 | def ask_backend(self):
response = self._ask_boolean(
"Do you have a local docker daemon (on Linux), do you use docker-machine via a local machine, or do you use "
"Docker for macOS?", True)
if (response):
self._display_info("If you use docker-machine on macOS, please see "
"http://inginious.readthedocs.io/en/latest/install_doc/troubleshooting.html")
return "local"
else:
self._display_info(
"You will have to run inginious-backend and inginious-agent yourself. Please run the commands without argument "
"and/or read the documentation for more info")
return self._display_question("Please enter the address of your backend") | Ask the user to choose the backend |
21,402 | def begin_write(self, content_type=None):
assert not self.is_collection
self._check_write_access()
mode = "wb"
return open(self.absFilePath, mode, BUFFER_SIZE) | Open content as a stream for writing.
See DAVResource.begin_write() |
21,403 | def suggest_move(self, position):
start = time.time()
if self.timed_match:
while time.time() - start < self.seconds_per_move:
self.tree_search()
else:
current_readouts = self.root.N
while self.root.N < current_readouts + self.num_readouts:
self.tree_search()
if self.verbosity > 0:
dbg("%d: Searched %d times in %.2f seconds\n\n" % (
position.n, self.num_readouts, time.time() - start))
if self.verbosity > 2:
dbg(self.root.describe())
dbg()
if self.verbosity > 3:
dbg(self.root.position)
return self.pick_move() | Used for playing a single game.
For parallel play, use initialize_move, select_leaf,
incorporate_results, and pick_move |
21,404 | def to_internal_value(self, data):
if html.is_html_input(data):
data = html.parse_html_list(data)
if not isinstance(data, list):
message = self.error_messages['not_a_list'].format(
input_type=type(data).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
})
if not self.allow_empty and len(data) == 0:
message = self.error_messages['empty']
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
})
ret = []
errors = []
for item in data:
try:
validated = self.child.run_validation(item)
except ValidationError as exc:
errors.append(exc.detail)
else:
ret.append(validated)
errors.append({})
if any(errors):
raise ValidationError(errors)
return ret | List of dicts of native values <- List of dicts of primitive datatypes. |
21,405 | def _flush_tile_queue_blits(self, surface):
tw, th = self.data.tile_size
ltw = self._tile_view.left * tw
tth = self._tile_view.top * th
self.data.prepare_tiles(self._tile_view)
blit_list = [(image, (x * tw - ltw, y * th - tth)) for x, y, l, image in self._tile_queue]
surface.blits(blit_list) | Blit the queued tiles and block until the tile queue is empty
for pygame 1.9.4 + |
21,406 | def _make_load_template(self):
loader = self._make_loader()
def load_template(template_name):
return loader.load_name(template_name)
return load_template | Return a function that loads a template by name. |
21,407 | def wants(cls, *service_names):
def _decorator(cls_):
for service_name in service_names:
cls_._services_requested[service_name] = "want"
return cls_
return _decorator | A class decorator to indicate that an XBlock class wants particular services. |
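The class-decorator pattern above is easiest to see with a small, self-contained sketch. The module-level `wants`, the `Block` base class, and the service names below are illustrative stand-ins rather than the XBlock API itself.

```python
# Minimal sketch of the "wants" class-decorator pattern; names are illustrative.
def wants(*service_names):
    def _decorator(cls_):
        for service_name in service_names:
            cls_._services_requested[service_name] = "want"
        return cls_
    return _decorator

class Block(object):
    _services_requested = {}  # shared registry, fine for this demo

@wants("completion", "user")
class MyBlock(Block):
    pass

print(MyBlock._services_requested)  # {'completion': 'want', 'user': 'want'}
```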
21,408 | def multiifo_noise_coinc_rate(rates, slop):
ifos = numpy.array(sorted(rates.keys()))
rates_raw = list(rates[ifo] for ifo in ifos)
expected_coinc_rates = {}
allowed_area = multiifo_noise_coincident_area(ifos, slop)
rateprod = [numpy.prod(rs) for rs in zip(*rates_raw)]
ifostring = .join(ifos)
expected_coinc_rates[ifostring] = allowed_area * numpy.array(rateprod)
if len(ifos) > 2:
subsets = itertools.combinations(ifos, len(ifos) - 1)
for subset in subsets:
rates_subset = {}
for ifo in subset:
rates_subset[ifo] = rates[ifo]
sub_coinc_rates = multiifo_noise_coinc_rate(rates_subset, slop)
for sub_coinc in sub_coinc_rates:
expected_coinc_rates[sub_coinc] = sub_coinc_rates[sub_coinc]
return expected_coinc_rates | Calculate the expected rate of noise coincidences for multiple detectors
Parameters
----------
rates: dict
Dictionary keyed on ifo string
Value is a sequence of single-detector trigger rates, units assumed
to be Hz
slop: float
time added to maximum time-of-flight between detectors to account
for timing error
Returns
-------
expected_coinc_rates: dict
Dictionary keyed on the ifo combination string
Value is expected coincidence rate in the combination, units Hz |
21,409 | def set_env(self, key, value):
os.environ[make_env_key(self.appname, key)] = str(value)
self._registered_env_keys.add(key)
self._clear_memoization() | Sets environment variables by prepending the app_name to `key`. Also registers the
environment variable with the instance object preventing an otherwise-required call to
`reload()`. |
21,410 | def _to_dict(self):
_dict = {}
if hasattr(self, 'entity') and self.entity is not None:
_dict['entity'] = self.entity
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
if hasattr(self, 'confidence') and self.confidence is not None:
_dict['confidence'] = self.confidence
if hasattr(self, 'metadata') and self.metadata is not None:
_dict['metadata'] = self.metadata
if hasattr(self, 'groups') and self.groups is not None:
_dict['groups'] = [x._to_dict() for x in self.groups]
if hasattr(self, '_additionalProperties'):
for _key in self._additionalProperties:
_value = getattr(self, _key, None)
if _value is not None:
_dict[_key] = _value
return _dict | Return a json dictionary representing this model. |
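The `_to_dict` method above follows a common SDK serialization pattern: only attributes that exist and are not ``None`` make it into the output dict. A hedged, self-contained sketch of the same idea (class and field names are made up for illustration):

```python
class Entity(object):
    def __init__(self, entity=None, value=None, confidence=None):
        self.entity = entity
        self.value = value
        self.confidence = confidence

    def to_dict(self):
        # Include only attributes that are present and not None.
        _dict = {}
        for key in ('entity', 'value', 'confidence'):
            if hasattr(self, key) and getattr(self, key) is not None:
                _dict[key] = getattr(self, key)
        return _dict

print(Entity(entity='place', value='Paris').to_dict())
# {'entity': 'place', 'value': 'Paris'}
```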
21,411 | def stop_execution(self):
if not (self._stopping or self._stopped):
for actor in self.owner.actors:
actor.stop_execution()
self._stopping = True | Triggers the stopping of the object. |
21,412 | def explain_weights_dfs(estimator, **kwargs):
kwargs = _set_defaults(kwargs)
return format_as_dataframes(
eli5.explain_weights(estimator, **kwargs)) | Explain weights and export them to a dict with ``pandas.DataFrame``
values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does).
All keyword arguments are passed to :func:`eli5.explain_weights`.
Weights of all features are exported by default. |
21,413 | def sync_local_to_remote(force="no"):
_check_requirements()
if force != "yes":
message = "This will replace the remote database with your "\
"local , are you sure [y/n]" % (env.psql_db, env.local_psql_db)
answer = prompt(message, "y")
if answer != "y":
logger.info("Sync stopped")
return
init_tasks()
local_file = "sync_%s.sql.tar.gz" % int(time.time()*1000)
local_path = "/tmp/%s" % local_file
with context_managers.shell_env(PGPASSWORD=env.local_psql_password):
elocal("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % (
local_path, env.local_psql_user, env.local_psql_db
))
remote_path = "/tmp/%s" % local_file
put(local_path, remote_path)
with context_managers.shell_env(PGPASSWORD=env.psql_password):
env.run("pg_restore --clean -h localhost -d %s -U %s " % (
env.psql_db,
env.psql_user,
remote_path)
)
env.run("rm %s" % remote_path)
elocal("rm %s" % local_path)
run_hook("postgres.after_sync_local_to_remote")
logger.info("Sync complete") | Sync your local postgres database with remote
Example:
fabrik prod sync_local_to_remote:force=yes |
21,414 | def _serialize_object(self, response_data, request):
if self._is_doc_request(request):
return response_data
else:
return super(DocumentedResource, self)._serialize_object(
response_data, request) | Override to not serialize doc responses. |
21,415 | def count_never_executed(self):
lineno = self.firstlineno
counter = 0
for line in self.source:
if self.sourcelines.get(lineno) == 0:
if not self.blank_rx.match(line):
counter += 1
lineno += 1
return counter | Count statements that were never executed. |
21,416 | def _agl_compliant_name(glyph_name):
MAX_GLYPH_NAME_LENGTH = 63
clean_name = re.sub("[^0-9a-zA-Z_.]", "", glyph_name)
if len(clean_name) > MAX_GLYPH_NAME_LENGTH:
return None
return clean_name | Return an AGL-compliant name string or None if we can't make one. |
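A quick, standalone check of the cleaning rule above: strip characters outside ``[0-9a-zA-Z_.]`` and reject names longer than 63 characters.

```python
import re

MAX_GLYPH_NAME_LENGTH = 63

def agl_compliant_name(glyph_name):
    # Drop anything that is not alphanumeric, underscore, or period.
    clean_name = re.sub("[^0-9a-zA-Z_.]", "", glyph_name)
    if len(clean_name) > MAX_GLYPH_NAME_LENGTH:
        return None
    return clean_name

print(agl_compliant_name("uni00E9.alt-1"))  # 'uni00E9.alt1'
print(agl_compliant_name("x" * 100))        # None
```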
21,417 | def is_form_get(attr, attrs):
res = False
if attr == "action":
method = attrs.get_true("method", u"get").lower()
res = method == u"get"
return res | Check if this is a GET form action URL. |
21,418 | def print_input(i):
o=i.get('out','')
rx=dumps_json({'dict':i, 'sort_keys':'yes'})
if rx['return']>0: return rx
h=rx['string']
if o=='con': out(h)
return {'return':0, 'html':h} | Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
html - input as JSON
} |
21,419 | async def set_gs(self, mgr_addr, gs):
remote_manager = await self.env.connect(mgr_addr)
await remote_manager.set_gs(gs) | Set grid size for :py:class:`GridEnvironment` which manager is in
given address.
:param str mgr_addr: Address of the manager agent
:param gs:
New grid size of the grid environment, iterable with length 2. |
21,420 | def _parse_query(self, source):
if self.OBJECTFILTER_WORDS.search(source):
syntax_ = "objectfilter"
else:
syntax_ = None
return query.Query(source, syntax=syntax_) | Parse one of the rules as either objectfilter or dottysql.
Example:
_parse_query("5 + 5")
# Returns Sum(Literal(5), Literal(5))
Arguments:
source: A rule in either objectfilter or dottysql syntax.
Returns:
The AST to represent the rule. |
21,421 | def to_python(self, value):
if isinstance(value, GroupDescriptor):
value = value._value
result = {}
for name, field in self.fields.items():
result[name] = field.to_python(value.get(name, None))
return GroupDescriptor(result) | Convert value if needed. |
21,422 | def parse_table_definition_file(file):
logging.info("Reading table definition from ...", file)
if not os.path.isfile(file):
logging.error("File does not exist.", file)
exit(1)
try:
tableGenFile = ElementTree.ElementTree().parse(file)
except IOError as e:
logging.error(, file, e)
exit(1)
except ElementTree.ParseError as e:
logging.error(, file, e)
exit(1)
if != tableGenFile.tag:
logging.error("Table file %s is invalid: Ittable'.", file)
exit(1)
return tableGenFile | Read and parse the XML of a table-definition file.
@return: an ElementTree object for the table definition |
21,423 | def get_if_raw_addr(ifname):
try:
fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
except OSError as msg:
warning("Failed to execute ifconfig: (%s)", msg)
return b"\0\0\0\0"
addresses = [l for l in fd if l.find("inet ") >= 0]
if not addresses:
warning("No IPv4 address found on %s !", ifname)
return b"\0\0\0\0"
address = addresses[0].split()[1]
if "/" in address:
address = address.split("/")[0]
return socket.inet_pton(socket.AF_INET, address) | Returns the IPv4 address configured on 'ifname', packed with inet_pton. |
21,424 | def _parse(self, threshold):
match = re.search(r'^(@?)((~|[-\d.]+)?:)?([-\d.]+)?$', threshold)
if not match:
raise ValueError('cannot parse threshold: {}'.format(threshold))
if match.group(1) == '@':
self._inclusive = True
if match.group(3) == '~':
self._min = float('-inf')
elif match.group(3):
self._min = float(match.group(3))
else:
self._min = float(0)
if match.group(4):
self._max = float(match.group(4))
else:
self._max = float('inf')
if self._max < self._min:
raise ValueError('max must not be less than min') | internal threshold string parser
arguments:
threshold: string describing the threshold |
21,425 | def merge(root, head, update, head_source=None):
configuration = get_configuration(head, update, head_source)
conflicts = []
root, head, update = filter_records(root, head, update, filters=configuration.pre_filters)
merger = Merger(
root=root, head=head, update=update,
default_dict_merge_op=configuration.default_dict_merge_op,
default_list_merge_op=configuration.default_list_merge_op,
list_dict_ops=configuration.list_dict_ops,
list_merge_ops=configuration.list_merge_ops,
comparators=configuration.comparators,
)
try:
merger.merge()
except MergeError as e:
conflicts = e.content
conflicts = filter_conflicts(conflicts, configuration.conflict_filters)
conflicts_as_json = [json.loads(c.to_json()) for c in conflicts]
flat_conflicts_as_json = list(itertools.chain.from_iterable(conflicts_as_json))
merged = merger.merged_root
return merged, flat_conflicts_as_json | This function instantiates a ``Merger`` object using a configuration
chosen according to the ``source`` value of the head and update params.
Then it runs the merger on the three records provided in input.
Params
root(dict): the last common parent json of head and update
head(dict): the last version of a record in INSPIRE
update(dict): the update coming from outside INSPIRE to merge
head_source(string): the source of the head record. If ``None``,
heuristics are used to derive it from the metadata. This is useful
if the HEAD came from legacy and the acquisition_source does not
reflect the state of the record.
Return
A tuple containing the resulting merged record in json format and
an object containing all generated conflicts. |
21,426 | def set_control_scheme(self, index):
self._current_control_scheme = index % self._num_control_schemes
self._control_scheme_buffer[0] = self._current_control_scheme | Sets the control scheme for the agent. See :obj:`ControlSchemes`.
Args:
index (int): The control scheme to use. Should be set with an enum from :obj:`ControlSchemes`. |
21,427 | def fill_model(self, model=None):
normalized_dct = self.normalize()
if model:
if not isinstance(model, self._model_class):
raise ModelFormSecurityError('%s is not an instance of %s' % (model, self._model_class.__name__))
model.populate(**normalized_dct)
return model
return self._model_class(**normalized_dct) | Populates a model with normalized properties. If no model is provided (None) a new one will be created.
:param model: model to be populated
:return: populated model |
21,428 | def compute_sims(inputs: mx.nd.NDArray, normalize: bool) -> mx.nd.NDArray:
if normalize:
logger.info("Normalizing embeddings to unit length")
inputs = mx.nd.L2Normalization(inputs, mode='instance')
sims = mx.nd.dot(inputs, inputs, transpose_b=True)
sims_np = sims.asnumpy()
np.fill_diagonal(sims_np, -9999999.)
sims = mx.nd.array(sims_np)
return sims | Returns a matrix with pair-wise similarity scores between inputs.
Similarity score is (normalized) Euclidean distance. 'Similarity with self' is masked
to large negative value.
:param inputs: NDArray of inputs.
:param normalize: Whether to normalize to unit-length.
:return: NDArray with pairwise similarities of same shape as inputs. |
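The same idea expressed with plain NumPy, since MXNet is not needed to follow the logic: optionally L2-normalize, take pairwise dot products, and mask the diagonal so "similarity with self" never wins an argmax. This is a sketch mirroring the code above, not a drop-in replacement.

```python
import numpy as np

def pairwise_sims(inputs, normalize=True):
    x = np.asarray(inputs, dtype=np.float64)
    if normalize:
        # Unit-length rows make dot products behave like cosine similarity.
        x = x / np.linalg.norm(x, axis=1, keepdims=True)
    sims = x @ x.T
    np.fill_diagonal(sims, -9999999.0)  # mask self-similarity
    return sims

emb = np.random.rand(4, 8)
print(pairwise_sims(emb).argmax(axis=1))  # index of each row's nearest neighbour
```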
21,429 | def bounding_box(alpha, threshold=0.1):
assert alpha.ndim == 2
supp_axs = [alpha.max(axis=1-i) for i in range(2)]
bb = [np.where(supp_axs[i] > threshold)[0][[0, -1]] for i in range(2)]
return (bb[0][0], bb[1][0], bb[0][1], bb[1][1]) | Returns a bounding box of the support.
Parameters
----------
alpha : ndarray, ndim=2
Any one-channel image where the background has zero or low intensity.
threshold : float
The threshold that divides background from foreground.
Returns
-------
bounding_box : (top, left, bottom, right)
The bounding box describing the smallest rectangle containing the
foreground object, as defined by the threshold. |
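The bounding-box helper above can be checked end-to-end on a toy mask; the function is repeated below so the snippet runs standalone.

```python
import numpy as np

def bounding_box(alpha, threshold=0.1):
    assert alpha.ndim == 2
    # Project the support onto each axis, then take first/last index above threshold.
    supp_axs = [alpha.max(axis=1 - i) for i in range(2)]
    bb = [np.where(supp_axs[i] > threshold)[0][[0, -1]] for i in range(2)]
    return (bb[0][0], bb[1][0], bb[0][1], bb[1][1])

mask = np.zeros((6, 6))
mask[2:4, 1:5] = 1.0
print(bounding_box(mask))  # (2, 1, 3, 4) -> (top, left, bottom, right)
```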
21,430 | def _pack(self, msg_type, payload):
pb = payload.encode()
s = struct.pack(, len(pb), msg_type.value)
return self.MAGIC.encode() + s + pb | Packs the given message type and payload. Turns the resulting
message into a byte string. |
21,431 | def phonenumber_validation(data):
from phonenumber_field.phonenumber import to_python
phone_number = to_python(data)
if not phone_number:
return data
elif not phone_number.country_code:
raise serializers.ValidationError(_("Phone number needs to include valid country code (E.g +37255555555)."))
elif not phone_number.is_valid():
raise serializers.ValidationError(_("The phone number entered is not valid."))
return data | Validates phonenumber
Similar to phonenumber_field.validators.validate_international_phonenumber() but uses a different message if the
country prefix is absent. |
def mkdir_chown(paths, user_group=None, permissions='ug=rwX,o=rX', create_parent=True, check_if_exists=False, recursive=False):
def _generate_str(path):
mkdir_str = mkdir(path, create_parent, check_if_exists)
chown_str = chown(user_group, path, recursive) if user_group else None
chmod_str = chmod(permissions, path, recursive) if permissions else None
return ' && '.join(n for n in (mkdir_str, chown_str, chmod_str) if n)
if isinstance(paths, (tuple, list)):
return ' && '.join(_generate_str(path) for path in paths)
return _generate_str(paths) | Generates a unix command line for creating a directory and assigning permissions to it. Shortcut to a combination of
:func:`~mkdir`, :func:`~chown`, and :func:`~chmod`.
Note that if `check_if_exists` has been set to ``True``, and the directory is found, `mkdir` is not called, but
`user_group` and `permissions` are still applied.
:param paths: Can be a single path string, or a list or tuple of path strings.
:type paths: unicode | str | tuple[unicode | str] | list[unicode | str]
:param user_group: Optional owner of the directory. For notation, see :func:`~get_user_group`.
:type user_group: unicode | str | int | tuple
:param permissions: Optional permission mode, in any notation accepted by the unix `chmod` command.
Default is ``ug=rwX,o=rX``.
:type permissions: unicode | str
:param create_parent: Parent directories are created if not present (`-p` argument to `mkdir`).
:type create_parent: bool
:param check_if_exists: Prior to creating the directory, checks if it already exists.
:type check_if_exists: bool
:param recursive: Apply permissions and owner change recursively.
:type recursive: bool
:return: Unix shell command line.
:rtype: unicode | str |
21,433 | def generate_image_commands():
class ImageClient(object):
group = "image"
from spython.main.base.logger import println
from spython.main.base.command import ( init_command, run_command )
from .utils import ( compress, decompress )
from .create import create
from .importcmd import importcmd
from .export import export
ImageClient.create = create
ImageClient.imprt = importcmd
ImageClient.export = export
ImageClient.decompress = decompress
ImageClient.compress = compress
ImageClient.println = println
ImageClient.init_command = init_command
ImageClient.run_command = run_command
cli = ImageClient()
return cli | The Image client holds the Singularity image command group, mainly
deprecated commands (image.import) and additional command helpers
that are commonly used but not provided by Singularity
The levels of verbosity (debug and quiet) are passed from the main
client via the environment variable MESSAGELEVEL.
These commands are added to Client.image under main/__init__.py to
expose subcommands:
Client.image.export
Client.image.imprt
Client.image.decompress
Client.image.create |
21,434 | def sort_dict(d, desc=True):
sort = sorted(d.items(), key=lambda x: x[1], reverse=desc)
return OrderedDict(sort) | Sort an ordered dictionary by value, descending.
Args:
d (OrderedDict): An ordered dictionary.
desc (bool): If true, sort desc.
Returns:
OrderedDict: The sorted dictionary. |
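Usage sketch for the value-sorting helper above; the function is repeated so the example runs on its own.

```python
from collections import OrderedDict

def sort_dict(d, desc=True):
    return OrderedDict(sorted(d.items(), key=lambda x: x[1], reverse=desc))

counts = OrderedDict([("a", 3), ("b", 7), ("c", 1)])
print(list(sort_dict(counts).items()))      # [('b', 7), ('a', 3), ('c', 1)]
print(list(sort_dict(counts, desc=False)))  # ['c', 'a', 'b']
```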
21,435 | def find_xenon_grpc_jar():
prefix = Path(sys.prefix)
locations = [
prefix / ,
prefix / /
]
for location in locations:
jar_file = location / .format(
xenon_grpc_version)
if not jar_file.exists():
continue
else:
return str(jar_file)
return None | Find the Xenon-GRPC jar-file, windows version. |
21,436 | def asyncStarCmap(asyncCallable, iterable):
results = []
yield coopStar(asyncCallable, results.append, iterable)
returnValue(results) | itertools.starmap for deferred callables using cooperative multitasking |
21,437 | def add_acquisition_source(
self,
method,
submission_number=None,
internal_uid=None,
email=None,
orcid=None,
source=None,
datetime=None,
):
acquisition_source = self._sourced_dict(source)
acquisition_source['submission_number'] = str(submission_number)
for key in ('datetime', 'email', 'internal_uid', 'method', 'orcid'):
if locals()[key] is not None:
acquisition_source[key] = locals()[key]
self.obj['acquisition_source'] = acquisition_source | Add acquisition source.
:type submission_number: integer
:type email: integer
:type source: string
:param method: method of acquisition for the suggested document
:type method: string
:param orcid: orcid of the user that is creating the record
:type orcid: string
:param internal_uid: id of the user that is creating the record
:type internal_uid: string
:param datetime: UTC datetime in ISO 8601 format
:type datetime: string |
21,438 | def get_labels(data, centroids,K):
distances = np.sqrt(((data - centroids[:, np.newaxis])**2).sum(axis=2))
return np.argmin(distances, axis=0) | Returns a label for each piece of data in the dataset
Parameters
------------
data: array-like, shape= (m_samples,n_samples)
K: integer
number of K clusters
centroids: array-like, shape=(K, n_samples)
returns
-------------
labels: array-like, shape (1,n_samples) |
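A tiny demonstration of the nearest-centroid assignment above (the unused ``K`` argument is dropped here). Broadcasting gives a ``(K, m)`` distance matrix, and ``argmin`` over axis 0 picks the closest centroid per sample.

```python
import numpy as np

def get_labels(data, centroids):
    # (K, m) matrix of Euclidean distances via broadcasting.
    distances = np.sqrt(((data - centroids[:, np.newaxis]) ** 2).sum(axis=2))
    return np.argmin(distances, axis=0)

data = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [4.9, 5.0]])
centroids = np.array([[0.0, 0.0], [5.0, 5.0]])
print(get_labels(data, centroids))  # [0 0 1 1]
```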
21,439 | def _deserialize_data(self, json_data):
my_dict = json.loads(json_data.decode().replace(""UTF-8%Y-%m-%d%H:%M:%S%Y-%m-%d%H:%M:%S%Y-%m-%d%H:%M:%S%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.WHITELIST:
my_dict[item] = self._str_to_bool(my_dict[item])
else:
my_dict[item] = unicode(my_dict[item])
return my_dict | Deserialize a JSON into a dictionary |
21,440 | def _process_download_descriptor(self, dd):
self._update_progress_bar()
offsets, resume_bytes = dd.next_offsets()
if resume_bytes is not None:
with self._disk_operation_lock:
self._download_bytes_sofar += resume_bytes
logger.debug(.format(
resume_bytes, self._download_bytes_sofar, dd.entity.name))
del resume_bytes
if offsets is None and dd.all_operations_completed:
finalize = True
sfpath = str(dd.final_path)
dd.finalize_integrity()
if dd.entity.vectored_io is not None:
with self._transfer_lock:
if sfpath not in self._vio_map:
self._vio_map[sfpath] = 1
else:
self._vio_map[sfpath] += 1
if (self._vio_map[sfpath] ==
dd.entity.vectored_io.total_slices):
self._vio_map.pop(sfpath)
else:
finalize = False
if finalize:
dd.finalize_file()
with self._transfer_lock:
self._download_sofar += 1
if dd.entity.is_encrypted:
self._dd_map.pop(sfpath)
self._transfer_set.remove(
blobxfer.operations.download.Downloader.
create_unique_transfer_operation_id(dd.entity))
self._transfer_cc.pop(dd.entity.path, None)
return
if offsets is None:
self._transfer_queue.put(dd)
return
with self._disk_operation_lock:
self._disk_set.add(
blobxfer.operations.download.Downloader.
create_unique_disk_operation_id(dd, offsets))
with self._transfer_lock:
self._transfer_cc[dd.entity.path] += 1
cc_xfer = self._transfer_cc[dd.entity.path]
if cc_xfer <= self._spec.options.max_single_object_concurrency:
self._transfer_queue.put(dd)
if dd.entity.mode == blobxfer.models.azure.StorageModes.File:
data = blobxfer.operations.azure.file.get_file_range(
dd.entity, offsets)
else:
data = blobxfer.operations.azure.blob.get_blob_range(
dd.entity, offsets)
with self._transfer_lock:
self._transfer_cc[dd.entity.path] -= 1
if cc_xfer > self._spec.options.max_single_object_concurrency:
self._transfer_queue.put(dd)
self._disk_queue.put((dd, offsets, data)) | Process download descriptor
:param Downloader self: this
:param blobxfer.models.download.Descriptor dd: download descriptor |
21,441 | def _construct_message(self):
self.message = {"token": self._auth, "channel": self.channel}
super()._construct_message() | Set the message token/channel, then call the base class constructor. |
21,442 | def _resolve_datacenter(dc, pillarenv):
s a dict then sort it in descending order by key length and try
to use keys as RegEx patterns to match against ``pillarenv``.
The value for matched pattern should be a string (that can use
``str.format`` syntax togetehr with captured variables from pattern)
pointing to targe data center to use.
If none patterns matched return ``None`` which meanse us datacenter of
conencted Consul agent.
Resolving Consul datacenter based on: %sUsing pre-defined DC: \Selecting DC based on pillarenv using %d pattern(s)Pillarenv set to \Matched pattern: \Resolved datacenter: \None of following patterns matched pillarenv=%s: %s, '.join(repr(x) for x in mappings)
) | If ``dc`` is a string - return it as is.
If it's a dict then sort it in descending order by key length and try
to use keys as RegEx patterns to match against ``pillarenv``.
The value for matched pattern should be a string (that can use
``str.format`` syntax together with captured variables from the pattern)
pointing to the target data center to use.
If no patterns matched, return ``None``, which means to use the datacenter of
the connected Consul agent. |
21,443 | def enqueue_mod(self, dn, mod):
if dn not in self.__pending_mod_dn__:
self.__pending_mod_dn__.append(dn)
self.__mod_queue__[dn] = []
self.__mod_queue__[dn].append(mod) | Enqueue a LDAP modification.
Arguments:
dn -- the distinguished name of the object to modify
mod -- an ldap modification entry to enqueue |
21,444 | def create_custom_gradebook_column(self, course_id, column_title, column_hidden=None, column_position=None, column_teacher_notes=None):
path = {}
data = {}
params = {}
path["course_id"] = course_id
data["column[title]"] = column_title
if column_position is not None:
data["column[position]"] = column_position
if column_hidden is not None:
data["column[hidden]"] = column_hidden
if column_teacher_notes is not None:
data["column[teacher_notes]"] = column_teacher_notes
self.logger.debug("POST /api/v1/courses/{course_id}/custom_gradebook_columns with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/custom_gradebook_columns".format(**path), data=data, params=params, single_item=True) | Create a custom gradebook column.
Create a custom gradebook column |
21,445 | def get_number_of_atoms(self):
strc = self.get_output_structure()
if not strc:
return None
return Property(scalars=[Scalar(value=len(strc))], units="/unit cell") | Get the number of atoms in the calculated structure.
Returns: Property, where number of atoms is a scalar. |
21,446 | def get_preparation_cmd(user, permissions, path):
r_user = resolve_value(user)
r_permissions = resolve_value(permissions)
if user:
yield chown(r_user, path)
if permissions:
yield chmod(r_permissions, path) | Generates the command lines for adjusting a volume's ownership and permission flags. Returns an empty list if there
is nothing to adjust.
:param user: User to set ownership for on the path via ``chown``.
:type user: unicode | str | int | dockermap.functional.AbstractLazyObject
:param permissions: Permission flags to set via ``chmod``.
:type permissions: unicode | str | dockermap.functional.AbstractLazyObject
:param path: Path to adjust permissions on.
:type path: unicode | str
:return: Iterator over resulting command strings.
:rtype: collections.Iterable[unicode | str] |
21,447 | def get_config(self):
if in self.config:
self.rmq_port = int(self.config[])
if in self.config:
self.rmq_user = self.config[]
if in self.config:
self.rmq_password = self.config[]
if in self.config:
self.rmq_vhost = self.config[]
if in self.config:
self.rmq_exchange_type = self.config[]
if in self.config:
self.rmq_durable = bool(self.config[])
if in self.config:
self.rmq_heartbeat_interval = int(
self.config[]) | Get and set config options from config file |
21,448 | def setValue(self, p_float):
p_float = p_float * 100
super(PercentageSpinBox, self).setValue(p_float) | Override method to set a value to show it as 0 to 100.
:param p_float: The float number that is to be set.
:type p_float: float |
21,449 | def admin_url(obj):
if hasattr(obj, 'get_admin_url'):
return mark_safe(obj.get_admin_url())
return mark_safe(admin_url_fn(obj)) | Returns the admin URL of the object.
No permissions checking is involved, so use with caution to avoid exposing
the link to unauthorised users.
Example::
{{ foo_obj|admin_url }}
renders as::
/admin/foo/123
:param obj: A Django model instance.
:return: the admin URL of the object |
21,450 | def title(label, style=None):
fig = current_figure()
fig.title = label
if style is not None:
fig.title_style = style | Sets the title for the current figure.
Parameters
----------
label : str
The new title for the current figure.
style: dict
The CSS style to be applied to the figure title |
21,451 | def expect_column_kl_divergence_to_be_less_than(self,
column,
partition_object=None,
threshold=None,
tail_weight_holdout=0,
internal_weight_holdout=0,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
raise NotImplementedError | Expect the Kullback-Leibler (KL) divergence (relative entropy) of the specified column with respect to the \
partition object to be lower than the provided threshold.
KL divergence compares two distributions. The higher the divergence value (relative entropy), the larger the \
difference between the two distributions. A relative entropy of zero indicates that the data are \
distributed identically, `when binned according to the provided partition`.
In many practical contexts, choosing a value between 0.5 and 1 will provide a useful test.
This expectation works on both categorical and continuous partitions. See notes below for details.
expect_column_kl_divergence_to_be_less_than is a :func:`column_aggregate_expectation <great_expectations.data_asset.dataset.Dataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
partition_object (dict): \
The expected partition object (see :ref:`partition_object`).
threshold (float): \
The maximum KL divergence for which to return `success=True`. If KL divergence is larger than the\
provided threshold, the test will return `success=False`.
Keyword Args:
internal_weight_holdout (float between 0 and 1 or None): \
The amount of weight to split uniformly among zero-weighted partition bins. internal_weight_holdout \
provides a mechanism to make the test less strict by assigning positive weights to values observed in \
the data for which the partition explicitly expected zero weight. With no internal_weight_holdout, \
any value observed in such a region will cause KL divergence to rise to +Infinity.\
Defaults to 0.
tail_weight_holdout (float between 0 and 1 or None): \
The amount of weight to add to the tails of the histogram. Tail weight holdout is split evenly between\
(-Infinity, min(partition_object['bins'])) and (max(partition_object['bins']), +Infinity). \
tail_weight_holdout provides a mechanism to make the test less strict by assigning positive weights to \
values observed in the data that are not present in the partition. With no tail_weight_holdout, \
any value observed outside the provided partition_object will cause KL divergence to rise to +Infinity.\
Defaults to 0.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true KL divergence (relative entropy) or None if the value is calculated \
as infinity, -infinity, or NaN
"details": {
"observed_partition": (dict) The partition observed in the data
"expected_partition": (dict) The partition against which the data were compared,
after applying specified weight holdouts.
}
}
If the partition_object is categorical, this expectation will expect the values in column to also be \
categorical.
* If the column includes values that are not present in the partition, the tail_weight_holdout will be \
equally split among those values, providing a mechanism to weaken the strictness of the expectation \
(otherwise, relative entropy would immediately go to infinity).
* If the partition includes values that are not present in the column, the test will simply include \
zero weight for that value.
If the partition_object is continuous, this expectation will discretize the values in the column according \
to the bins specified in the partition_object, and apply the test to the resulting distribution.
* The internal_weight_holdout and tail_weight_holdout parameters provide a mechanism to weaken the \
expectation, since an expected weight of zero would drive relative entropy to be infinite if any data \
are observed in that interval.
* If internal_weight_holdout is specified, that value will be distributed equally among any intervals \
with weight zero in the partition_object.
* If tail_weight_holdout is specified, that value will be appended to the tails of the bins \
((-Infinity, min(bins)) and (max(bins), Infinity).
If relative entropy/kl divergence goes to infinity for any of the reasons mentioned above, the observed value\
will be set to None. This is because inf, -inf, Nan, are not json serializable and cause some json parsers to\
crash when encountered. The python None token will be serialized to null in json.
See also:
expect_column_chisquare_test_p_value_to_be_greater_than
expect_column_bootstrapped_ks_test_p_value_to_be_greater_than |
21,452 | def compile_pythrancode(module_name, pythrancode, specs=None,
opts=None, cpponly=False, pyonly=False,
output_file=None, module_dir=None, **kwargs):
if pyonly:
content = generate_py(module_name, pythrancode, opts, module_dir)
if output_file is None:
print(content)
return None
else:
return _write_temp(content, '.py')
from pythran.spec import spec_parser
if specs is None:
specs = spec_parser(pythrancode)
module, error_checker = generate_cxx(module_name, pythrancode, specs, opts,
module_dir)
if in kwargs.get(, []):
module.preamble.insert(0, Line())
module.preamble.insert(0, Line(.
format(sys.version_info.major)))
if cpponly:
tmp_file = _write_temp(str(module), '.cpp')
if not output_file:
output_file = module_name + ".cpp"
shutil.move(tmp_file, output_file)
logger.info("Generated C++ source file: " + output_file)
else:
try:
output_file = compile_cxxcode(module_name,
str(module),
output_binary=output_file,
**kwargs)
except CompileError:
logger.warn("Compilation error, trying hard to find its origin...")
error_checker()
logger.warn("Nop, I'm going to flood you with C++ errors!")
raise
return output_file | Pythran code (string) -> c++ code -> native module
if `cpponly` is set to true, return the generated C++ filename
if `pyonly` is set to true, prints the generated Python filename,
unless `output_file` is set
otherwise, return the generated native library filename |
21,453 | def write(self, buf):
underflow = self._audio_stream.write(buf)
if underflow:
logging.warning(,
len(buf))
return len(buf) | Write bytes to the stream. |
21,454 | def plot_fermi_surface(data, structure, cbm, energy_levels=[],
multiple_figure=True,
mlab_figure=None, kpoints_dict={}, color=(0, 0, 1),
transparency_factor=[], labels_scale_factor=0.05,
points_scale_factor=0.02, interative=True):
try:
from mayavi import mlab
except ImportError:
raise BoltztrapError(
"Mayavi package should be installed to use this function")
bz = structure.lattice.reciprocal_lattice.get_wigner_seitz_cell()
cell = structure.lattice.reciprocal_lattice.matrix
fact = 1 if cbm == False else -1
en_min = np.min(fact * data.ravel())
en_max = np.max(fact * data.ravel())
if energy_levels == []:
energy_levels = [en_min + 0.01] if cbm == True else \
[en_max - 0.01]
print("Energy level set to: " + str(energy_levels[0]) + " eV")
else:
for e in energy_levels:
if e > en_max or e < en_min:
raise BoltztrapError("energy level " + str(e) +
" not in the range of possible energies: [" +
str(en_min) + ", " + str(en_max) + "]")
if transparency_factor == []:
transparency_factor = [1] * len(energy_levels)
if mlab_figure:
fig = mlab_figure
if mlab_figure == None and not multiple_figure:
fig = mlab.figure(size=(1024, 768), bgcolor=(1, 1, 1))
for iface in range(len(bz)):
for line in itertools.combinations(bz[iface], 2):
for jface in range(len(bz)):
if iface < jface and any(np.all(line[0] == x)
for x in bz[jface]) and \
any(np.all(line[1] == x)
for x in bz[jface]):
mlab.plot3d(*zip(line[0], line[1]), color=(0, 0, 0),
tube_radius=None, figure=fig)
for label, coords in kpoints_dict.items():
label_coords = structure.lattice.reciprocal_lattice \
.get_cartesian_coords(coords)
mlab.points3d(*label_coords, scale_factor=points_scale_factor,
color=(0, 0, 0), figure=fig)
mlab.text3d(*label_coords, text=label, scale=labels_scale_factor,
color=(0, 0, 0), figure=fig)
for isolevel, alpha in zip(energy_levels, transparency_factor):
if multiple_figure:
fig = mlab.figure(size=(1024, 768), bgcolor=(1, 1, 1))
for iface in range(len(bz)):
for line in itertools.combinations(bz[iface], 2):
for jface in range(len(bz)):
if iface < jface and any(np.all(line[0] == x)
for x in bz[jface]) and \
any(np.all(line[1] == x)
for x in bz[jface]):
mlab.plot3d(*zip(line[0], line[1]), color=(0, 0, 0),
tube_radius=None, figure=fig)
for label, coords in kpoints_dict.items():
label_coords = structure.lattice.reciprocal_lattice \
.get_cartesian_coords(coords)
mlab.points3d(*label_coords, scale_factor=points_scale_factor,
color=(0, 0, 0), figure=fig)
mlab.text3d(*label_coords, text=label,
scale=labels_scale_factor, color=(0, 0, 0),
figure=fig)
cp = mlab.contour3d(fact * data, contours=[isolevel], transparent=True,
colormap=, color=color, opacity=alpha,
figure=fig)
polydata = cp.actor.actors[0].mapper.input
pts = np.array(polydata.points)
polydata.points = np.dot(pts,
cell / np.array(data.shape)[:, np.newaxis])
cx, cy, cz = [np.mean(np.array(polydata.points)[:, i])
for i in range(3)]
polydata.points = (np.array(polydata.points) - [cx, cy, cz]) * 2
fig.scene.isometric_view()
if interative == True:
mlab.show()
return fig, mlab | Plot the Fermi surface at specific energy value.
Args:
data: energy values in a 3D grid from a CUBE file
via read_cube_file function, or from a
BoltztrapAnalyzer.fermi_surface_data
structure: structure object of the material
energy_levels: list of energy value of the fermi surface.
By default 0 eV correspond to the VBM, as in
the plot of band structure along symmetry line.
Default: max energy value + 0.01 eV
cbm: Boolean value to specify if the considered band is
a conduction band or not
multiple_figure: if True a figure for each energy level will be shown.
If False all the surfaces will be shown in the same figure.
In this last case, tune the transparency factor.
mlab_figure: provide a previous figure to plot a new surface on it.
kpoints_dict: dictionary of kpoints to show in the plot.
example: {"K":[0.5,0.0,0.5]},
where the coords are fractional.
color: tuple (r,g,b) of integers to define the color of the surface.
transparency_factor: list of values in the range [0,1] to tune
the opacity of the surfaces.
labels_scale_factor: factor to tune the size of the kpoint labels
points_scale_factor: factor to tune the size of the kpoint points
interative: if True an interactive figure will be shown.
If False a non interactive figure will be shown, but
it is possible to plot other surfaces on the same figure.
To make it interactive, run mlab.show().
Returns:
a Mayavi figure and a mlab module to control the plot.
Note: Experimental.
Please, double check the surface shown by using some
other software and report issues. |
21,455 | def feature_importances(data, top_n=None, feature_names=None, ax=None):
if data is None:
raise ValueError(
)
res = compute.feature_importances(data, top_n, feature_names)
n_feats = len(res)
if ax is None:
ax = plt.gca()
ax.set_title("Feature importances")
try:
ax.bar(range(n_feats), res.importance, yerr=res.std_, color=,
align="center")
except:
ax.bar(range(n_feats), res.importance, color=,
align="center")
ax.set_xticks(range(n_feats))
ax.set_xticklabels(res.feature_name)
ax.set_xlim([-1, n_feats])
return ax | Get and order feature importances from a scikit-learn model
or from an array-like structure. If data is a scikit-learn model with
sub-estimators (e.g. RandomForest, AdaBoost) the function will compute the
standard deviation of each feature.
Parameters
----------
data : sklearn model or array-like structure
Object to get the data from.
top_n : int
Only get results for the top_n features.
feature_names : array-like
Feature names
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/feature_importances.py |
21,456 | def need_latex_rerun(self):
for pattern in LATEX_RERUN_PATTERNS:
if pattern.search(self.out):
return True
return False | Test for all rerun patterns if they match the output. |
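The rerun check above depends on a ``LATEX_RERUN_PATTERNS`` list defined elsewhere in its project. The sketch below assumes two commonly seen LaTeX warnings as example patterns; they are illustrative, not the project's actual list.

```python
import re

LATEX_RERUN_PATTERNS = [
    re.compile(r"Rerun to get cross-references right"),
    re.compile(r"There were undefined references"),
]

def need_latex_rerun(out):
    # True if any rerun pattern appears in the captured LaTeX output.
    return any(pattern.search(out) for pattern in LATEX_RERUN_PATTERNS)

log = "LaTeX Warning: There were undefined references."
print(need_latex_rerun(log))  # True
```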
21,457 | def save_model(self, directory=None, append_timestep=True):
return self.model.save(directory=directory, append_timestep=append_timestep) | Save TensorFlow model. If no checkpoint directory is given, the model's default saver
directory is used. Optionally appends current timestep to prevent overwriting previous
checkpoint files. Turn off to be able to load model from the same given path argument as
given here.
Args:
directory (str): Optional checkpoint directory.
append_timestep (bool): Appends the current timestep to the checkpoint file if true.
If this is set to True, the load path must include the checkpoint timestep suffix.
For example, if stored to models/ and set to true, the exported file will be of the
form models/model.ckpt-X where X is the last timestep saved. The load path must
precisely match this file name. If this option is turned off, the checkpoint will
always overwrite the file specified in path and the model can always be loaded under
this path.
Returns:
Checkpoint path were the model was saved. |
21,458 | def get_division(self, obj):
if self.context.get("division"):
return DivisionSerializer(self.context.get("division")).data
else:
if obj.slug == "senate":
return DivisionSerializer(obj.jurisdiction.division).data
else:
us = DivisionSerializer(obj.jurisdiction.division).data
us["children"] = [
DivisionSerializer(
state,
context={"children_level": DivisionLevel.DISTRICT},
).data
for state in obj.jurisdiction.division.children.all()
]
return us | Division. |
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'):
if not self.runner.sudo or not sudoable:
if executable:
local_cmd = [executable, '-c', cmd]
else:
local_cmd = cmd
else:
local_cmd, prompt = utils.make_sudo_cmd(sudo_user, executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.host)
p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
cwd=self.runner.basedir, executable=executable or None,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.runner.sudo and sudoable and self.runner.sudo_pass:
fcntl.fcntl(p.stdout, fcntl.F_SETFL,
fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL,
fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
sudo_output = ''
while not sudo_output.endswith(prompt):
rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
[p.stdout, p.stderr], self.runner.timeout)
if p.stdout in rfd:
chunk = p.stdout.read()
elif p.stderr in rfd:
chunk = p.stderr.read()
else:
stdout, stderr = p.communicate()
raise errors.AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output)
if not chunk:
stdout, stderr = p.communicate()
raise errors.AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output)
sudo_output += chunk
p.stdin.write(self.runner.sudo_pass + '\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr) | run a command on the local host |
21,460 | def _cho_solve_AATI(A, rho, b, c, lwr, check_finite=True):
N, M = A.shape
if N >= M:
x = (b - _cho_solve((c, lwr), b.dot(A).T,
check_finite=check_finite).T.dot(A.T)) / rho
else:
x = _cho_solve((c, lwr), b.T, check_finite=check_finite).T
return x | Patched version of :func:`sporco.linalg.cho_solve_AATI`. |
21,461 | def txid_to_block_data(txid, bitcoind_proxy, proxy=None):
proxy = get_default_proxy() if proxy is None else proxy
timeout = 1.0
while True:
try:
untrusted_tx_data = bitcoind_proxy.getrawtransaction(txid, 1)
untrusted_block_hash = untrusted_tx_data['blockhash']
untrusted_block_data = bitcoind_proxy.getblock(untrusted_block_hash)
break
except (OSError, IOError) as ie:
log.exception(ie)
log.error()
timeout = timeout * 2 + random.randint(0, timeout)
continue
except Exception as e:
log.exception(e)
return None, None, None
bitcoind_opts = get_bitcoin_opts()
spv_headers_path = bitcoind_opts[]
untrusted_block_header_hex = virtualchain.block_header_to_hex(
untrusted_block_data, untrusted_block_data[]
)
block_id = SPVClient.block_header_index(
spv_headers_path,
(.format(untrusted_block_header_hex)).decode()
)
if block_id < 0:
log.error(.format(
untrusted_block_header_hex, spv_headers_path
))
return None, None, None
verified_block_header = virtualchain.block_verify(untrusted_block_data)
if not verified_block_header:
msg = (
)
log.error(msg)
return None, None, None
verified_block_hash = virtualchain.block_header_verify(
untrusted_block_data, untrusted_block_data[], untrusted_block_hash
)
if not verified_block_hash:
log.error()
return None, None, None
block_hash = untrusted_block_hash
block_data = untrusted_block_data
tx_data = untrusted_tx_data
return block_hash, block_data, tx_data | Given a txid, get its block's data.
Use SPV to verify the information we receive from the (untrusted)
bitcoind host.
@bitcoind_proxy must be a BitcoindConnection (from virtualchain.lib.session)
Return the (block hash, block data, txdata) on success
Return (None, None, None) on error |
21,462 | async def validate(state, holdout_glob):
if not glob.glob(holdout_glob):
print(t match any files, skipping validationpython3validate.py--flagfile={}validate.flags--work_dir={}'.format(fsdb.working_dir())) | Validate the trained model against holdout games.
Args:
state: the RL loop State instance.
holdout_glob: a glob that matches holdout games. |
21,463 | def _required_child(parent, tag):
if _child(parent, tag) is None:
parent.append(_Element(tag)) | Add child element with *tag* to *parent* if it doesn't already exist. |
21,464 | def get(cls, uuid):
if not uuid:
raise ValueError("get must have a value passed as an argument")
uuid = quote(str(uuid))
url = recurly.base_uri() + (cls.member_path % (uuid,))
_resp, elem = cls.element_for_url(url)
return cls.from_element(elem) | Return a `Resource` instance of this class identified by
the given code or UUID.
Only `Resource` classes with specified `member_path` attributes
can be directly requested with this method. |
21,465 | def generate_checker(value):
@property
@wraps(can_be_)
def checker(self):
return self.can_be_(value)
return checker | Generate state checker for given value. |
21,466 | def get_metric_values(self):
group_names = self.properties.get(, None)
if not group_names:
group_names = self.manager.get_metric_values_group_names()
ret = []
for group_name in group_names:
try:
mo_val = self.manager.get_metric_values(group_name)
ret_item = (group_name, mo_val)
ret.append(ret_item)
except ValueError:
pass
return ret | Get the faked metrics, for all metric groups and all resources that
have been prepared on the manager object of this context object.
Returns:
iterable of tuple (group_name, iterable of values): The faked
metrics, in the order they had been added, where:
group_name (string): Metric group name.
values (:class:~zhmcclient.FakedMetricObjectValues`):
The metric values for one resource at one point in time. |
21,467 | def evaluate(self, x):
if not hasattr(self, '_coefficients'):
c = self.G.igft(self._kernels.evaluate(self.G.e).squeeze())
c = np.sqrt(self.G.n_vertices) * self.G.U * c[:, np.newaxis]
self._coefficients = self.G.gft(c)
shape = x.shape
x = x.flatten()
y = np.full((self.n_features_out, x.size), np.nan)
for i in range(len(x)):
query = self._coefficients[x[i] == self.G.e]
if len(query) != 0:
y[:, i] = query[0]
return y.reshape((self.n_features_out,) + shape) | TODO: will become _evaluate once polynomial filtering is merged. |
21,468 | def _detach_received(self, error):
if error:
condition = error.condition
description = error.description
info = error.info
else:
condition = b"amqp:unknown-error"
description = None
info = None
self._error = errors._process_link_error(self.error_policy, condition, description, info)
_logger.info("Received Link detach event: %r\nLink: %r\nDescription: %r"
"\nDetails: %r\nRetryable: %r\nConnection: %r",
condition, self.name, description, info, self._error.action.retry,
self._session._connection.container_id) | Callback called when a link DETACH frame is received.
This callback will process the received DETACH error to determine if
the link is recoverable or whether it should be shutdown.
:param error: The error information from the detach
frame.
:type error: ~uamqp.errors.ErrorResponse |
21,469 | def check_auth(self, username, password):
return username == self.queryname and password == self.querypw | This function is called to check if a username password combination
is valid. |
21,470 | def enable(self):
nquad = self.nquad.value()
for label, xsll, xsul, xslr, xsur, ys, nx, ny in \
zip(self.label[:nquad], self.xsll[:nquad], self.xsul[:nquad],
self.xslr[:nquad], self.xsur[:nquad], self.ys[:nquad],
self.nx[:nquad], self.ny[:nquad]):
label.config(state='normal')
for thing in (xsll, xsul, xslr, xsur, ys, nx, ny):
thing.enable()
for label, xsll, xsul, xslr, xsur, ys, nx, ny in \
zip(self.label[nquad:], self.xsll[nquad:], self.xsul[nquad:],
self.xslr[nquad:], self.xsur[nquad:], self.ys[nquad:],
self.nx[nquad:], self.ny[nquad:]):
label.config(state='disabled')
for thing in (xsll, xsul, xslr, xsur, ys, nx, ny):
thing.disable()
self.nquad.enable()
self.xbin.enable()
self.ybin.enable()
self.sbutt.enable() | Enables WinQuad setting |
21,471 | def complete_vhwa_command(self, command):
if not isinstance(command, basestring):
raise TypeError("command can only be an instance of type basestring")
self._call("completeVHWACommand",
in_p=[command]) | Signals that the Video HW Acceleration command has completed.
in command of type str
Pointer to VBOXVHWACMD containing the completed command. |
21,472 | def _run_program(self, bin, fastafile, params=None):
params = self._parse_params(params)
cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s" % (
bin,
fastafile,
params["background_model"],
params["pwmfile"],
params["width"],
params["number"],
params["outfile"],
params["strand"],
)
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
motifs = []
if os.path.exists(params["outfile"]):
with open(params["outfile"]) as f:
motifs = self.parse_out(f)
for motif in motifs:
motif.id = "%s_%s" % (self.name, motif.id)
return motifs, stdout, stderr | Run MotifSampler and predict motifs from a FASTA file.
Parameters
----------
bin : str
Command used to run the tool.
fastafile : str
Name of the FASTA input file.
params : dict, optional
Optional parameters. For some of the tools required parameters
are passed using this dictionary.
Returns
-------
motifs : list of Motif instances
The predicted motifs.
stdout : str
Standard out of the tool.
stderr : str
Standard error of the tool. |
21,473 | def iter_org_events(self, org, number=-1, etag=None):
url = ''
if org:
url = self._build_url('events', 'orgs', org, base_url=self._api)
return self._iter(int(number), url, Event, etag=etag) | Iterate over events as they appear on the user's organization
dashboard. You must be authenticated to view this.
:param str org: (required), name of the organization
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: list of :class:`Event <github3.events.Event>`\ s |
21,474 | def get_objects_dex(self):
for digest, d in self.analyzed_dex.items():
yield digest, d, self.analyzed_vms[digest] | Yields all dex objects including their Analysis objects
:returns: tuple of (sha256, DalvikVMFormat, Analysis) |
21,475 | def from_string(cls, s):
if not (s.startswith('(') and s.endswith(')')):
raise ValueError(
)
s_ = s[1:]
stack = []
deriv = None
try:
matches = cls.udf_re.finditer(s_)
for match in matches:
if match.group():
node = stack.pop()
if len(stack) == 0:
deriv = node
break
else:
stack[-1].daughters.append(node)
elif match.group():
if len(stack) == 0:
raise ValueError()
gd = match.groupdict()
term = UdfTerminal(
_unquote(gd[]),
tokens=_udf_tokens(gd.get()),
parent=stack[-1] if stack else None
)
stack[-1].daughters.append(term)
elif match.group():
gd = match.groupdict()
head = None
entity, _, type = gd[].partition()
if entity[0] == :
entity = entity[1:]
head = True
if type == :
type = None
udf = UdfNode(gd[], entity, gd[],
gd[], gd[],
head=head, type=type,
parent=stack[-1] if stack else None)
stack.append(udf)
elif match.group():
udf = UdfNode(None, match.group())
stack.append(udf)
except (ValueError, AttributeError):
raise ValueError( % s)
if stack or deriv is None:
raise ValueError(
% s)
return cls(*deriv, head=deriv._head, type=deriv.type) | Instantiate a `Derivation` from a UDF or UDX string representation.
The UDF/UDX representations are as output by a processor like the
`LKB <http://moin.delph-in.net/LkbTop>`_ or
`ACE <http://sweaglesw.org/linguistics/ace/>`_, or from the
:meth:`UdfNode.to_udf` or :meth:`UdfNode.to_udx` methods.
Args:
s (str): UDF or UDX serialization |
21,476 | def solar_position_loop(unixtime, loc_args, out):
lat = loc_args[0]
lon = loc_args[1]
elev = loc_args[2]
pressure = loc_args[3]
temp = loc_args[4]
delta_t = loc_args[5]
atmos_refract = loc_args[6]
sst = loc_args[7]
esd = loc_args[8]
for i in range(unixtime.shape[0]):
utime = unixtime[i]
jd = julian_day(utime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
R = heliocentric_radius_vector(jme)
if esd:
out[0, i] = R
continue
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
if sst:
out[0, i] = v
out[1, i] = alpha
out[2, i] = delta
continue
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha,
H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
out[0, i] = theta
out[1, i] = theta0
out[2, i] = e
out[3, i] = e0
out[4, i] = phi
out[5, i] = eot | Loop through the time array and calculate the solar position |
21,477 | def _check_input(self, X, R):
if not isinstance(X, list):
raise TypeError("Input data should be a list")
if not isinstance(R, list):
raise TypeError("Coordinates should be a list")
if len(X) < 1:
raise ValueError("Need at leat one subject to train the model.\
Got {0:d}".format(len(X)))
for idx, x in enumerate(X):
if not isinstance(x, np.ndarray):
raise TypeError("Each subject data should be an array")
if x.ndim != 2:
raise TypeError("Each subject data should be 2D array")
if not isinstance(R[idx], np.ndarray):
raise TypeError(
"Each scanner coordinate matrix should be an array")
if R[idx].ndim != 2:
raise TypeError(
"Each scanner coordinate matrix should be 2D array")
if x.shape[0] != R[idx].shape[0]:
raise TypeError(
"n_voxel should be the same in X[idx] and R[idx]")
return self | Check whether input data and coordinates in right type
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself. |
21,478 | def qteStartRecordingHook(self, msgObj):
if self.qteRecording:
self.qteMain.qteStatus()
return
self.qteRecording = True
self.qteMain.qteStatus()
self.recorded_keysequence = QtmacsKeysequence()
self.qteMain.qtesigKeyparsed.connect(self.qteKeyPress)
self.qteMain.qtesigAbort.connect(self.qteStopRecordingHook) | Commence macro recording.
Macros are recorded by connecting to the 'keypressed' signal
it emits.
If the recording has already commenced, or if this method was
called during a macro replay, then return immediately. |
21,479 | def set_continous_wave(self, enabled):
pk = CRTPPacket()
pk.set_header(CRTPPort.PLATFORM, PLATFORM_COMMAND)
pk.data = (0, enabled)
self._cf.send_packet(pk) | Enable or disable continuous-wave radio transmission on the Crazyflie by
sending a platform command packet. |
21,480 | def pick_auth(endpoint_context, areq, all=False):
acrs = []
try:
if len(endpoint_context.authn_broker) == 1:
return endpoint_context.authn_broker.default()
if "acr_values" in areq:
if not isinstance(areq["acr_values"], list):
areq["acr_values"] = [areq["acr_values"]]
acrs = areq["acr_values"]
else:
try:
acrs = areq["claims"]["id_token"]["acr"]["values"]
except KeyError:
try:
_ith = areq[verified_claim_name("id_token_hint")]
except KeyError:
try:
_hint = areq['login_hint']
except KeyError:
pass
else:
if endpoint_context.login_hint2acrs:
acrs = endpoint_context.login_hint2acrs(_hint)
else:
try:
acrs = [_ith['acr']]
except KeyError:
pass
if not acrs:
return endpoint_context.authn_broker.default()
for acr in acrs:
res = endpoint_context.authn_broker.pick(acr)
logger.debug("Picked AuthN broker for ACR %s: %s" % (
str(acr), str(res)))
if res:
if all:
return res
else:
return res[0]
except KeyError as exc:
logger.debug(
"An error occurred while picking the authN broker: %s" % str(exc))
return None | Pick authentication method
:param areq: AuthorizationRequest instance
:return: A dictionary with the authentication method and its authn class ref |
21,481 | def _get_unicode(data, force=False):
if isinstance(data, binary_type):
return data.decode()
elif data is None:
return
elif force:
if PY2:
return unicode(data)
else:
return str(data)
else:
return data | Try to return a text aka unicode object from the given data. |
21,482 | def InteractiveShell(self, cmd=None, strip_cmd=True, delim=None, strip_delim=True):
conn = self._get_service_connection(b'shell:')
return self.protocol_handler.InteractiveShellCommand(
conn, cmd=cmd, strip_cmd=strip_cmd,
delim=delim, strip_delim=strip_delim) | Get stdout from the currently open interactive shell and optionally run a command
on the device, returning all output.
Args:
cmd: Optional. Command to run on the target.
strip_cmd: Optional (default True). Strip command name from stdout.
delim: Optional. Delimiter to look for in the output to know when to stop expecting more output
(usually the shell prompt)
strip_delim: Optional (default True): Strip the provided delimiter from the output
Returns:
The stdout from the shell command. |
21,483 | def take_action(self, production_rule: str) -> 'GrammarStatelet':
left_side, right_side = production_rule.split(' -> ')
assert self._nonterminal_stack[-1] == left_side, (f"Tried to expand {self._nonterminal_stack[-1]}"
f"but got rule {left_side} -> {right_side}")
new_stack = self._nonterminal_stack[:-1]
productions = self._get_productions_from_string(right_side)
if self._reverse_productions:
productions = list(reversed(productions))
for production in productions:
if self._is_nonterminal(production):
new_stack.append(production)
return GrammarStatelet(nonterminal_stack=new_stack,
valid_actions=self._valid_actions,
is_nonterminal=self._is_nonterminal,
reverse_productions=self._reverse_productions) | Takes an action in the current grammar state, returning a new grammar state with whatever
updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS".
This will update the non-terminal stack. Updating the non-terminal stack involves popping
the non-terminal that was expanded off of the stack, then pushing on any non-terminals in
the production rule back on the stack.
For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and
``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e",
"<e,d>"]``.
If ``self._reverse_productions`` is set to ``False`` then we push the non-terminals on
in their given order, which means that the first non-terminal in the production rule gets
popped off the stack `last`. |
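A short sketch of the stack update described above; the empty valid_actions dict and the is_nonterminal predicate are stand-ins, since only the stack manipulation is being illustrated.

state = GrammarStatelet(nonterminal_stack=["r", "<e,r>", "d"],
                        valid_actions={},                    # placeholder
                        is_nonterminal=lambda token: True,   # treat every symbol as a non-terminal
                        reverse_productions=True)
new_state = state.take_action("d -> [<e,d>, e]")
# Inspecting the (private) stack just to illustrate the result described in the docstring:
assert new_state._nonterminal_stack == ["r", "<e,r>", "e", "<e,d>"]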
21,484 | def init_flatpak():
tessdata_files = glob.glob("/app/share/locale/*/*.traineddata")
if len(tessdata_files) <= 0:
return os.path.exists("/app")
localdir = os.path.expanduser("~/.local")
base_data_dir = os.getenv(
"XDG_DATA_HOME",
os.path.join(localdir, "share")
)
tessdatadir = os.path.join(base_data_dir, "paperwork", "tessdata")
logger.info("Assuming we are running in Flatpak."
" Building tessdata directory {} ...".format(tessdatadir))
util.rm_rf(tessdatadir)
util.mkdir_p(tessdatadir)
os.symlink("/app/share/tessdata/eng.traineddata",
os.path.join(tessdatadir, "eng.traineddata"))
os.symlink("/app/share/tessdata/osd.traineddata",
os.path.join(tessdatadir, "osd.traineddata"))
os.symlink("/app/share/tessdata/configs",
os.path.join(tessdatadir, "configs"))
os.symlink("/app/share/tessdata/tessconfigs",
os.path.join(tessdatadir, "tessconfigs"))
for tessdata in tessdata_files:
logger.info("{} found".format(tessdata))
os.symlink(tessdata, os.path.join(tessdatadir,
os.path.basename(tessdata)))
os.environ['TESSDATA_PREFIX'] = os.path.dirname(tessdatadir)
logger.info("Tessdata directory ready")
return True | If we are in Flatpak, we must build a tessdata/ directory using the
.traineddata files from each locale directory |
21,485 | def drop_prefix_and_return_type(function):
DELIMITERS = {
'(': ')',
'{': '}',
'[': ']',
'<': '>',
'`': "'",
}
OPEN = DELIMITERS.keys()
CLOSE = DELIMITERS.values()
# completed tokens, currently-open delimiters, and the token we're building
tokens = []
levels = []
current = []
for i, char in enumerate(function):
if char in OPEN:
levels.append(char)
current.append(char)
elif char in CLOSE:
if levels and DELIMITERS[levels[-1]] == char:
levels.pop()
current.append(char)
else:
current.append(char)
elif levels:
current.append(char)
elif char == ' ':
tokens.append(''.join(current))
current = []
else:
current.append(char)
if current:
tokens.append(''.join(current))
while len(tokens) > 1 and tokens[-1].startswith(('(', '[')):
tokens = tokens[:-2] + [' '.join(tokens[-2:])]
return tokens[-1] | Takes the function value from a frame and drops prefix and return type
For example::
static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
^ ^^^^^^ return type
prefix
This gets changes to this::
Allocator<MozJemallocBase>::malloc(unsigned __int64)
This tokenizes on space, but takes into account types, generics, traits,
function arguments, and other parts of the function signature delimited by
things like `', <>, {}, [], and () for both C/C++ and Rust.
After tokenizing, this returns the last token since that's comprised of the
function name and its arguments.
:arg function: the function value in a frame to drop bits from
:returns: adjusted function value |
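Restating the docstring's own example as a call (illustrative only):

sig = "static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)"
drop_prefix_and_return_type(sig)
# -> 'Allocator<MozJemallocBase>::malloc(unsigned __int64)'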
21,486 | def load(self):
droplets = self.get_data("droplets/%s" % self.id)
droplet = droplets['droplet']
for attr in droplet.keys():
setattr(self, attr, droplet[attr])
for net in self.networks['v4']:
if net['type'] == 'private':
self.private_ip_address = net['ip_address']
if net['type'] == 'public':
self.ip_address = net['ip_address']
if self.networks['v6']:
self.ip_v6_address = self.networks['v6'][0]['ip_address']
if "backups" in self.features:
self.backups = True
else:
self.backups = False
if "ipv6" in self.features:
self.ipv6 = True
else:
self.ipv6 = False
if "private_networking" in self.features:
self.private_networking = True
else:
self.private_networking = False
if "tags" in droplets:
self.tags = droplets["tags"]
return self | Fetch data about droplet - use this instead of get_data() |
21,487 | def _CollectHistory_(lookupType, fromVal, toVal, using={}, pattern=''):
histObj = {}
if fromVal != toVal:
histObj[lookupType] = {"from": fromVal, "to": toVal}
if lookupType in ['deriveValue', 'copyValue', 'deriveRegex'] and using != {}:
histObj[lookupType]["using"] = using
if lookupType in ['genericRegex', 'fieldSpecificRegex', 'normRegex', 'deriveRegex'] and pattern != '':
histObj[lookupType]["pattern"] = pattern
return histObj | Return a dictionary detailing what, if any, change was made to a record field
:param string lookupType: what cleaning rule made the change; one of: genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex, normLookup, normRegex, normIncludes, deriveValue, copyValue, deriveRegex
:param string fromVal: previous field value
:param string toVal: new string value
:param dict using: field values used to derive new values; only applicable for deriveValue, copyValue, deriveRegex
:param string pattern: which regex pattern was matched to make the change; only applicable for genericRegex, fieldSpecificRegex, deriveRegex |
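Two illustrative calls (the field values are made up) showing the history dict this helper returns:

_CollectHistory_('deriveValue', '', 'California', using={'stateCode': 'CA'})
# -> {'deriveValue': {'from': '', 'to': 'California', 'using': {'stateCode': 'CA'}}}
_CollectHistory_('fieldSpecificRegex', 'N.Y.', 'NY', pattern='N\\.Y\\.')
# -> {'fieldSpecificRegex': {'from': 'N.Y.', 'to': 'NY', 'pattern': 'N\\.Y\\.'}}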
21,488 | def get_direct_queue(self):
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True) | Returns a :class:`kombu.Queue` instance to be used to listen
for messages sent to this specific Actor instance |
21,489 | def plot(self, columns=None, loc=None, iloc=None, **kwargs):
from matplotlib import pyplot as plt
assert loc is None or iloc is None, "Cannot set both loc and iloc in call to .plot"
def shaded_plot(ax, x, y, y_upper, y_lower, **kwargs):
base_line, = ax.plot(x, y, drawstyle="steps-post", **kwargs)
ax.fill_between(x, y_lower, y2=y_upper, alpha=0.25, color=base_line.get_color(), linewidth=1.0, step="post")
def create_df_slicer(loc, iloc):
get_method = "loc" if loc is not None else "iloc"
if iloc is None and loc is None:
user_submitted_ix = slice(0, None)
else:
user_submitted_ix = loc if loc is not None else iloc
return lambda df: getattr(df, get_method)[user_submitted_ix]
subset_df = create_df_slicer(loc, iloc)
if not columns:
columns = self.cumulative_hazards_.columns
else:
columns = _to_list(columns)
set_kwargs_ax(kwargs)
ax = kwargs.pop("ax")
x = subset_df(self.cumulative_hazards_).index.values.astype(float)
for column in columns:
y = subset_df(self.cumulative_hazards_[column]).values
index = subset_df(self.cumulative_hazards_[column]).index
y_upper = subset_df(self.confidence_intervals_[column].loc["upper-bound"]).values
y_lower = subset_df(self.confidence_intervals_[column].loc["lower-bound"]).values
shaded_plot(ax, x, y, y_upper, y_lower, label=column, **kwargs)
plt.hlines(0, index.min() - 1, index.max(), color="k", linestyles="--", alpha=0.5)
ax.legend()
return ax | A wrapper around plotting. Matplotlib plot arguments can be passed in, plus:
Parameters
-----------
columns: string or list-like, optional
If not empty, plot a subset of columns from the ``cumulative_hazards_``. Default all.
loc, iloc: slice, optional
specify a location-based subsection of the curves to plot, ex:
``.plot(iloc=slice(0,10))`` will plot the first 10 time points. |
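A hypothetical call, assuming `aaf` (a made-up name) is a fitted model exposing this method and a coefficient column called 'age':

ax = aaf.plot(columns=['age'], iloc=slice(0, 10))   # first 10 time points of one coefficient
ax.set_title('Cumulative hazard coefficient')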
21,490 | def LightcurveHDU(model):
cards = model._mission.HDUCards(model.meta, hdu=1)
cards.append((, ))
cards.append((, ))
cards.append((, ))
cards.append((, model.mission, ))
cards.append((, EVEREST_MAJOR_MINOR, ))
cards.append((, EVEREST_VERSION, ))
cards.append((, strftime(),
))
cards.append((, model.name, ))
cards.append((, model.aperture_name, ))
cards.append((, model.bpad, ))
for c in range(len(model.breakpoints)):
cards.append(
( % (c + 1), model.breakpoints[c],
))
cards.append((, model.cbv_num, ))
cards.append((, model.cbv_niter,
))
cards.append((, model.cbv_win, ))
cards.append((, model.cbv_order, ))
cards.append((, model.cdivs, ))
cards.append((, model.cdpp, ))
cards.append((, model.cdppr, ))
cards.append((, model.cdppv, ))
cards.append((, model.cdppg, ))
for i in range(99):
try:
cards.append(( % (i + 1),
model.cdpp_arr[i] if not np.isnan(
model.cdpp_arr[i]) else 0, ))
cards.append(( % (
i + 1), model.cdppr_arr[i] if not np.isnan(
model.cdppr_arr[i]) else 0, ))
cards.append(( % (i + 1),
model.cdppv_arr[i] if not np.isnan(
model.cdppv_arr[i]) else 0, ))
except:
break
cards.append(
(, model.cv_min, ))
cards.append(
(, model.giter, ))
cards.append(
(, model.giter, ))
cards.append((, model.gp_factor,
))
cards.append((, model.kernel, ))
if model.kernel == :
cards.append(
(, model.kernel_params[0],
))
cards.append(
(, model.kernel_params[1],
))
cards.append(
(, model.kernel_params[2],
))
elif model.kernel == :
cards.append(
(, model.kernel_params[0],
))
cards.append(
(, model.kernel_params[1], ))
cards.append((, model.kernel_params[2], ))
cards.append((, model.kernel_params[3], ))
for c in range(len(model.breakpoints)):
for o in range(model.pld_order):
cards.append(( % (c + 1, o + 1),
model.lam[c][o], ))
if model.name == :
cards.append(( % (c + 1, o + 1),
model.reclam[c][o],
))
cards.append((, model.leps, ))
cards.append((, model.max_pixels, ))
for i, source in enumerate(model.nearby[:99]):
cards.append(( %
(i + 1), source[], ))
cards.append(
( % (i + 1), source[], ))
cards.append(
( % (i + 1), source[], ))
cards.append(
( % (i + 1), source[], ))
cards.append(( %
(i + 1), source[], ))
cards.append(( %
(i + 1), source[], ))
for i, n in enumerate(model.neighbors):
cards.append(
( % i, model.neighbors[i],
))
cards.append((, model.oiter, ))
cards.append((, model.optimize_gp, ))
cards.append(
(, model.osigma, ))
for i, planet in enumerate(model.planets):
cards.append(
( % (i + 1), planet[0], ))
cards.append(
( % (i + 1), planet[1], ))
cards.append(
( % (i + 1), planet[2],
))
cards.append((, model.pld_order, ))
cards.append((, model.saturated, ))
cards.append((, model.saturation_tolerance,
))
quality = np.array(model.quality)
quality[np.array(model.badmask, dtype=int)] += 2 ** (QUALITY_BAD - 1)
quality[np.array(model.nanmask, dtype=int)] += 2 ** (QUALITY_NAN - 1)
quality[np.array(model.outmask, dtype=int)] += 2 ** (QUALITY_OUT - 1)
quality[np.array(model.recmask, dtype=int)] += 2 ** (QUALITY_REC - 1)
quality[np.array(model.transitmask, dtype=int)] += 2 ** (QUALITY_TRN - 1)
return hdu | Construct the data HDU file containing the arrays and the observing info. |
21,491 | def unpack(self, buff, offset=0):
begin = offset
hexas = []
while begin < offset + 8:
number = struct.unpack("!B", buff[begin:begin+1])[0]
hexas.append("%.2x" % number)
begin += 1
self._value = ':'.join(hexas)
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error. |
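An illustrative round trip, assuming `dpid` is an instance of the (unnamed here) eight-byte identifier class this method belongs to and that the value is colon-joined as reconstructed above:

dpid.unpack(b'\x00\x00\x00\x00\x00\x00\x00\x01')
# dpid._value -> '00:00:00:00:00:00:00:01'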
21,492 | def _build_http(http=None):
if not http:
http = httplib2.Http(
timeout=HTTP_REQUEST_TIMEOUT, ca_certs=HTTPLIB_CA_BUNDLE)
user_agent = .format(
httplib2.__version__,
,
)
return set_user_agent(http, user_agent) | Construct an http client suitable for googleapiclient usage w/ user agent. |
21,493 | def kill(self) -> None:
self._proc.kill()
self._loop.run_in_executor(None, self._proc.communicate) | Kill ffmpeg job. |
21,494 | def generate_signed_url_v2(
credentials,
resource,
expiration,
api_access_endpoint="",
method="GET",
content_md5=None,
content_type=None,
response_type=None,
response_disposition=None,
generation=None,
headers=None,
query_parameters=None,
):
expiration_stamp = get_expiration_seconds_v2(expiration)
canonical = canonicalize(method, resource, query_parameters, headers)
elements_to_sign = [
canonical.method,
content_md5 or "",
content_type or "",
str(expiration_stamp),
]
elements_to_sign.extend(canonical.headers)
elements_to_sign.append(canonical.resource)
string_to_sign = "\n".join(elements_to_sign)
signed_query_params = get_signed_query_params_v2(
credentials, expiration_stamp, string_to_sign
)
if response_type is not None:
signed_query_params["response-content-type"] = response_type
if response_disposition is not None:
signed_query_params["response-content-disposition"] = response_disposition
if generation is not None:
signed_query_params["generation"] = generation
signed_query_params.update(canonical.query_parameters)
sorted_signed_query_params = sorted(signed_query_params.items())
return "{endpoint}{resource}?{querystring}".format(
endpoint=api_access_endpoint,
resource=resource,
querystring=six.moves.urllib.parse.urlencode(sorted_signed_query_params),
) | Generate a V2 signed URL to provide query-string auth'n to a resource.
.. note::
Assumes ``credentials`` implements the
:class:`google.auth.credentials.Signing` interface. Also assumes
``credentials`` has a ``service_account_email`` property which
identifies the credentials.
.. note::
If you are on Google Compute Engine, you can't generate a signed URL.
Follow `Issue 922`_ for updates on this. If you'd like to be able to
generate a signed URL from GCE, you can use a standard service account
from a JSON file rather than a GCE service account.
See headers `reference`_ for more details on optional arguments.
.. _Issue 922: https://github.com/GoogleCloudPlatform/\
google-cloud-python/issues/922
.. _reference: https://cloud.google.com/storage/docs/reference-headers
:type credentials: :class:`google.auth.credentials.Signing`
:param credentials: Credentials object with an associated private key to
sign text.
:type resource: str
:param resource: A pointer to a specific resource
(typically, ``/bucket-name/path/to/blob.txt``).
:type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
:param expiration: Point in time when the signed URL should expire.
:type api_access_endpoint: str
:param api_access_endpoint: Optional URI base. Defaults to empty string.
:type method: str
:param method: The HTTP verb that will be used when requesting the URL.
Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the
signature will additionally contain the `x-goog-resumable`
header, and the method changed to POST. See the signed URL
docs regarding this flow:
https://cloud.google.com/storage/docs/access-control/signed-urls
:type content_md5: str
:param content_md5: (Optional) The MD5 hash of the object referenced by
``resource``.
:type content_type: str
:param content_type: (Optional) The content type of the object referenced
by ``resource``.
:type response_type: str
:param response_type: (Optional) Content type of responses to requests for
the signed URL. Used to over-ride the content type of
the underlying resource.
:type response_disposition: str
:param response_disposition: (Optional) Content disposition of responses to
requests for the signed URL.
:type generation: str
:param generation: (Optional) A value that indicates which generation of
the resource to fetch.
:type headers: Union[dict|List(Tuple(str,str))]
:param headers:
(Optional) Additional HTTP headers to be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers
Requests using the signed URL *must* pass the specified header
(name and value) with each request for the URL.
:type query_parameters: dict
:param query_parameters:
(Optional) Additional query parameters to be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers#query
:raises: :exc:`TypeError` when expiration is not a valid type.
:raises: :exc:`AttributeError` if credentials is not an instance
of :class:`google.auth.credentials.Signing`.
:rtype: str
:returns: A signed URL you can use to access the resource
until expiration. |
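A hedged usage sketch; the key file, bucket, and object names are placeholders, and a timedelta is used for the expiration to sidestep any ambiguity about absolute timestamps.

import datetime
from google.oauth2 import service_account

credentials = service_account.Credentials.from_service_account_file('key.json')
url = generate_signed_url_v2(
    credentials,
    resource='/my-bucket/path/to/blob.txt',
    api_access_endpoint='https://storage.googleapis.com',
    expiration=datetime.timedelta(hours=1),
    method='GET',
    response_disposition='attachment; filename=blob.txt',
)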
21,495 | def render_field(self, obj, field_name, **options):
try:
field = obj._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(obj, field_name, '')
if hasattr(field, 'choices') and getattr(field, 'choices'):
return getattr(obj, 'get_{}_display'.format(field_name))()
value = getattr(obj, field_name, '')
renderer = self.renderers.get(type(field))
if renderer:
return renderer(value, **options)
if isinstance(value, models.BaseModel):
value = str(value)
return self.render_value(value, **options) | Render field |
21,496 | def add_dependency(self, p_from_todo, p_to_todo):
def find_next_id():
def id_exists(p_id):
for todo in self._todos:
number = str(p_id)
if todo.has_tag('id', number) or todo.has_tag('p', number):
return True
return False
new_id = 1
while id_exists(new_id):
new_id += 1
return str(new_id)
def append_projects_to_subtodo():
if config().append_parent_projects():
for project in p_from_todo.projects() - p_to_todo.projects():
self.append(p_to_todo, "+{}".format(project))
def append_contexts_to_subtodo():
if config().append_parent_contexts():
for context in p_from_todo.contexts() - p_to_todo.contexts():
self.append(p_to_todo, "@{}".format(context))
if p_from_todo != p_to_todo and not self._depgraph.has_edge(
hash(p_from_todo), hash(p_to_todo)):
dep_id = None
if p_from_todo.has_tag('id'):
dep_id = p_from_todo.tag_value('id')
else:
dep_id = find_next_id()
p_from_todo.set_tag('id', dep_id)
p_to_todo.add_tag('p', dep_id)
self._add_edge(p_from_todo, p_to_todo, dep_id)
append_projects_to_subtodo()
append_contexts_to_subtodo()
self.dirty = True | Adds a dependency from task 1 to task 2. |
21,497 | def unpack_kinesis_event(kinesis_event, deserializer=None, unpacker=None,
embed_timestamp=False):
records = kinesis_event["Records"]
events = []
shard_ids = set()
for rec in records:
data = rec["kinesis"]["data"]
try:
payload = b64decode(data)
except TypeError:
payload = b64decode(data.encode("utf-8"))
if unpacker:
payload = unpacker(payload)
shard_ids.add(rec["eventID"].split(":")[0])
try:
payload = payload.decode()
except AttributeError:
pass
if deserializer:
try:
payload = deserializer(payload)
except ValueError:
try:
payload = deserializer(payload.replace("\\", ""))
except:
logger.error("Invalid searialized payload: {}".format(
payload))
raise
if isinstance(payload, dict) and embed_timestamp:
ts = rec["kinesis"].get("approximateArrivalTimestamp")
if ts:
ts = datetime.fromtimestamp(ts, tz=tz.tzutc())
ts_str = ("{year:04d}-{month:02d}-{day:02d} "
"{hour:02d}:{minute:02d}:{second:02d}").format(
year=ts.year,
month=ts.month,
day=ts.day,
hour=ts.hour,
minute=ts.minute,
second=ts.second)
else:
ts_str = ""
payload[embed_timestamp] = ts_str
events.append(payload)
if len(shard_ids) > 1:
msg = "Kinesis event contains records from several shards: {}".format(
shard_ids)
raise(BadKinesisEventError(msg))
return events, shard_ids.pop() | Extracts events (a list of dicts) from a Kinesis event. |
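A minimal hedged example; the record below mimics the AWS Kinesis event shape the function expects, with made-up IDs and payload.

import json
from base64 import b64encode

event = {
    "Records": [{
        "eventID": "shardId-000000000000:49590338271490256608559692538361571095921",
        "kinesis": {
            "data": b64encode(json.dumps({"temp": 21.5}).encode()).decode(),
            "approximateArrivalTimestamp": 1545084650.987,
        },
    }]
}
events, shard_id = unpack_kinesis_event(event, deserializer=json.loads,
                                        embed_timestamp="received_at")
# events  -> [{'temp': 21.5, 'received_at': '2018-12-17 ...'}]
# shard_id -> 'shardId-000000000000'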
21,498 | def getstate(self):
state = "RUNNING"
exit_code = -1
exitcode_file = os.path.join(self.workdir, "exit_code")
pid_file = os.path.join(self.workdir, "pid")
if os.path.exists(exitcode_file):
with open(exitcode_file) as f:
exit_code = int(f.read())
elif os.path.exists(pid_file):
with open(pid_file, "r") as pid:
pid = int(pid.read())
try:
(_pid, exit_status) = os.waitpid(pid, os.WNOHANG)
if _pid != 0:
exit_code = exit_status >> 8
with open(exitcode_file, "w") as f:
f.write(str(exit_code))
os.unlink(pid_file)
except OSError:
os.unlink(pid_file)
exit_code = 255
if exit_code == 0:
state = "COMPLETE"
elif exit_code != -1:
state = "EXECUTOR_ERROR"
return state, exit_code | Returns RUNNING, -1
COMPLETE, 0
or
EXECUTOR_ERROR, 255 |
21,499 | def find_sample_min_std(self, Intensities):
Best_array = []
best_array_std_perc = inf
Best_array_tmp = []
Best_interpretations = {}
Best_interpretations_tmp = {}
for this_specimen in list(Intensities.keys()):
for value in Intensities[this_specimen]:
Best_interpretations_tmp[this_specimen] = value
Best_array_tmp = [value]
all_other_specimens = list(Intensities.keys())
all_other_specimens.remove(this_specimen)
for other_specimen in all_other_specimens:
closest_value = self.find_close_value(
Intensities[other_specimen], value)
Best_array_tmp.append(closest_value)
Best_interpretations_tmp[other_specimen] = closest_value
if std(Best_array_tmp, ddof=1) / mean(Best_array_tmp) < best_array_std_perc:
Best_array = Best_array_tmp
best_array_std_perc = std(
Best_array, ddof=1) / mean(Best_array_tmp)
Best_interpretations = copy.deepcopy(
Best_interpretations_tmp)
Best_interpretations_tmp = {}
return Best_interpretations, mean(Best_array), std(Best_array, ddof=1) | Find the best interpretation with the minimum standard deviation (in units of percent %). |