Unnamed: 0 | code | docstring |
---|---|---|
10,200 | def get_signature(self, req):
oss_url = url.URL(req.url)
oss_headers = [
"{0}:{1}\n".format(key, val)
for key, val in req.headers.lower_items()
if key.startswith(self.X_OSS_PREFIX)
]
canonicalized_headers = "".join(sorted(oss_headers))
logger.debug(
"canonicalized header : [{0}]".format(canonicalized_headers)
)
oss_url.params = {
key: val
for key, val in oss_url.params.items()
if key in self.SUB_RESOURCES or key in self.OVERRIDE_QUERIES
}
oss_url.forge(key=lambda x: x[0])
canonicalized_str = "{0}/{1}{2}".format(
canonicalized_headers,
self.get_bucket(oss_url.host),
oss_url.uri
)
str_to_sign = "\n".join([
req.method,
req.headers["content-md5"],
req.headers["content-type"],
req.headers["date"],
canonicalized_str
])
logger.debug(
"signature str is \n{0}\n{1}\n{0}\n".format("
)
if isinstance(str_to_sign, requests.compat.str):
str_to_sign = str_to_sign.encode("utf8")
signature_bin = hmac.new(self._secret_key, str_to_sign, hashlib.sha1)
signature = base64.b64encode(signature_bin.digest()).decode("utf8")
logger.debug("signature is [{0}]".format(signature))
return signature | calculate the signature of the oss request
Returns the signature |
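The heart of this row is the final HMAC-SHA1/Base64 step over the canonical string. A minimal, self-contained sketch of just that step; the secret key and canonical fields below are made-up illustrations, not values from the row:

```python
import base64
import hashlib
import hmac

def sign(secret_key: bytes, str_to_sign: str) -> str:
    # HMAC-SHA1 over the canonical string, then Base64-encode the digest.
    digest = hmac.new(secret_key, str_to_sign.encode("utf8"), hashlib.sha1).digest()
    return base64.b64encode(digest).decode("utf8")

# Hypothetical canonical string: method, content-md5, content-type, date, resource.
canonical = "\n".join(["GET", "", "application/octet-stream",
                       "Wed, 01 Mar 2023 00:00:00 GMT", "/my-bucket/my-object"])
print(sign(b"example-secret", canonical))
```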
10,201 | def pydeps2reqs(deps):
reqs = defaultdict(set)
for k, v in list(deps.items()):
p = v['path']  # key name assumed; string literal stripped in source
if p and not p.startswith(sys.real_prefix):
if p.startswith(sys.prefix) and 'site-packages' in p:  # substring assumed; literal stripped in source
if not p.endswith():
if in p.replace(, ):
reqs[] |= set(v[])
else:
name = k.split('.', 1)[0]
if name not in skiplist:
reqs[name] |= set(v[])
if in reqs:
del reqs[]
return '\n'.join(dep2req(name, reqs[name]) for name in sorted(reqs)) | Convert a deps instance into requirements. |
10,202 | def parse_url(url):
if url.startswith(('http', 'https', 'ftp')):  # scheme prefixes assumed; string literals stripped in source
if url.startswith('ftp'):
return urlparse.urlparse(url, scheme='ftp')
else:
return urlparse.urlparse(url)
else:
return urlparse.urlparse(urlparse.urljoin('http://', url)) | Return a clean URL. Remove the prefix for the Auth URL if Found.
:param url:
:return aurl: |
10,203 | def is_valid_regex(string):
try:
re.compile(string)
is_valid = True
except re.error:
is_valid = False
return is_valid | Checks whether the re module can compile the given regular expression.
Parameters
----------
string: str
Returns
-------
boolean |
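For reference, the check above can be exercised standalone; the function is reproduced here unchanged so the example is runnable on its own:

```python
import re

def is_valid_regex(string):
    try:
        re.compile(string)
        return True
    except re.error:
        return False

print(is_valid_regex(r"\d+[a-z]*"))  # True
print(is_valid_regex(r"(unclosed"))  # False: missing closing parenthesis
```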
10,204 | def yaml_to_str(data: Mapping) -> str:
return yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper) | Return the given config as YAML str.
:param data: configuration dict
:return: given configuration as yaml str |
10,205 | def sv_variant(store, institute_id, case_name, variant_id=None, variant_obj=None, add_case=True,
get_overlapping=True):
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
if not variant_obj:
variant_obj = store.variant(variant_id)
if add_case:
variant_case(store, case_obj, variant_obj)
variant_obj[] = [
(, variant_obj.get()),
(, variant_obj.get()),
(, variant_obj.get()),
(, variant_obj.get()),
(, variant_obj.get()),
(, variant_obj.get()),
(, variant_obj.get()),
(, variant_obj.get()),
]
variant_obj[] = callers(variant_obj, category=)
overlapping_snvs = []
if get_overlapping:
overlapping_snvs = (parse_variant(store, institute_obj, case_obj, variant) for variant in
store.overlapping(variant_obj))
for gene_obj in variant_obj[]:
if gene_obj.get():
ensembl_id = gene_obj[][]
try:
build = int(gene_obj[].get(,))
except Exception:
build = 37
gene_obj[] = ensembl(ensembl_id, build=build)
variant_obj[] = store.events(institute_obj, case=case_obj,
variant_id=variant_obj[], comments=True)
case_clinvars = store.case_to_clinVars(case_obj.get())
if variant_id in case_clinvars:
variant_obj[] = case_clinvars.get(variant_id)[]
if not in variant_obj:
variant_obj[] = variant_obj[]
return {
'institute': institute_obj,
'case': case_obj,
'variant': variant_obj,
'overlapping_snvs': overlapping_snvs,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
} | Pre-process an SV variant entry for detail page.
Adds information to display variant
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
case_name(str)
variant_id(str)
variant_obj(dict)
add_case(bool): If information about case files should be added
Returns:
detailed_information(dict): {
'institute': <institute_obj>,
'case': <case_obj>,
'variant': <variant_obj>,
'overlapping_snvs': <overlapping_snvs>,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
} |
10,206 | def z__update(self):
updates = []
for text in self._updates:
if self._AVOID_RAW_FORM:
text_repr = multiline_repr(text)
raw_char =
else:
text_repr = multiline_repr(text, RAW_MULTILINE_CHARS)
if len(text_repr) == len(text):
raw_char = if in text_repr else
else:
text_repr = multiline_repr(text)
raw_char =
quotes =
if quotes in text:
quotes = "\n\\\n\n\n \n\n'.join(lines).lstrip() | Triple quoted baseline representation.
Return string with multiple triple quoted baseline strings when
baseline had been compared multiple times against varying strings.
:returns: source file baseline replacement text
:rtype: str |
10,207 | def _set_comment(self, section, comment, key=None):
if '\n' in comment:
comment = '\n# '.join(comment.split('\n'))  # comment-prefix literals assumed; stripped in source
comment = '# ' + comment
if key:
self._comments[(section, key)] = comment
else:
self._comments[section] = comment | Set a comment for section or key
:param str section: Section to add comment to
:param str comment: Comment to add
:param str key: Key to add comment to |
10,208 | def largest_compartment_id_met(model):
candidate, second = sorted(
((c, len(metabolites_per_compartment(model, c)))
for c in model.compartments), reverse=True, key=itemgetter(1))[:2]
if candidate[1] == second[1]:
raise RuntimeError("There is a tie for the largest compartment. "
"Compartment {} and {} have equal amounts of "
"metabolites.".format(candidate[0], second[0]))
else:
return candidate[0] | Return the ID of the compartment with the most metabolites.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
string
Compartment ID of the compartment with the most metabolites. |
10,209 | def fit(self,
target_type,
target,
adjust_thickness=False,
adjust_site_atten=False,
adjust_source_vel=False):
density = self.profile.density
nl = len(density)
slowness = self.profile.slowness
thickness = self.profile.thickness
site_atten = self._site_atten
initial = slowness
bounds = 1 / np.tile((4000, 100), (nl, 1))
if not adjust_source_vel:
bounds[-1] = (initial[-1], initial[-1])
if adjust_thickness:
bounds = np.r_[bounds, [[t / 2, 2 * t] for t in thickness]]
initial = np.r_[initial, thickness]
if adjust_site_atten:
bounds = np.r_[bounds, [[0.0001, 0.200]]]
initial = np.r_[initial, self.site_atten]
def calc_rmse(this, that):
return np.mean(((this - that) / that) ** 2)
def err(x):
_slowness = x[0:nl]
if adjust_thickness:
_thickness = x[nl:(2 * nl)]
else:
_thickness = thickness
if adjust_site_atten:
self._site_atten = x[-1]
crustal_amp, site_term = self._calc_amp(density, _thickness,
_slowness)
calc = crustal_amp if target_type == 'crustal_amp' else site_term
err = 10 * calc_rmse(target, calc)
err += calc_rmse(slowness, _slowness)
if adjust_thickness:
err += calc_rmse(thickness, _thickness)
if adjust_site_atten:
err += calc_rmse(self._site_atten, site_atten)
return err
res = minimize(err, initial, method='L-BFGS-B', bounds=bounds)  # bounded method assumed; literal stripped in source
slowness = res.x[0:nl]
if adjust_thickness:
thickness = res.x[nl:(2 * nl)]
profile = Profile([
Layer(l.soil_type, t, 1 / s)
for l, t, s in zip(self.profile, thickness, slowness)
], self.profile.wt_depth)
return (self.motion, profile, self.loc_input) | Fit to a target crustal amplification or site term.
The fitting process adjusts the velocity, site attenuation, and layer
thickness (if enabled) to fit a target values. The frequency range is
specified by the input motion.
Parameters
----------
target_type: str
Options are 'crustal_amp' to only fit to the crustal amplification,
or 'site_term' to fit both the velocity and the site attenuation
parameter.
target: `array_like`
Target values.
adjust_thickness: bool (optional)
If the thickness of the layers is adjusted as well, default: False.
adjust_site_atten: bool (optional)
If the site attenuation is adjusted as well, default: False.
adjust_source_vel: bool (optional)
If the source velocity should be adjusted, default: False.
Returns
-------
profile: `pyrsa.site.Profile`
profile optimized to fit a target amplification. |
10,210 | def name(value):
if value is None:
return
for (test, name) in TESTS:
if isinstance(value, test):
return name
return | Get the string title for a particular type.
Given a value, get an appropriate string title for the type that can
be used to re-cast the value later. |
10,211 | def is_valid_intensity_measure_types(self):
if self.ground_motion_correlation_model:
for imt in self.imtls:
if not (imt.startswith('SA') or imt == 'PGA'):
raise ValueError(
'Correlation model %s does not accept IMT=%s' % (  # message text assumed; literal stripped in source
self.ground_motion_correlation_model, imt))
if self.risk_files:
return (self.intensity_measure_types is None and
self.intensity_measure_types_and_levels is None)
elif not hasattr(self, 'intensity_measure_types') and not hasattr(
self, 'intensity_measure_types_and_levels'):
return False
return True | If the IMTs and levels are extracted from the risk models,
they must not be set directly. Moreover, if
`intensity_measure_types_and_levels` is set directly,
`intensity_measure_types` must not be set. |
10,212 | def get_iam_policy(self):
instance_admin_client = self._client.instance_admin_client
resp = instance_admin_client.get_iam_policy(resource=self.name)
return Policy.from_pb(resp) | Gets the access control policy for an instance resource.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_get_iam_policy]
:end-before: [END bigtable_get_iam_policy]
:rtype: :class:`google.cloud.bigtable.policy.Policy`
:returns: The current IAM policy of this instance |
10,213 | def reorder(self, indices: mx.nd.NDArray) -> None:
if self.global_avoid_states:
self.global_avoid_states = [self.global_avoid_states[x] for x in indices.asnumpy()]
if self.local_avoid_states:
self.local_avoid_states = [self.local_avoid_states[x] for x in indices.asnumpy()] | Reorders the avoid list according to the selected row indices.
This can produce duplicates, but this is fixed if state changes occur in consume().
:param indices: An mx.nd.NDArray containing indices of hypotheses to select. |
10,214 | def create(container, portal_type, *args, **kwargs):
from bika.lims.utils import tmpID
if kwargs.get("title") is None:
kwargs["title"] = "New {}".format(portal_type)
tmp_id = tmpID()
types_tool = get_tool("portal_types")
fti = types_tool.getTypeInfo(portal_type)
if fti.product:
obj = _createObjectByType(portal_type, container, tmp_id)
else:
factory = getUtility(IFactory, fti.factory)
obj = factory(tmp_id, *args, **kwargs)
if hasattr(obj, '_setPortalTypeName'):
obj._setPortalTypeName(fti.getId())
notify(ObjectCreatedEvent(obj))
container._setObject(tmp_id, obj)
obj = container._getOb(obj.getId())
if is_at_content(obj):
obj.processForm()
obj.edit(**kwargs)
modified(obj)
return obj | Creates an object in Bika LIMS
This code uses most of the parts from the TypesTool
see: `Products.CMFCore.TypesTool._constructInstance`
:param container: container
:type container: ATContentType/DexterityContentType/CatalogBrain
:param portal_type: The portal type to create, e.g. "Client"
:type portal_type: string
:param title: The title for the new content object
:type title: string
:returns: The new created object |
10,215 | def comments(self):
import math
from .author import Author, ANONYMOUS
from .comment import Comment
api_url = Get_Answer_Comment_URL.format(self.aid)
page = pages = 1
while page <= pages:
res = self._session.get(api_url + + str(page))
if page == 1:
total = int(res.json()[][])
if total == 0:
return
pages = math.ceil(total / 30)
page += 1
comment_items = res.json()[]
for comment_item in comment_items:
comment_id = comment_item[]
content = comment_item[]
upvote_num = comment_item[]
time_string = comment_item[][:19]
time = datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S")
if comment_item[].get() is not None:
a_url = comment_item[][]
a_name = comment_item[][]
photo_url_tmp = comment_item[][][]
photo_url_id = comment_item[][][]
a_photo_url = photo_url_tmp.replace(
, photo_url_id).replace(, )
author_obj = Author(a_url, a_name, photo_url=a_photo_url,
session=self._session)
else:
author_obj = ANONYMOUS
yield Comment(comment_id, self, author_obj, upvote_num, content, time) | Get all the comments under this answer.
:return: all comments under the answer, returned as a generator
:rtype: Comments.Iterable |
10,216 | def compute_dosage(expec, alt=None):
if alt is None:
return expec[..., -1]
try:
return expec[:, alt]
except NotImplementedError:
alt = asarray(alt, int)
return asarray(expec, float)[:, alt] | Compute dosage from allele expectation.
Parameters
----------
expec : array_like
Allele expectations encoded as a samples-by-alleles matrix.
alt : array_like, optional
Alternative allele index. If ``None``, the allele having the minor
allele frequency for the provided ``expec`` is used as the alternative.
Defaults to ``None``.
Returns
-------
:class:`numpy.ndarray`
Dosage encoded as an array of size equal to the number of samples.
Examples
--------
.. code-block:: python
:caption: First a quick-start example.
>>> from bgen_reader import allele_expectation, compute_dosage
>>> from bgen_reader import example_files, read_bgen
>>>
>>> # Download an example.
>>> example = example_files("example.32bits.bgen")
>>> filepath = example.filepath
>>>
>>> # Read the example.
>>> bgen = read_bgen(filepath, verbose=False)
>>>
>>> # Extract the allele expectations of the fourth variant.
>>> variant_idx = 3
>>> e = allele_expectation(bgen, variant_idx)
>>>
>>> # Compute the dosage when considering the first allele
>>> # as the reference/alternative one.
>>> alt_allele_idx = 1
>>> d = compute_dosage(e, alt=alt_allele_idx)
>>>
>>> # Print the dosage of the first five samples only.
>>> print(d[:5])
[1.96185308 0.00982666 0.01745552 1.00347899 1.01153563]
>>>
>>> # Clean-up the example
>>> example.close()
.. code-block:: python
:caption: Genotype probabilities, allele expectations and frequencies.
>>> from bgen_reader import (
... allele_expectation,
... allele_frequency,
... compute_dosage,
... example_files,
... read_bgen,
... )
>>> from pandas import DataFrame
>>> from xarray import DataArray
>>>
>>> # Download an example
>>> example = example_files("example.32bits.bgen")
>>> filepath = example.filepath
>>>
>>> # Open the bgen file.
>>> bgen = read_bgen(filepath, verbose=False)
>>> variants = bgen["variants"]
>>> genotype = bgen["genotype"]
>>> samples = bgen["samples"]
>>>
>>> variant_idx = 3
>>> variant = variants.loc[variant_idx].compute()
>>> # Print the metadata of the fourth variant.
>>> print(variant)
id rsid chrom pos nalleles allele_ids vaddr
3 SNPID_5 RSID_5 01 5000 2 A,G 16034
>>> geno = bgen["genotype"][variant_idx].compute()
>>> metageno = DataFrame({k: geno[k] for k in ["ploidy", "missing"]},
... index=samples)
>>> metageno.index.name = "sample"
>>> print(metageno) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
ploidy missing
sample
sample_001 2 False
sample_002 2 False
sample_003 2 False
sample_004 2 False
... ... ...
sample_497 2 False
sample_498 2 False
sample_499 2 False
sample_500 2 False
<BLANKLINE>
[500 rows x 2 columns]
>>> p = DataArray(
... geno["probs"],
... name="probability",
... coords={"sample": samples},
... dims=["sample", "genotype"],
... )
>>> # Print the genotype probabilities.
>>> print(p.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
genotype 0 1 2
sample
sample_001 0.00488 0.02838 0.96674
sample_002 0.99045 0.00928 0.00027
sample_003 0.98932 0.00391 0.00677
sample_004 0.00662 0.98328 0.01010
... ... ... ...
sample_497 0.00137 0.01312 0.98550
sample_498 0.00552 0.99423 0.00024
sample_499 0.01266 0.01154 0.97580
sample_500 0.00021 0.98431 0.01547
<BLANKLINE>
[500 rows x 3 columns]
>>> alleles = variant["allele_ids"].item().split(",")
>>> e = DataArray(
... allele_expectation(bgen, variant_idx),
... name="expectation",
... coords={"sample": samples, "allele": alleles},
... dims=["sample", "allele"],
... )
>>> # Print the allele expectations.
>>> print(e.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
allele A G
sample
sample_001 0.03815 1.96185
sample_002 1.99017 0.00983
sample_003 1.98254 0.01746
sample_004 0.99652 1.00348
... ... ...
sample_497 0.01587 1.98413
sample_498 1.00528 0.99472
sample_499 0.03687 1.96313
sample_500 0.98474 1.01526
<BLANKLINE>
[500 rows x 2 columns]
>>> rsid = variant["rsid"].item()
>>> chrom = variant["chrom"].item()
>>> variant_name = f"{chrom}:{rsid}"
>>> f = DataFrame(allele_frequency(e), columns=[variant_name], index=alleles)
>>> f.index.name = "allele"
>>> # Allele frequencies.
>>> print(f) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
01:RSID_5
allele
A 305.97218
G 194.02782
>>> alt = f.idxmin().item()
>>> alt_idx = alleles.index(alt)
>>> d = compute_dosage(e, alt=alt_idx).to_series()
>>> d = DataFrame(d.values, columns=[f"alt={alt}"], index=d.index)
>>> # Dosages when considering G as the alternative allele.
>>> print(d) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
alt=G
sample
sample_001 1.96185
sample_002 0.00983
sample_003 0.01746
sample_004 1.00348
... ...
sample_497 1.98413
sample_498 0.99472
sample_499 1.96313
sample_500 1.01526
<BLANKLINE>
[500 rows x 1 columns]
>>>
>>> # Clean-up the example
>>> example.close() |
10,217 | def normalize(self, mode="max", value=1):
if mode.lower() == "sum":
factor = np.sum(self.y, axis=0)
elif mode.lower() == "max":
factor = np.max(self.y, axis=0)
else:
raise ValueError("Unsupported normalization mode %s!" % mode)
self.y /= factor / value | Normalize the spectrum with respect to the sum of intensity
Args:
mode (str): Normalization mode. Supported modes are "max" (set the
max y value to value, e.g., in XRD patterns), "sum" (set the
sum of y to a value, i.e., like a probability density).
value (float): Value to normalize to. Defaults to 1. |
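A small numeric illustration of the two normalization modes, using a made-up intensity array in place of `self.y`:

```python
import numpy as np

y = np.array([1.0, 4.0, 2.0], dtype=float)

# mode="max": scale so the largest intensity equals `value` (here 1).
print(y / (np.max(y, axis=0) / 1))   # [0.25 1.   0.5 ]

# mode="sum": scale so the intensities sum to `value` (here 1).
print(y / (np.sum(y, axis=0) / 1))   # [0.14285714 0.57142857 0.28571429]
```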
10,218 | def get_jobs(self, project, **params):
return self._get_json_list(self.JOBS_ENDPOINT, project, **params) | Gets jobs from project, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results |
10,219 | def score_cosine(self, term1, term2, **kwargs):
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde) | Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float |
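The scoring step reduces to one SciPy call. The sketch below uses two made-up density vectors in place of the `self.kde(...)` results:

```python
import numpy as np
from scipy.spatial import distance

# Stand-ins for the two kernel density estimates returned by self.kde(...).
t1_kde = np.array([0.1, 0.4, 0.3, 0.2])
t2_kde = np.array([0.2, 0.3, 0.3, 0.2])

# 1 - cosine distance == cosine similarity of the two density vectors.
score = 1 - distance.cosine(t1_kde, t2_kde)
print(score)  # close to 1.0: the two density vectors are similar
```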
10,220 | def get_config_path():
if os.environ.get(BUGWARRIORRC):
return os.environ[BUGWARRIORRC]
xdg_config_home = (
os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'))
xdg_config_dirs = (
(os.environ.get('XDG_CONFIG_DIRS') or '/etc/xdg').split(':'))  # XDG spec defaults assumed for stripped literals
paths = [
os.path.join(xdg_config_home, 'bugwarrior', 'bugwarriorc'),
os.path.expanduser("~/.bugwarriorrc")]
paths += [
os.path.join(d, 'bugwarrior', 'bugwarriorc') for d in xdg_config_dirs]
for path in paths:
if os.path.exists(path):
return path
return paths[0] | Determine the path to the config file. This will return, in this order of
precedence:
- the value of $BUGWARRIORRC if set
- $XDG_CONFIG_HOME/bugwarrior/bugwarriorc if exists
- ~/.bugwarriorrc if exists
- <dir>/bugwarrior/bugwarriorc if exists, for dir in $XDG_CONFIG_DIRS
- $XDG_CONFIG_HOME/bugwarrior/bugwarriorc otherwise |
10,221 | def get_limits(self):
if not self.limits:
self.limits = {}
for item in [self.MAX_RRSETS_BY_ZONE,
self.MAX_VPCS_ASSOCIATED_BY_ZONE]:
self.limits[item["name"]] = AwsLimit(
item["name"],
self,
item["default_limit"],
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::Route53::HostedZone',  # limit_type literal assumed; stripped in source
limit_subtype=item["name"]
)
return self.limits | Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
Limits from:
docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict |
10,222 | def _pos(self, k):
if k < 2:
raise ValueError("k smaller than 2")
G = np.zeros((self.m, self.m))
for i in range(self.m):
for j in range(self.m):
if i == j:
continue
if i < k or j < k:
continue
if i == k or j == k:
G[i][j] = 1
return G | Description:
Position k breaking
Parameters:
k: position k is used for the breaking |
10,223 | def __xd_iterator_pass_on(arr, view, fun):
iterations = [[None] if dim in view else list(range(arr.shape[dim])) for dim in range(arr.ndim)]
passon = None
for indices in itertools.product(*iterations):
slicer = [slice(None) if idx is None else slice(idx, idx + 1) for idx in indices]
passon = fun(scipy.squeeze(arr[slicer]), passon)
return passon | Like xd_iterator, but the fun return values are always passed on to the next and only the last returned. |
10,224 | def register_producer(cls, producer):
log.info(
'Registering default producer: {}'.format(producer.__class__.__name__))  # message text assumed; literal stripped in source
cls._producer = (cls._producer or producer) | Register a default producer for events to use.
:param producer: the default producer to to dispatch events on. |
10,225 | def get_trending_daily_not_starred(self):
trending_daily = self.get_trending_daily()
starred_repos = self.get_starred_repos()
repos_list = []
for repo in trending_daily:
if repo not in starred_repos:
repos_list.append(repo)
return repos_list | Gets trending repositories NOT starred by user
:return: List of daily-trending repositories which are not starred |
10,226 | def check_token(token):
user = models.User.objects(api_key=token).first()
return user or None | Verify http header token authentification |
10,227 | def loginfo(logger, msg, *args, **kwargs):
if esgfpid.defaults.LOG_INFO_TO_DEBUG:
logger.debug(msg, *args, **kwargs)
else:
logger.info(msg, *args, **kwargs) | Logs messages as INFO,
unless esgfpid.defaults.LOG_INFO_TO_DEBUG,
(then it logs messages as DEBUG). |
10,228 | def yield_figs(self, **kwargs):
yield self.plot_densities(title="PAW densities", show=False)
yield self.plot_waves(title="PAW waves", show=False)
yield self.plot_projectors(title="PAW projectors", show=False) | This function *generates* a predefined list of matplotlib figures with minimal input from the user. |
10,229 | def _get_tree_properties(root):
is_descending = True
is_ascending = True
min_node_value = root.value
max_node_value = root.value
size = 0
leaf_count = 0
min_leaf_depth = 0
max_leaf_depth = -1
is_strict = True
is_complete = True
current_nodes = [root]
non_full_node_seen = False
while len(current_nodes) > 0:
max_leaf_depth += 1
next_nodes = []
for node in current_nodes:
size += 1
value = node.value
min_node_value = min(value, min_node_value)
max_node_value = max(value, max_node_value)
if node.left is None and node.right is None:
if min_leaf_depth == 0:
min_leaf_depth = max_leaf_depth
leaf_count += 1
if node.left is not None:
if node.left.value > value:
is_descending = False
elif node.left.value < value:
is_ascending = False
next_nodes.append(node.left)
is_complete = not non_full_node_seen
else:
non_full_node_seen = True
if node.right is not None:
if node.right.value > value:
is_descending = False
elif node.right.value < value:
is_ascending = False
next_nodes.append(node.right)
is_complete = not non_full_node_seen
else:
non_full_node_seen = True
is_strict &= (node.left is None) == (node.right is None)
current_nodes = next_nodes
return {
'height': max_leaf_depth,  # key names reconstructed from the variable names; assumed
'size': size,
'is_max_heap': is_complete and is_descending,
'is_min_heap': is_complete and is_ascending,
'is_perfect': leaf_count == 2 ** max_leaf_depth,
'is_strict': is_strict,
'is_complete': is_complete,
'leaf_count': leaf_count,
'min_node_value': min_node_value,
'max_node_value': max_node_value,
'min_leaf_depth': min_leaf_depth,
'max_leaf_depth': max_leaf_depth,
} | Inspect the binary tree and return its properties (e.g. height).
:param root: Root node of the binary tree.
:type root: binarytree.Node
:return: Binary tree properties.
:rtype: dict |
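To make the level-order bookkeeping above concrete, here is a small self-contained walk that computes just two of the reported properties (leaf count and height) on a three-node tree; the `Node` class is a stand-in for the node type the row assumes:

```python
class Node:
    # Minimal stand-in for the binarytree-style node the traversal expects.
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

def leaf_count_and_height(root):
    # Level-order walk, mirroring the traversal in the row above.
    leaves, height = 0, -1
    level = [root]
    while level:
        height += 1
        nxt = []
        for node in level:
            if node.left is None and node.right is None:
                leaves += 1
            nxt.extend(n for n in (node.left, node.right) if n is not None)
        level = nxt
    return leaves, height

root = Node(2, Node(1), Node(3))
print(leaf_count_and_height(root))  # (2, 1)
```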
10,230 | def embed(self, rel, other, wrap=False):
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap) | Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents. |
10,231 | def config(self, kw=None, **kwargs):
themebg = kwargs.pop("themebg", self._themebg)
toplevel = kwargs.pop("toplevel", self._toplevel)
theme = kwargs.pop("theme", self.current_theme)
color = self._get_bg_color()
if themebg != self._themebg:
if themebg is False:
self.configure(bg="white")
else:
self.configure(bg=color)
self._themebg = themebg
if toplevel != self._toplevel:
if toplevel is True:
self._setup_toplevel_hook(color)
else:
tk.Toplevel.__init__ = self.__init__toplevel
self._toplevel = toplevel
if theme != self.current_theme:
self.set_theme(theme)
return tk.Tk.config(self, kw, **kwargs) | configure redirect to support additional options |
10,232 | def CopyRecord(record, **field_overrides):
fields = field_overrides
for field in record.__slots__:
if field in field_overrides:
continue
value = getattr(record, field)
if isinstance(value, RecordClass):
new_value = CopyRecord(value)
else:
new_value = copy.copy(value)
fields[field] = new_value
return type(record)(**fields) | Copies a record and its fields, recurses for any field that is a Record.
For records that have nested mutable fields, use copy.deepcopy.
Args:
record: A Record instance to be copied.
**field_overrides: Fields and their values to override in the new copy.
Returns: A copy of the given record with any fields overridden. |
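A simplified, self-contained version of the same idea (without the `RecordClass` recursion), using a hypothetical `Point` record to show how slot values and overrides combine:

```python
import copy

class Point:
    # Minimal stand-in for a Record-style class that uses __slots__.
    __slots__ = ("x", "y")
    def __init__(self, x, y):
        self.x = x
        self.y = y

def copy_record(record, **field_overrides):
    # Shallow-copy every slot unless an override is supplied.
    fields = dict(field_overrides)
    for field in record.__slots__:
        if field not in fields:
            fields[field] = copy.copy(getattr(record, field))
    return type(record)(**fields)

p = Point(1, 2)
q = copy_record(p, y=5)
print(q.x, q.y)  # 1 5
```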
10,233 | def get_child_bin_ids(self, bin_id):
if self._catalog_session is not None:
return self._catalog_session.get_child_catalog_ids(catalog_id=bin_id)
return self._hierarchy_session.get_children(id_=bin_id) | Gets the child ``Ids`` of the given bin.
arg: bin_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the bin
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
10,234 | def table(
self,
dirPath=None):
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles | *Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files |
10,235 | def fmt_repr(obj):
items = ["%s = %r" % (k, v) for k, v in list(exclude_fields(obj).items())]
return "<%s: {%s}>" % (obj.__class__.__name__, .join(items)) | Print a orphaned string representation of an object without the
clutter of its parent object. |
10,236 | def _shape_text(self, text, colsep=u"\t", rowsep=u"\n",
transpose=False, skiprows=0, comments='#'):
assert colsep != rowsep
out = []
text_rows = text.split(rowsep)[skiprows:]
for row in text_rows:
stripped = to_text_string(row).strip()
if len(stripped) == 0 or stripped.startswith(comments):
continue
line = to_text_string(row).split(colsep)
line = [try_to_parse(to_text_string(x)) for x in line]
out.append(line)
if programs.is_module_installed('numpy'):
from numpy import nan
out = list(zip_longest(*out, fillvalue=nan))
else:
out = list(zip_longest(*out, fillvalue=None))
out = [[r[col] for r in out] for col in range(len(out[0]))]
if transpose:
return [[r[col] for r in out] for col in range(len(out[0]))]
return out | Decode the shape of the given text |
10,237 | def open_resource(self, filename, mode='r'):
assert self.current_run is not None, "Can only be called during a run."
return self.current_run.open_resource(filename, mode) | Open a file and also save it as a resource.
Opens a file, reports it to the observers as a resource, and returns
the opened file.
In Sacred terminology a resource is a file that the experiment needed
to access during a run. In case of a MongoObserver that means making
sure the file is stored in the database (but avoiding duplicates) along
its path and md5 sum.
This function can only be called during a run, and just calls the
:py:meth:`sacred.run.Run.open_resource` method.
Parameters
----------
filename: str
name of the file that should be opened
mode : str
mode that file will be open
Returns
-------
file
the opened file-object |
10,238 | def argmin(self, values):
keys, minima = self.min(values)
minima = minima[self.inverse]
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]] | return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group |
10,239 | def _validate(self):
if self.tpid.value not in (EtherType.VLAN, EtherType.VLAN_QINQ):
raise UnpackException
return | Assure this is a valid VLAN header instance. |
10,240 | def answering_questions(self, attempt, validation_token, quiz_submission_id, access_code=None, quiz_questions=None):
path = {}
data = {}
params = {}
path["quiz_submission_id"] = quiz_submission_id
data["attempt"] = attempt
data["validation_token"] = validation_token
if access_code is not None:
data["access_code"] = access_code
if quiz_questions is not None:
data["quiz_questions"] = quiz_questions
self.logger.debug("POST /api/v1/quiz_submissions/{quiz_submission_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/quiz_submissions/{quiz_submission_id}/questions".format(**path), data=data, params=params, all_pages=True) | Answering questions.
Provide or update an answer to one or more QuizQuestions. |
10,241 | def session_from_client_config(client_config, scopes, **kwargs):
if 'web' in client_config:
config = client_config['web']
elif 'installed' in client_config:
config = client_config['installed']
else:
raise ValueError(
'Client secrets must be for a web or installed app.')  # message text assumed
if not _REQUIRED_CONFIG_KEYS.issubset(config.keys()):
raise ValueError('Client secrets is not in the correct format.')  # message text assumed
session = requests_oauthlib.OAuth2Session(
client_id=config['client_id'],
scope=scopes,
**kwargs)
return session, client_config | Creates a :class:`requests_oauthlib.OAuth2Session` from client
configuration loaded from a Google-format client secrets file.
Args:
client_config (Mapping[str, Any]): The client
configuration in the Google `client secrets`_ format.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Raises:
ValueError: If the client configuration is not in the correct
format.
Returns:
Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new
oauthlib session and the validated client configuration.
.. _client secrets:
https://developers.google.com/api-client-library/python/guide
/aaa_client_secrets |
10,242 | def is_checked(self) -> bool:
if not self.redis_key_checked:
return False
value = self._red.get(self.redis_key_checked)
if not value:
return False
return True | One task ran (checked). |
10,243 | def get_sla_template_path(service_type=ServiceTypes.ASSET_ACCESS):
if service_type == ServiceTypes.ASSET_ACCESS:
name =
elif service_type == ServiceTypes.CLOUD_COMPUTE:
name =
elif service_type == ServiceTypes.FITCHAIN_COMPUTE:
name =
else:
raise ValueError(f'Invalid/unsupported service type {service_type}')  # message text assumed; literal stripped in source
return os.path.join(os.path.sep, *os.path.realpath(__file__).split(os.path.sep)[1:-1], name) | Get the template for a ServiceType.
:param service_type: ServiceTypes
:return: Path of the template, str |
10,244 | def get_aside(self, aside_usage_id):
aside_type = self.id_reader.get_aside_type_from_usage(aside_usage_id)
xblock_usage = self.id_reader.get_usage_id_from_aside(aside_usage_id)
xblock_def = self.id_reader.get_definition_id(xblock_usage)
aside_def_id, aside_usage_id = self.id_generator.create_aside(xblock_def, xblock_usage, aside_type)
keys = ScopeIds(self.user_id, aside_type, aside_def_id, aside_usage_id)
block = self.create_aside(aside_type, keys)
return block | Create an XBlockAside in this runtime.
The `aside_usage_id` is used to find the Aside class and data. |
10,245 | def unstash_index(self, sync=False, branch=None):
stash_list = self.git_exec(['stash', 'list'], no_verbose=True)
if branch is None:
branch = self.get_current_branch_name()
for stash in stash_list.splitlines():
verb = if sync else
if (
(( in stash) and
(.format(branch) in stash) and
(verb in stash)
) or
(( in stash) and
(.format(branch) in stash) and
(verb in stash)
)
):
return stash[7] | Returns an unstash index if one is available. |
10,246 | def flush_headers(self, sync: bool = False) -> None:
if self._headers_sent:
return
self._headers_sent = True
self.handel_default()
self.write(
b"HTTP/%s %d %s\r\n" % (
encode_str(self._version),
self._status,
self._message,
),
sync,
)
for name, value in self._headers.items():
name_byte = encode_str(name)
if isinstance(value, list):
for val in value:
self.write(
b"%s: %s\r\n" % (
name_byte,
encode_str(val),
),
sync,
)
else:
val = value
self.write(
b"%s: %s\r\n" % (
name_byte,
encode_str(value),
),
sync,
)
self.write(b"\r\n", sync) | 通过异步写入 header |
10,247 | def runSearchRnaQuantificationSets(self, request):
return self.runSearchRequest(
request, protocol.SearchRnaQuantificationSetsRequest,
protocol.SearchRnaQuantificationSetsResponse,
self.rnaQuantificationSetsGenerator) | Returns a SearchRnaQuantificationSetsResponse for the specified
SearchRnaQuantificationSetsRequest object. |
10,248 | def scaffold_hits(searches, fasta, max_hits):
scaffolds = {}
for seq in parse_fasta(fasta):
scaffold = seq[0].split()[0].split('>', 1)[1].rsplit('_', 1)[0]  # delimiters assumed; literals stripped in source
if scaffold not in scaffolds:
scaffolds[scaffold] = 0
scaffolds[scaffold] += 1
s2rp = {s: {r[0]: []
for r in searches}
for s in scaffolds}
for search in searches:
rp, blast = search
hits = [i for i in numblast(open(blast), max_hits, evalue_thresh, bit_thresh)]
for hit in hits:
s = hit[0].split()[0].rsplit('_', 1)[0]  # delimiter assumed; literal stripped in source
hit[10], hit[11] = float(hit[10]), float(hit[11])
s2rp[s][rp].append(hit)
return scaffolds, s2rp | get hits from each search against each RP
scaffolds[scaffold] = # ORfs
s2rp[scaffold] = {rp:[hits]} |
10,249 | def querysets_from_title_prefix(title_prefix=None, model=DEFAULT_MODEL, app=DEFAULT_APP):
if title_prefix is None:
title_prefix = [None]
filter_dicts = []
model_list = []
if isinstance(title_prefix, basestring):
title_prefix = title_prefix.split()
elif not isinstance(title_prefix, dict):
title_prefix = title_prefix
if isinstance(title_prefix, (list, tuple)):
for i, title_prefix in enumerate(title_prefix):
if isinstance(title_prefix, basestring):
if title_prefix.lower().endswith():
title_prefix = title_prefix[:-5].strip()
title_prefix += [title_prefix]
model_list += []
else:
model_list += [DEFAULT_MODEL]
filter_dicts += [{: title_prefix}]
elif isinstance(title_prefix, dict):
filter_dicts = [title_prefix]
elif isinstance(title_prefix, (list, tuple)):
filter_dicts = util.listify(title_prefix)
model = get_model(model, app)
querysets = []
for filter_dict, model in zip(filter_dicts, model_list):
filter_dict = filter_dict or {}
querysets += [model.objects.filter(**filter_dict)] | Return a list of Querysets from a list of model numbers |
10,250 | def iodp_kly4s_lore(kly4s_file, meas_out=,
spec_infile=, spec_out=, instrument=,
actual_volume="",dir_path=, input_dir_path=):
version_num = pmag.get_version()
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
meas_reqd_columns=[,,,,,,\
,,\
,,,\
,,,\
,,,,\
,,,\
,,]
spec_reqd_columns=[,,,,,\
,,\
,,\
,,,\
,,]
kly4s_file = pmag.resolve_file_name(kly4s_file, input_dir_path)
spec_out = pmag.resolve_file_name(spec_out, dir_path)
spec_file=pmag.resolve_file_name(spec_infile, dir_path)
meas_out = pmag.resolve_file_name(meas_out, dir_path)
specs=pd.read_csv(spec_file,sep=,header=1)
if len(specs)==0:
print ()
print ()
return False
LORE_specimens=list(specs.specimen.unique())
in_df=pd.read_csv(kly4s_file)
if len(in_df)==0:
print ()
return False
measurements_df=pd.DataFrame(columns=meas_reqd_columns)
specimens_df=pd.DataFrame(columns=spec_reqd_columns)
hole,kly4s_specimens=iodp_sample_names(in_df)
for spec in list(kly4s_specimens.unique()):
if spec not in LORE_specimens:
print (spec, )
specimens_df[]=kly4s_specimens
specimens_df[]=kly4s_specimens
specimens_df[]=
specimens_df[]=
specimens_df[]=
specimens_df[]=0
specimens_df[]=0
specimens_df[]=192
specimens_df[]=0
specimens_df[]=
specimens_df[]=
specimens_df[]= "LP-X:AE-H:LP-AN-MS"
specimens_df[]=specimens_df[].astype()++ "LP-AN-MS"
measurements_df[]=kly4s_specimens
measurements_df[]=
measurements_df[]=
measurements_df[]=273
measurements_df[]=version_num
measurements_df["treat_temp"] = % (273)
measurements_df["meas_temp"] = % (273)
measurements_df["treat_ac_field"] =
measurements_df["treat_dc_field"] =
measurements_df["treat_dc_field_phi"] =
measurements_df["treat_dc_field_theta"] =
measurements_df["treat_step_num"] =
measurements_df["standard"] =
measurements_df[]="IODP-KLY4S"
measurements_df[]=
measurements_df[]=
measurements_df[]=measurements_df[].astype()++\
measurements_df[].astype()
meas_num=range(len(kly4s_specimens))
measurements_df[]=meas_num
measurements_df[]=measurements_df[].astype()++\
measurements_df[].astype()
nominal_volume=in_df[]*1e-6
if actual_volume:
actual_volume=(1e-6*actual_volume)
factor=nominal_volume/actual_volume
else:
actual_volume=nominal_volume
factor=1
measurements_df[]=in_df[]*factor
measurements_df[]=+in_df[].astype()+
specimens_df[]=in_df[]
specimens_df[]=actual_volume
s1=in_df[]
s2=in_df[]
s3=in_df[]
s4=in_df[]
s5=in_df[]
s6=in_df[]
if in in_df.columns:
specimens_df[]=in_df[]
specimens_df[] = s1.astype()++ s2.astype()++s3.astype()++\
s4.astype()++ s5.astype()++ s6.astype()
tau1=in_df[]/3
v1_dec=in_df[]
v1_inc=in_df[]
specimens_df[]=tau1.astype()+":"+v1_dec.astype()+":"+v1_inc.astype()
tau2=in_df[]/3
v2_dec=in_df[]
v2_inc=in_df[]
specimens_df[]=tau2.astype()+":"+v2_dec.astype()+":"+v2_inc.astype()
tau3=in_df[]/3
v3_dec=in_df[]
v3_inc=in_df[]
specimens_df[]=tau3.astype()+":"+v3_dec.astype()+":"+v3_inc.astype()
measurements_df.fillna("",inplace=True)
meas_dicts = measurements_df.to_dict()
pmag.magic_write(meas_out, meas_dicts, )
specimens_df.fillna("",inplace=True)
spec_dicts = specimens_df.to_dict()
pmag.magic_write(spec_out, spec_dicts, )
return True | Converts ascii files generated by SUFAR ver.4.0 and downloaded from the LIMS online
repository to MagIC (datamodel 3) files
Parameters
----------
kly4s_file : str
input LORE downloaded csv file, required
meas_output : str
measurement output filename, default "measurements.txt"
spec_infile : str
specimen infile, default specimens.txt
[file created by iodp_samples_csv from LORE downloaded sample file]
spec_outfile : str
specimen outfile, default "kly4s_specimens.txt"
instrument : str
instrument name, default ""
actual_volume : float
the nominal volume is assumed to be 8cc or even 10cc, depending on the shipboard
software used, actual_vol is the actual specimen volume in cc
dir_path : str
output directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
Returns
--------
type - Tuple : (True or False indicating if conversion was sucessful, file name written) |
10,251 | def list(self):
url = "api/v0002/mgmt/custom/bundle"
r = self._apiClient.get(url)
if r.status_code == 200:
return r.json()
else:
raise ApiException(r) | List all device management extension packages |
10,252 | def sort_response(response: Dict[str, Any]) -> OrderedDict:
root_order = ["jsonrpc", "result", "error", "id"]
error_order = ["code", "message", "data"]
req = OrderedDict(sorted(response.items(), key=lambda k: root_order.index(k[0])))
if "error" in response:
req["error"] = OrderedDict(
sorted(response["error"].items(), key=lambda k: error_order.index(k[0]))
)
return req | Sort the keys in a JSON-RPC response object.
This has no effect other than making it nicer to read. Useful in Python 3.5 only,
dictionaries are already sorted in newer Python versions.
Example::
>>> json.dumps(sort_response({'id': 2, 'result': 5, 'jsonrpc': '2.0'}))
{"jsonrpc": "2.0", "result": 5, "id": 1}
Args:
response: Deserialized JSON-RPC response.
Returns:
The same response, sorted in an OrderedDict. |
10,253 | def register_handler(self, name, handler, esc_strings):
self._handlers[name] = handler
for esc_str in esc_strings:
self._esc_handlers[esc_str] = handler | Register a handler instance by name with esc_strings. |
10,254 | def finalize(self, process_row = None):
if process_row is not None:
process_id = process_row.process_id
elif self.process is not None:
process_id = self.process.process_id
else:
raise ValueError("must supply a process row to .__init__()")
self.segment_def_table.sync_next_id()
self.segment_table.sync_next_id()
self.segment_sum_table.sync_next_id()
self.sort()
def row_generator(segs, target_table, process_id, segment_def_id):
id_column = target_table.next_id.column_name
for seg in segs:
row = target_table.RowType()
row.segment = seg
row.process_id = process_id
row.segment_def_id = segment_def_id
setattr(row, id_column, target_table.get_next_id())
if 'comment' in target_table.validcolumns:
row.comment = None
yield row, target_table
row_generators = []
while self:
ligolw_segment_list = self.pop()
segment_def_row = self.segment_def_table.RowType()
segment_def_row.process_id = process_id
segment_def_row.segment_def_id = self.segment_def_table.get_next_id()
segment_def_row.instruments = ligolw_segment_list.instruments
segment_def_row.name = ligolw_segment_list.name
segment_def_row.version = ligolw_segment_list.version
segment_def_row.comment = ligolw_segment_list.comment
self.segment_def_table.append(segment_def_row)
row_generators.append(row_generator(ligolw_segment_list.valid, self.segment_sum_table, process_id, segment_def_row.segment_def_id))
row_generators.append(row_generator(ligolw_segment_list.active, self.segment_table, process_id, segment_def_row.segment_def_id))
for row, target_table in iterutils.inorder(*row_generators):
target_table.append(row) | Restore the LigolwSegmentList objects to the XML tables in
preparation for output. All segments from all segment
lists are inserted into the tables in time order, but this
is NOT behaviour external applications should rely on.
This is done simply in the belief that it might assist in
constructing well balanced indexed databases from the
resulting files. If that proves not to be the case, or for
some reason this behaviour proves inconvenient to preserve,
then it might be discontinued without notice. You've been
warned. |
10,255 | def delete(queue, items):
con = _conn(queue)
with con:
cur = con.cursor()
if isinstance(items, six.string_types):
items = _quote_escape(items)
cmd = "DELETE FROM {0} WHERE name = '{1}'".format(queue, items)  # SQL literal assumed; stripped in source
log.debug('SQL Query: %s', cmd)
cur.execute(cmd)
return True
if isinstance(items, list):
items = [_quote_escape(el) for el in items]
cmd = 'DELETE FROM {0} WHERE name = ?'.format(queue)  # SQL literal assumed
log.debug('SQL Query: %s', cmd)
newitems = []
for item in items:
newitems.append((item,))
cur.executemany(cmd, newitems)
if isinstance(items, dict):
items = salt.utils.json.dumps(items).replace('"', "'")
cmd = "DELETE FROM {0} WHERE name = '{1}'".format(queue, items)  # SQL literal assumed
log.debug('SQL Query: %s', cmd)
cur.execute(cmd)
return True
return True | Delete an item or items from a queue |
10,256 | def rasterize(path,
pitch,
origin,
resolution=None,
fill=True,
width=None):
pitch = float(pitch)
origin = np.asanyarray(origin, dtype=np.float64)
if resolution is None:
span = np.vstack((path.bounds,
origin)).ptp(
axis=0)
resolution = np.ceil(span / pitch) + 2
resolution = np.asanyarray(resolution,
dtype=np.int64)
resolution = tuple(resolution.tolist())
discrete = [((i - origin) / pitch).astype(np.int)
for i in path.discrete]
exteriors = Image.new(mode='1', size=resolution)
edraw = ImageDraw.Draw(exteriors)
if width is not None:
width = int(width)
for coords in discrete:
edraw.line(coords.flatten().tolist(),
fill=1,
width=width)
if not fill:
del edraw
return exteriors
roots = path.root
interiors = Image.new(mode='1', size=resolution)
idraw = ImageDraw.Draw(interiors)
for i, points in enumerate(discrete):
if i in roots:
edraw.polygon(points.flatten().tolist(),
fill=1)
else:
idraw.polygon(points.flatten().tolist(),
fill=1)
del edraw
del idraw
raster = ImageChops.subtract(exteriors, interiors)
return raster | Rasterize a Path2D object into a boolean image ("mode 1").
Parameters
------------
path: Path2D object
pitch: float, length in model space of a pixel edge
origin: (2,) float, origin position in model space
resolution: (2,) int, resolution in pixel space
fill: bool, if True will return closed regions as filled
width: int, if not None will draw outline this wide (pixels)
Returns
------------
raster: PIL.Image object, mode 1 |
10,257 | def remove_import_statements(code):
new_code = []
for line in code.splitlines():
if not line.lstrip().startswith('import ') and \
not line.lstrip().startswith('from '):
new_code.append(line)
while new_code and new_code[0] == '':
new_code.pop(0)
while new_code and new_code[-1] == '':
new_code.pop()
return '\n'.join(new_code) | Removes lines with import statements from the code.
Args:
code: The code to be stripped.
Returns:
The code without import statements. |
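A quick inline demonstration of the stripping behaviour on a tiny snippet; the prefixes tested are the ones reconstructed above:

```python
code = """import os
from sys import path

def main():
    return os.getcwd()
"""
stripped = "\n".join(
    line for line in code.splitlines()
    if not line.lstrip().startswith(("import ", "from "))
).strip("\n")
print(stripped)
# def main():
#     return os.getcwd()
```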
10,258 | def config_stop(args):
r = fapi.abort_submission(args.project, args.workspace,
args.submission_id)
fapi._check_response_code(r, 204)
return ("Aborted {0} in {1}/{2}".format(args.submission_id,
args.project,
args.workspace)) | Abort a task (method configuration) by submission ID in given space |
10,259 | def getprop(self, prop_name):
return self.shell(
['getprop', prop_name],
timeout=DEFAULT_GETPROP_TIMEOUT_SEC).decode().strip() | Get a property of the device.
This is a convenience wrapper for "adb shell getprop xxx".
Args:
prop_name: A string that is the name of the property to get.
Returns:
A string that is the value of the property, or None if the property
doesn't exist. |
10,260 | def parse_if(self):
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
node.elif_ = []
node.else_ = []
token = next(self.stream)
if token.test('name:elif'):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
elif token.test('name:else'):
result.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
break
return result | Parse an if construct. |
10,261 | def camel_to_underscore(string):
string = FIRST_CAP_RE.sub(r'\1_\2', string)
return ALL_CAP_RE.sub(r'\1_\2', string).lower() | Convert camelcase to lowercase and underscore.
Recipe from http://stackoverflow.com/a/1176023
Args:
string (str): The string to convert.
Returns:
str: The converted string. |
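The docstring cites the Stack Overflow recipe; the two module-level patterns are not shown in the row, so the sketch below reconstructs them from that recipe (an assumption) to make the function runnable end to end:

```python
import re

# Patterns reconstructed from the cited recipe; not shown in the row above.
FIRST_CAP_RE = re.compile(r"(.)([A-Z][a-z]+)")
ALL_CAP_RE = re.compile(r"([a-z0-9])([A-Z])")

def camel_to_underscore(string):
    string = FIRST_CAP_RE.sub(r"\1_\2", string)
    return ALL_CAP_RE.sub(r"\1_\2", string).lower()

print(camel_to_underscore("CamelCaseHTTPRequest"))  # camel_case_http_request
```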
10,262 | def json_as_html(self):
from cspreports import utils
formatted_json = utils.format_report(self.json)
return mark_safe("<pre>\n%s</pre>" % escape(formatted_json)) | Print out self.json in a nice way. |
10,263 | def as_dict(self, verbosity=1, fmt=None, **kwargs):
if fmt == "abivars":
from pymatgen.io.abinit.abiobjects import structure_to_abivars
return structure_to_abivars(self, **kwargs)
latt_dict = self._lattice.as_dict(verbosity=verbosity)
del latt_dict["@module"]
del latt_dict["@class"]
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self._charge,
"lattice": latt_dict, "sites": []}
for site in self:
site_dict = site.as_dict(verbosity=verbosity)
del site_dict["lattice"]
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d | Dict representation of Structure.
Args:
verbosity (int): Verbosity level. Default of 1 includes both
direct and cartesian coordinates for all sites, lattice
parameters, etc. Useful for reading and for insertion into a
database. Set to 0 for an extremely lightweight version
that only includes sufficient information to reconstruct the
object.
fmt (str): Specifies a format for the dict. Defaults to None,
which is the default format used in pymatgen. Other options
include "abivars".
**kwargs: Allow passing of other kwargs needed for certain
formats, e.g., "abivars".
Returns:
JSON serializable dict representation. |
10,264 | def set_plugins(self, input_plugins):
header = "glances_"
for item in input_plugins:
try:
plugin = __import__(header + item)
except ImportError:
logger.error("Can not import {} plugin. Please upgrade your Glances client/server version.".format(item))
else:
logger.debug("Server uses {} plugin".format(item))
self._plugins[item] = plugin.Plugin(args=self.args)
sys.path = sys_path | Set the plugin list according to the Glances server. |
10,265 | def greenhall_sx(t, F, alpha):
if F == float('inf'):
return greenhall_sw(t, alpha+2)
a = 2*greenhall_sw(t, alpha)
b = greenhall_sw(t-1.0/float(F), alpha)
c = greenhall_sw(t+1.0/float(F), alpha)
return pow(F, 2)*(a-b-c) | Eqn (8) from Greenhall2004 |
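Written out, the quantity returned above (with s_w denoting `greenhall_sw`) is:

```latex
s_x(t) = F^2\left[\,2\,s_w(t;\alpha) - s_w\!\left(t - \tfrac{1}{F};\alpha\right) - s_w\!\left(t + \tfrac{1}{F};\alpha\right)\right],
\qquad s_x(t) = s_w(t;\alpha+2) \ \text{when } F = \infty .
```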
10,266 | def _get_images_dir():
img_dir = __salt__['config.option']('virt.images')  # config call and key assumed; literals stripped in source
if img_dir:
salt.utils.versions.warn_until(
'Sodium',  # target release assumed
'\'virt.images\' has been deprecated in favor of \'virt:images\'. '
'\'virt.images\' will stop being used in {version}.'
)
else:
img_dir = __salt__['config.get']('virt:images')
log.debug(
'Image directory from config option `virt:images`: %s', img_dir)
return img_dir | Extract the images dir from the configuration. First attempts to
find legacy virt.images, then tries virt:images. |
10,267 | def configure(conf, channel=False, group=False, fm_integration=False):
conf = expanduser(conf) if conf else get_config_path()
prompt = "❯ " if not sys.platform.startswith("win32") else "> "
contact_url = "https://telegram.me/"
print("Talk with the {} on Telegram ({}), create a bot and insert the token"
.format(markup("BotFather", "cyan"), contact_url + "BotFather"))
try:
token = input(markup(prompt, "magenta")).strip()
except UnicodeEncodeError:
prompt = "> "
token = input(markup(prompt, "magenta")).strip()
try:
bot = telegram.Bot(token)
bot_name = bot.get_me().username
except:
print(markup("Something went wrong, please try again.\n", "red"))
return configure()
print("Connected with {}.\n".format(markup(bot_name, "cyan")))
if channel:
print("Do you want to send to a {} or a {} channel? [pub/priv]"
.format(markup("public", "bold"), markup("private", "bold")))
channel_type = input(markup(prompt, "magenta")).strip()
if channel_type.startswith("pub"):
print("\nEnter your channel's public name or link:")
chat_id = input(markup(prompt, "magenta")).strip()
if "/" in chat_id:
chat_id = "@" + chat_id.split("/")[-1]
elif chat_id.startswith("@"):
pass
else:
chat_id = "@" + chat_id
else:
print("\nOpen https://web.telegram.org in your browser, sign in and open your private channel."
"\nNow copy the URL in the address bar and enter it here:")
url = input(markup(prompt, "magenta")).strip()
chat_id = "-100" + re.match(".+web\.telegram\.org\/
authorized = False
while not authorized:
try:
bot.send_chat_action(chat_id=chat_id, action="typing")
authorized = True
except (telegram.error.Unauthorized, telegram.error.BadRequest):
input("Please add {} as administrator to your channel and press Enter"
.format(markup(bot_name, "cyan")))
print(markup("\nCongratulations! telegram-send can now post to your channel!", "green"))
else:
password = "".join([str(randint(0, 9)) for _ in range(5)])
bot_url = contact_url + bot_name
fancy_bot_name = markup(bot_name, "cyan")
if group:
password = "/{}@{}".format(password, bot_name)
print("Please add {} to your group\nand send the following message to the group: {}\n"
.format(fancy_bot_name, markup(password, "bold")))
else:
print("Please add {} on Telegram ({})\nand send it the password: {}\n"
.format(fancy_bot_name, bot_url, markup(password, "bold")))
update, update_id = None, None
def get_user():
updates = bot.get_updates(offset=update_id, timeout=10)
for update in updates:
if update.message:
if update.message.text == password:
return update, None
if len(updates) > 0:
return None, updates[-1].update_id + 1
else:
return None, None
while update is None:
try:
update, update_id = get_user()
except Exception as e:
print("Error! {}".format(e))
chat_id = update.message.chat_id
user = update.message.from_user.username or update.message.from_user.first_name
m = ("Congratulations {}! ".format(user), "\ntelegram-send is now ready for use!")
ball = "🎊"
print(markup("".join(m), "green"))
bot.send_message(chat_id=chat_id, text=ball + " " + m[0] + ball + m[1])
config = configparser.ConfigParser()
config.add_section("telegram")
config.set("telegram", "TOKEN", token)
config.set("telegram", "chat_id", str(chat_id))
conf_dir = dirname(conf)
if conf_dir:
makedirs_check(conf_dir)
with open(conf, "w") as f:
config.write(f)
if fm_integration:
if not sys.platform.startswith("win32"):
return integrate_file_manager() | Guide user to set up the bot, saves configuration at `conf`.
# Arguments
conf (str): Path where to save the configuration file. May contain `~` for
user's home.
channel (Optional[bool]): Configure a channel.
group (Optional[bool]): Configure a group.
fm_integration (Optional[bool]): Setup file manager integration. |
10,268 | def gen_batch(data, batch_size, maxiter=np.inf, random_state=None):
perms = endless_permutations(_len_data(data), random_state)
it = 0
while it < maxiter:
it += 1
ind = np.array([next(perms) for _ in range(batch_size)])
yield _split_data(data, ind) | Create random batches for Stochastic gradients.
Batch index generator for SGD that will yield random batches for a
defined number of iterations, which can be infinite. This generator makes
consecutive passes through the data, drawing without replacement on each
pass.
Parameters
----------
data : ndarray or sequence of ndarrays
The data, can be a matrix X, (X,y) tuples etc
batch_size : int
number of data points in each batch.
maxiter : int, optional
The number of iterations
random_state : int or RandomState, optional
random seed
Yields
------
ndarray or sequence :
with each array length ``batch_size``, i.e. a subset of data. |
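A standalone sketch of the same without-replacement mini-batching idea for a single ndarray, assuming NumPy only; it is illustrative and not the library's gen_batch/endless_permutations implementation:

import numpy as np

def simple_batches(X, batch_size, maxiter, random_state=None):
    # Random batches drawn without replacement within each shuffled pass over X.
    rnd = np.random.RandomState(random_state)
    n = len(X)
    order, pos, it = rnd.permutation(n), 0, 0
    while it < maxiter:
        it += 1
        if pos + batch_size > n:        # start a fresh shuffled pass
            order, pos = rnd.permutation(n), 0
        ind = order[pos:pos + batch_size]
        pos += batch_size
        yield X[ind]

X = np.arange(20).reshape(10, 2)
for batch in simple_batches(X, batch_size=4, maxiter=3, random_state=0):
    print(batch.shape)                  # (4, 2) on every iteration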
10,269 | def get_file(profile, branch, file_path):
branch_sha = get_branch_sha(profile, branch)
tree = get_files_in_branch(profile, branch_sha)
match = None
for item in tree:
if item.get("path") == file_path:
match = item
break
file_sha = match.get("sha")
blob = blobs.get_blob(profile, file_sha)
content = blob.get("content")
decoded_content = b64decode(content)
return decoded_content.decode("utf-8") | Get a file from a branch.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
branch
The name of a branch.
file_path
The path of the file to fetch.
Returns:
The (UTF-8 encoded) content of the file, as a string. |
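The final decode step is plain base64, since the Git Data API returns blob content base64-encoded. A tiny self-contained illustration with hand-made content:

from base64 import b64decode, b64encode

# Simulate the "content" field of a blob returned by the API.
content = b64encode("print('hello')\n".encode("utf-8")).decode("ascii")

decoded = b64decode(content).decode("utf-8")
print(decoded)   # print('hello')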
10,270 | def bfs_conditional(G, source, reverse=False, keys=True, data=False,
yield_nodes=True, yield_if=None,
continue_if=None, visited_nodes=None,
yield_source=False):
if reverse and hasattr(G, 'reverse'):
G = G.reverse()
if isinstance(G, nx.Graph):
neighbors = functools.partial(G.edges, data=data)
else:
neighbors = functools.partial(G.edges, keys=keys, data=data)
queue = collections.deque([])
if visited_nodes is None:
visited_nodes = set([])
else:
visited_nodes = set(visited_nodes)
if source not in visited_nodes:
if yield_nodes and yield_source:
yield source
visited_nodes.add(source)
new_edges = neighbors(source)
if isinstance(new_edges, list):
new_edges = iter(new_edges)
queue.append((source, new_edges))
while queue:
parent, edges = queue[0]
for edge in edges:
child = edge[1]
if yield_nodes:
if child not in visited_nodes:
if yield_if is None or yield_if(G, child, edge):
yield child
else:
if yield_if is None or yield_if(G, child, edge):
yield edge
if child not in visited_nodes:
visited_nodes.add(child)
if continue_if is None or continue_if(G, child, edge):
new_edges = neighbors(child)
if isinstance(new_edges, list):
new_edges = iter(new_edges)
queue.append((child, new_edges))
queue.popleft() | Produce edges in a breadth-first-search starting at source, but only return
nodes that satisfy a condition, and only iterate past a node if it
satisfies a different condition.
Conditions are callables that take (G, child, edge) and return True or False.
CommandLine:
python -m utool.util_graph bfs_conditional
Example:
>>> # DISABLE_DOCTEST
>>> import networkx as nx
>>> import utool as ut
>>> G = nx.Graph()
>>> G.add_edges_from([(1, 2), (1, 3), (2, 3), (2, 4)])
>>> continue_if = lambda G, child, edge: True
>>> result = list(ut.bfs_conditional(G, 1, yield_nodes=False))
>>> print(result)
[(1, 2), (1, 3), (2, 1), (2, 3), (2, 4), (3, 1), (3, 2), (4, 2)]
Example:
>>> # ENABLE_DOCTEST
>>> import networkx as nx
>>> import utool as ut
>>> G = nx.Graph()
>>> continue_if = lambda G, child, edge: (child % 2 == 0)
>>> yield_if = lambda G, child, edge: (child % 2 == 1)
>>> G.add_edges_from([(0, 1), (1, 3), (3, 5), (5, 10),
>>> (4, 3), (3, 6),
>>> (0, 2), (2, 4), (4, 6), (6, 10)])
>>> result = list(ut.bfs_conditional(G, 0, continue_if=continue_if,
>>> yield_if=yield_if))
>>> print(result)
[1, 3, 5] |
10,271 | def set_home_location(self):
try:
latlon = self.module('map').click_position
except Exception:
print("No map available")
return
lat = float(latlon[0])
lon = float(latlon[1])
if self.wploader.count() == 0:
self.wploader.add_latlonalt(lat, lon, 0)
w = self.wploader.wp(0)
w.x = lat
w.y = lon
self.wploader.set(w, 0)
self.loading_waypoints = True
self.loading_waypoint_lasttime = time.time()
self.master.mav.mission_write_partial_list_send(self.target_system,
self.target_component,
0, 0) | set home location from last map click |
10,272 | def post(ctx, uri, input_file):
http_client = get_wva(ctx).get_http_client()
cli_pprint(http_client.post(uri, input_file.read())) | POST file data to a specific URI
Note that POST is not used for most web services URIs. Instead,
PUT is used for creating resources. |
10,273 | def make_password(password, salt=None, hasher='default'):
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
hasher = bCryptPasswordHasher
if not salt:
salt = hasher.salt()
return hasher.encode(password, salt) | Turn a plain-text password into a hash for database storage
Same as encode() but generate a new random salt. If password is None then
return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
which disallows logins. Additional random string reduces chances of gaining
access to staff or superuser accounts. See ticket #20079 for more info. |
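A hedged sketch of the 'unusable password' convention the docstring refers to: a sentinel prefix plus a random suffix that can never verify. The prefix value and suffix length below are assumptions for illustration, not necessarily the constants used above:

import secrets
import string

UNUSABLE_PASSWORD_PREFIX = "!"          # assumed sentinel
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40    # assumed length

def unusable_password():
    alphabet = string.ascii_letters + string.digits
    suffix = "".join(secrets.choice(alphabet)
                     for _ in range(UNUSABLE_PASSWORD_SUFFIX_LENGTH))
    return UNUSABLE_PASSWORD_PREFIX + suffix

def is_usable(encoded):
    return not encoded.startswith(UNUSABLE_PASSWORD_PREFIX)

print(is_usable(unusable_password()))   # False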
10,274 | def deactivate(self):
remove_builtin = self.remove_builtin
for key, val in self._orig_builtins.iteritems():
remove_builtin(key, val)
self._orig_builtins.clear()
self._builtins_added = False | Remove any builtins which might have been added by add_builtins, or
restore overwritten ones to their previous values. |
10,275 | def switch_led_on(self, ids):
self._set_LED(dict(zip(ids, itertools.repeat(True)))) | Switches on the LED of the motors with the specified ids. |
10,276 | def azureContainerSAS(self, *args, **kwargs):
return self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs) | Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable`` |
10,277 | def container_fs_usage_bytes(self, metric, scraper_config):
metric_name = scraper_config[] +
if metric.type not in METRIC_TYPES:
self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
return
self._process_usage_metric(metric_name, metric, self.fs_usage_bytes, scraper_config) | Number of bytes that are consumed by the container on this filesystem. |
10,278 | def start(self):
self._current_session = session = self._http_client.session()
request = self.next_request()
assert request
if request.url_info.password or \
request.url_info.hostname_with_port in self._hostnames_with_auth:
self._add_basic_auth_header(request)
response = yield from session.start(request)
self._process_response(response)
return response | Begin fetching the next request. |
10,279 | def _fill(self):
try:
self._head = self._iterable.next()
except StopIteration:
self._head = None | Advance the iterator without returning the old head. |
10,280 | def evaluate(self, item):
try:
for match in PATH_PATTERN.finditer(self.field):
path = match.group(0)
if path[0] == "[":
try:
item = item[int(match.group(1))]
except IndexError:
item = item[0]
else:
item = item.get(path)
except (IndexError, TypeError, AttributeError):
return None
return item | Pull the field off the item |
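PATH_PATTERN itself is not shown above; as an illustration, a regex that alternates between [index] segments and dotted names can drive the same walk over nested dicts and lists. The pattern below is an assumed stand-in, not the original constant:

import re

PATH_PATTERN = re.compile(r"\[(\d+)\]|[^.\[\]]+")   # hypothetical

def pull(item, field):
    try:
        for match in PATH_PATTERN.finditer(field):
            path = match.group(0)
            if path.startswith("["):
                item = item[int(match.group(1))]
            else:
                item = item.get(path)
    except (IndexError, TypeError, AttributeError):
        return None
    return item

doc = {"user": {"tags": [{"name": "admin"}]}}
print(pull(doc, "user.tags[0].name"))   # admin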
10,281 | def check_tag_data(data):
"Raise a ValueError if DATA doesn't seem to be a well-formed ID3 tag."
if len(data) < 10:
raise ValueError("Tag too short")
if data[0:3] != b"ID3":
raise ValueError("Missing ID3 identifier")
if data[3] >= 5 or data[4] != 0:
raise ValueError("Unknown ID3 version")
length = stagger.conversion.Syncsafe.decode(data[6:10]) + 10
if len(data) != length:
raise ValueError("Tag size mismatch") | Raise a ValueError if DATA doesn't seem to be a well-formed ID3 tag. |
10,282 | def experiment_group_post_delete(sender, **kwargs):
instance = kwargs['instance']
auditor.record(event_type=EXPERIMENT_GROUP_DELETED, instance=instance)
remove_bookmarks(object_id=instance.id, content_type=) | Delete all group outputs. |
10,283 | def _make_resource(self):
with self._lock:
for i in self._unavailable_range():
if self._reference_queue[i] is None:
rtracker = _ResourceTracker(
self._factory(**self._factory_arguments))
self._reference_queue[i] = rtracker
self._size += 1
return rtracker
raise PoolFullError | Returns a resource instance. |
10,284 | def get_internal_instances(self, phase=None):
if phase is None:
return [instance for instance in self.instances if not instance.is_external]
return [instance for instance in self.instances
if not instance.is_external and phase in instance.phases and
instance not in self.to_restart] | Get a list of internal instances (in a specific phase)
If phase is None, return all internal instances whatever the phase
:param phase: phase to filter (never used)
:type phase:
:return: internal instances list
:rtype: list |
10,285 | def _get_template_dirs():
return filter(lambda x: os.path.exists(x), [
os.path.join(os.path.expanduser(), , ),
os.path.join(, , , , ),
os.path.join(os.path.dirname(os.path.abspath(__file__)), ),
]) | Existing directories in which to search for jinja2 templates. The order
is important. The first found template from the first found dir wins! |
10,286 | def listRemoteDatawraps(location = conf.pyGeno_REMOTE_LOCATION) :
loc = location + "/datawraps.json"
response = urllib2.urlopen(loc)
js = json.loads(response.read())
return js | Lists all the datawraps available from a remote location. |
10,287 | def _load_data(self, band):
df = bandpass_data_frame( + band + , )
df.resp *= df.wlen
return df | From Morrissey+ 2005, with the actual data coming from
http://www.astro.caltech.edu/~capak/filters/. According to the latter,
these are in QE units and thus need to be multiplied by the wavelength
when integrating per-energy. |
10,288 | def delete_all_objects(self, async_=False):
nms = self.list_object_names(full_listing=True)
return self.object_manager.delete_all_objects(nms, async_=async_) | Deletes all objects from this container.
By default the call will block until all objects have been deleted. By
passing True for the 'async_' parameter, this method will not block, and
instead return an object that can be used to follow the progress of the
deletion. When deletion is complete the bulk deletion object's
'results' attribute will be populated with the information returned
from the API call. In synchronous mode this is the value that is
returned when the call completes. It is a dictionary with the following
keys:
deleted - the number of objects deleted
not_found - the number of objects not found
status - the HTTP return status code. '200 OK' indicates success
errors - a list of any errors returned by the bulk delete call |
10,289 | def disclaim_key_flags():
globals_for_caller = sys._getframe(1).f_globals
module, _ = _helpers.get_module_object_and_name(globals_for_caller)
_helpers.disclaim_module_ids.add(id(module)) | Declares that the current module will not define any more key flags.
Normally, the module that calls the DEFINE_xxx functions claims the
flag to be its key flag. This is undesirable for modules that
define additional DEFINE_yyy functions with its own flag parsers and
serializers, since that module will accidentally claim flags defined
by DEFINE_yyy as its key flags. After calling this function, the
module disclaims flag definitions thereafter, so the key flags will
be correctly attributed to the caller of DEFINE_yyy.
After calling this function, the module will not be able to define
any more flags. This function will affect all FlagValues objects. |
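A sketch of the usage pattern the docstring describes, assuming the absl-py package; the DEFINE_percentage wrapper and its bounds are made up for illustration:

# flag_helpers.py -- a module that wraps DEFINE_xxx for other modules.
from absl import flags

def DEFINE_percentage(name, default, help_text):
    # Flags created through this wrapper should be key flags of the caller,
    # not of this helper module.
    flags.DEFINE_float(name, default, help_text,
                       lower_bound=0.0, upper_bound=1.0)

# Disclaim ownership so callers of DEFINE_percentage are credited as the
# defining modules from here on.
flags.disclaim_key_flags()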
10,290 | def find_replace_string(obj, find, replace):
try:
strobj = str(obj)
newStr = string.replace(strobj, find, replace)
if newStr == strobj:
return obj
else:
return newStr
except:
line, filename, synerror = trace()
raise ArcRestHelperError({
"function": "find_replace_string",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
pass | Performs a string.replace() on the input object.
Args:
obj (object): The object to find/replace. It will be cast to ``str``.
find (str): The string to search for.
replace (str): The string to replace with.
Returns:
str: The replaced string. |
10,291 | def _processJobsWithRunningServices(self):
while True:
jobGraph = self.serviceManager.getJobGraphWhoseServicesAreRunning(0)
if jobGraph is None:
break
logger.debug(, jobGraph.jobStoreID)
jobGraph.services = []
self.toilState.updatedJobs.add((jobGraph, 0)) | Get jobs whose services have started |
10,292 | def get_random_url(ltd="com"):
url = [
"https://",
RandomInputHelper.get_random_value(8, [string.ascii_lowercase]),
".",
ltd
]
return "".join(url) | Get a random url with the given ltd.
Args:
ltd (str): The ltd to use (e.g. com).
Returns:
str: The random url. |
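Without the project's RandomInputHelper, the same idea fits in a few standard-library lines; the helper below is only a stand-in (its tld argument corresponds to the ltd parameter above):

import random
import string

def random_url(tld="com"):
    host = "".join(random.choices(string.ascii_lowercase, k=8))
    return "https://{}.{}".format(host, tld)

print(random_url())        # e.g. https://qzkfhwma.com
print(random_url("net"))   # e.g. https://xjqpelrt.net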
10,293 | def get(self, project):
try:
data = self.request(project + '/')
except:
raise
if not isinstance(data, clam.common.data.CLAMData):
raise Exception("Unable to retrieve CLAM Data")
else:
return data | Query the project status. Returns a ``CLAMData`` instance or raises an exception according to the returned HTTP Status code |
10,294 | def pixy_set_servos(self, s0, s1):
task = asyncio.ensure_future(self.core.pixy_set_servos(s0, s1))
self.loop.run_until_complete(task) | Sends the setServos Pixy command.
This method sets the pan/tilt servos that are plugged into Pixy's two servo ports.
:param s0: value 0 to 1000
:param s1: value 0 to 1000
:returns: No return value. |
10,295 | def qteMacroNameMangling(self, macroCls):
macroName = re.sub(r"([A-Z])", r"-\1", macroCls.__name__)
if macroName[0] == '-':
    macroName = macroName[1:]
return macroName.lower() | Convert the class name of a macro class to macro name.
The name mangling inserts a '-' character before every capital
letter (except the first) and then lowers the entire string.
Example: if the class name of ``macroCls`` is 'ThisIsAMacro'
then this method will return 'this-is-a-macro', ie. every
capital letter (except the first) will be prefixed with a
hyphen and changed to lower case.
The method returns the name mangled macro name or **None**
if an error occurred.
|Args|
* ``macroCls`` (**QtmacsMacro**): ``QtmacsMacro``- or derived
class (not an instance!)
|Returns|
**str**: the name mangled string or **None** if an error occurred.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type. |
10,296 | def run(cmd, stdout=None, stderr=None, **kwargs):
devnull = None
try:
stdoutfilter = None
stderrfilter = None
wantstdout = False
wantstderr = False
if stdout is False:
devnull = open(os.devnull, 'wb')
stdout = devnull
elif stdout is True:
stdout = subprocess.PIPE
wantstdout = True
elif callable(stdout):
stdoutfilter = partial(stdout)
stdout = subprocess.PIPE
else:
assert stdout is None, "Invalid stdout %r" % stdout
if stderr is False:
if devnull is None:
devnull = open(os.devnull, 'wb')
stderr = devnull
elif stderr is True:
stderr = subprocess.PIPE
wantstderr = True
elif stderr == "STDOUT":
stderr = subprocess.STDOUT
elif callable(stderr):
stderrfilter = partial(stderr)
stderr = subprocess.PIPE
else:
assert stderr is None, "Invalid stderr %r" % stderr
if (stdoutfilter or stderrfilter) and asyncio:
exitcode, out, err, = _runasync(stdoutfilter,
stderrfilter,
cmd,
stdout=stdout,
stderr=stderr,
**kwargs)
if not wantstdout:
out = None
if not wantstderr:
err = None
return exitcode, out, err
proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, **kwargs)
out, err = proc.communicate()
if not wantstdout:
if stdoutfilter:
stdoutfilter(out, True)
out = None
if not wantstderr:
if stderrfilter:
stderrfilter(err, True)
err = None
return proc.returncode, out, err
finally:
if devnull is not None:
devnull.close() | A blocking wrapper around subprocess.Popen(), but with a simpler interface
for the stdout/stderr arguments:
stdout=False / stderr=False
stdout/stderr will be redirected to /dev/null (or discarded in some
other suitable manner)
stdout=True / stderr=True
stdout/stderr will be captured and returned as a list of lines.
stdout=None
stdout will be redirected to the python process's stdout, which may be
a tty (same as passing stdout=None to subprocess.Popen)
stderr=None:
stderr will be redirected to the python process's stderr, which may be
a tty (same as passing stderr=None to subprocess.Popen)
stderr="STDOUT"
Same as using stderr=subprocess.STDOUT
The return value will be a tuple of (exitcode, stdout, stderr)
If stdout and/or stderr were not captured, they will be None instead. |
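For comparison, the same stdout/stderr conveniences can be approximated on Python 3 with subprocess.run; this is a much smaller sketch, not a drop-in replacement (no line-filter callbacks, no asyncio path):

import subprocess

def run_simple(cmd, stdout=None, stderr=None, **kwargs):
    def pick(flag):
        if flag is False:
            return subprocess.DEVNULL
        if flag is True:
            return subprocess.PIPE
        if flag == "STDOUT":
            return subprocess.STDOUT
        return None   # inherit the parent's stream
    proc = subprocess.run(cmd, stdout=pick(stdout), stderr=pick(stderr), **kwargs)
    return proc.returncode, proc.stdout, proc.stderr

# On a POSIX system:
code, out, err = run_simple(["echo", "hello"], stdout=True, stderr=False)
print(code, out)   # 0 b'hello\n'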
10,297 | def bind(self, cube):
if self.measure:
table, column = self.measure.bind(cube)
else:
table, column = cube.fact_table, cube.fact_pk
column = getattr(func, self.function)(column)
column = column.label(self.ref)
column.quote = True
return table, column | When one column needs to match, use the key. |
10,298 | def get_data(self, url='', headers={}, date=str(datetime.date.today()),
             dict_to_store={}, type='views', repo_name=''):
        url = (url + '/' + type)
        r3 = requests.get(url, headers=headers)
        json = r3.json()
        if type == 'views':
            self.views_json[repo_name] = json
        elif type == 'clones':
            self.clones_json[repo_name] = json
        for day in json[type]:
            timestamp_seconds = day['timestamp']/1000
            try:
                date_timestamp = datetime.datetime.utcfromtimestamp(
                    timestamp_seconds).strftime('%Y-%m-%d')
                if date_timestamp != date:
                    tuple_in = (day['count'], day['uniques'])
                    tuple = (dict_to_store[timestamp_seconds][0] + tuple_in[0],
                             dict_to_store[timestamp_seconds][1] + tuple_in[1])
                    dict_to_store[timestamp_seconds] = tuple
            except KeyError:
                tuple = dict_to_store[timestamp_seconds] = (day['count'],
                    day['uniques']) | Retrieves data from json and stores it in the supplied dict. Accepts
'clones' or 'views' as type. |
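A sketch of the aggregation step with a hand-made payload shaped like GitHub's traffic API (millisecond timestamps plus count/uniques); the field names follow that API and are only assumed here for illustration:

import datetime

payload = {"views": [
    {"timestamp": 1552608000000, "count": 14, "uniques": 3},
    {"timestamp": 1552694400000, "count": 9, "uniques": 2},
]}

totals = {}   # timestamp in seconds -> (count, uniques)
today = str(datetime.date.today())
for day in payload["views"]:
    ts = day["timestamp"] / 1000
    date = datetime.datetime.utcfromtimestamp(ts).strftime("%Y-%m-%d")
    if date == today:
        continue   # skip today's still-changing numbers
    count, uniques = totals.get(ts, (0, 0))
    totals[ts] = (count + day["count"], uniques + day["uniques"])

print(totals)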
10,299 | def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
sort=None, copy=True):
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy, sort=sort)
return op.get_result() | Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic.
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
Names for the levels in the resulting hierarchical index.
verify_integrity : bool, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation.
sort : bool, default None
Sort non-concatenation axis if it is not already aligned when `join`
is 'outer'. The current default of sorting is deprecated and will
change to not-sorting in a future version of pandas.
Explicitly pass ``sort=True`` to silence the warning and sort.
Explicitly pass ``sort=False`` to silence the warning and not sort.
This has no effect when ``join='inner'``, which already preserves
the order of the non-concatenation axis.
.. versionadded:: 0.23.0
copy : bool, default True
If False, do not copy data unnecessarily.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.append : Concatenate DataFrames.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3], sort=False)
letter number animal
0 a 1 NaN
1 b 2 NaN
0 c 3 cat
1 d 4 dog
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a'] |