Unnamed: 0 (int64, 0-389k) | code (string, length 26-79.6k) | docstring (string, length 1-46.9k) |
---|---|---|
26,000 |
def disable(self):
w.ActButton.disable(self)
g = get_root(self).globals
if self._expert:
self.config(bg=g.COL[])
else:
self.config(bg=g.COL[])
|
Disable the button, if in non-expert mode.
|
26,001 |
def discussion_is_still_open(self, discussion_type, auto_close_after):
discussion_enabled = getattr(self, discussion_type)
if (discussion_enabled and isinstance(auto_close_after, int) and
auto_close_after >= 0):
return (timezone.now() - (
self.start_publication or self.publication_date)).days < \
auto_close_after
return discussion_enabled
|
Checks if a type of discussion is still open after a certain number of days.
|
26,002 |
def _check_stream_timeout(started, timeout):
if timeout:
elapsed = datetime.datetime.utcnow() - started
if elapsed.seconds > timeout:
raise StopIteration
|
Check if the timeout has been reached and raise a `StopIteration` if so.
|
26,003 |
def write_to_file(path, contents, file_type="text"):
FILE_TYPES = ("json", "text", "binary")
if file_type not in FILE_TYPES:
raise ScriptWorkerException("Unknown file_type {} not in {}!".format(file_type, FILE_TYPES))
if file_type == "json":
contents = format_json(contents)
if file_type == "binary":
with open(path, "wb") as fh:
fh.write(contents)
else:
with open(path, "w") as fh:
print(contents, file=fh, end="")
|
Write ``contents`` to ``path`` with optional formatting.
Small helper function to write ``contents`` to ``file`` with optional formatting.
Args:
path (str): the path to write to
contents (str, object, or bytes): the contents to write to the file
file_type (str, optional): the type of file. Currently accepts
``text`` or ``binary`` (contents are unchanged) or ``json`` (contents
are formatted). Defaults to ``text``.
Raises:
ScriptWorkerException: with an unknown ``file_type``
TypeError: if ``file_type`` is ``json`` and ``contents`` isn't JSON serializable
|
26,004 |
def _save_db():
from pyci.utility import json_serial
import json
vms("Serializing DB to JSON in {}".format(datapath))
with open(datapath, 'w') as f:
json.dump(db, f, default=json_serial)
|
Serializes the contents of the script db to JSON.
|
26,005 |
def insertFile(self, qInserts=False):
if qInserts in (False, ): qInserts=False
try:
body = request.body.read()
indata = cjson.decode(body)["files"]
if not isinstance(indata, (list, dict)):
dbsExceptionHandler("dbsException-invalid-input", "Invalid Input DataType", self.logger.exception, \
"insertFile expects input as list or dirc")
businput = []
if isinstance(indata, dict):
indata = [indata]
indata = validateJSONInputNoCopy("files", indata)
for f in indata:
f.update({
"creation_date": f.get("creation_date", dbsUtils().getTime()),
"create_by" : dbsUtils().getCreateBy(),
"last_modification_date": f.get("last_modification_date", dbsUtils().getTime()),
"last_modified_by": f.get("last_modified_by", dbsUtils().getCreateBy()),
"file_lumi_list":f.get("file_lumi_list", []),
"file_parent_list":f.get("file_parent_list", []),
"file_assoc_list":f.get("assoc_list", []),
"file_output_config_list":f.get("file_output_config_list", [])})
businput.append(f)
self.dbsFile.insertFile(businput, qInserts)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert File input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSWriterModel/insertFile. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler(, dbsExceptionCode[], self.logger.exception, sError)
|
API to insert a list of files into DBS. Up to 10 files can be inserted in one request.
:param qInserts: True means that inserts will be queued instead of done immediately. The INSERT QUEUE Manager will perform the inserts within a few minutes.
:type qInserts: bool
:param filesList: List of dictionaries containing following information
:type filesList: list of dicts
:key logical_file_name: File to be inserted (str) (Required)
:key is_file_valid: (optional, default = 1): (bool)
:key block: required: /a/b/c#d (str)
:key dataset: required: /a/b/c (str)
:key file_type: (optional, default = EDM) one of the predefined types, (str)
:key check_sum: (optional, default = '-1') (str)
:key event_count: (optional, default = -1) (int)
:key file_size: (optional, default = -1.) (float)
:key adler32: (optional, default = '') (str)
:key md5: (optional, default = '') (str)
:key auto_cross_section: (optional, default = -1.) (float)
:key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....]
:key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
:key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
:key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
|
26,006 |
async def _create_upstream_applications(self):
loop = asyncio.get_event_loop()
for steam_name, ApplicationsCls in self.applications.items():
application = ApplicationsCls(self.scope)
upstream_queue = asyncio.Queue()
self.application_streams[steam_name] = upstream_queue
self.application_futures[steam_name] = loop.create_task(
application(
upstream_queue.get,
partial(self.dispatch_downstream, steam_name=steam_name)
)
)
|
Create the upstream applications.
|
26,007 |
def asBinary(self):
binString = binary.bin(self._value)[2:]
return '0' * (len(self._value) - len(binString)) + binString
|
Get |ASN.1| value as a text string of bits.
|
26,008 |
def _make_methods():
"Automagically generates methods based on the API endpoints"
for k, v in PokeAPI().get_endpoints().items():
string = "\t@BaseAPI._memoize\n"
string += ("\tdef get_{0}(self, id_or_name=, limit=None,"
.format(k.replace(, )) + )
string += ("\t\tparams = self._parse_params(locals().copy(), " +
"[])\n")
string += "\t\tquery_string = \n".format(v.split()[-2])
string += "\t\tquery_string += str(id_or_name) + \n"
string +=
string +=
print(string)
|
Automagically generates methods based on the API endpoints
|
26,009 |
def _get_annual_data(self, p_p_id):
params = {"p_p_id": p_p_id,
"p_p_lifecycle": 2,
"p_p_state": "normal",
"p_p_mode": "view",
"p_p_resource_id": "resourceObtenirDonneesConsommationAnnuelles"}
try:
raw_res = yield from self._session.get(PROFILE_URL,
params=params,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecAnnualError("Can not get annual data")
try:
json_output = yield from raw_res.json(content_type=)
except (OSError, json.decoder.JSONDecodeError):
raise PyHydroQuebecAnnualError("Could not get annual data")
if not json_output.get():
raise PyHydroQuebecAnnualError("Could not get annual data")
if not json_output.get():
raise PyHydroQuebecAnnualError("Could not get annual data")
if not in json_output.get()[0]:
raise PyHydroQuebecAnnualError("Could not get annual data")
return json_output.get()[0][]
|
Get annual data.
|
26,010 |
def CrossEntropyFlat(*args, axis:int=-1, **kwargs):
"Same as `nn.CrossEntropyLoss`, but flattens input and target."
return FlattenedLoss(nn.CrossEntropyLoss, *args, axis=axis, **kwargs)
|
Same as `nn.CrossEntropyLoss`, but flattens input and target.
|
26,011 |
def typeset(self, container, text_align, last_line=False):
document = container.document
while len(self) > 0:
last_span = self[-1]
if last_span and last_span.ends_with_space:
self.cursor -= last_span.space.width
self.pop()
else:
break
else:
return
for glyph_span in self:
glyph_span.span.before_placing(container)
left = self.indent
if self._has_tab or text_align == TextAlign.JUSTIFY and last_line:
text_align = TextAlign.LEFT
extra_space = self.width - self.cursor
if text_align == TextAlign.JUSTIFY:
nr_spaces = sum(glyph_span.number_of_spaces for glyph_span in self)
if nr_spaces > 0:
add_to_spaces = extra_space / nr_spaces
for glyph_span in self:
if glyph_span.number_of_spaces > 0:
glyph_span.space.width += add_to_spaces
elif text_align == TextAlign.CENTER:
left += extra_space / 2.0
elif text_align == TextAlign.RIGHT:
left += extra_space
canvas = container.canvas
cursor = container.cursor
current_annotation = AnnotationState(container)
for span, glyph_and_widths in group_spans(self):
try:
width = canvas.show_glyphs(left, cursor, span, glyph_and_widths,
container)
except InlineFlowableException:
ascender = span.ascender(document)
if ascender > 0:
top = cursor - ascender
else:
inline_height = span.virtual_container.height
top = cursor - span.descender(document) - inline_height
span.virtual_container.place_at(container, left, top)
width = span.width
current_annotation.update(span, left, width)
left += width
current_annotation.place_if_any()
|
Typeset the line in `container` below its current cursor position.
Advances the container's cursor to below the descender of this line.
`justification` and `line_spacing` are passed on from the paragraph
style. `last_descender` is the previous line's descender, used in the
vertical positioning of this line. Finally, `last_line` specifies
whether this is the last line of the paragraph.
Returns the line's descender size.
|
26,012 |
def _create_identity(id_type=None, username=None, password=None, tenant_id=None,
tenant_name=None, api_key=None, verify_ssl=None,
return_context=False):
if id_type:
cls = _import_identity(id_type)
else:
cls = settings.get("identity_class")
if not cls:
raise exc.IdentityClassNotDefined("No identity class has "
"been defined for the current environment.")
if verify_ssl is None:
verify_ssl = get_setting("verify_ssl")
context = cls(username=username, password=password, tenant_id=tenant_id,
tenant_name=tenant_name, api_key=api_key, verify_ssl=verify_ssl)
if return_context:
return context
else:
global identity
identity = context
|
Creates an instance of the current identity_class and assigns it to the
module-level name 'identity' by default. If 'return_context' is True, the
module-level 'identity' is untouched, and instead the instance is returned.
|
26,013 |
def _babi_parser(tmp_dir,
babi_task_id,
subset,
dataset_split,
joint_training=True):
def _data_file(mode, task_id):
file_name = (_TASKS[task_id] + "_{}.txt")
return os.path.join(_DIR_NAME, subset, file_name.format(mode))
def _all_task_raw_data_generator(tmp_dir, data_file, dataset_split):
tf.logging.info("Preparing dataset of all task together")
globe_name = ("*_{}.txt")
mode_name = "test"
if dataset_split == problem.DatasetSplit.TRAIN:
mode_name = "train"
files_name = os.path.join(
tmp_dir, _DIR_NAME, subset,
globe_name.format(mode_name))
with tf.gfile.GFile(data_file, "wb") as outfile:
for filename in tf.gfile.Glob(files_name):
if filename == data_file:
continue
with tf.gfile.GFile(filename, "rb") as readfile:
shutil.copyfileobj(readfile, outfile)
def _parse_answer(answer):
if (joint_training or babi_task_id in ["qa8", "qa19", "qa0"
]):
return "".join([d for d in answer.split(",")])
else:
return answer
if dataset_split == problem.DatasetSplit.TRAIN:
babi_train_task_id = "qa0" if joint_training else babi_task_id
data_file = os.path.join(tmp_dir, _data_file("train", babi_train_task_id))
else:
data_file = os.path.join(tmp_dir, _data_file("test", babi_task_id))
if ((babi_task_id == "qa0" or joint_training) and
not tf.gfile.Exists(os.path.join(tmp_dir, data_file))):
_all_task_raw_data_generator(tmp_dir, data_file, dataset_split)
tf.logging.info("Parsing %s into training/testing instances...", data_file)
babi_instances = []
with tf.gfile.GFile(data_file, mode="r") as f:
story = []
for line in f:
line_num, line = line.strip().split(" ", 1)
if int(line_num) == 1:
story = []
if "\t" in line:
question, answer, _ = line.split("\t")
question = _normalize_string(question)
substories = [s for s in story if s]
answer = _parse_answer(answer)
instance = {
FeatureNames.STORY: substories,
FeatureNames.QUESTION: question,
FeatureNames.ANSWER: answer
}
babi_instances.append(instance)
story.append("")
else:
story.append(_normalize_string(line))
return babi_instances
|
Parsing the bAbi dataset (train and test).
Args:
tmp_dir: temp directory to download and extract the dataset
babi_task_id: babi task id
subset: babi subset
dataset_split: dataset split (train or eval)
joint_training: if training the model on all tasks.
Returns:
babi_instances: set of training examples, each a dict containing a story,
a question and an answer.
babi_lines: all the texts in the data separated based on their
appearance in the stories, questions, or answers.
|
26,014 |
def mmPrettyPrintTraces(traces, breakOnResets=None):
assert len(traces) > 0, "No traces found"
table = PrettyTable(["
for i in xrange(len(traces[0].data)):
if breakOnResets and breakOnResets.data[i]:
table.add_row(["<reset>"] * (len(traces) + 1))
table.add_row([i] +
[trace.prettyPrintDatum(trace.data[i]) for trace in traces])
return table.get_string().encode("utf-8")
|
Returns pretty-printed table of traces.
@param traces (list) Traces to print in table
@param breakOnResets (BoolsTrace) Trace of resets to break table on
@return (string) Pretty-printed table of traces.
|
26,015 |
def get_jobs(plugin_name,
verify_job=True, conn=None):
job_cur = _jobs_cursor(plugin_name).run(conn)
for job in job_cur:
if verify_job and not verify(job, Job()):
continue
yield job
|
:param plugin_name: <str>
:param verify_job: <bool>
:param conn: <connection> or <NoneType>
:return: <generator> yields <dict>
|
26,016 |
def shapefile(self, file):
driver = ogr.GetDriverByName("ESRI Shapefile")
dataset = driver.Open(file)
if dataset is not None:
layer = dataset.GetLayer()
spatialRef = layer.GetSpatialRef()
feature = layer.GetNextFeature()
geom = feature.GetGeometryRef()
spatialRef = geom.GetSpatialReference()
outSpatialRef = osr.SpatialReference()
outSpatialRef.ImportFromEPSG(4326)
coordTrans = osr.CoordinateTransformation(spatialRef, outSpatialRef)
env = geom.GetEnvelope()
xmin = env[0]
ymin = env[2]
xmax = env[1]
ymax = env[3]
pointMAX = ogr.Geometry(ogr.wkbPoint)
pointMAX.AddPoint(env[1], env[3])
pointMAX.Transform(coordTrans)
pointMIN = ogr.Geometry(ogr.wkbPoint)
pointMIN.AddPoint(env[0], env[2])
pointMIN.Transform(coordTrans)
self.bbox = str(pointMIN.GetPoint()[0])+","+str(pointMIN.GetPoint()[1])+","+str(pointMAX.GetPoint()[0])+","+str(pointMAX.GetPoint()[1])
self.query = None
else:
exit(" shapefile not found. Please verify your path to the shapefile")
|
Reprojects to WGS84 and retrieves the extent.
|
26,017 |
def commit(self):
last_line, last_col = self.qteWidget.getNumLinesAndColumns()
if self.cursorPos is None:
if qteKilledTextFromRectangle is None:
return
self.insertedText = list(qteKilledTextFromRectangle)
self.cursorPos = self.qteWidget.getCursorPosition()
else:
self.qteWidget.setCursorPosition(*self.cursorPos)
col = self.cursorPos[1]
for ii, text in enumerate(self.insertedText):
line = ii + self.cursorPos[0]
self.baseClass.insertAt(text, line, col)
|
Insert the specified text in all selected lines, always
at the same column position.
|
26,018 |
def profile_slope(self, kwargs_lens_list, lens_model_internal_bool=None, num_points=10):
theta_E = self.effective_einstein_radius(kwargs_lens_list)
x0 = kwargs_lens_list[0]['center_x']
y0 = kwargs_lens_list[0]['center_y']
x, y = util.points_on_circle(theta_E, num_points)
dr = 0.01
x_dr, y_dr = util.points_on_circle(theta_E + dr, num_points)
if lens_model_internal_bool is None:
lens_model_internal_bool = [True]*len(kwargs_lens_list)
alpha_E_x_i, alpha_E_y_i = self._lensModel.alpha(x0 + x, y0 + y, kwargs_lens_list, k=lens_model_internal_bool)
alpha_E_r = np.sqrt(alpha_E_x_i**2 + alpha_E_y_i**2)
alpha_E_dr_x_i, alpha_E_dr_y_i = self._lensModel.alpha(x0 + x_dr, y0 + y_dr, kwargs_lens_list,
k=lens_model_internal_bool)
alpha_E_dr = np.sqrt(alpha_E_dr_x_i ** 2 + alpha_E_dr_y_i ** 2)
slope = np.mean(np.log(alpha_E_dr / alpha_E_r) / np.log((theta_E + dr) / theta_E))
gamma = -slope + 2
return gamma
|
computes the logarithmic power-law slope of a profile
:param kwargs_lens_list: lens model keyword argument list
:param lens_model_internal_bool: bool list, indicate which part of the model to consider
:param num_points: number of estimates around the Einstein radius
:return: the logarithmic power-law slope (gamma) of the profile
|
26,019 |
def validate(self):
if not isinstance(self.value, bytes):
raise TypeError("opaque value must be bytes")
elif not isinstance(self.opaque_type, enums.OpaqueDataType):
raise TypeError("opaque data type must be an OpaqueDataType "
"enumeration")
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("opaque data name {0} must be a string".format(
position))
|
Verify that the contents of the OpaqueObject are valid.
Raises:
TypeError: if the types of any OpaqueObject attributes are invalid.
|
26,020 |
def role_list(endpoint_id):
client = get_client()
roles = client.endpoint_role_list(endpoint_id)
resolved_ids = LazyIdentityMap(
x["principal"] for x in roles if x["principal_type"] == "identity"
)
def principal_str(role):
principal = role["principal"]
if role["principal_type"] == "identity":
username = resolved_ids.get(principal)
return username or principal
elif role["principal_type"] == "group":
return (u"https://app.globus.org/groups/{}").format(principal)
else:
return principal
formatted_print(
roles,
fields=[
("Principal Type", "principal_type"),
("Role ID", "id"),
("Principal", principal_str),
("Role", "role"),
],
)
|
Executor for `globus access endpoint-role-list`
|
26,021 |
def make_url(self, returnURL, paymentReason, pipelineName,
transactionAmount, **params):
urlsuffix = urlsuffix[1:]
fmt = "https://%(endpoint_host)s%(base)s?%(urlsuffix)s"
final = fmt % vars()
return final
|
Generate the URL with the signature required for a transaction
|
26,022 |
def add_index(self, attribute, ordered=False):
return self._encode_invoke(map_add_index_codec, attribute=attribute, ordered=ordered)
|
Adds an index to this map for the specified entries so that queries can run faster.
Example:
Let's say your map values are Employee objects.
>>> class Employee(IdentifiedDataSerializable):
>>> active = false
>>> age = None
>>> name = None
>>> #other fields
>>>
>>> #methods
If you query your values mostly based on age and active fields, you should consider indexing these.
>>> map = self.client.get_map("employees")
>>> map.add_index("age" , true) #ordered, since we have ranged queries for this field
>>> map.add_index("active", false) #not ordered, because boolean field cannot have range
:param attribute: (str), index attribute of the value.
:param ordered: (bool), for ordering the index or not (optional).
|
26,023 |
def rebin_scale(a, scale=1):
newshape = tuple((side * scale) for side in a.shape)
return rebin(a, newshape)
|
Scale an array to a new shape.
|
26,024 |
def isscalar(cls, dataset, dim):
if not dataset.data:
return True
ds = cls._inner_dataset_template(dataset)
isscalar = []
for d in dataset.data:
ds.data = d
isscalar.append(ds.interface.isscalar(ds, dim))
return all(isscalar)
|
Tests if dimension is scalar in each subpath.
|
26,025 |
def decode(self, s):
if isinstance(s, basestring) and '&' in s:
for x in self.decodings:
s = s.replace(x[0], x[1])
return s
|
Decode special characters encodings found in string I{s}.
@param s: A string to decode.
@type s: str
@return: The decoded string.
@rtype: str
|
26,026 |
def tag(self, *tag, **kwtags):
if not tag:
pass
elif len(tag) == 1 and isinstance(tag[0], dict):
self._meta.update(tag[0])
else:
raise TypeError(
)
self._meta.update(kwtags)
return self
|
Tag a Property instance with metadata dictionary
|
26,027 |
def close(self):
if self.fp is None:
return
if self.mode in ("w", "a") and self._didModify:
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist:
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
if extra:
% ZIP_MAX_COMMENT
self.comment = self.comment[:ZIP_MAX_COMMENT]
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self.comment))
self.fp.write(endrec)
self.fp.write(self.comment)
self.fp.flush()
if not self._filePassed:
self.fp.close()
self.fp = None
|
Close the file, and for mode "w" and "a" write the ending
records.
|
26,028 |
def posterior(self, x, sigma=1.):
pr0 = 1. / self.scale**2
prd = x.size / sigma**2
varp = 1. / (pr0 + prd)
mu = varp * (pr0 * self.loc + prd * x.mean())
return Normal(loc=mu, scale=np.sqrt(varp))
|
Model is X_1,...,X_n ~ N(theta, sigma^2), theta~self, sigma fixed
|
26,029 |
def set_filter(self, filter):
cairo.cairo_pattern_set_filter(self._pointer, filter)
self._check_status()
|
Sets the filter to be used for resizing when using this pattern.
See :ref:`FILTER` for details on each filter.
Note that you might want to control filtering
even when you do not have an explicit :class:`Pattern`,
(for example when using :meth:`Context.set_source_surface`).
In these cases, it is convenient to use :meth:`Context.get_source`
to get access to the pattern that cairo creates implicitly.
For example::
context.get_source().set_filter(cairocffi.FILTER_NEAREST)
|
26,030 |
def phase(args):
p = OptionParser(phase.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fw = must_open(opts.outfile, "w")
for gbfile in args:
for rec in SeqIO.parse(gbfile, "gb"):
bac_phase, keywords = get_phase(rec)
chr, clone = get_clone(rec)
keyword_field = ";".join(keywords)
print("\t".join((rec.id, str(bac_phase), keyword_field,
chr, clone)), file=fw)
|
%prog phase genbankfiles
Input has to be a gb file. Search the `KEYWORDS` section to look for PHASE.
Also look for "chromosome" and "clone" in the definition line.
|
26,031 |
def transformChildrenToNative(self):
for childArray in (self.contents[k] for k in self.sortChildKeys()):
for child in childArray:
child = child.transformToNative()
child.transformChildrenToNative()
|
Recursively replace children with their native representation.
Sort to get dependency order right, like vtimezone before vevent.
|
26,032 |
def convertDict2Attrs(self, *args, **kwargs):
for n,c in enumerate(self.attrs):
try:
params = self.params
except AttributeError as aerr:
params = {}
kwargs.update(params)
try:
client = self.mambuclientclass(urlfunc=None, entid=None, *args, **kwargs)
except AttributeError as ae:
self.mambuclientclass = MambuClient
client = self.mambuclientclass(urlfunc=None, entid=None, *args, **kwargs)
client.init(c, *args, **kwargs)
self.attrs[n] = client
|
The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Client object for each one, initializing them
one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuClient just
created.
.. todo:: pass a valid (perhaps default) urlfunc, and its
corresponding id to entid to each MambuClient, telling
MambuStruct not to connect() by default. It's desirable to
connect at any other further moment to refresh some element in
the list.
|
26,033 |
def get_package_version(self, feed, group_id, artifact_id, version, show_deleted=None):
route_values = {}
if feed is not None:
route_values[] = self._serialize.url(, feed, )
if group_id is not None:
route_values[] = self._serialize.url(, group_id, )
if artifact_id is not None:
route_values[] = self._serialize.url(, artifact_id, )
if version is not None:
route_values[] = self._serialize.url(, version, )
query_parameters = {}
if show_deleted is not None:
query_parameters[] = self._serialize.query(, show_deleted, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, response)
|
GetPackageVersion.
[Preview API] Get information about a package version.
:param str feed: Name or ID of the feed.
:param str group_id: Group ID of the package.
:param str artifact_id: Artifact ID of the package.
:param str version: Version of the package.
:param bool show_deleted: True to show information for deleted packages.
:rtype: :class:`<Package> <azure.devops.v5_1.maven.models.Package>`
|
26,034 |
def get_title(self, index):
index = unicode_type(int(index))
if index in self._titles:
return self._titles[index]
else:
return None
|
Gets the title of a container page.
Parameters
----------
index : int
Index of the container page
|
26,035 |
def _get_labels(self, y):
y = np.asarray(y)
if y.ndim == 1:
return y.reshape((y.size, 1))
assert y.ndim == 2
return y
|
Construct pylearn2 dataset labels.
Parameters
----------
y : array_like, optional
Labels.
|
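The reshaping performed by `_get_labels` above is plain NumPy; a minimal standalone sketch of the equivalent operation (hypothetical code, not part of the dataset row):

```python
import numpy as np

# 1-D label vectors become column vectors; 2-D label arrays pass through.
y1 = np.asarray([0, 1, 2])
print(y1.reshape((y1.size, 1)).shape)  # (3, 1)

y2 = np.zeros((4, 2))
print(y2.shape)  # (4, 2) -- already two-dimensional, returned unchanged
```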
26,036 |
def get_online_date(self, **kwargs):
qualifier = kwargs.get(, )
content = kwargs.get(, )
if qualifier == :
date_match = META_CREATION_DATE_REGEX.match(content)
(year, month, day) = date_match.groups()
creation_date = datetime.date(int(year), int(month), int(day))
return % (
format_date_string(creation_date.month),
format_date_string(creation_date.day),
creation_date.year,
)
return None
|
Get the online date from the meta creation date.
|
26,037 |
def pst_prior(pst,logger=None, filename=None, **kwargs):
if logger is None:
logger=Logger(,echo=False)
logger.log("plot pst_prior")
par = pst.parameter_data
if "parcov_filename" in pst.pestpp_options:
logger.warn("ignoring parcov_filename, using parameter bounds for prior cov")
logger.log("loading cov from parameter data")
cov = pyemu.Cov.from_parameter_data(pst)
logger.log("loading cov from parameter data")
logger.log("building mean parameter values")
li = par.partrans.loc[cov.names] == "log"
mean = par.parval1.loc[cov.names]
info = par.loc[cov.names,:].copy()
info.loc[:,"mean"] = mean
info.loc[li,"mean"] = mean[li].apply(np.log10)
logger.log("building mean parameter values")
logger.log("building stdev parameter values")
if cov.isdiagonal:
std = cov.x.flatten()
else:
std = np.diag(cov.x)
std = np.sqrt(std)
info.loc[:,"prior_std"] = std
logger.log("building stdev parameter values")
if std.shape != mean.shape:
logger.lraise("mean.shape {0} != std.shape {1}".
format(mean.shape,std.shape))
if "grouper" in kwargs:
raise NotImplementedError()
else:
par_adj = par.loc[par.partrans.apply(lambda x: x in ["log","none"]),:]
grouper = par_adj.groupby(par_adj.pargp).groups
if len(grouper) == 0:
raise Exception("no adjustable parameters to plot")
fig = plt.figure(figsize=figsize)
if "fig_title" in kwargs:
plt.figtext(0.5,0.5,kwargs["fig_title"])
else:
plt.figtext(0.5,0.5,"pyemu.Pst.plot(kind=)\nfrom pest control file \n at {1}"
.format(pst.filename,str(datetime.now())),ha="center")
figs = []
ax_count = 0
grps_names = list(grouper.keys())
grps_names.sort()
for g in grps_names:
names = grouper[g]
logger.log("plotting priors for {0}".
format(.join(list(names))))
if ax_count % (nr * nc) == 0:
plt.tight_layout()
figs.append(fig)
fig = plt.figure(figsize=figsize)
axes = get_page_axes()
ax_count = 0
islog = False
vc = info.partrans.value_counts()
if vc.shape[0] > 1:
logger.warn("mixed partrans for group {0}".format(g))
elif "log" in vc.index:
islog = True
ax = axes[ax_count]
if "unique_only" in kwargs and kwargs["unique_only"]:
ms = info.loc[names,:].apply(lambda x: (x["mean"],x["prior_std"]),axis=1).unique()
for (m,s) in ms:
x, y = gaussian_distribution(m, s)
ax.fill_between(x, 0, y, facecolor=, alpha=0.5,
edgecolor="none")
else:
for m,s in zip(info.loc[names,],info.loc[names,]):
x,y = gaussian_distribution(m,s)
ax.fill_between(x,0,y,facecolor=,alpha=0.5,
edgecolor="none")
ax.set_title("{0}) group:{1}, {2} parameters".
format(abet[ax_count],g,names.shape[0]),loc="left")
ax.set_yticks([])
if islog:
ax.set_xlabel("$log_{10}$ parameter value",labelpad=0.1)
else:
ax.set_xlabel("parameter value", labelpad=0.1)
logger.log("plotting priors for {0}".
format(.join(list(names))))
ax_count += 1
for a in range(ax_count,nr*nc):
axes[a].set_axis_off()
axes[a].set_yticks([])
axes[a].set_xticks([])
plt.tight_layout()
figs.append(fig)
if filename is not None:
with PdfPages(filename) as pdf:
plt.tight_layout()
pdf.savefig(fig)
plt.close(fig)
logger.log("plot pst_prior")
else:
logger.log("plot pst_prior")
return figs
|
helper to plot prior parameter histograms implied by
parameter bounds. Saves a multipage pdf named <case>.prior.pdf
Parameters
----------
pst : pyemu.Pst
logger : pyemu.Logger
filename : str
PDF filename to save plots to. If None, return figs without saving. Default is None.
kwargs : dict
accepts 'grouper' as dict to group parameters on to a single axis (use
parameter groups if not passed),
'unique_only' to only show unique mean-stdev combinations within a
given group
Returns
-------
None
TODO
----
external parcov, unique mean-std pairs
|
26,038 |
def install_sql_hook():
try:
from django.db.backends.utils import CursorWrapper
except ImportError:
from django.db.backends.util import CursorWrapper
try:
real_execute = CursorWrapper.execute
real_executemany = CursorWrapper.executemany
except AttributeError:
# Very old Django versions do not expose these attributes; nothing to wrap.
return
|
If installed this causes Django's queries to be captured.
|
26,039 |
def cas2mach(Vcas, H):
Vtas = cas2tas(Vcas, H)
Mach = tas2mach(Vtas, H)
return Mach
|
Calibrated Airspeed to Mach number
|
26,040 |
def __create(self, client_id, cc_number, cvv, expiration_month,
expiration_year, user_name, email, address, **kwargs):
params = {
'client_id': client_id,
'cc_number': cc_number,
'cvv': cvv,
'expiration_month': expiration_month,
'expiration_year': expiration_year,
'user_name': user_name,
'email': email,
'address': address
}
return self.make_call(self.__create, params, kwargs)
|
Call documentation: `/credit_card/create
<https://www.wepay.com/developer/reference/credit_card#create>`_, plus
extra keyword parameter:
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
|
26,041 |
def combo_exhaustive_label_definition_check( self,
ontology: pd.DataFrame,
label_predicate:str,
definition_predicates:str,
diff = True) -> List[List[dict]]:
inside, outside = [], []
header = [] + list(ontology.columns)
for row in ontology.itertuples():
row = {header[i]:val for i, val in enumerate(row)}
label_obj = row[label_predicate]
if isinstance(label_obj, list):
if len(label_obj) != 1:
exit()
else:
label_obj = label_obj[0]
entity_label = self.local_degrade(label_obj)
label_search_results = self.label2rows.get(entity_label)
label_ilx_rows = label_search_results if label_search_results else []
definition_ilx_rows = []
for definition_predicate in definition_predicates:
definition_objs = row[definition_predicate]
if not definition_objs:
continue
definition_objs = [definition_objs] if not isinstance(definition_objs, list) else definition_objs
for definition_obj in definition_objs:
definition_obj = self.local_degrade(definition_obj)
definition_search_results = self.definition2rows.get(definition_obj)
if definition_search_results:
definition_ilx_rows.extend(definition_search_results)
ilx_rows = [dict(t) for t in {tuple(d.items()) for d in (label_ilx_rows + definition_ilx_rows)}]
if ilx_rows:
inside.append({
: row,
: ilx_rows,
})
else:
outside.append(row)
if diff:
diff = self.__exhaustive_diff(inside)
return inside, outside, diff
return inside, outside
|
Combo of label & definition exhaustive check out of convenience
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist they are also thrown into the colnames.
label_predicate: usually in qname form and is the colname of the DataFrame for the label
diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only
|
26,042 |
def check(self, item_id):
response = self._request("tasks/view/{id}".format(id=item_id))
if response.status_code == 404:
return False
try:
content = json.loads(response.content.decode())
status = content["task"]["status"]
if status == "completed" or status == "reported":
return True
except ValueError as e:
raise sandboxapi.SandboxError(e)
return False
|
Check if an analysis is complete
:type item_id: int
:param item_id: task_id to check.
:rtype: bool
:return: Boolean indicating if a report is done or not.
|
26,043 |
def make_chunks_from_unused(self,length,trig_overlap,play=0,min_length=0,
sl=0,excl_play=0,pad_data=0):
for seg in self.__sci_segs:
if seg.unused() > min_length:
end = seg.end() - pad_data
start = end - length
if (not play) or (play and (((end-sl-excl_play-729273613)%6370) <
(600+length-2*excl_play))):
trig_start = end - seg.unused() - trig_overlap
if (play == 2):
play_start = 729273613 + 6370 * \
math.floor((end-sl-excl_play-729273613) / 6370)
play_end = play_start + 600
trig_end = 0
if ( (play_end - 6370) > start ):
print "Two playground segments in this chunk"
print " Code to handle this case has not been implemented"
sys.exit(1)
else:
if play_start > trig_start:
trig_start = int(play_start)
if (play_end < end):
trig_end = int(play_end)
if (trig_end == 0) or (trig_end > trig_start):
seg.add_chunk(start, end, trig_start, trig_end)
else:
seg.add_chunk(start, end, trig_start)
seg.set_unused(0)
|
Create an extra chunk that uses up the unused data in the science segment.
@param length: length of chunk in seconds.
@param trig_overlap: length of time start generating triggers before the
start of the unused data.
@param play:
- 1 : only generate chunks that overlap with S2 playground data.
- 2 : as 1 plus compute trig start and end times to coincide
with the start/end of the playground
@param min_length: the unused data must be greater than min_length to make a
chunk.
@param sl: slide by sl seconds before determining playground data.
@param excl_play: exclude the first excl_play second from the start and end
of the chunk when computing if the chunk overlaps with playground.
@param pad_data: exclude the first and last pad_data seconds of the segment
when generating chunks
|
26,044 |
def show_user(self, login=None, envs=[], query=):
juicer.utils.Log.log_debug("Show User: %s", login)
(login, env))
continue
else:
url = "%s%s/" % (query, login)
_r = self.connectors[env].get(url)
if _r.status_code == Constants.PULP_GET_OK:
user = juicer.utils.load_json_str(_r.content)
juicer.utils.Log.log_info("Login: %s" % user[])
juicer.utils.Log.log_info("Name: %s" % user[])
juicer.utils.Log.log_info("Roles: %s" % .join(user[]))
if count < len(envs):
juicer.utils.Log.log_info("")
else:
_r.raise_for_status()
return True
|
`login` - Login or username of user
Show user in specified environments
|
26,045 |
def safe_makedirs(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError:
if not os.path.exists(path):
raise
|
Safe makedirs.
Works in a multithreaded scenario.
|
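A minimal usage sketch of `safe_makedirs` above; the function is repeated so the snippet runs standalone, and the concurrent-creation scenario is hypothetical:

```python
import os
import tempfile
import threading

def safe_makedirs(path):
    # Same EAFP pattern as above: a concurrent creator may win the race,
    # in which case the OSError is ignored as long as the path now exists.
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError:
            if not os.path.exists(path):
                raise

target = os.path.join(tempfile.gettempdir(), "demo_safe_makedirs")
threads = [threading.Thread(target=safe_makedirs, args=(target,)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(os.path.isdir(target))  # True, and no thread raised
```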
26,046 |
def condition_details_has_owner(condition_details, owner):
if 'subconditions' in condition_details:
result = condition_details_has_owner(condition_details['subconditions'], owner)
if result:
return True
elif isinstance(condition_details, list):
for subcondition in condition_details:
result = condition_details_has_owner(subcondition, owner)
if result:
return True
else:
if 'public_key' in condition_details \
and owner == condition_details['public_key']:
return True
return False
|
Check if the public_key of owner is in the condition details
as an Ed25519Fulfillment.public_key
Args:
condition_details (dict): dict with condition details
owner (str): base58 public key of owner
Returns:
bool: True if the public key is found in the condition details, False otherwise
|
26,047 |
def cold_spell_days(tas, thresh='-10 degC', window=5, freq='AS-JUL'):
t = utils.convert_units_to(thresh, tas)
over = tas < t
group = over.resample(time=freq)
return group.apply(rl.windowed_run_count, window=window, dim='time')
|
r"""Cold spell days
The number of days that are part of a cold spell, defined as five or more consecutive days with mean daily
temperature below a threshold in °C.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
thresh : str
Threshold temperature below which a cold spell begins [℃] or [K]. Default : '-10 degC'
window : int
Minimum number of days with temperature below threshold to qualify as a cold spell.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Cold spell days.
Notes
-----
Let :math:`T_i` be the mean daily temperature on day :math:`i`, the number of cold spell days during
period :math:`\phi` is given by
.. math::
\sum_{i \in \phi} \prod_{j=i}^{i+5} [T_j < thresh]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
|
26,048 |
def get_gender(self, name, country=None):
if not self.case_sensitive:
name = name.lower()
if name not in self.names:
return self.unknown_value
elif not country:
def counter(country_values):
country_values = map(ord, country_values.replace(" ", ""))
return (len(country_values),
sum(map(lambda c: c > 64 and c-55 or c-48, country_values)))
return self._most_popular_gender(name, counter)
elif country in self.__class__.COUNTRIES:
index = self.__class__.COUNTRIES.index(country)
counter = lambda e: (ord(e[index])-32, 0)
return self._most_popular_gender(name, counter)
else:
raise NoCountryError("No such country: %s" % country)
|
Returns best gender for the given name and country pair
|
26,049 |
def p_statement_list_1(self, p):
p[0] = p[3]
if p[1] is not None:
p[0].children.insert(0, p[1])
|
statement_list : statement SEMICOLON statement_list
|
26,050 |
def unhandle(self, handler):
with self._hlock:
try:
self._handler_list.remove(handler)
except ValueError:
raise ValueError("Handler is not handling this event, so cannot unhandle it.")
return self
|
unregister handler (removing callback function)
|
26,051 |
def average_repetitions(df, keys_mean):
if not in df.columns:
raise Exception(
)
cols = list(df.columns.values)
keys_keep = list(set(df.columns.tolist()) - set(keys_mean))
agg_dict = {x: _first for x in keys_keep}
agg_dict.update({x: np.mean for x in keys_mean})
for key in (, , , ):
if key in agg_dict:
del(agg_dict[key])
extra_dimensions_raw = [, , , ]
extra_dimensions = [x for x in extra_dimensions_raw if x in df.columns]
df = df.groupby(extra_dimensions).agg(agg_dict)
df.reset_index(inplace=True)
return df[cols]
|
average duplicate measurements. This requires that IDs and norrec labels
were assigned using the *assign_norrec_to_df* function.
Parameters
----------
df
DataFrame
keys_mean: list
list of keys to average. For all other keys the first entry will be
used.
|
26,052 |
def from_json_format(conf):
if in conf:
conf[] = int(conf[], 8)
if in conf:
conf[] = int(conf[], 8)
|
Convert fields of parsed json dictionary to python format
|
26,053 |
def insert_object(self, db_object):
obj = self.to_dict(db_object)
obj[] = True
self.collection.insert_one(obj)
|
Create new entry in the database.
Parameters
----------
db_object : (Sub-class of)ObjectHandle
|
26,054 |
def _copy_files(source, target):
source_files = listdir(source)
if not exists(target):
makedirs(target)
for filename in source_files:
full_filename = join(source, filename)
if isfile(full_filename):
shutil.copy(full_filename, target)
|
Copy all the files in source directory to target.
Ignores subdirectories.
|
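A hypothetical usage sketch for `_copy_files` above, assuming its module-level imports (`listdir`, `makedirs`, `exists`, `join`, `isfile` from `os`/`os.path`, plus `shutil`) are in place:

```python
import os
import tempfile

src = tempfile.mkdtemp()
dst = os.path.join(tempfile.mkdtemp(), "copy")

open(os.path.join(src, "a.txt"), "w").close()
os.makedirs(os.path.join(src, "subdir"))  # subdirectories are ignored

_copy_files(src, dst)                     # creates dst and copies a.txt only
print(sorted(os.listdir(dst)))            # ['a.txt']
```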
26,055 |
def _query(action=None,
command=None,
args=None,
method=,
header_dict=None,
data=None,
url=):
global LASTCALL
vm_ = get_configured_provider()
ratelimit_sleep = config.get_cloud_config_value(
, vm_, __opts__, search_global=False, default=0,
)
apikey = config.get_cloud_config_value(
, vm_, __opts__, search_global=False
)
if not isinstance(args, dict):
args = {}
if not in args.keys():
args[] = apikey
if action and not in args.keys():
args[] = .format(action, command)
if header_dict is None:
header_dict = {}
if method != :
header_dict[] =
decode = True
if method == :
decode = False
now = int(time.mktime(datetime.datetime.now().timetuple()))
if LASTCALL >= now:
time.sleep(ratelimit_sleep)
result = __utils__[](
url,
method,
params=args,
data=data,
header_dict=header_dict,
decode=decode,
decode_type=,
text=True,
status=True,
hide_fields=[, ],
opts=__opts__,
)
if in result[]:
if result[][]:
error_list = []
for error in result[][]:
msg = error[]
if msg == "Authentication failed":
raise SaltCloudSystemExit(
)
else:
error_list.append(msg)
raise SaltCloudException(
.format(", ".join(error_list))
)
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
log.debug(, result[])
return result[]
|
Make a web call to the Linode API.
|
26,056 |
def bind(function, *args, **kwargs):
def decorated(*inner_args, **inner_kwargs):
kwargs.update(inner_kwargs)
return function(*(inner_args + args), **kwargs)
copy_labels(function, decorated)
return decorated
|
Wraps the given function such that when it is called, the given arguments
are passed in addition to the connection argument.
:type function: function
:param function: The function that's ought to be wrapped.
:type args: list
:param args: Passed on to the called function.
:type kwargs: dict
:param kwargs: Passed on to the called function.
:rtype: function
:return: The wrapped function.
|
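A hypothetical usage sketch of `bind` above; `greet` is invented for illustration, and `copy_labels` is assumed to be importable from the same module as `bind`:

```python
def greet(conn, name, punctuation="!"):
    # The first positional argument is reserved for the connection object.
    print("hello", name, punctuation)

bound = bind(greet, "world", punctuation="?")
bound("conn-object")  # calls greet("conn-object", "world", punctuation="?")
```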
26,057 |
def clear(
self
):
self.mark_incomplete()
for object_class in self.object_classes:
self.session.query(object_class).delete()
self.close_session()
|
Delete all objects created by this task.
Iterate over `self.object_classes` and delete all objects of the listed classes.
|
26,058 |
def _group_until_different(items: Iterable[TIn],
key: Callable[[TIn], TKey],
value=lambda e: e):
return ((k, [value(i) for i in v]) for (k, v) in groupby(items, key))
|
Groups runs of items that are identical according to a keying function.
Args:
items: The items to group.
key: If two adjacent items produce the same output from this function,
they will be grouped.
value: Maps each item into a value to put in the group. Defaults to the
item itself.
Examples:
_group_until_different(range(11), key=is_prime) yields
(False, [0, 1])
(True, [2, 3])
(False, [4])
(True, [5])
(False, [6])
(True, [7])
(False, [8, 9, 10])
Yields:
Tuples containing the group key and item values.
|
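A hypothetical usage sketch of `_group_until_different` above, assuming the function and its `itertools.groupby` import are available:

```python
# Adjacent items with an equal key are grouped together.
print(list(_group_until_different("aaabbc", key=str.upper)))
# [('A', ['a', 'a', 'a']), ('B', ['b', 'b']), ('C', ['c'])]

# `value` maps each item before it is stored in its group.
pairs = [(1, "x"), (1, "y"), (2, "z")]
print(list(_group_until_different(pairs, key=lambda p: p[0], value=lambda p: p[1])))
# [(1, ['x', 'y']), (2, ['z'])]
```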
26,059 |
def get_instance(self, payload):
return ShortCodeInstance(self._version, payload, account_sid=self._solution[], )
|
Build an instance of ShortCodeInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.short_code.ShortCodeInstance
:rtype: twilio.rest.api.v2010.account.short_code.ShortCodeInstance
|
26,060 |
def to_bytes(value):
if isinstance(value, text_type):
return value.encode()
elif isinstance(value, ffi.CData):
return ffi.string(value)
elif isinstance(value, binary_type):
return value
else:
raise ValueError()
|
Converts bytes, unicode, and C char arrays to bytes.
Unicode strings are encoded to UTF-8.
|
26,061 |
def get_current_channel(self):
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get()
|
Get the current tv channel.
|
26,062 |
def unwatch(value):
if not isinstance(value, Watchable):
raise TypeError("Expected a Watchable, not %r." % value)
spectator = watcher(value)
try:
del value._instance_spectator
except Exception:
pass
return spectator
|
Return the :class:`Specatator` of a :class:`Watchable` instance.
|
26,063 |
def load_template_source(self, *ka):
template_name = ka[0]
for origin in self.get_template_sources(template_name):
try:
return self.get_contents(origin), origin.name
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(template_name)
|
Backward compatible method for Django < 2.0.
|
26,064 |
def getChildren(self, name=None, ns=None):
if ns is None:
if name is None:
return self.children
prefix, name = splitPrefix(name)
if prefix is not None:
ns = self.resolvePrefix(prefix)
return [c for c in self.children if c.match(name, ns)]
|
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain a prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
|
26,065 |
def _server_response_handler(self, response: Dict[str, Any]):
code = response.get("CODE")
if code == 100:
if self.debug:
print("auth succeed")
self._login_fut.set_result(response)
if code == 101:
if self.debug:
print()
return True
|
Handle status codes in the 100-199 range and act on the different server responses.
Parameters:
(response): - the response data as a Python dict
Return:
(bool): - strictly speaking, returns True as long as there was no error
|
26,066 |
def make_repr(inst, attrs):
arg_str = ", ".join(
"%s=%r" % (a, getattr(inst, a)) for a in attrs if hasattr(inst, a))
repr_str = "%s(%s)" % (inst.__class__.__name__, arg_str)
return repr_str
|
Create a repr from an instance of a class
Args:
inst: The class instance we are generating a repr of
attrs: The attributes that should appear in the repr
|
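A hypothetical usage sketch of `make_repr` above; attributes listed but not present on the instance are simply skipped:

```python
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # "label" is not set on the instance, so it is omitted from the repr.
        return make_repr(self, ["x", "y", "label"])

print(Point(1, 2))  # Point(x=1, y=2)
```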
26,067 |
def invalidate_cache(self, klass, instance=None, extra=None,
force_all=False):
values = self._get_cache_extras(klass, instance=instance,
extra=extra, force_all=force_all)
if values == CacheConfig.ALL:
self._increment_version()
elif values:
for value in values:
self._increment_version(extra=value)
|
Use this method to invalidate keys related to a particular
model or instance. Invalidating a cache is really just
incrementing the version for the right key(s).
:param klass: The model class you are invalidating. If the given \
class was not registered with this group no action will be taken.
:param instance: The instance you want to use with the registered\
instance_values. Usually the instance that was just saved. \
Defaults to None.
:param extra: A list of extra values that you would like incremented \
in addition to what was registered for this model.
:param force_all: Ignore all registered values and provided \
arguments and increment the major version for this group.
|
26,068 |
def enable_host_event_handler(self, host):
if not host.event_handler_enabled:
host.modified_attributes |= \
DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
host.event_handler_enabled = True
self.send_an_element(host.get_update_status_brok())
|
Enable event handlers for a host
Format of the line that triggers function call::
ENABLE_HOST_EVENT_HANDLER;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
|
26,069 |
def activate():
env_path = .join([deployment_root(),,env.project_fullname])
if not exists(env_path):
print env.host,"ERROR: The version",env.project_version,"does not exist at"
print env_path
sys.exit(1)
active = active_version()
servers = webserver_list()
if env.patch or active <> env.project_fullname:
for s in servers:
stop_webserver(s)
if not env.patch and active <> env.project_fullname:
if env.verbosity:
print env.host, "ACTIVATING version", env_path
if not env.nomigration:
sync_db()
if in env.INSTALLED_APPS and not env.nomigration and not env.manualmigration:
migration()
if env.manualmigration or env.MANUAL_MIGRATION: manual_migration()
activate_sites = [.join([d.name.replace(,),,env.project_version,]) for d in domain_sites()]
if in get_packages():
site_paths = [,]
else:
site_paths = []
for path in site_paths:
for site in _ls_sites(.join([path,])):
if site not in activate_sites:
sudo("rm %s/sites-enabled/%s"% (path,site))
for path in site_paths:
for site in activate_sites:
if not exists(.join([path,,site])):
sudo("chmod 644 %s" % .join([path,,site]))
sudo("ln -s %s/sites-available/%s %s/sites-enabled/%s"% (path,site,path,site))
if env.verbosity:
print " * enabled", "%s/sites-enabled/%s"% (path,site)
ln_path = .join([deployment_root(),,env.project_name])
run(+ln_path)
post_exec_hook()
run(% (env_path,ln_path))
if env.verbosity:
print env.host,env.project_fullname, "ACTIVATED"
else:
if env.verbosity and not env.patch:
print env.project_fullname,"is the active version"
if env.patch or active <> env.project_fullname:
for s in servers:
start_webserver(s)
print
return
|
Activates the version specified in ``env.project_version`` if it is different
from the current active version.
An active version is just the version that is symlinked.
|
26,070 |
def header(msg, *args, **kwargs):
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs)
|
Display a header
|
26,071 |
def setup_zmq(self):
self.context = zmq.Context()
self.push = self.context.socket(zmq.PUSH)
self.push_port = self.push.bind_to_random_port("tcp://%s" % self.host)
eventlet.spawn(self.zmq_pull)
eventlet.sleep(0)
|
Set up a PUSH and a PULL socket. The PUSH socket will push out
requests to the workers. The PULL socket will receive responses from
the workers and reply through the server socket.
|
26,072 |
def annotate(row, ax, x='x', y='y', text='text', xytext=(7, -5), textcoords='offset points', **kwargs):
text = row[text] if text in row else str(text)
x = row[x] if x in row else float(x)
y = row[y] if y in row else float(y)
ax.annotate(text, (row[x], row[y]), xytext=xytext, textcoords=textcoords, **kwargs)
return row[text]
|
Add a text label to the plot of a DataFrame indicated by the provided axis (ax).
Reference:
https://stackoverflow.com/a/40979683/623735
|
26,073 |
def bytes2guid(s):
assert isinstance(s, bytes)
u = struct.unpack
v = []
v.extend(u("<IHH", s[:8]))
v.extend(u(">HQ", s[8:10] + b"\x00\x00" + s[10:]))
return "%08X-%04X-%04X-%04X-%012X" % tuple(v)
|
Converts a serialized GUID to a text GUID
|
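A hypothetical usage sketch of `bytes2guid` above (assumes the module's `import struct`); the first three fields are unpacked little-endian and the remainder big-endian:

```python
guid = bytes2guid(bytes(range(16)))
print(guid)  # 03020100-0504-0706-0809-0A0B0C0D0E0F
```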
26,074 |
def ListComp(xp, fp, it, test=None):
xp.prefix = u""
fp.prefix = u" "
it.prefix = u" "
for_leaf = Leaf(token.NAME, u"for")
for_leaf.prefix = u" "
in_leaf = Leaf(token.NAME, u"in")
in_leaf.prefix = u" "
inner_args = [for_leaf, fp, in_leaf, it]
if test:
test.prefix = u" "
if_leaf = Leaf(token.NAME, u"if")
if_leaf.prefix = u" "
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
return Node(syms.atom,
[Leaf(token.LBRACE, u"["),
inner,
Leaf(token.RBRACE, u"]")])
|
A list comprehension of the form [xp for fp in it if test].
If test is None, the "if test" part is omitted.
|
26,075 |
def GetCallingModuleObjectAndName():
range_func = range if sys.version_info[0] >= 3 else xrange
for depth in range_func(1, sys.getrecursionlimit()):
|
Returns the module that's calling into this module.
We generally use this function to get the name of the module calling a
DEFINE_foo... function.
Returns:
The module object that called into this one.
Raises:
AssertionError: if no calling module could be identified.
|
26,076 |
def extractSNPs(snpsToExtract, options):
outPrefix = options.out + ".pruned_data"
plinkCommand = ["plink", "--noweb", "--bfile", options.bfile, "--extract",
snpsToExtract, "--make-bed", "--out", outPrefix]
runCommand(plinkCommand)
return outPrefix
|
Extract markers using Plink.
:param snpsToExtract: the name of the file containing markers to extract.
:param options: the options
:type snpsToExtract: str
:type options: argparse.Namespace
:returns: the prefix of the output files.
|
26,077 |
def refresh(self, row=None):
for widget in self.selection_widgets:
widget.setEnabled(self.listwidget.currentItem() is not None)
not_empty = self.listwidget.count() > 0
if self.sync_button is not None:
self.sync_button.setEnabled(not_empty)
|
Refresh widget
|
26,078 |
def strip_boolean_result(method, exc_type=None, exc_str=None, fail_ret=None):
@wraps(method)
def wrapped(*args, **kwargs):
ret = method(*args, **kwargs)
if ret[0]:
if len(ret) == 2:
return ret[1]
else:
return ret[1:]
else:
if exc_type:
raise exc_type(exc_str or 'call failed')
return fail_ret
return wrapped
|
Translate method's return value for stripping off success flag.
There are a lot of methods which return a "success" boolean and have
several out arguments. Translate such a method to return the out arguments
on success and None on failure.
|
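A hypothetical usage sketch of `strip_boolean_result` above (assumes `functools.wraps` is imported in its module, as the `@wraps` decorator implies); `lookup` is an invented stand-in for a "(success, value)"-style call:

```python
def lookup(key):
    table = {"a": 1}
    return (key in table, table.get(key))

safe_lookup = strip_boolean_result(lookup, exc_type=KeyError, exc_str="missing")

print(safe_lookup("a"))  # 1 -- success flag stripped, out value returned
try:
    safe_lookup("b")     # failure flag -> raises KeyError("missing")
except KeyError as err:
    print(err)
```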
26,079 |
def get(self):
if self.outfile:
if self.result in [, , ]:
fd = open(self.outfile + , )
fd.write(str(self.data))
elif self.result == :
fd = open(self.outfile + , )
data = json.loads(self.data, object_pairs_hook=OrderedDict)
json.dump(data, fd)
elif self.result == :
fd = open(self.outfile + , )
root = ET.fromstring(self.data)
tree = ET.ElementTree(root)
tree.write(fd, encoding=)
fd.close()
else:
return self.data
|
Return form result
|
26,080 |
def strace(device, trace_address, breakpoint_address):
jlink = pylink.JLink()
jlink.open()
jlink.power_on()
jlink.set_tif(pylink.JLinkInterfaces.SWD)
jlink.connect(device)
jlink.reset()
jlink.breakpoint_clear_all()
op = pylink.JLinkStraceOperation.TRACE_START
jlink.strace_clear_all()
jlink.strace_start()
bphandle = jlink.breakpoint_set(breakpoint_address, thumb=True)
trhandle = jlink.strace_code_fetch_event(op, address=trace_address)
jlink.restart()
time.sleep(1)
while True:
if jlink.halted():
break
while True:
instructions = jlink.strace_read(1)
if len(instructions) == 0:
break
instruction = instructions[0]
print(jlink.disassemble_instruction(instruction))
jlink.power_off()
jlink.close()
|
Implements simple trace using the STrace API.
Args:
device (str): the device to connect to
trace_address (int): address to begin tracing from
breakpoint_address (int): address to breakpoint at
Returns:
``None``
|
26,081 |
def normpdf(x, mu, sigma):
u = (x-mu)/abs(sigma)
y = (1/(math.sqrt(2*pi)*abs(sigma)))*math.exp(-u*u/2)
return y
|
Describes the relative likelihood that a real-valued random variable X will
take on a given value.
http://en.wikipedia.org/wiki/Probability_density_function
|
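A minimal sketch checking `normpdf` above at the mean; the function is restated with explicit imports since the snippet assumes `pi` is available at module level:

```python
import math

def normpdf(x, mu, sigma):
    u = (x - mu) / abs(sigma)
    return (1 / (math.sqrt(2 * math.pi) * abs(sigma))) * math.exp(-u * u / 2)

# At x == mu the density is 1 / (sigma * sqrt(2*pi)).
print(normpdf(0.0, 0.0, 2.0))              # ~0.1995
print(1 / (2.0 * math.sqrt(2 * math.pi)))  # same value
```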
26,082 |
def __find_incongruities(self, op, index):
if len(self) == 1:
return
hits = []
intervals = []
if self.order == :
one, two = ,
else:
one, two = ,
for i, iv in enumerate(self[:-1]):
next_iv = self[i+1]
if op(getattr(iv, one), getattr(next_iv, two)):
hits.append(i)
top = getattr(iv, one)
base = getattr(next_iv, two)
iv_gap = Interval(top, base)
intervals.append(iv_gap)
if index and hits:
return hits
elif intervals:
return Striplog(intervals)
else:
return
|
Private method. Finds gaps and overlaps in a striplog. Called by
find_gaps() and find_overlaps().
Args:
op (operator): ``operator.gt`` or ``operator.lt``
index (bool): If ``True``, returns indices of intervals with
gaps after them.
Returns:
Striplog: A striplog of all the gaps. A sort of anti-striplog.
|
26,083 |
def __process_results(results):
if in results and in results:
return []
result_list = []
split = results.split(sep=)[1:-1]
for entry in split:
entry_dict = {}
for value in entry.split():
if len(value) < 1:
continue
(desc, val) = value.split()
entry_dict[desc.replace(, )] = val.strip()
result_list.append(entry_dict)
return result_list
|
Processes the result from __query to get valid json from every entry.
:param results: Results from __query
:type results: str
:returns: python list of dictionaries containing the relevant results.
:rtype: list
|
26,084 |
def all(cls, domain=None):
Site = cls
site = Session.query(Site)
if domain:
site = site.filter(Site.domain == domain)
return site.all()
|
Return all sites
@param domain: The domain to filter by
@type domain: Domain
@rtype: list of Site
|
26,085 |
def compute_samples_displays(
self,
program: Union[circuits.Circuit, schedules.Schedule],
param_resolver: = None,
) -> study.ComputeDisplaysResult:
return self.compute_samples_displays_sweep(
program,
study.ParamResolver(param_resolver))[0]
|
Computes SamplesDisplays in the supplied Circuit or Schedule.
Args:
program: The circuit or schedule to simulate.
param_resolver: Parameters to run with the program.
Returns:
ComputeDisplaysResult for the simulation.
|
26,086 |
def print_version(self, file=None):
optparse.OptionParser.print_version(self, file)
file.flush()
|
Outputs version information to the file if specified, or to
the io_manager's stdout if available, or to sys.stdout.
|
26,087 |
def set_autoindent(self,value=None):
if value != 0 and not self.has_readline:
if os.name == :
warn("The auto-indent feature requires the readline library")
self.autoindent = 0
return
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
|
Set the autoindent flag, checking for readline support.
If called with no arguments, it acts as a toggle.
|
26,088 |
def distinct(self, *args, **_filter):
if not self.exists:
return iter([])
columns = []
clauses = []
for column in args:
if isinstance(column, ClauseElement):
clauses.append(column)
else:
if not self.has_column(column):
raise DatasetException("No such column: %s" % column)
columns.append(self.table.c[column])
clause = self._args_to_clause(_filter, clauses=clauses)
if not len(columns):
return iter([])
q = expression.select(columns,
distinct=True,
whereclause=clause,
order_by=[c.asc() for c in columns])
return self.db.query(q)
|
Return all the unique (distinct) values for the given ``columns``.
::
# returns only one row per year, ignoring the rest
table.distinct('year')
# works with multiple columns, too
table.distinct('year', 'country')
# you can also combine this with a filter
table.distinct('year', country='China')
|
26,089 |
def list(payment):
if isinstance(payment, resources.Payment):
payment = payment.id
http_client = HttpClient()
response, _ = http_client.get(routes.url(routes.REFUND_RESOURCE, payment_id=payment))
return resources.APIResourceCollection(resources.Refund, **response)
|
List all the refunds for a payment.
:param payment: The payment object or the payment id
:type payment: resources.Payment|string
:return: A collection of refunds
:rtype: resources.APIResourceCollection
|
26,090 |
def make_model(self, add_indra_json=True):
    self.add_indra_json = add_indra_json
    for stmt in self.statements:
        if isinstance(stmt, Modification):
            self._add_modification(stmt)
        if isinstance(stmt, SelfModification):
            self._add_self_modification(stmt)
        elif isinstance(stmt, RegulateActivity) or \
                isinstance(stmt, RegulateAmount):
            self._add_regulation(stmt)
        elif isinstance(stmt, Complex):
            self._add_complex(stmt)
        elif isinstance(stmt, Gef):
            self._add_gef(stmt)
        elif isinstance(stmt, Gap):
            self._add_gap(stmt)
        elif isinstance(stmt, Influence):
            self._add_influence(stmt)
    network_description = ''
    # The aspect and attribute keys below ('networkAttributes', 'n', 'v') were
    # stripped in extraction and are reconstructed from the CX format.
    self.cx['networkAttributes'].append({'n': 'name',
                                         'v': self.network_name})
    self.cx['networkAttributes'].append({'n': 'description',
                                         'v': network_description})
    cx_str = self.print_cx()
    return cx_str
|
Assemble the CX network from the collected INDRA Statements.
This method assembles a CX network from the set of INDRA Statements.
The assembled network is set as the assembler's cx argument.
Parameters
----------
add_indra_json : Optional[bool]
If True, the INDRA Statement JSON annotation is added to each
edge in the network. Default: True
Returns
-------
cx_str : str
The json serialized CX model.
|
26,091 |
def make_dot(self, filename_or_stream, auts):
    if isinstance(filename_or_stream, str):
        # Python 2's file() builtin replaced with open(); write mode assumed.
        stream = open(filename_or_stream, 'w')
    else:
        stream = filename_or_stream
    dot = DotFile(stream)
    for aut in auts:
        dot.start(aut.name)
        dot.node()  # default node attributes were stripped in extraction
        for st in aut.states:
            label = st.name
            if st.entering:
                # Label format strings below were lost in extraction; these are placeholders.
                label += ' / enter: %s' % ', '.join(str(s) for s in st.entering)
            if st.leaving:
                label += ' / leave: %s' % ', '.join(str(s) for s in st.leaving)
            label = '"%s"' % label
            dot.state(st.name, label=label)
        for st in aut.states:
            for tr in st.transitions:
                dot.transition(tr.s_from.name, tr.s_to.name, tr.when)
        dot.end()
    dot.finish()
|
Create a graphviz .dot representation of the automaton.
|
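The .dot output the method builds can be illustrated without the DotFile helper. A minimal sketch that writes an equivalent digraph by hand (the state names, transition tuples, and styling here are hypothetical):

import io

def write_dot(stream, name, states, transitions):
    # states: iterable of state-name strings
    # transitions: iterable of (from_state, to_state, guard) tuples
    stream.write('digraph "%s" {\n' % name)
    for st in states:
        stream.write('  "%s" [shape=box];\n' % st)
    for s_from, s_to, when in transitions:
        stream.write('  "%s" -> "%s" [label="%s"];\n' % (s_from, s_to, when))
    stream.write('}\n')

buf = io.StringIO()
write_dot(buf, "traffic_light", ["red", "green"],
          [("red", "green", "timer"), ("green", "red", "timer")])
print(buf.getvalue())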
26,092 |
def main():
version_num=pmag.get_version()
orient_file,samp_file = "orient","er_samples.txt"
args=sys.argv
dir_path,out_path=,
default_outfile = True
if in args:
ind=args.index()
dir_path=args[ind+1]
if in args:
ind=args.index()
out_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-F" in args:
ind=args.index("-F")
orient_file=sys.argv[ind+1]
default_outfile = False
if "-f" in args:
ind=args.index("-f")
samp_file=sys.argv[ind+1]
orient_file=out_path++orient_file
samp_file=dir_path++samp_file
ErSamples=[]
Required=[,,,,]
Samps,file_type=pmag.magic_read(samp_file)
Locs=[]
OrKeys=[,,,,,,,,,,,]
print("file_type", file_type)
if file_type.lower()==:
SampKeys=[,,,,,,,,,,,]
elif file_type.lower()==:
SampKeys=[,]
else:
print()
for samp in Samps:
if samp[] not in Locs:Locs.append(samp[])
for location_name in Locs:
loc_samps=pmag.get_dictitem(Samps,,location_name,)
OrOut=[]
for samp in loc_samps:
if samp[] not in ErSamples:
ErSamples.append(samp[])
OrRec={}
if in list(samp.keys()) and samp[].strip()!="":
date=samp[].split()
OrRec[]=date[1]++date[2]++date[0][2:4]
for i in range(len(SampKeys)):
if SampKeys[i] in list(samp.keys()):OrRec[OrKeys[i]]=samp[SampKeys[i]]
for key in Required:
if key not in list(OrRec.keys()):OrRec[key]=""
OrOut.append(OrRec)
loc=location_name.replace(" ","_")
if default_outfile:
outfile=orient_file++loc+
else:
outfile=orient_file
pmag.magic_write(outfile,OrOut,location_name)
print("Data saved in: ", outfile)
|
NAME
convert_samples.py
DESCRIPTION
takes an er_samples or magic_measurements format file and creates an orient.txt template
SYNTAX
convert_samples.py [command line options]
OPTIONS
-f FILE: specify input file, default is er_samples.txt
-F FILE: specify output file, default is: orient_LOCATION.txt
INPUT FORMAT
er_samples.txt or magic_measurements format file
OUTPUT
orient.txt format file
|
26,093 |
def get_query_parameters(args, cell_body, date_time=datetime.datetime.now()):
    env = google.datalab.utils.commands.notebook_environment()
    config = google.datalab.utils.commands.parse_config(cell_body, env=env, as_dict=False)
    sql = args['query']  # key name reconstructed; the literal was stripped in extraction
    if sql is None:
        # placeholder message; the original string was stripped
        raise Exception('Cannot extract query parameters without a query')
    if config:
        jsonschema.validate(config, BigQuerySchema.QUERY_PARAMS_SCHEMA)
    config = config or {}
    config_parameters = config.get('parameters', [])  # key name reconstructed
    return bigquery.Query.get_query_parameters(config_parameters, date_time=date_time)
|
Extract query parameters from cell body if provided
Also validates the cell body schema using jsonschema to catch errors before sending the http
request. This validation isn't complete, however; it does not validate recursive schemas,
but it acts as a good filter against most simple schemas
Args:
args: arguments passed to the magic cell
cell_body: body of the magic cell
date_time: The timestamp at which the date-time related parameters need to be resolved.
Returns:
Validated object containing query parameters
|
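The cell body the function parses is a config whose parameters list is validated with jsonschema before any HTTP request is made. The schema and key names below are illustrative stand-ins (not the actual BigQuerySchema.QUERY_PARAMS_SCHEMA), but they show the fail-fast validation pattern:

import jsonschema

# Hypothetical, simplified stand-in for BigQuerySchema.QUERY_PARAMS_SCHEMA.
QUERY_PARAMS_SCHEMA = {
    "type": "object",
    "properties": {
        "parameters": {
            "type": "array",
            "items": {
                "type": "object",
                "required": ["name", "type", "value"],
            },
        },
    },
}

config = {
    "parameters": [
        {"name": "corpus", "type": "STRING", "value": "hamlet"},
        {"name": "min_count", "type": "INT64", "value": 10},
    ]
}

# Fail fast on a malformed cell body before sending anything to BigQuery.
jsonschema.validate(config, QUERY_PARAMS_SCHEMA)
print(config.get("parameters", []))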
26,094 |
def get_shortlink(self, shortlink_id_or_url):
    if "://" not in shortlink_id_or_url:
        # Path segment reconstructed; the literal was stripped in extraction.
        shortlink_id_or_url = self.merchant_api_base_url + '/shortlink/' + shortlink_id_or_url + '/'
    return self.do_req('GET', shortlink_id_or_url).json()
|
Retrieve registered shortlink info
Arguments:
shortlink_id_or_url:
Shortlink id or url, assigned by mCASH
|
26,095 |
def from_timestamp_pb(cls, stamp):
microseconds = int(stamp.seconds * 1e6)
bare = from_microseconds(microseconds)
return cls(
bare.year,
bare.month,
bare.day,
bare.hour,
bare.minute,
bare.second,
nanosecond=stamp.nanos,
tzinfo=pytz.UTC,
)
|
Parse RFC 3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp message
|
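The conversion above keeps sub-microsecond precision by building the datetime from whole microseconds while carrying the raw nanosecond field separately. The arithmetic can be checked without protobuf, using a simple stand-in for the Timestamp message:

import datetime
from collections import namedtuple

import pytz

# Stand-in for google.protobuf.timestamp_pb2.Timestamp (seconds since epoch + nanos).
Timestamp = namedtuple("Timestamp", ["seconds", "nanos"])

stamp = Timestamp(seconds=1_600_000_000, nanos=123_456_789)

# Whole microseconds give the datetime part; the nanos field is kept verbatim.
bare = datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC) + datetime.timedelta(
    microseconds=int(stamp.seconds * 1e6))
print(bare.isoformat())  # 2020-09-13T12:26:40+00:00
print(stamp.nanos)       # 123456789, preserved alongside the datetime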
26,096 |
def get_module_uuid(plpy, moduleid):
    # Parameter type reconstructed; legacy moduleids are string ids like 'm12345'.
    plan = plpy.prepare("SELECT uuid FROM modules WHERE moduleid = $1;",
                        ('text',))
    result = plpy.execute(plan, (moduleid,), 1)
    if result:
        return result[0]['uuid']
|
Retrieve page uuid from legacy moduleid.
|
26,097 |
def apply_async(self, args, kwargs, **options):
self._validate_required_class_vars()
cache_key = self._get_cache_key(**kwargs)
task_id = self.cache.get(cache_key)
if task_id:
with self.cache.lock( % cache_key):
task_meta = super(JobtasticTask, self).apply_async(
args,
kwargs,
**options
)
logging.info(, task_meta.status)
if task_meta.status in (PROGRESS, PENDING):
self.cache.set(
% cache_key,
task_meta.task_id,
timeout=self.herd_avoidance_timeout)
logging.info(
, cache_key)
return task_meta
|
Put this task on the Celery queue as a singleton. Only one of this type
of task with its distinguishing args/kwargs will be allowed on the
queue at a time. Subsequent duplicate tasks called while this task is
still running will just latch on to the results of the running task by
synchronizing the task uuid. Additionally, identical task calls will
return those results for the next ``cache_duration`` seconds.
|
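Several cache keys and log messages in the row above were stripped, but the herd-avoidance idea is straightforward: before queueing, check a cache for the id of an equivalent in-flight task and, if found, latch onto it instead of submitting a duplicate. A Celery-free sketch of that pattern (the cache API, key prefix, and helper names here are assumptions, not Jobtastic's internals):

import hashlib
import json

def cache_key_for(task_name, kwargs):
    # Stable key derived from the task name and its distinguishing kwargs.
    payload = json.dumps(kwargs, sort_keys=True)
    return "%s:%s" % (task_name, hashlib.sha1(payload.encode()).hexdigest())

def submit_once(cache, task_name, kwargs, submit):
    """Call submit(kwargs) unless an equivalent task id is already cached."""
    key = "herd:" + cache_key_for(task_name, kwargs)
    existing = cache.get(key)
    if existing is not None:
        return existing              # latch onto the in-flight task
    task_id = submit(kwargs)         # e.g. apply_async(...).task_id
    cache.setdefault(key, task_id)   # real code would also set a herd-avoidance timeout
    return task_id

cache = {}
first = submit_once(cache, "report", {"year": 2020}, lambda kw: "task-1")
second = submit_once(cache, "report", {"year": 2020}, lambda kw: "task-2")
print(first, second)  # task-1 task-1 -> the duplicate call reused the first id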
26,098 |
def _dbsetup(self):
self._dbconn = sqlite3.connect(self._db_file)
sql =
self._dbconn.execute(sql)
sql =
self._dbconn.execute(sql)
self._dbconn.commit()
|
Create/open local SQLite database
|
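The two SQL statements in the row above were lost in extraction, so only their role (creating the local tables) is known. A sketch of what such a setup typically looks like, with a purely hypothetical schema:

import sqlite3

def dbsetup(db_file):
    # Hypothetical schema; the original CREATE statements were stripped.
    conn = sqlite3.connect(db_file)
    conn.execute(
        "CREATE TABLE IF NOT EXISTS items ("
        " id INTEGER PRIMARY KEY,"
        " name TEXT NOT NULL,"
        " updated_at TEXT)"
    )
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_items_name ON items (name)"
    )
    conn.commit()
    return conn

conn = dbsetup(":memory:")
print(conn.execute("SELECT name FROM sqlite_master ORDER BY name").fetchall())
# [('idx_items_name',), ('items',)]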
26,099 |
def compute_alignments(self, prev_state, precomputed_values, mask=None):
WaSp = T.dot(prev_state, self.Wa)
UaH = precomputed_values
if UaH.ndim == 2:
preact = WaSp[:, None, :] + UaH[None, :, :]
else:
preact = WaSp[:, None, :] + UaH
act = T.activate(preact, )
align_scores = T.dot(act, self.Va)
if mask:
mask = (1 - mask) * -99.00
if align_scores.ndim == 3:
align_scores += mask[None, :]
else:
align_scores += mask
align_weights = T.nnet.softmax(align_scores)
return align_weights
|
Compute the alignment weights based on the previous state.
|
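The alignment computation is additive (Bahdanau-style) attention: project the previous decoder state and the encoder states, combine them through a nonlinearity (the stripped activation is assumed to be tanh here), take a dot product with a vector Va, and softmax over source positions. A NumPy sketch of the same arithmetic for a single sequence:

import numpy as np

rng = np.random.default_rng(0)
hidden, att_dim, seq_len = 4, 3, 5

prev_state = rng.normal(size=hidden)            # previous decoder state
enc_states = rng.normal(size=(seq_len, hidden)) # encoder hidden states

Wa = rng.normal(size=(hidden, att_dim))  # projects the decoder state
Ua = rng.normal(size=(hidden, att_dim))  # projects the encoder states (precomputed once)
Va = rng.normal(size=att_dim)

UaH = enc_states @ Ua            # the "precomputed_values" above
preact = prev_state @ Wa + UaH   # broadcast over the sequence axis
scores = np.tanh(preact) @ Va    # tanh assumed for the stripped activation

# Optional mask: positions with mask == 0 get a large negative score, as in the row above.
mask = np.array([1, 1, 1, 0, 0])
scores = scores + (1 - mask) * -99.0

weights = np.exp(scores - scores.max())
weights /= weights.sum()         # softmax over source positions
print(weights.round(3), weights.sum())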