Unnamed: 0 (int64, 0 – 389k) | code (string, lengths 26 – 79.6k) | docstring (string, lengths 1 – 46.9k)
---|---|---|
18,800 | def strel_pair(x, y):
x_center = int(np.abs(x))
y_center = int(np.abs(y))
result = np.zeros((y_center * 2 + 1, x_center * 2 + 1), bool)
result[y_center, x_center] = True
result[y_center + int(y), x_center + int(x)] = True
return result | Create a structuring element composed of the origin and another pixel
x, y - x and y offsets of the other pixel
returns a structuring element |
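A minimal usage sketch for the `strel_pair` row above (assuming the function is in scope and numpy is imported as `np`):

```python
import numpy as np

# Offsets (2, 1): origin plus the pixel two columns right and one row down.
se = strel_pair(2, 1)
print(se.shape)            # (3, 5) -- just large enough to hold both pixels around the center
print(se[1, 2], se[2, 4])  # True True -- the origin and the offset pixel
```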
18,801 | def MSTORE(self, address, value):
if istainted(self.pc):
for taint in get_taints(self.pc):
value = taint_with(value, taint)
self._allocate(address, 32)
self._store(address, value, 32) | Save word to memory |
18,802 | def authenticate(url, account, key, by=, expires=0, timestamp=None,
timeout=None, request_type="xml", admin_auth=False,
use_password=False, raise_on_error=False):
if timestamp is None:
timestamp = int(time.time()) * 1000
pak = ""
if not admin_auth:
pak = preauth.create_preauth(account, key, by, expires, timestamp)
if request_type == "xml":
auth_request = RequestXml()
else:
auth_request = RequestJson()
request_data = {
: {
: by,
: account
}
}
ns = "urn:zimbraAccount"
if admin_auth:
ns = "urn:zimbraAdmin"
request_data[] = key
elif use_password:
request_data[] = {
"_content": key
}
else:
request_data[] = {
: timestamp,
: expires,
: pak
}
auth_request.add_request(
,
request_data,
ns
)
server = Communication(url, timeout)
if request_type == "xml":
response = ResponseXml()
else:
response = ResponseJson()
server.send_request(auth_request, response)
if response.is_fault():
if raise_on_error:
raise AuthenticationFailed(
"Cannot authenticate user: (%s) %s" % (
response.get_fault_code(),
response.get_fault_message()
)
)
return None
return response.get_response()[][] | Authenticate to the Zimbra server
:param url: URL of Zimbra SOAP service
:param account: The account to be authenticated against
:param key: The preauth key of the domain of the account or a password (if
admin_auth or use_password is True)
:param by: If the account is specified as a name, an ID or a
ForeignPrincipal
:param expires: When the token expires (or 0 for default expiration)
:param timestamp: When the token was requested (None for "now")
:param timeout: Timeout for the communication with the server. Defaults
to the urllib2-default
:param request_type: Which type of request to use ("xml" (default) or
"json")
:param admin_auth: This request should authenticate and generate an admin
token. The "key"-parameter therefore holds the admin password (implies
use_password)
:param use_password: The "key"-parameter holds a password. Do a password-
based user authentication.
:param raise_on_error: Should I raise an exception when an authentication
error occurs or just return None?
:return: The authentication token or None
:rtype: str or None or unicode |
18,803 | def close(self):
endpoint = self.endpoint.replace("/api/v1/spans", "")
logger.debug("Zipkin trace may be located at this URL {}/traces/{}".format(endpoint, self.trace_id)) | End the report. |
18,804 | def lpad(col, len, pad):
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad)) | Left-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'##abcd')] |
18,805 | def _get_string_match_value(self, string, string_match_type):
if string_match_type == Type(**get_type_data()):
return string
elif string_match_type == Type(**get_type_data()):
return re.compile( + string, re.I)
elif string_match_type == Type(**get_type_data()):
return re.compile( + string + )
elif string_match_type == Type(**get_type_data()):
return re.compile( + string + , re.I) | Gets the match value |
18,806 | def find_by_reference_ids(reference_ids, _connection=None, page_size=100,
page_number=0, sort_by=enums.DEFAULT_SORT_BY,
sort_order=enums.DEFAULT_SORT_ORDER):
if not isinstance(reference_ids, (list, tuple)):
err = "Video.find_by_reference_ids expects an iterable argument"
raise exceptions.PyBrightcoveError(err)
ids = .join(reference_ids)
return connection.ItemResultSet(
, Video, _connection, page_size,
page_number, sort_by, sort_order, reference_ids=ids) | List all videos identified by a list of reference ids |
18,807 | def serialize_dictionary(dictionary):
string_value = {}
for k, v in list(dictionary.items()):
if isinstance(v, QUrl):
string_value[k] = v.toString()
elif isinstance(v, (QDate, QDateTime)):
string_value[k] = v.toString(Qt.ISODate)
elif isinstance(v, datetime):
string_value[k] = v.isoformat()
elif isinstance(v, date):
string_value[k] = v.isoformat()
elif isinstance(v, dict):
string_value[k] = serialize_dictionary(v)
else:
string_value[k] = v
return string_value | Function to stringify a dictionary recursively.
:param dictionary: The dictionary.
:type dictionary: dict
:return: The string.
:rtype: basestring |
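A small usage sketch for `serialize_dictionary` above (assumes the Qt and datetime imports of the original module are available; shown here with plain datetime/date values only):

```python
from datetime import date, datetime

payload = {"created": datetime(2020, 1, 2, 3, 4, 5), "due": date(2020, 2, 1), "nested": {"count": 3}}
print(serialize_dictionary(payload))
# {'created': '2020-01-02T03:04:05', 'due': '2020-02-01', 'nested': {'count': 3}}
```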
18,808 | def clean_item_no_list(i):
itype = type(i)
if itype == dict:
return clean_dict(i, clean_item_no_list)
elif itype == list:
return clean_tuple(i, clean_item_no_list)
elif itype == tuple:
return clean_tuple(i, clean_item_no_list)
elif itype == numpy.float32:
return float(i)
elif itype == numpy.float64:
return float(i)
elif itype == numpy.int16:
return int(i)
elif itype == numpy.uint16:
return int(i)
elif itype == numpy.int32:
return int(i)
elif itype == numpy.uint32:
return int(i)
elif itype == float:
return i
elif itype == str:
return i
elif itype == int:
return i
elif itype == bool:
return i
elif itype == type(None):
return i
logging.info("[2] Unable to handle type %s", itype)
return None | Return a json-clean item or None. Will log info message for failure. |
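Hypothetical usage of `clean_item_no_list` above, assuming it (and the `clean_dict`/`clean_tuple` helpers it calls) is importable:

```python
import numpy

print(clean_item_no_list(numpy.float32(1.5)))  # 1.5 as a plain Python float
print(clean_item_no_list(numpy.int32(7)))      # 7 as a plain Python int
print(clean_item_no_list(object()))            # None; the unhandled type is logged via logging.info
```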
18,809 | def load_features(self, features, image_type=None, from_array=False,
threshold=0.001):
if from_array:
if isinstance(features, list):
features = features[0]
self._load_features_from_array(features)
elif path.exists(features[0]):
self._load_features_from_images(features)
else:
self._load_features_from_dataset(
features, image_type=image_type, threshold=threshold) | Load features from current Dataset instance or a list of files.
Args:
features: List containing paths to, or names of, features to
extract. Each element in the list must be a string containing
either a path to an image, or the name of a feature (as named
in the current Dataset). Mixing of paths and feature names
within the list is not allowed.
image_type: Optional suffix indicating which kind of image to use
for analysis. Only used if features are taken from the Dataset;
if features is a list of filenames, image_type is ignored.
from_array: If True, the features argument is interpreted as a
string pointing to the location of a 2D ndarray on disk
containing feature data, where rows are voxels and columns are
individual features.
threshold: If features are taken from the dataset, this is the
threshold passed to the meta-analysis module to generate fresh
images. |
18,810 | def autoconfig_url_from_registry():
if not ON_WINDOWS:
raise NotWindowsError()
try:
with winreg.OpenKey(winreg.HKEY_CURRENT_USER,
) as key:
return winreg.QueryValueEx(key, )[0]
except WindowsError:
return | Get the PAC ``AutoConfigURL`` value from the Windows Registry.
This setting is visible as the "use automatic configuration script" field in
Internet Options > Connection > LAN Settings.
:return: The value from the registry, or None if the value isn't configured or available.
Note that it may be local filesystem path instead of a URL.
:rtype: str|None
:raises NotWindowsError: If called on a non-Windows platform. |
18,811 | def query_all(self):
return self.query_model(self.model, self.condition, order_by=self.order_by,
group_by=self.group_by, having=self.having) | Query all records without limit and offset. |
18,812 | def get_comments_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
return self._get_resource_per_page(
resource=DELIVERY_NOTE_COMMENTS,
per_page=per_page,
page=page,
params={: delivery_note_id},
) | Get comments of delivery note per page
:param delivery_note_id: the delivery note
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list |
18,813 | def to_config(self, k, v):
if k == "setup":
return base.to_commandline(v)
return super(DataGenerator, self).to_config(k, v) | Hook method that allows conversion of individual options.
:param k: the key of the option
:type k: str
:param v: the value
:type v: object
:return: the potentially processed value
:rtype: object |
18,814 | def annotate(args):
from jcvi.utils.grouper import Grouper
valid_resolve_choices = ["alignment", "overlap"]
p = OptionParser(annotate.__doc__)
p.add_option("--resolve", default="alignment", choices=valid_resolve_choices,
help="Resolve ID assignment based on a certain metric" \
+ " [default: %default]")
p.add_option("--atg_name", default=False, action="store_true",
help="Specify is locus IDs in `new.bed` file follow ATG nomenclature" \
+ " [default: %default]")
g1 = OptionGroup(p, "Optional parameters (alignment):\n" \
+ "Use if resolving ambiguities based on sequence `alignment`")
g1.add_option("--pid", dest="pid", default=35., type="float",
help="Percent identity cutoff [default: %default]")
g1.add_option("--score", dest="score", default=250., type="float",
help="Alignment score cutoff [default: %default]")
p.add_option_group(g1)
g2 = OptionGroup(p, "Optional parameters (overlap):\n" \
+ "Use if resolving ambiguities based on `overlap` length\n" \
+ "Parameters equivalent to `intersectBed`")
g2.add_option("-f", dest="f", default=0.5, type="float",
help="Minimum overlap fraction (0.0 - 1.0) [default: %default]")
g2.add_option("-r", dest="r", default=False, action="store_true",
help="Require fraction overlap to be reciprocal [default: %default]")
g2.add_option("-s", dest="s", default=True, action="store_true",
help="Require same strandedness [default: %default]")
p.add_option_group(g2)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
nbedfile, obedfile = args
npf, opf = nbedfile.rsplit(".", 1)[0], obedfile.rsplit(".", 1)[0]
cbedfile = "consolidated.bed"
if not os.path.isfile(cbedfile):
consolidate(nbedfile, obedfile, cbedfile)
else:
logging.warning("`{0}` already exists. Skipping step".format(cbedfile))
logging.warning("Resolving ID assignment ambiguity based on `{0}`".\
format(opts.resolve))
if opts.resolve == "alignment":
pairsfile = "nw.pairs"
scoresfile = "nw.scores"
if not os.path.isfile(pairsfile):
get_pairs(cbedfile, pairsfile)
else:
logging.warning("`{0}` already exists. Checking for needle output".\
format(pairsfile))
if not os.path.isfile(scoresfile):
logging.error("`{0}` does not exist. Please process {1} using `needle`".\
format(scoresfile, pairsfile))
sys.exit()
else:
scoresfile = "ovl.scores"
calculate_ovl(nbedfile, obedfile, opts, scoresfile)
logging.warning("`{0}' exists. Storing scores in memory".\
format(scoresfile))
scores = read_scores(scoresfile, opts)
abedline = {}
cbed = Bed(cbedfile)
g = Grouper()
for c in cbed:
accn = c.accn
g.join(*accn.split(";"))
nbedline = {}
nbed = Bed(nbedfile)
for line in nbed: nbedline[line.accn] = line
splits = set()
for chr, chrbed in nbed.sub_beds():
abedline, splits = annotate_chr(chr, chrbed, g, scores, nbedline, abedline, opts, splits)
if splits is not None:
abedline = process_splits(splits, scores, nbedline, abedline)
abedfile = npf + ".annotated.bed"
afh = open(abedfile, "w")
for accn in abedline:
print(abedline[accn], file=afh)
afh.close()
sort([abedfile, "-i"]) | %prog annotate new.bed old.bed 2> log
Annotate the `new.bed` with features from `old.bed` for the purpose of
gene numbering.
Ambiguity in ID assignment can be resolved by either of the following 2 methods:
- `alignment`: make use of global sequence alignment score (calculated by `needle`)
- `overlap`: make use of overlap length (calculated by `intersectBed`)
Transfer over as many identifiers as possible while following guidelines:
http://www.arabidopsis.org/portals/nomenclature/guidelines.jsp#editing
Note: Following RegExp pattern describes the structure of the identifier
assigned to features in the `new.bed` file.
new_id_pat = re.compile(r"^\d+\.[cemtx]+\S+")
Examples: 23231.m312389, 23231.t004898, 23231.tRNA.144
Adjust the value of `new_id_pat` manually as per your ID naming conventions. |
18,815 | def raw_from_delimited(msgs: DelimitedMsg) -> RawMsgs:
delim = _rindex(msgs, b)
return tuple(msgs[:delim]), tuple(msgs[delim + 1:]) | \
From a message consisting of header frames, delimiter frame, and payload frames, return a tuple `(header, payload)`.
The payload frames may be returned as sequences of bytes (raw) or as `Message`s. |
18,816 | def fix_spelling(words, join=True, joinstring=' '):
return Vabamorf.instance().fix_spelling(words, join, joinstring) | Simple function for quickly correcting misspelled words.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using the
default behaviour of the string.split() function.
join: boolean (default: True)
Should we join the list of words into a single string.
joinstring: str (default: ' ')
The string that will be used to join together the fixed words.
Returns
-------
str
In case join is True
list of str
In case join is False. |
18,817 | def similarity(w1, w2, threshold=0.5):
ratio = SM(None, str(w1).lower(), str(w2).lower()).ratio()
return ratio if ratio > threshold else 0 | Compare two strings (words) and
return their similarity ratio if it is larger than the threshold,
or 0 otherwise.
NOTE: if the results look more like junk, increase the threshold value. |
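A quick sketch of the `similarity` helper above in use (difflib.SequenceMatcher is aliased as `SM` in the original module):

```python
print(similarity("Apple", "apples"))                # roughly 0.91 -- above the default 0.5 threshold
print(similarity("apple", "orange"))                # 0 -- ratio falls below the threshold
print(similarity("apple", "appel", threshold=0.9))  # 0 -- a stricter threshold rejects the 0.8 ratio
```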
18,818 | def getKnownPlayers(reset=False):
global playerCache
if not playerCache or reset:
jsonFiles = os.path.join(c.PLAYERS_FOLDER, "*.json")
for playerFilepath in glob.glob(jsonFiles):
filename = os.path.basename(playerFilepath)
name = re.sub("^player_", "", filename)
name = re.sub("\.json$", "", name)
player = PlayerRecord(name)
playerCache[player.name] = player
return playerCache | identify all of the currently defined players |
18,819 | def get_disparity(self, pair):
gray = []
if pair[0].ndim == 3:
for side in pair:
gray.append(cv2.cvtColor(side, cv2.COLOR_BGR2GRAY))
else:
gray = pair
return self._block_matcher.compute(gray[0], gray[1],
disptype=cv2.CV_32F) | Compute disparity from image pair (left, right).
First, convert images to grayscale if needed. Then pass to the
``_block_matcher`` for stereo matching. |
18,820 | def resume(profile_process='worker'):
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXProcessProfilePause(int(0),
profile_process2int[profile_process],
profiler_kvstore_handle)) | Resume paused profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker` |
18,821 | def choice_SlackBuild(self):
SlackBuild = ReadSBo(self.sbo_url).slackbuild(self.name, ".SlackBuild")
fill = self.fill_pager(SlackBuild)
self.pager(SlackBuild + fill) | View .SlackBuild file |
18,822 | def _insert_update(self, index: int, length: int) -> None:
ss, se = self._span
for spans in self._type_to_spans.values():
for span in spans:
if index < span[1] or span[1] == index == se:
span[1] += length
if index < span[0] or span[0] == index != ss:
span[0] += length | Update self._type_to_spans according to the added length. |
18,823 | def get_text(self):
row_lines = []
for line in zip_longest(*[column.get_cell_lines() for column in self.columns], fillvalue=):
row_lines.append(.join(line))
return .join(row_lines) | ::returns:
a rendered string representation of the given row |
18,824 | def download_url(url, filename=None, spoof=False, iri_fallback=True,
verbose=True, new=False, chunk_size=None):
def reporthook_(num_blocks, block_nBytes, total_nBytes, start_time=0):
total_seconds = time.time() - start_time + 1E-9
num_kb_down = int(num_blocks * block_nBytes) / 1024
num_mb_down = num_kb_down / 1024
percent_down = int(num_blocks * block_nBytes * 100 / total_nBytes)
kb_per_second = int(num_kb_down / (total_seconds))
fmt_msg =
msg = fmt_msg % (percent_down, num_mb_down, kb_per_second, total_seconds)
sys.stdout.write(msg)
sys.stdout.flush()
if verbose:
reporthook = functools.partial(reporthook_, start_time=time.time())
else:
reporthook = None
if filename is None:
filename = basename(url)
if verbose:
print( % (url, filename))
if new:
import requests
con = requests.get(url, stream=True, timeout=TIMEOUT)
try:
content_length = con.headers.get(, None)
if content_length is None:
with open(filename, ) as file_:
file_.write(con.content)
else:
if chunk_size is None:
chunk_size = 2 ** 20
content_length = int(content_length)
with open(filename, ) as file_:
chunk_iter = con.iter_content(chunk_size=chunk_size)
for count, chunk in enumerate(chunk_iter):
if chunk:
if reporthook:
reporthook(count, chunk_size, content_length)
file_.write(chunk)
finally:
con.close()
return filename
import urllib
try:
if spoof:
user_agents = [
,
,
,
,
,
]
class SpoofingOpener(urllib.FancyURLopener, object):
version = user_agents[0]
spoofing_opener = SpoofingOpener()
spoofing_opener.retrieve(url, filename=filename, reporthook=reporthook)
else:
if six.PY2:
urllib.urlretrieve(url, filename=filename, reporthook=reporthook)
elif six.PY3:
import urllib.request
urllib.request.urlretrieve(url, filename=filename, reporthook=reporthook)
else:
assert False,
except UnicodeError as ex:
import requests
print( % (ex,))
print()
resp = requests.get(url, timeout=TIMEOUT)
with open(filename, ) as file_:
file_.write(resp.content)
if verbose:
print()
print( % (filename,))
return filename | Downloads a url to a filename.
Args:
url (str): url to download
filename (str): path to download to. Defaults to basename of url
spoof (bool): if True pretends to by Firefox
iri_fallback : falls back to requests get call if there is a UnicodeError
References:
http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
http://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads
http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
TODO:
Delete any partially downloaded files
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> url = 'http://www.jrsoftware.org/download.php/ispack.exe'
>>> fpath = download_url(url)
>>> print(fpath)
ispack.exe |
18,825 | def generate_evenly_distributed_data(dim = 2000,
num_active = 40,
num_samples = 1000,
num_negatives = 500):
sparse_data = [numpy.random.choice(dim, size = num_active, replace = False)
for i in range(num_samples)]
data = [[0 for i in range(dim)] for i in range(num_samples)]
for datapoint, sparse_datapoint in zip(data, sparse_data):
for i in sparse_datapoint:
datapoint[i] = 1
negatives = data[:num_negatives]
positives = data[num_negatives:]
return positives, negatives | Generates a set of data drawn from a uniform distribution. The binning
structure from Poirazi & Mel is ignored, and all (dim choose num_active)
arrangements are possible. num_negatives samples are put into a separate
negatives category for output compatibility with generate_data, but are
otherwise identical. |
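Sketch of calling `generate_evenly_distributed_data` above with small, illustrative sizes (numpy is assumed to be imported in the defining module):

```python
positives, negatives = generate_evenly_distributed_data(
    dim=100, num_active=5, num_samples=20, num_negatives=8)
print(len(positives), len(negatives))  # 12 8 -- the first num_negatives samples form the negative set
print(sum(positives[0]))               # 5 -- each sample has exactly num_active bits set
```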
18,826 | def data_find_text(data, path):
el = data_find(data, path)
if not isinstance(el, (list, tuple)):
return None
texts = [child for child in el[1:] if not isinstance(child, (tuple, list, dict))]
if not texts:
return None
return " ".join(
[
six.ensure_text(x, encoding="utf-8", errors="strict")
for x in texts
]
) | Return the text value of the element-as-tuple in tuple ``data`` using
simplified XPath ``path``. |
18,827 | def getPhysicalMaximum(self,chn=None):
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.physical_max(chn)
else:
return 0
else:
physMax = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
physMax[i] = self.physical_max(i)
return physMax | Returns the maximum physical value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalMaximum(0)==1000.0
True
>>> f._close()
>>> del f |
18,828 | def error(request, message, extra_tags='', fail_silently=False, async=False):
if ASYNC and async:
messages.debug(_get_user(request), message)
else:
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently) | Adds a message with the ``ERROR`` level. |
18,829 | def serialize(ty, *values, **kwargs):
try:
parsed_ty = abitypes.parse(ty)
except Exception as e:
raise EthereumError(str(e))
if parsed_ty[0] != :
if len(values) > 1:
raise ValueError()
values = values[0]
if isinstance(values, str):
values = values.encode()
else:
values = tuple(val.encode() if isinstance(val, str) else val for val in values)
result, dyn_result = ABI._serialize(parsed_ty, values)
return result + dyn_result | Serialize value using type specification in ty.
ABI.serialize('int256', 1000)
ABI.serialize('(int, int256)', 1000, 2000) |
18,830 | def getFunc(self, o: Any) -> Callable:
for cls, func in self.routes.items():
if isinstance(o, cls):
return func
logger.error("Unhandled msg {}, available handlers are:".format(o))
for cls in self.routes.keys():
logger.error(" {}".format(cls))
raise RuntimeError("unhandled msg: {}".format(o)) | Get the next function from the list of routes that is capable of
processing o's type.
:param o: the object to process
:return: the next function |
18,831 | def removeGaps(self) :
for i in range(1, len(self.children)) :
if self.children[i].x1 > self.children[i-1].x2:
aux_moveTree(self.children[i-1].x2-self.children[i].x1, self.children[i]) | Remove all gaps between regions |
18,832 | def stopdocs():
"stop Sphinx watchdog"
for i in range(4):
pid = watchdog_pid()
if pid:
if not i:
sh(.format(pid))
sh(.format(pid))
time.sleep(.5)
else:
break | stop Sphinx watchdog |
18,833 | def updateColumnValue(self, column, value, index=None):
if index is None:
index = self.treeWidget().column(column.name())
if type(value) == datetime.date:
self.setData(index, Qt.EditRole, wrapVariant(value))
elif type(value) == datetime.time:
self.setData(index, Qt.EditRole, wrapVariant(value))
elif type(value) == datetime.datetime:
self.setData(index, Qt.EditRole, wrapVariant(value))
elif type(value) in (float, int):
if column.enum():
self.setText(index, column.enum().displayText(value))
else:
self.setData(index, Qt.EditRole, wrapVariant(value))
elif value is not None:
self.setText(index, nativestring(value))
else:
self.setText(index, )
self.setSortData(index, value)
try:
mapper = self.treeWidget().columnMappers().get(column.columnName())
except AttributeError:
mapper = None
if mapper is None:
form = column.stringFormat()
if form:
mapper = form.format
if mapper:
self.setText(index, mapper(value)) | Assigns the value for the column of this record to the inputed value.
:param index | <int>
value | <variant> |
18,834 | def editpropset(self):
self.ignore(whitespace)
if not self.nextstr():
self._raiseSyntaxExpects()
relp = self.relprop()
self.ignore(whitespace)
self.nextmust()
self.ignore(whitespace)
valu = self.valu()
return s_ast.EditPropSet(kids=(relp, valu)) | :foo=10 |
18,835 | def length(self):
def ProcessContentRange(content_range):
_, _, range_spec = content_range.partition()
byte_range, _, _ = range_spec.partition()
start, _, end = byte_range.partition()
return int(end) - int(start) + 1
if in self.info and in self.info:
return ProcessContentRange(self.info[])
elif in self.info:
return int(self.info.get())
elif in self.info:
return ProcessContentRange(self.info[])
return len(self.content) | Return the length of this response.
We expose this as an attribute since using len() directly can fail
for responses larger than sys.maxint.
Returns:
Response length (as int or long) |
18,836 | def alignment_a(self):
from molmod.transformations import Rotation
new_x = self.matrix[:, 0].copy()
new_x /= np.linalg.norm(new_x)
new_z = np.cross(new_x, self.matrix[:, 1])
new_z /= np.linalg.norm(new_z)
new_y = np.cross(new_z, new_x)
new_y /= np.linalg.norm(new_y)
return Rotation(np.array([new_x, new_y, new_z])) | Computes the rotation matrix that aligns the unit cell with the
Cartesian axes, starting with cell vector a.
* a parallel to x
* b in xy-plane with b_y positive
* c with c_z positive |
18,837 | def RegisterArtifact(self,
artifact_rdfvalue,
source="datastore",
overwrite_if_exists=False,
overwrite_system_artifacts=False):
artifact_name = artifact_rdfvalue.name
if artifact_name in self._artifacts:
if not overwrite_if_exists:
details = "artifact already exists and `overwrite_if_exists` is unset"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
elif not overwrite_system_artifacts:
artifact_obj = self._artifacts[artifact_name]
if not artifact_obj.loaded_from.startswith("datastore:"):
details = "system artifact cannot be overwritten"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
artifact_rdfvalue.loaded_from = source
artifact_rdfvalue.error_message = None
self._artifacts[artifact_rdfvalue.name] = artifact_rdfvalue | Registers a new artifact. |
18,838 | def _check_allowed_values(self, parameters):
for key, allowed_values in self.ALLOWED_VALUES:
self.log([u"Checking allowed values for parameter ", key])
if key in parameters:
value = parameters[key]
if value not in allowed_values:
self._failed(u"Parameter has value which is not allowed." % (key, value))
return
self.log(u"Passed") | Check whether the given parameter value is allowed.
Log messages into ``self.result``.
:param dict parameters: the given parameters |
18,839 | def _body(self, paragraphs):
body = []
for i in range(paragraphs):
paragraph = self._paragraph(random.randint(1, 10))
body.append(paragraph)
return .join(body) | Generate a body of text |
18,840 | def load_feedback():
result = {}
if os.path.exists(_feedback_file):
f = open(_feedback_file, )
cont = f.read()
f.close()
else:
cont =
try:
result = json.loads(cont) if cont else {}
except ValueError as e:
result = {"result":"crash", "text":"Feedback file has been modified by user !"}
return result | Open existing feedback file |
18,841 | def initialize(self, *args):
self.n_user = 0
self.users = {}
self.n_item = 0
self.items = {} | Initialize a recommender by resetting stored users and items. |
18,842 | def dir():
dir = [
, , , , ,
, , , , , ,
, , , , , ,
, , ,
]
if IS_PY2:
dir.append()
if sys.platform != or not IS_PY2:
dir.append()
return dir | Return the list of patched function names. Used for patching
functions imported from the module. |
18,843 | def is_string_dtype(arr_or_dtype):
def condition(dtype):
return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype)
return _is_dtype(arr_or_dtype, condition) | Check whether the provided array or dtype is of the string dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the string dtype.
Examples
--------
>>> is_string_dtype(str)
True
>>> is_string_dtype(object)
True
>>> is_string_dtype(int)
False
>>>
>>> is_string_dtype(np.array(['a', 'b']))
True
>>> is_string_dtype(pd.Series([1, 2]))
False |
18,844 | def step(self, batch_size, ignore_stale_grad=False):
rescale_grad = self._scale / batch_size
self._check_and_rescale_grad(rescale_grad)
if not self._kv_initialized:
self._init_kvstore()
if self._params_to_init:
self._init_params()
self._allreduce_grads()
self._update(ignore_stale_grad) | Makes one step of parameter update. Should be called after
`autograd.backward()` and outside of `record()` scope.
For normal parameter updates, `step()` should be used, which internally calls
`allreduce_grads()` and then `update()`. However, if you need to get the reduced
gradients to perform certain transformation, such as in gradient clipping, then
you may want to manually call `allreduce_grads()` and `update()` separately.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
ignore_stale_grad : bool, optional, default=False
If true, ignores Parameters with stale gradient (gradient that has not
been updated by `backward` after last step) and skip update. |
18,845 | def build_paths(self, end_entity_cert):
if not isinstance(end_entity_cert, byte_cls) and not isinstance(end_entity_cert, x509.Certificate):
raise TypeError(pretty_message(
,
type_name(end_entity_cert)
))
if isinstance(end_entity_cert, byte_cls):
if pem.detect(end_entity_cert):
_, _, end_entity_cert = pem.unarmor(end_entity_cert)
end_entity_cert = x509.Certificate.load(end_entity_cert)
path = ValidationPath(end_entity_cert)
paths = []
failed_paths = []
self._walk_issuers(path, paths, failed_paths)
if len(paths) == 0:
cert_name = end_entity_cert.subject.human_friendly
missing_issuer_name = failed_paths[0].first.issuer.human_friendly
raise PathBuildingError(pretty_message(
,
cert_name,
missing_issuer_name
))
return paths | Builds a list of ValidationPath objects from a certificate in the
operating system trust store to the end-entity certificate
:param end_entity_cert:
A byte string of a DER or PEM-encoded X.509 certificate, or an
instance of asn1crypto.x509.Certificate
:return:
A list of certvalidator.path.ValidationPath objects that represent
the possible paths from the end-entity certificate to one of the CA
certs. |
18,846 | def connect(host='localhost', port=21050, database=None, timeout=None,
use_ssl=False, ca_cert=None, auth_mechanism='NOSASL', user=None,
password=None, kerberos_service_name='impala', use_ldap=None,
ldap_user=None, ldap_password=None, use_kerberos=None,
protocol=None, krb_host=None):
if use_kerberos is not None:
warn_deprecate(, )
if use_kerberos:
auth_mechanism = 'GSSAPI'
if use_ldap is not None:
warn_deprecate(, )
if use_ldap:
auth_mechanism = 'LDAP'
if auth_mechanism:
auth_mechanism = auth_mechanism.upper()
else:
auth_mechanism = 'NOSASL'
if auth_mechanism not in AUTH_MECHANISMS:
raise NotSupportedError(
.format(auth_mechanism))
if ldap_user is not None:
warn_deprecate(, )
user = ldap_user
if ldap_password is not None:
warn_deprecate(, )
password = ldap_password
if protocol is not None:
if protocol.lower() == 'hiveserver2':
warn_protocol_param()
else:
raise NotSupportedError(
" is not a supported protocol; only HiveServer2 is "
"supported".format(protocol))
service = hs2.connect(host=host, port=port,
timeout=timeout, use_ssl=use_ssl,
ca_cert=ca_cert, user=user, password=password,
kerberos_service_name=kerberos_service_name,
auth_mechanism=auth_mechanism, krb_host=krb_host)
return hs2.HiveServer2Connection(service, default_db=database) | Get a connection to HiveServer2 (HS2).
These options are largely compatible with the impala-shell command line
arguments. See those docs for more information.
Parameters
----------
host : str
The hostname for HS2. For Impala, this can be any of the `impalad`s.
port : int, optional
The port number for HS2. The Impala default is 21050. The Hive port is
likely different.
database : str, optional
The default database. If `None`, the result is
implementation-dependent.
timeout : int, optional
Connection timeout in seconds. Default is no timeout.
use_ssl : bool, optional
Enable SSL.
ca_cert : str, optional
Local path to the the third-party CA certificate. If SSL is enabled but
the certificate is not specified, the server certificate will not be
validated.
auth_mechanism : {'NOSASL', 'PLAIN', 'GSSAPI', 'LDAP'}
Specify the authentication mechanism. `'NOSASL'` for unsecured Impala.
`'PLAIN'` for unsecured Hive (because Hive requires the SASL
transport). `'GSSAPI'` for Kerberos and `'LDAP'` for Kerberos with
LDAP.
user : str, optional
LDAP user, if applicable.
password : str, optional
LDAP password, if applicable.
kerberos_service_name : str, optional
Authenticate to a particular `impalad` service principal. Uses
`'impala'` by default.
use_ldap : bool, optional
Specify `auth_mechanism='LDAP'` instead.
.. deprecated:: 0.11.0
ldap_user : str, optional
Use `user` parameter instead.
.. deprecated:: 0.11.0
ldap_password : str, optional
Use `password` parameter instead.
.. deprecated:: 0.11.0
use_kerberos : bool, optional
Specify `auth_mechanism='GSSAPI'` instead.
.. deprecated:: 0.11.0
protocol : str, optional
Do not use. HiveServer2 is the only protocol currently supported.
.. deprecated:: 0.11.0
Returns
-------
HiveServer2Connection
A `Connection` object (DB API 2.0-compliant). |
18,847 | def _jar_paths():
own_jar = os.getenv("H2O_JAR_PATH", "")
if own_jar != "":
if not os.path.isfile(own_jar):
raise H2OStartupError("Environment variable H2O_JAR_PATH is set to but file does not exists, unset environment variable or provide valid path to h2o.jar file." % own_jar)
yield own_jar
cwd_chunks = os.path.abspath(".").split(os.path.sep)
for i in range(len(cwd_chunks), 0, -1):
if cwd_chunks[i - 1] == "h2o-3":
yield os.path.sep.join(cwd_chunks[:i] + ["build", "h2o.jar"])
backend_dir = os.path.split(os.path.realpath(__file__))[0]
yield os.path.join(backend_dir, "bin", "h2o.jar")
prefix1 = prefix2 = sys.prefix
if prefix1.startswith(os.path.sep + "Library"):
prefix2 = os.path.join("", "System", prefix1)
elif prefix1.startswith(os.path.sep + "System"):
prefix2 = prefix1[len(os.path.join("", "System")):]
yield os.path.join(prefix1, "h2o_jar", "h2o.jar")
yield os.path.join(os.path.abspath(os.sep), "usr", "local", "h2o_jar", "h2o.jar")
yield os.path.join(prefix1, "local", "h2o_jar", "h2o.jar")
yield os.path.join(get_config_var("userbase"), "h2o_jar", "h2o.jar")
yield os.path.join(prefix2, "h2o_jar", "h2o.jar") | Produce potential paths for an h2o.jar executable. |
18,848 | def getUserInfo(self):
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result | Returns a dictionary of user info that google stores. |
18,849 | def harmonic(y, **kwargs):
stft = core.stft(y)
stft_harm = decompose.hpss(stft, **kwargs)[0]
y_harm = util.fix_length(core.istft(stft_harm, dtype=y.dtype), len(y))
return y_harm | Extract harmonic elements from an audio time-series.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_harmonic : np.ndarray [shape=(n,)]
audio time series of just the harmonic portion
See Also
--------
hpss : Separate harmonic and percussive components
percussive : Extract only the percussive component
librosa.decompose.hpss : HPSS for spectrograms
Examples
--------
>>> # Extract harmonic component
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_harmonic = librosa.effects.harmonic(y)
>>> # Use a margin > 1.0 for greater harmonic separation
>>> y_harmonic = librosa.effects.harmonic(y, margin=3.0) |
18,850 | def parse_changelog(path, **kwargs):
app, doctree = get_doctree(path, **kwargs)
first_list = None
for node in doctree[0]:
if isinstance(node, bullet_list):
first_list = node
break
releases, manager = construct_releases(first_list.children, app)
ret = changelog2dict(releases)
for key in ret.copy():
if key.startswith():
del ret[key]
for family in manager:
manager[family].pop(, None)
ret.update(manager[family])
return ret | Load and parse changelog file from ``path``, returning data structures.
This function does not alter any files on disk; it is solely for
introspecting a Releases ``changelog.rst`` and programmatically answering
questions like "are there any unreleased bugfixes for the 2.3 line?" or
"what was included in release 1.2.1?".
For example, answering the above questions is as simple as::
changelog = parse_changelog("/path/to/changelog")
print("Unreleased issues for 2.3.x: {}".format(changelog['2.3']))
print("Contents of v1.2.1: {}".format(changelog['1.2.1']))
Aside from the documented arguments, any additional keyword arguments are
passed unmodified into an internal `get_doctree` call (which then passes
them to `make_app`).
:param str path: A relative or absolute file path string.
:returns:
A dict whose keys map to lists of ``releases.models.Issue`` objects, as
follows:
- Actual releases are full version number keys, such as ``"1.2.1"`` or
``"2.0.0"``.
- Unreleased bugs (or bug-like issues; see the Releases docs) are
stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``.
- Unreleased features (or feature-like issues) are found in
``"unreleased_N_feature"``, where ``N`` is one of the major release
families (so, a changelog spanning only 1.x will only have
``unreleased_1_feature``, whereas one with 1.x and 2.x releases will
have ``unreleased_1_feature`` and ``unreleased_2_feature``, etc).
.. versionchanged:: 1.6
Added support for passing kwargs to `get_doctree`/`make_app`. |
18,851 | def add_sender_info( self, sender_txhash, nulldata_vin_outpoint, sender_out_data ):
assert sender_txhash in self.sender_info.keys(), "Missing sender info for %s" % sender_txhash
assert nulldata_vin_outpoint in self.sender_info[sender_txhash], "Missing outpoint %s for sender %s" % (nulldata_vin_outpoint, sender_txhash)
block_hash = self.sender_info[sender_txhash][nulldata_vin_outpoint][]
relindex = self.sender_info[sender_txhash][nulldata_vin_outpoint][]
relinput_index = self.sender_info[sender_txhash][nulldata_vin_outpoint][]
value_in_satoshis = sender_out_data[]
script_pubkey = sender_out_data[]
script_info = bits.btc_tx_output_parse_script(script_pubkey)
script_type = script_info[]
addresses = script_info.get(, [])
sender_info = {
"value": value_in_satoshis,
"script_pubkey": script_pubkey,
"script_type": script_type,
"addresses": addresses,
"nulldata_vin_outpoint": nulldata_vin_outpoint,
"txid": sender_txhash,
}
return True | Record sender information in our block info.
@sender_txhash: txid of the sender
@nulldata_vin_outpoint: the 'vout' index from the nulldata tx input that this transaction funded |
18,852 | def distance(p_a, p_b):
return sqrt((p_a.lat - p_b.lat) ** 2 + (p_a.lon - p_b.lon) ** 2) | Euclidean distance between two points
Args:
p_a (:obj:`Point`)
p_b (:obj:`Point`)
Returns:
float: distance, in degrees |
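A self-contained sketch of the `distance` function above; the `Point` namedtuple here is a hypothetical stand-in for the project's Point class, which only needs `lat`/`lon` attributes:

```python
from collections import namedtuple
from math import sqrt

Point = namedtuple("Point", ["lat", "lon"])  # hypothetical stand-in with lat/lon attributes

def distance(p_a, p_b):
    return sqrt((p_a.lat - p_b.lat) ** 2 + (p_a.lon - p_b.lon) ** 2)

print(distance(Point(0.0, 0.0), Point(3.0, 4.0)))  # 5.0 (in degrees)
```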
18,853 | def latexsnippet(code, kvs, staffsize=17, initiallines=1):
snippet =
staffsize = int(kvs[]) if in kvs \
else staffsize
initiallines = int(kvs[]) if in kvs \
else initiallines
annotationsize = .5 * staffsize
if in kvs:
snippet = (
"\\greannotation{{\\fontsize{%s}{%s}\\selectfont{}%s}}\n" %
(annotationsize, annotationsize, kvs[])
) + snippet
if in kvs:
snippet = (
"\\grechangedim{annotationseparation}{%s mm}{fixed}\n"
"\\greannotation{{\\fontsize{%s}{%s}\\selectfont{}%s}}\n" %
(staffsize / 60, annotationsize, annotationsize, kvs[])
) + snippet
snippet = (
"\\gresetinitiallines{%s}\n" % initiallines +
"\\grechangestaffsize{%s}\n" % staffsize +
"\\grechangestyle{initial}{\\fontsize{%s}{%s}\\selectfont{}}" %
(2.5 * staffsize, 2.5 * staffsize)
) + snippet
snippet = "\\setlength{\\parskip}{0pt}\n" + snippet + code
return snippet | Take into account key/values |
18,854 | def isotime(at=None, subsecond=False):
if not at:
at = utcnow()
st = at.strftime(_ISO8601_TIME_FORMAT
if not subsecond
else _ISO8601_TIME_FORMAT_SUBSECOND)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' else tz)
return st | Stringify time in ISO 8601 format. |
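Hedged usage sketch for `isotime` above; it relies on the module's `utcnow()` helper and `_ISO8601_TIME_FORMAT` constants, assumed here to be the usual `'%Y-%m-%dT%H:%M:%S'` pattern:

```python
from datetime import datetime

print(isotime(datetime(2021, 6, 1, 12, 0, 0)))
# e.g. '2021-06-01T12:00:00Z' -- naive datetimes get the 'Z' suffix
print(isotime(datetime(2021, 6, 1, 12, 0, 0, 123456), subsecond=True))
# e.g. '2021-06-01T12:00:00.123456Z' with the subsecond format constant
```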
18,855 | def save_form(self, request, form, change):
name = form.cleaned_data[]
origin_url = form.cleaned_data[]
res = ClonedRepo(name=name, origin=origin_url)
LOG.info("New repo form produced %s" % str(res))
form.save(commit=False)
return res | Here we pluck out the data to create a new cloned repo.
Form is an instance of NewRepoForm. |
18,856 | def loglike(self, endog, mu, freq_weights=1., scale=1.):
if isinstance(self.link, L.Power) and self.link.power == 1:
nobs2 = endog.shape[0] / 2.
SSR = np.sum((endog-self.fitted(mu))**2, axis=0)
llf = -np.log(SSR) * nobs2
llf -= (1+np.log(np.pi/nobs2))*nobs2
return llf
else:
return np.sum(freq_weights * ((endog * mu - mu**2/2)/scale -
endog**2/(2 * scale) - .5*np.log(2 * np.pi * scale))) | The log-likelihood in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
freq_weights : array-like
1d array of frequency weights. The default is 1.
scale : float, optional
Scales the loglikelihood function. The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,freq_weights,scale) as defined below. |
18,857 | def _create_subplots(self, fig, layout):
num_panels = len(layout)
axsarr = np.empty((self.nrow, self.ncol), dtype=object)
i = 1
for row in range(self.nrow):
for col in range(self.ncol):
axsarr[row, col] = fig.add_subplot(self.nrow, self.ncol, i)
i += 1
if self.dir == :
order =
if not self.as_table:
axsarr = axsarr[::-1]
elif self.dir == :
order =
if not self.as_table:
axsarr = np.array([row[::-1] for row in axsarr])
axs = axsarr.ravel(order)
for ax in axs[num_panels:]:
fig.delaxes(ax)
axs = axs[:num_panels]
return axs | Create subplots and return axs |
18,858 | def compile_template(instance, template, additionnal_context=None):
py3o_context = get_compilation_context(instance)
if additionnal_context is not None:
py3o_context.update(additionnal_context)
output_doc = StringIO()
odt_builder = Template(template, output_doc)
odt_builder.render(py3o_context)
return output_doc | Fill the given template with the instance's data and return the odt file
For every instance class, common values are also inserted in the context
dict (and so can be used) :
* config values
:param obj instance: the instance of a model (like Userdatas, Company)
:param template: the template object to use
:param dict additionnal_context: A dict containing datas we'd like to add to
the py3o compilation template
:return: a stringIO object filled with the resulting odt's informations |
18,859 | def get_dbcollection_with_es(self, **kwargs):
es_objects = self.get_collection_es()
db_objects = self.Model.filter_objects(es_objects)
return db_objects | Get DB objects collection by first querying ES. |
18,860 | def _get_kwargs(profile=None, **connection_args):
if profile:
prefix = profile + ":keystone."
else:
prefix = "keystone."
def get(key, default=None):
return connection_args.get( + key,
__salt__[](prefix + key, default))
user = get(, )
password = get(, )
tenant = get(, )
tenant_id = get()
auth_url = get(, )
insecure = get(, False)
token = get()
endpoint = get(, )
user_domain_name = get(, )
project_domain_name = get(, )
if token:
kwargs = {: token,
: endpoint}
else:
kwargs = {: user,
: password,
: tenant,
: tenant_id,
: auth_url,
: user_domain_name,
: project_domain_name}
return kwargs | get connection args |
18,861 | def get_url(cls, url, uid, **kwargs):
if uid:
url = .format(url, uid)
else:
url = url
return cls._parse_url_and_validate(url) | Construct the URL for talking to an individual resource.
http://myapi.com/api/resource/1
Args:
url: The url for this resource
uid: The unique identifier for an individual resource
kwargs: Additional keyword arguments
returns:
final_url: The URL for this individual resource |
18,862 | def wr_txt(self, fout_txt):
with open(fout_txt, ) as prt:
for line in self.ver_list:
prt.write("{LINE}\n".format(LINE=line))
self.prt_txt(prt)
print(" WROTE: {TXT}".format(TXT=fout_txt)) | Write to a file GOEA results in an ASCII text format. |
18,863 | def _find_cellid(self, code):
from difflib import SequenceMatcher
maxvalue = 0.
maxid = None
for cellid, c in self.cellids.items():
matcher = SequenceMatcher(a=c, b=code)
ratio = matcher.quick_ratio()
if ratio > maxvalue and ratio > 0.5:
maxid, maxvalue = cellid, ratio
return maxid | Determines the most similar cell (if any) to the specified code. It
must have at least 50% overlap ratio and have been a loop-intercepted
cell previously.
Args:
code (str): contents of the code cell that were executed. |
18,864 | def updates(self):
updates = Updates()
found = updates.updates
for update in self._updates:
found.Add(update)
return updates | Get the contents of ``_updates`` (all updates) and puts them in an
Updates class to expose the list and summary functions.
Returns:
Updates: An instance of the Updates class with all updates for the
system.
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
updates = wua.updates()
# To get a list
updates.list()
# To get a summary
updates.summary() |
18,865 | def getDefaultItems(self):
return [
RtiRegItem(,
,
extensions=[, , , , ]),
RtiRegItem(,
,
extensions=[]),
RtiRegItem(,
,
extensions=[, ]),
RtiRegItem(,
,
extensions=[]),
RtiRegItem(,
,
extensions=[]),
RtiRegItem(,
,
extensions=[]),
RtiRegItem(,
,
extensions=[]),
RtiRegItem(,
,
extensions=[]),
RtiRegItem(,
,
extensions=[, , , , , , , ,
, , , , , , ]),
RtiRegItem(,
,
extensions=[])] | Returns a list with the default plugins in the repo tree item registry. |
18,866 | def unregister_provider(self, provider):
self._unregistering_providers.add(provider)
remaining_providers = self._providers - self._unregistering_providers
if not remaining_providers:
_log.debug(, self)
self.queue_consumer.unregister_provider(self)
_log.debug(, self)
self._unregistered_from_queue_consumer.send(True)
_log.debug(, self)
self._unregistered_from_queue_consumer.wait()
super(RpcConsumer, self).unregister_provider(provider) | Unregister a provider.
Blocks until this RpcConsumer is unregistered from its QueueConsumer,
which only happens when all providers have asked to unregister. |
18,867 | def load(dbname, dbmode=):
if dbmode == :
raise AttributeError("dbmode= not allowed for load.")
db = Database(dbname, dbmode=dbmode)
return db | Load an existing hdf5 database.
Return a Database instance.
:Parameters:
filename : string
Name of the hdf5 database to open.
mode : 'a', 'r'
File mode : 'a': append, 'r': read-only. |
18,868 | def remove_negativescores_nodes(self):
gravity_items = self.parser.css_select(self.top_node, "*[gravityScore]")
for item in gravity_items:
score = self.parser.getAttribute(item, )
score = int(score, 0)
if score < 1:
item.getparent().remove(item) | \
if there are elements inside our top node
that have a negative gravity score,
let's give em the boot |
18,869 | def _parse_01(ofiles, individual=False):
cols = []
dats = []
for ofile in ofiles:
with open(ofile) as infile:
dat = infile.read()
lastbits = dat.split(".mcmc.txt\n\n")[1:]
results = lastbits[0].split("\n\n")[0].split()
shape = (((len(results) - 3) / 4), 4)
dat = np.array(results[3:]).reshape(shape)
cols.append(dat[:, 3].astype(float))
if not individual:
cols = np.array(cols)
cols = cols.sum(axis=0) / len(ofiles)
dat[:, 3] = cols.astype(str)
df = pd.DataFrame(dat[:, 1:])
df.columns = ["delim", "prior", "posterior"]
nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1)
df["nspecies"] = nspecies
return df
else:
res = []
for i in xrange(len(cols)):
x = dat
x[:, 3] = cols[i].astype(str)
x = pd.DataFrame(x[:, 1:])
x.columns = ['delim', 'prior', 'posterior']
nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1)
x["nspecies"] = nspecies
res.append(x)
return res | a subfunction for summarizing results |
18,870 | def post(self, request, *args, **kwargs):
current_timestamp = "%.0f" % time.time()
user_id_str = u"{0}".format(request.user.id)
token = generate_token(settings.CENTRIFUGE_SECRET, user_id_str, "{0}".format(current_timestamp), info="")
participant = Participant.objects.get(id=request.user.id)
channels = []
for thread in Thread.managers.get_threads_where_participant_is_active(participant_id=participant.id):
channels.append(
build_channel(settings.CENTRIFUGO_MESSAGE_NAMESPACE, thread.id, thread.participants.all())
)
threads_channel = build_channel(settings.CENTRIFUGO_THREAD_NAMESPACE, request.user.id, [request.user.id])
channels.append(threads_channel)
to_return = {
: user_id_str,
: current_timestamp,
: token,
: "{0}connection/".format(settings.CENTRIFUGE_ADDRESS),
: channels,
: settings.DEBUG,
}
return HttpResponse(json.dumps(to_return), content_type=) | Returns a token identifying the user in Centrifugo. |
18,871 | def _unique_by_email(users_and_watches):
def ensure_user_has_email(user, cluster_email):
if not getattr(user, , ):
user = EmailUser(cluster_email)
return user
cluster_email =
favorite_user = None
watches = []
for u, w in users_and_watches:
row_email = u.email or w[0].email
if cluster_email.lower() != row_email.lower():
if cluster_email != :
yield (ensure_user_has_email(favorite_user, cluster_email),
watches)
favorite_user, watches = u, []
cluster_email = row_email
elif ((not favorite_user.email or not u.is_authenticated) and
u.email and u.is_authenticated):
favorite_user = u
watches.extend(w)
if favorite_user is not None:
yield ensure_user_has_email(favorite_user, cluster_email), watches | Given a sequence of (User/EmailUser, [Watch, ...]) pairs
clustered by email address (which is never ''), yield from each
cluster a single pair like this::
(User/EmailUser, [Watch, Watch, ...]).
The User/Email is that of...
(1) the first incoming pair where the User has an email and is not
anonymous, or, if there isn't such a user...
(2) the first pair.
The list of Watches consists of all those found in the cluster.
Compares email addresses case-insensitively. |
18,872 | def adjustMask(self):
if self.currentMode() == XPopupWidget.Mode.Dialog:
self.clearMask()
return
path = self.borderPath()
bitmap = QBitmap(self.width(), self.height())
bitmap.fill(QColor())
with XPainter(bitmap) as painter:
painter.setRenderHint(XPainter.Antialiasing)
pen = QPen(QColor())
pen.setWidthF(0.75)
painter.setPen(pen)
painter.setBrush(QColor())
painter.drawPath(path)
self.setMask(bitmap) | Updates the alpha mask for this popup widget. |
18,873 | def create_cherry_pick(self, cherry_pick_to_create, project, repository_id):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
if repository_id is not None:
route_values[] = self._serialize.url(, repository_id, )
content = self._serialize.body(cherry_pick_to_create, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
content=content)
return self._deserialize(, response) | CreateCherryPick.
[Preview API] Cherry pick a specific commit or commits that are associated to a pull request into a new branch.
:param :class:`<GitAsyncRefOperationParameters> <azure.devops.v5_0.git.models.GitAsyncRefOperationParameters>` cherry_pick_to_create:
:param str project: Project ID or project name
:param str repository_id: ID of the repository.
:rtype: :class:`<GitCherryPick> <azure.devops.v5_0.git.models.GitCherryPick>` |
18,874 | def delete(self, upload_id):
return super(UploadsProxy, self).delete(upload_id, file_upload=True) | Deletes an upload by ID. |
18,875 | def read_data(self, size):
result = self.dev.bulkRead(0x81, size, timeout=1200)
if not result or len(result) < size:
raise IOError()
if not isinstance(result[0], int):
result = map(ord, result)
return list(result) | Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int) |
18,876 | def dereference_object(object_type, object_uuid, status):
from .models import PersistentIdentifier
pids = PersistentIdentifier.query.filter_by(
object_type=object_type, object_uuid=object_uuid
)
if status:
pids = pids.filter_by(status=status)
for found_pid in pids.all():
click.echo(
.format(found_pid)
) | Show linked persistent identifier(s). |
18,877 | def descriptionHtml(self):
if self.cls is None:
return None
elif hasattr(self.cls, ):
return self.cls.descriptionHtml()
else:
return | HTML help describing the class. For use in the detail editor. |
18,878 | def search_process_log(self, pid, filter={}, start=0, limit=1000):
pid = self._get_pid(pid)
request_data = {: start, : limit, : filter}
return self._call_rest_api(, + pid + , data=request_data,
error=) | search_process_log(self, pid, filter={}, start=0, limit=1000)
Search in process logs
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
* *start* (`int`) -- start index to retrieve from. Default is 0
* *limit* (`int`) -- maximum number of entities to retrieve. Default is 100
* *filter* (`object`) -- free text search pattern (checks in process log data)
:return: Count of records found and list of search results or empty list
:Example:
.. code-block:: python
filter = {'generic': 'my product param'}
search_result = opereto_client.search_globals(filter=filter)
if search_result['total'] > 0
print(search_result['list']) |
18,879 | def get_player(self, *tags: crtag, **params: keys):
url = self.api.PLAYER + + .join(tags)
return self._get_model(url, FullPlayer, **params) | Get player information
Parameters
----------
\*tags: str
Valid player tags. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout |
18,880 | def string_chain(text, filters):
if filters is None:
return text
for filter_function in filters:
text = filter_function(text)
return text | Chain several filters after each other, applies the filter on the entire string
:param text: String to format
:param filters: Sequence of filters to apply on String
:return: The formatted String |
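Minimal example of `string_chain` above, chaining simple filter callables over a string:

```python
filters = [str.strip, str.lower, lambda s: s.replace(" ", "-")]
print(string_chain("  Hello World  ", filters))  # 'hello-world'
print(string_chain("unchanged", None))           # 'unchanged' -- None skips filtering entirely
```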
18,881 | def read_file(self):
file_obj = open(self.file, )
content = file_obj.read()
file_obj.close()
if content:
content = json.loads(content)
return content
else:
return {} | Open the file, read its content and
return it parsed from JSON format.
Return : json data (empty dict if the file is empty) |
18,882 | def get_default_config(self):
default_config = super(FlumeCollector, self).get_default_config()
default_config[] =
default_config[] =
default_config[] = 41414
default_config[] =
return default_config | Returns the default collector settings |
18,883 | def get_application(*args):
opts_tuple = args
def wsgi_app(environ, start_response):
root, _, conf = opts_tuple or bootstrap_app()
cherrypy.config.update({: })
cherrypy.tree.mount(root, , conf)
return cherrypy.tree(environ, start_response)
return wsgi_app | Returns a WSGI application function. If you supply the WSGI app and config
it will use that, otherwise it will try to obtain them from a local Salt
installation |
18,884 | def loop_exit_label(self, loop_type):
for i in range(len(self.LOOPS) - 1, -1, -1):
if loop_type == self.LOOPS[i][0]:
return self.LOOPS[i][1]
raise InvalidLoopError(loop_type) | Returns the label for the given loop type which
exits the loop. loop_type must be one of 'FOR', 'WHILE', 'DO' |
18,885 | def process_create_ex(self, executable, arguments, environment_changes, flags, timeout_ms, priority, affinity):
if not isinstance(executable, basestring):
raise TypeError("executable can only be an instance of type basestring")
if not isinstance(arguments, list):
raise TypeError("arguments can only be an instance of type list")
for a in arguments[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(environment_changes, list):
raise TypeError("environment_changes can only be an instance of type list")
for a in environment_changes[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(flags, list):
raise TypeError("flags can only be an instance of type list")
for a in flags[:10]:
if not isinstance(a, ProcessCreateFlag):
raise TypeError(
"array can only contain objects of type ProcessCreateFlag")
if not isinstance(timeout_ms, baseinteger):
raise TypeError("timeout_ms can only be an instance of type baseinteger")
if not isinstance(priority, ProcessPriority):
raise TypeError("priority can only be an instance of type ProcessPriority")
if not isinstance(affinity, list):
raise TypeError("affinity can only be an instance of type list")
for a in affinity[:10]:
if not isinstance(a, baseinteger):
raise TypeError(
"array can only contain objects of type baseinteger")
guest_process = self._call("processCreateEx",
in_p=[executable, arguments, environment_changes, flags, timeout_ms, priority, affinity])
guest_process = IGuestProcess(guest_process)
return guest_process | Creates a new process running in the guest with the extended options
for setting the process priority and affinity.
See :py:func:`IGuestSession.process_create` for more information.
in executable of type str
Full path to the file to execute in the guest. The file has to
exists in the guest VM with executable right to the session user in
order to succeed. If empty/null, the first entry in the
@a arguments array will be used instead (i.e. argv[0]).
in arguments of type str
Array of arguments passed to the new process.
Starting with VirtualBox 5.0 this array starts with argument 0
instead of argument 1 as in previous versions. Whether the zeroth
argument can be passed to the guest depends on the VBoxService
version running there. If you depend on this, check that the
:py:func:`IGuestSession.protocol_version` is 3 or higher.
in environment_changes of type str
Set of environment changes to complement
:py:func:`IGuestSession.environment_changes` . Takes precedence
over the session ones. The changes are in putenv format, i.e.
"VAR=VALUE" for setting and "VAR" for unsetting.
The changes are applied to the base environment of the impersonated
guest user (:py:func:`IGuestSession.environment_base` ) when
creating the process. (This is done on the guest side of things in
order to be compatible with older guest additions. That is one of
the motivations for not passing in the whole environment here.)
in flags of type :class:`ProcessCreateFlag`
Process creation flags, see :py:class:`ProcessCreateFlag` for
detailed description of available flags.
in timeout_ms of type int
Timeout (in ms) for limiting the guest process' running time.
Pass 0 for an infinite timeout. On timeout the guest process will be
killed and its status will be put to an appropriate value. See
:py:class:`ProcessStatus` for more information.
in priority of type :class:`ProcessPriority`
Process priority to use for execution, see :py:class:`ProcessPriority`
for available priority levels.
This is silently ignored if not supported by guest additions.
in affinity of type int
Processor affinity to set for the new process. This is a list of
guest CPU numbers the process is allowed to run on.
This is silently ignored if the guest does not support setting the
affinity of processes, or if the Guest Additions do not implement
this feature.
return guest_process of type :class:`IGuestProcess`
Guest process object of the newly created process. |
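A hedged usage sketch for the call above: it assumes an already-opened IGuestSession handle named `session` and the enum names exposed by the pyvbox-style `virtualbox` Python bindings, which can differ between binding versions; paths and arguments are placeholders.
# Illustrative only: `session` is an assumed IGuestSession handle and the
# enum attribute names come from the pyvbox-style bindings.
from virtualbox.library import ProcessCreateFlag, ProcessPriority

proc = session.process_create_ex(
    '/bin/ls',                                 # executable (placeholder path)
    ['/bin/ls', '-la', '/tmp'],                # argv[0] plus arguments
    ['LANG=C'],                                # putenv-style environment changes
    [ProcessCreateFlag.wait_for_std_out],      # creation flags
    30 * 1000,                                 # timeout in ms (0 = infinite)
    ProcessPriority.default,                   # priority, ignored if unsupported
    [])                                        # CPU affinity (empty = no restriction)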
18,886 | def search_dashboard_for_facets(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_dashboard_for_facets_with_http_info(**kwargs)
else:
(data) = self.search_dashboard_for_facets_with_http_info(**kwargs)
return data | Lists the values of one or more facets over the customer's non-deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread. |
18,887 | def agg(self, func, *fields, **name):
if name:
if len(name) > 1 or 'name' not in name:
raise TypeError("Unknown keyword args passed into `agg`: %s"
% name)
name = name.get('name')
if not isinstance(name, basestring):
raise TypeError("Column names must be strings, not `%s`"
% type(name))
else:
name = name
elif func.__name__ == '<lambda>':
name = "lambda%04d" % self.__lambda_num
self.__lambda_num += 1
name += "(%s)" % .join(fields)
else:
name = func.__name__
name += "(%s)" % .join(fields)
aggregated_column = []
if len(fields) > 1:
for groupkey in self.__grouptable[]:
agg_data = [tuple([row[field] for field in fields])
for row in self.__key_to_group_map[groupkey]]
aggregated_column.append(func(agg_data))
elif len(fields) == 1:
field = fields[0]
for groupkey in self.__grouptable[]:
agg_data = [row[field]
for row in self.__key_to_group_map[groupkey]]
aggregated_column.append(func(agg_data))
else:
for groupkey in self.__grouptable[]:
agg_data = self.__key_to_group_map[groupkey]
aggregated_column.append(func(agg_data))
self.__grouptable[name] = aggregated_column
return self | Calls the aggregation function `func` on each group in the GroupbyTable,
and leaves the results in a new column with the name of the aggregation
function.
Call `.agg` with `name='desired_column_name'` to choose a column
name for this aggregation. |
18,888 | def write_serializable_array(self, array):
if array is None:
self.write_byte(0)
else:
self.write_var_int(len(array))
for item in array:
item.Serialize(self) | Write an array of serializable objects to the stream.
Args:
array(list): a list of serializable objects. i.e. extending neo.IO.Mixins.SerializableMixin |
18,889 | def _read_structure_attributes(f):
line = ''
variogram_info = {}
while "end structure" not in line:
line = f.readline()
if line == '':
raise Exception("EOF while reading structure")
line = line.strip().lower().split()
if line[0].startswith('#'):
continue
if line[0] == "nugget":
nugget = float(line[1])
elif line[0] == "transform":
transform = line[1]
elif line[0] == "numvariogram":
numvariograms = int(line[1])
elif line[0] == "variogram":
variogram_info[line[1]] = float(line[2])
elif line[0] == "end":
break
elif line[0] == "mean":
warnings.warn("'mean' attribute not supported, skipping", PyemuWarning)
else:
raise Exception("unrecognized line in structure definition:{0}".\
format(line[0]))
assert numvariograms == len(variogram_info)
return nugget,transform,variogram_info | function to read information from a PEST-style structure file
Parameters
----------
f : (file handle)
file handle open for reading
Returns
-------
nugget : float
the GeoStruct nugget
transform : str
the GeoStruct transformation
variogram_info : dict
dictionary of structure-level variogram information |
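For orientation, a minimal input of the kind the parser above accepts (the values are made up); each keyword line maps onto one of the elif branches in the code.
# Minimal illustration of input the parser above accepts; values are illustrative.
import io
sample = io.StringIO(
    "nugget 0.0\n"
    "transform log\n"
    "numvariogram 1\n"
    "variogram var1 1.0\n"
    "end structure\n")
# _read_structure_attributes(sample) would return (0.0, 'log', {'var1': 1.0})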
18,890 | def create_metric(metric_type, metric_id, data):
if not isinstance(data, list):
data = [data]
return {'type': metric_type, 'id': metric_id, 'data': data} | Create Hawkular-Metrics' submittable structure.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param data: A datapoint or a list of datapoints created with create_datapoint(value, timestamp, tags) |
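A self-contained sketch of the structure this helper is meant to produce; the 'type'/'id'/'data' key names follow the reconstruction above, and the datapoint layout is an assumption rather than the verbatim Hawkular client code.
# Standalone illustration; key names and the datapoint shape are assumptions.
def create_datapoint(value, timestamp, tags=None):
    point = {'value': value, 'timestamp': timestamp}
    if tags:
        point['tags'] = tags
    return point

def create_metric(metric_type, metric_id, data):
    if not isinstance(data, list):
        data = [data]
    return {'type': metric_type, 'id': metric_id, 'data': data}

metric = create_metric('gauge', 'cpu.load', create_datapoint(0.57, 1514764800000))
# -> {'type': 'gauge', 'id': 'cpu.load', 'data': [{'value': 0.57, 'timestamp': 1514764800000}]}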
18,891 | def unseal(self, data, return_options=False):
data = self._remove_magic(data)
data = urlsafe_nopadding_b64decode(data)
options = self._read_header(data)
data = self._add_magic(data)
data = self._unsign_data(data, options)
data = self._remove_magic(data)
data = self._remove_header(data, options)
data = self._decrypt_data(data, options)
data = self._decompress_data(data, options)
data = self._unserialize_data(data, options)
if return_options:
return data, options
else:
return data | Unseal data |
18,892 | def open(filepath, edit_local=False):
filepath = os.fspath(filepath)
ds = np.DataSource(None)
if edit_local is False:
tf = tempfile.mkstemp(prefix="", suffix=".wt5")
with _open(tf[1], "w+b") as tff:
with ds.open(str(filepath), "rb") as f:
tff.write(f.read())
filepath = tf[1]
f = h5py.File(filepath)
class_name = f["/"].attrs["class"]
name = f["/"].attrs["name"]
if class_name == "Data":
obj = wt_data.Data(filepath=str(filepath), name=name, edit_local=True)
elif class_name == "Collection":
obj = wt_collection.Collection(filepath=str(filepath), name=name, edit_local=True)
else:
obj = wt_group.Group(filepath=str(filepath), name=name, edit_local=True)
if edit_local is False:
setattr(obj, "_tmpfile", tf)
weakref.finalize(obj, obj.close)
return obj | Open any wt5 file, returning the top-level object (data or collection).
Parameters
----------
filepath : path-like
Path to file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
edit_local : boolean (optional)
If True, the file itself will be opened for editing. Otherwise, a
copy will be created. Default is False.
Returns
-------
WrightTools Collection or Data
Root-level object in file. |
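A brief usage sketch, assuming the loader above is exposed as WrightTools.open; the file name is a placeholder.
# Hypothetical usage; 'experiment.wt5' is a placeholder path.
import WrightTools as wt

obj = wt.open('experiment.wt5')                      # works on a temporary copy
local = wt.open('experiment.wt5', edit_local=True)   # edits the file in place
print(type(obj).__name__)                            # Data, Collection, or Group
obj.close()
local.close()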
18,893 | def info_community(self,teamid):
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",: +self.domain+,"User-Agent": user_agent}
req = self.session.get(+self.domain++teamid,headers=headers).content
soup = BeautifulSoup(req)
info = []
for i in soup.find(,cellpadding=2).find_all()[1:]:
info.append(%(i.find().text,i.find()[].split()[1],i.a.text,i.find_all()[2].text,i.find_all()[3].text))
return info | Get community info using an ID |
18,894 | def validate_image_size(image):
config = get_app_config()
valid_max_image_size_in_bytes = config.valid_max_image_size * 1024
if config and not image.size <= valid_max_image_size_in_bytes:
raise ValidationError(
_("The logo image file size must be less than or equal to %s KB.") % config.valid_max_image_size) | Validate that a particular image size. |
18,895 | def parse_manifest(self, manifest_xml):
manifest = dict()
try:
mdata = xmltodict.parse(manifest_xml)[][]
for module in mdata:
mod = dict()
mod[] = module[]
mod[] = module[]
mod[] = module[]
mod[] = module[]
mod[] = module[]
mod[] = module[]
mod[] = module[][]
mod[] = module[][]
mod[] = module[]
manifest[mod[]] = mod
except Exception as e:
raise
return manifest | Parse manifest xml file
:type manifest_xml: str
:param manifest_xml: raw xml content of manifest file |
18,896 | def _initialize_plugin_system(self) -> None:
self._preloop_hooks = []
self._postloop_hooks = []
self._postparsing_hooks = []
self._precmd_hooks = []
self._postcmd_hooks = []
self._cmdfinalization_hooks = [] | Initialize the plugin system |
18,897 | def delete_ip_address(context, id):
LOG.info("delete_ip_address %s for tenant %s" % (id, context.tenant_id))
with context.session.begin():
ip_address = db_api.ip_address_find(
context, id=id, scope=db_api.ONE)
if not ip_address or ip_address.deallocated:
raise q_exc.IpAddressNotFound(addr_id=id)
iptype = ip_address.address_type
if iptype == ip_types.FIXED and not CONF.QUARK.ipaddr_allow_fixed_ip:
raise n_exc.BadRequest(
resource="ip_addresses",
msg="Fixed ips cannot be updated using this interface.")
if ip_address.has_any_shared_owner():
raise q_exc.PortRequiresDisassociation()
db_api.update_port_associations_for_ip(context, [], ip_address)
ipam_driver.deallocate_ip_address(context, ip_address) | Delete an ip address.
: param context: neutron api request context
: param id: UUID representing the ip address to delete. |
18,898 | def renders(self, template_content, context=None, at_paths=None,
at_encoding=anytemplate.compat.ENCODING, **kwargs):
kwargs = self.filter_options(kwargs, self.render_valid_options())
paths = anytemplate.utils.mk_template_paths(None, at_paths)
if context is None:
context = {}
LOGGER.debug("Render template %s... %s context, options=%s",
template_content[:10],
"without" if context is None else "with a",
str(kwargs))
return self.renders_impl(template_content, context, at_paths=paths,
at_encoding=at_encoding, **kwargs) | :param template_content: Template content
:param context: A dict or dict-like object to instantiate given
template file or None
:param at_paths: Template search paths
:param at_encoding: Template encoding
:param kwargs: Keyword arguments passed to the template engine to
render templates with specific features enabled.
:return: Rendered string |
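A hedged usage sketch for the renders() entry point above; it assumes anytemplate exposes a top-level renders() helper that dispatches to an engine's renders(), which is an assumption about the package API rather than something shown in this row.
# Hedged sketch; the top-level helper and the at_engine keyword are assumptions.
import anytemplate

print(anytemplate.renders("Hello {{ name }}!", {"name": "World"}, at_engine="jinja2"))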
18,899 | def p_version_def(t):
global name_dict
id = t[2]
value = t[8]
lineno = t.lineno(1)
if id_unique(id, 'version', lineno):
name_dict[id] = const_info(id, value, lineno) | version_def : VERSION ID LBRACE procedure_def procedure_def_list RBRACE EQUALS constant SEMI |