Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---
383,500 | def to_html(
        self,
        suppress_newlines=False,
        in_div_flag=False):
    # NOTE: the string literals in this row were lost in extraction;
    # the values below are best-guess reconstructions.
    if in_div_flag or self.in_div_flag:
        message = '<div %s>' % self.html_attributes()  # reconstructed
    else:
        message = ''
    last_was_text = False
    for m in self.message:
        if last_was_text and not isinstance(m, Text):
            message += '\n'  # reconstructed separator
        message += m.to_html()
        if isinstance(m, Text):
            last_was_text = True
        else:
            message += '\n'  # reconstructed separator
            last_was_text = False
    if in_div_flag:
        message += '</div>'  # reconstructed closing tag
    if suppress_newlines:
        return message.replace('\n', '')
return message | Render a MessageElement as html.
:param suppress_newlines: Whether to suppress any newlines in the
output. If this option is enabled, the entire html output will be
rendered on a single line.
:type suppress_newlines: bool
:param in_div_flag: Whether the message should be placed into an outer
div element.
:type in_div_flag: bool
:returns: HTML representation of the message.
:rtype: str |
383,501 | def xyz2lonlat(x, y, z):
lon = xu.rad2deg(xu.arctan2(y, x))
lat = xu.rad2deg(xu.arctan2(z, xu.sqrt(x**2 + y**2)))
    return lon, lat | Convert Cartesian (x, y, z) coordinates to longitude and latitude in degrees. |
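A minimal usage sketch of the row above. The module's `xu` alias is an assumption here (any NumPy-compatible ufunc namespace works, e.g. `import numpy as xu`), and the sample points are illustrative:

import numpy as xu  # stand-in for the source's `xu` alias (assumption)

def xyz2lonlat(x, y, z):
    lon = xu.rad2deg(xu.arctan2(y, x))
    lat = xu.rad2deg(xu.arctan2(z, xu.sqrt(x**2 + y**2)))
    return lon, lat

print(xyz2lonlat(1.0, 0.0, 0.0))  # (0.0, 0.0): equator at the prime meridian
print(xyz2lonlat(0.0, 0.0, 1.0))  # (0.0, 90.0): the north pole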
383,502 | def authenticate(self,
                 username=None, password=None,
                 actions=None, response=None,
                 authorization=None):
    # NOTE: string literals were lost in extraction; reconstructed from the
    # upstream python-dxf source, so treat them as a best guess.
    if response is None:
        with warnings.catch_warnings():
            _ignore_warnings(self)
            response = self._sessions[0].get(self._base_url, verify=self._tlsverify)
    if response.ok:
        return None
    # Otherwise, only 401 is expected here
    if response.status_code != requests.codes.unauthorized:
        raise exceptions.DXFUnexpectedStatusCodeError(response.status_code,
                                                      requests.codes.unauthorized)
    if self._insecure:
        raise exceptions.DXFAuthInsecureError()
    parsed = www_authenticate.parse(response.headers['www-authenticate'])
    if username is not None and password is not None:
        headers = {
            'Authorization': 'Basic ' + base64.b64encode(
                _to_bytes_2and3(username + ':' + password)).decode('utf-8')
        }
    elif authorization is not None:
        headers = {
            'Authorization': authorization
        }
    else:
        headers = {}
    if 'bearer' in parsed:
        info = parsed['bearer']
        if actions and self._repo:
            scope = 'repository:' + self._repo + ':' + ','.join(actions)
        elif 'scope' in info:
            scope = info['scope']
        else:
            scope = ''
        url_parts = list(urlparse.urlparse(info['realm']))
        query = urlparse.parse_qs(url_parts[4])
        query.update({
            'service': info['service'],
            'scope': scope
        })
        url_parts[4] = urlencode(query, True)
        url_parts[0] = 'https'
        if self._auth_host:
            url_parts[1] = self._auth_host
        auth_url = urlparse.urlunparse(url_parts)
        with warnings.catch_warnings():
            _ignore_warnings(self)
            r = self._sessions[0].get(auth_url, headers=headers, verify=self._tlsverify)
        _raise_for_status(r)
        rjson = r.json()
        self.token = rjson['token'] if 'token' in rjson else rjson['access_token']
        return self._token
    self._headers = headers
return None | Authenticate to the registry using a username and password,
an authorization header or otherwise as the anonymous user.
:param username: User name to authenticate as.
:type username: str
:param password: User's password.
:type password: str
:param actions: If you know which types of operation you need to make on the registry, specify them here. Valid actions are ``pull``, ``push`` and ``*``.
:type actions: list
:param response: When the ``auth`` function you passed to :class:`DXFBase`'s constructor is called, it is passed an HTTP response object. Pass it back to :meth:`authenticate` to have it automatically detect which actions are required.
:type response: requests.Response
:param authorization: ``Authorization`` header value.
:type authorization: str
:rtype: str
:returns: Authentication token, if the registry supports bearer tokens. Otherwise ``None``, and HTTP Basic auth is used (if the registry requires authentication). |
383,503 | def dataReceived(self, data):
    # NOTE: dict keys below were lost in extraction; 'subject_id' is grounded
    # in the docstring, 'hx_subscribe' is a best guess from the hendrix
    # websocket protocol.
    try:
        address = self.guid
        data = json.loads(data)
        threads.deferToThread(send_signal, self.dispatcher, data)
        if 'hx_subscribe' in data:
            return self.dispatcher.subscribe(self.transport, data)
        if 'subject_id' in data:
            address = data['subject_id']
        else:
            address = self.guid
        self.dispatcher.send(address, data)
    except Exception as e:
        # The bare `raise` makes the fallback send below unreachable;
        # kept as it appears in the source dump.
        raise
        self.dispatcher.send(
            self.guid,
            {'message': data, 'error': str(e)}
        ) | Takes "data", which we assume is JSON-encoded.
If data has a subject_id attribute, we pass that to the dispatcher
as the subject_id so it will get carried through into any
return communications and be identifiable to the client.
Otherwise it falls back to just passing the message along. |
383,504 | def stem(self, word, alternate_vowels=False):
    # NOTE: string literals were lost in extraction; reconstructed from the
    # published Snowball German algorithm (best guess).
    word = normalize('NFC', word.lower())
    word = word.replace('ß', 'ss')
    if len(word) > 2:
        for i in range(2, len(word)):
            if word[i] in self._vowels and word[i - 2] in self._vowels:
                if word[i - 1] == 'u':
                    word = word[: i - 1] + 'U' + word[i:]
                elif word[i - 1] == 'y':
                    word = word[: i - 1] + 'Y' + word[i:]
    if alternate_vowels:
        word = word.replace('ae', 'ä')
        word = word.replace('oe', 'ö')
        word = word.replace('que', 'Q')  # protect 'que' from the 'ue' rule
        word = word.replace('ue', 'ü')
        word = word.replace('Q', 'que')
    r1_start = max(3, self._sb_r1(word))
    r2_start = self._sb_r2(word)
    # Step 1
    niss_flag = False
    if word[-3:] == 'ern':
        if len(word[r1_start:]) >= 3:
            word = word[:-3]
    elif word[-2:] == 'em':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'er':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'en':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
            niss_flag = True
    elif word[-2:] == 'es':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
            niss_flag = True
    elif word[-1:] == 'e':
        if len(word[r1_start:]) >= 1:
            word = word[:-1]
            niss_flag = True
    elif word[-1:] == 's':
        if (
            len(word[r1_start:]) >= 1
            and len(word) >= 2
            and word[-2] in self._s_endings
        ):
            word = word[:-1]
    if niss_flag and word[-4:] == 'niss':
        word = word[:-1]
    # Step 2
    if word[-3:] == 'est':
        if len(word[r1_start:]) >= 3:
            word = word[:-3]
    elif word[-2:] == 'en':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'er':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'st':
        if (
            len(word[r1_start:]) >= 2
            and len(word) >= 6
            and word[-3] in self._st_endings
        ):
            word = word[:-2]
    # Step 3 (d-suffixes)
    if word[-4:] == 'isch':
        if len(word[r2_start:]) >= 4 and word[-5] != 'e':
            word = word[:-4]
    elif word[-4:] in {'lich', 'heit'}:
        if len(word[r2_start:]) >= 4:
            word = word[:-4]
            if word[-2:] in {'er', 'en'} and len(word[r1_start:]) >= 2:
                word = word[:-2]
    elif word[-4:] == 'keit':
        if len(word[r2_start:]) >= 4:
            word = word[:-4]
            if word[-4:] == 'lich' and len(word[r2_start:]) >= 4:
                word = word[:-4]
            elif word[-2:] == 'ig' and len(word[r2_start:]) >= 2:
                word = word[:-2]
    elif word[-3:] in {'end', 'ung'}:
        if len(word[r2_start:]) >= 3:
            word = word[:-3]
            if (
                word[-2:] == 'ig'
                and len(word[r2_start:]) >= 2
                and word[-3] != 'e'
            ):
                word = word[:-2]
    elif word[-2:] in {'ig', 'ik'}:
        if len(word[r2_start:]) >= 2 and word[-3] != 'e':
            word = word[:-2]
    # Restore the protected 'Y'/'U' markers, then strip umlauts
    for i in range(0, len(word)):
        if word[i] == 'Y':
            word = word[:i] + 'y' + word[i + 1 :]
        elif word[i] == 'U':
            word = word[:i] + 'u' + word[i + 1 :]
    _umlauts = dict(zip((ord(_) for _ in 'äöü'), 'aou'))
    word = word.translate(_umlauts)
return word | Return Snowball German stem.
Parameters
----------
word : str
The word to stem
alternate_vowels : bool
Composes ae as ä, oe as ö, and ue as ü before running the algorithm
Returns
-------
str
Word stem
Examples
--------
>>> stmr = SnowballGerman()
>>> stmr.stem('lesen')
'les'
>>> stmr.stem('graues')
'grau'
>>> stmr.stem('buchstabieren')
'buchstabi' |
383,505 | def set_shape(self, id, new_shape):
old_shape = self.id_to_shape[id]
old_buffer = self.get_buffer(old_shape)
model, color = old_buffer.get(id)
new_data = self._create_turtle(id, new_shape, model, color)
old_buffer.remove(id)
self.id_to_shape[id] = new_shape
    return new_data | Copies the turtle data from the old shape buffer to the new one. |
383,506 | def _Initialize(self, http, url):
self.EnsureUninitialized()
if self.http is None:
self.__http = http or http_wrapper.GetHttp()
self.__url = url | Initialize this download by setting self.http and self.url.
We want the user to be able to override self.http by having set
the value in the constructor; in that case, we ignore the provided
http.
Args:
http: An httplib2.Http instance or None.
url: The url for this transfer.
Returns:
None. Initializes self. |
383,507 | def offer(self, item):
try:
self._buffer.put(item, block=False)
if self._consumer_callback is not None:
self._consumer_callback()
return True
except Queue.Full:
Log.debug("%s: Full in offer()" % str(self))
        raise Queue.Full | Offer an item to the buffer.
This is a non-blocking operation; when the buffer is full, it raises the Queue.Full exception. |
383,508 | def _clean_up_gene_id(geneid, sp, curie_map):
    # NOTE: the regex pattern/replacement literals and the species strings in
    # this row were lost in extraction and could not be recovered; the empty
    # argument slots below mark those gaps. `pfxlcl` is also referenced
    # without a visible definition, suggesting further lines were dropped.
    geneid = re.sub(r'', '', geneid)
    geneid = re.sub(r'', '', geneid)
    geneid = re.sub(r'', '', geneid)
    if sp == '':
        if re.match(r'', geneid):
            geneid = re.sub(
                r'',
                r'', geneid)
    if sp == '':
        if re.match(r'', geneid):
            geneid = re.sub(
                r'', r'', geneid)
    geneid = re.sub(r'', '', geneid)
    geneid = re.sub(r'', '', geneid)
    geneid = re.sub(r'', '', geneid)
    geneid = re.sub(r'', '', geneid)
    geneid = re.sub(r'', '', geneid)
    geneid = re.sub(r'', '', geneid)
    pfx = pfxlcl[0]
    if pfx is None or pfx not in curie_map:
        geneid = None
    return geneid | A series of identifier rewrites to conform to
standard gene identifiers.
:param geneid:
:param sp:
:return: |
383,509 | def do_mumble(self, args):
repetitions = args.repeat or 1
for i in range(min(repetitions, self.maxrepeats)):
output = []
if random.random() < .33:
output.append(random.choice(self.MUMBLE_FIRST))
for word in args.words:
if random.random() < .40:
output.append(random.choice(self.MUMBLES))
output.append(word)
if random.random() < .25:
output.append(random.choice(self.MUMBLE_LAST))
        self.poutput(' '.join(output)) | Mumbles what you tell me to. |
383,510 | def MergeData(self, merge_data, raw_data=None):
self.FlushCache()
if raw_data is None:
raw_data = self.raw_data
for k, v in iteritems(merge_data):
if isinstance(v, dict) and k not in self.type_infos:
if k not in self.valid_contexts:
raise InvalidContextError("Invalid context specified: %s" % k)
context_data = raw_data.setdefault(k, collections.OrderedDict())
self.MergeData(v, context_data)
else:
descriptor = self.type_infos.get(k)
if descriptor is None:
msg = ("Missing config definition for %s. This option is likely "
"deprecated or renamed. Check the release notes." % k)
if flags.FLAGS.disallow_missing_config_definitions:
raise MissingConfigDefinitionError(msg)
if isinstance(v, string_types):
v = v.strip()
if self.initialized and k in self.constants:
raise ConstModificationError(
"Attempting to modify constant value %s" % k)
raw_data[k] = v | Merges data read from a config file into the current config. |
383,511 | def printDuplicatedTPEDandTFAM(tped, tfam, samples, oldSamples, prefix):
    # Write the TPED of the duplicated samples
    outputTPED = None
    try:
        outputTPED = open(prefix + ".duplicated_samples.tped", "w")
    except IOError:
        msg = "%(prefix)s.duplicated_samples.tped: can't write " \
              "file" % locals()
        raise ProgramError(msg)
    # NOTE: the block that writes the TPED rows and builds `newTFAM` /
    # `outputTFAM` for the TFAM file appears to have been lost in
    # extraction; only the trailing TFAM write loop survives.
    for row in newTFAM:
        print >>outputTFAM, "\t".join(row)
outputTFAM.close() | Print the TPED and TFAM of the duplicated samples.
:param tped: the ``tped`` containing duplicated samples.
:param tfam: the ``tfam`` containing duplicated samples.
:param samples: the updated position of the samples in the tped containing
only duplicated samples.
:param oldSamples: the original duplicated sample positions.
:param prefix: the prefix of all the files.
:type tped: :py:class:`numpy.array`
:type tfam: :py:class:`numpy.array`
:type samples: dict
:type oldSamples: dict
:type prefix: str
The ``tped`` and ``tfam`` files are written in
``prefix.duplicated_samples.tped`` and ``prefix.duplicated_samples.tfam``,
respectively. |
383,512 | def increment(self, delta=1, text=None):
return self.update(value=min(self.max, self.value + delta), text=text) | Redraw the progress bar, incrementing the value by delta
(default=1) and optionally changing the text. Returns the
ProgressBar's new value. See also .update(). |
383,513 | def _proc_member(self, tarfile):
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile) | Choose the right processing method depending on
the type and call it. |
383,514 | def attach_tracker(self, stanza, tracker=None):
if stanza.xep0184_received is not None:
raise ValueError(
"requesting delivery receipts for delivery receipts is not "
"allowed"
)
if stanza.type_ == aioxmpp.MessageType.ERROR:
raise ValueError(
"requesting delivery receipts for errors is not supported"
)
if tracker is None:
tracker = aioxmpp.tracking.MessageTracker()
stanza.xep0184_request_receipt = True
stanza.autoset_id()
self._bare_jid_maps[stanza.to, stanza.id_] = tracker
return tracker | Return a new tracker or modify one to track the stanza.
:param stanza: Stanza to track.
:type stanza: :class:`aioxmpp.Message`
:param tracker: Existing tracker to attach to.
:type tracker: :class:`.tracking.MessageTracker`
:raises ValueError: if the stanza is of type
:attr:`~aioxmpp.MessageType.ERROR`
:raises ValueError: if the stanza contains a delivery receipt
:return: The message tracker for the stanza.
:rtype: :class:`.tracking.MessageTracker`
The `stanza` gets a :xep:`184` receipt request attached and internal
handlers are set up to update the `tracker` state once a confirmation
is received.
.. warning::
See the :ref:`api-tracking-memory`. |
383,515 | def remove(self, elem):
self._values.remove(elem)
self._message_listener.Modified() | Removes an item from the list. Similar to list.remove(). |
383,516 | def _lstrip_word(word, prefix):
if six.text_type(word).startswith(prefix):
return six.text_type(word)[len(prefix):]
return word | Return a copy of the string after the specified prefix was removed
from the beginning of the string |
383,517 | def stat(filename, retry_params=None, _account_id=None):
common.validate_file_path(filename)
api = storage_api._get_storage_api(retry_params=retry_params,
account_id=_account_id)
status, headers, content = api.head_object(
api_utils._quote_filename(filename))
errors.check_status(status, [200], filename, resp_headers=headers,
body=content)
    file_stat = common.GCSFileStat(
        filename=filename,
        st_size=common.get_stored_content_length(headers),
        # header names below were lost in extraction; reconstructed from
        # the standard HTTP/GCS response headers (best guess)
        st_ctime=common.http_time_to_posix(headers.get('last-modified')),
        etag=headers.get('etag'),
        content_type=headers.get('content-type'),
        metadata=common.get_metadata(headers))
return file_stat | Get GCSFileStat of a Google Cloud storage file.
Args:
filename: A Google Cloud Storage filename of form '/bucket/filename'.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
_account_id: Internal-use only.
Returns:
a GCSFileStat object containing info about this file.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if an object that's expected to exist doesn't. |
383,518 | def get_heat_capacity(self, temperature, structure, n, u, cutoff=1e2):
k = 1.38065e-23
kt = k*temperature
hbar_w = 1.05457e-34*self.omega(structure, n, u)
if hbar_w > kt * cutoff:
return 0.0
c = k * (hbar_w / kt) ** 2
c *= np.exp(hbar_w / kt) / (np.exp(hbar_w / kt) - 1)**2
return c * 6.022e23 | Gets the directional heat capacity for a higher order tensor
expansion as a function of direction and polarization.
Args:
temperature (float): Temperature in kelvin
structure (float): Structure to be used in directional heat
capacity determination
n (3x1 array-like): direction for Cv determination
u (3x1 array-like): polarization direction, note that
no attempt for verification of eigenvectors is made
cutoff (float): cutoff for scale of kt / (hbar * omega)
if lower than this value, returns 0 |
383,519 | def nodeprep(string, allow_unassigned=False):
chars = list(string)
_nodeprep_do_mapping(chars)
do_normalization(chars)
check_prohibited_output(
chars,
(
stringprep.in_table_c11,
stringprep.in_table_c12,
stringprep.in_table_c21,
stringprep.in_table_c22,
stringprep.in_table_c3,
stringprep.in_table_c4,
stringprep.in_table_c5,
stringprep.in_table_c6,
stringprep.in_table_c7,
stringprep.in_table_c8,
stringprep.in_table_c9,
lambda x: x in _nodeprep_prohibited
))
check_bidi(chars)
if not allow_unassigned:
check_unassigned(
chars,
(
stringprep.in_table_a1,
)
)
return "".join(chars) | Process the given `string` using the Nodeprep (`RFC 6122`_) profile. In the
error cases defined in `RFC 3454`_ (stringprep), a :class:`ValueError` is
raised. |
383,520 | def parse(self):
    token_type = self.current_token.type.lower()
    # handler name and error message reconstructed (best guess);
    # the f-string contents were lost in extraction
    handler = getattr(self, f'parse_{token_type}', None)
    if handler is None:
        raise self.error(f'Invalid token of type {token_type!r}')
return handler() | Parse and return an nbt literal from the token stream. |
383,521 | def get_queue_info(self, instance, cursor):
cursor.execute(self.QUEUE_INFO_STATEMENT)
for queue_name, ticker_lag, ev_per_sec in cursor:
        yield queue_name, {
            # dict keys were lost in extraction; reconstructed from the
            # unpacked cursor columns (best guess)
            'ticker_lag': ticker_lag,
            'ev_per_sec': ev_per_sec,
} | Collects metrics for all queues on the connected database. |
383,522 | def _check_valid_translation(self, translation):
    # error messages reconstructed (best guess); literals lost in extraction
    if not isinstance(translation, np.ndarray) or not np.issubdtype(translation.dtype, np.number):
        raise ValueError('Translation must be specified as a numeric numpy array')
    t = translation.squeeze()
    if len(t.shape) != 1 or t.shape[0] != 3:
        raise ValueError('Translation must be specified as a 3-vector') | Checks that the translation vector is valid. |
383,523 | def merge_requests(self, **kwargs):
    path = '%s/%s/merge_requests' % (self.manager.path, self.get_id())  # path template reconstructed (best guess)
data_list = self.manager.gitlab.http_list(path, as_list=False,
**kwargs)
manager = ProjectMergeRequestManager(self.manager.gitlab,
parent=self.manager._parent)
return RESTObjectList(manager, ProjectMergeRequest, data_list) | List the merge requests related to this milestone.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: The list of merge requests |
383,524 | def retrieve(self, cursor):
assert isinstance(cursor, dict), "expected cursor type "
query = self.get_query()
assert isinstance(query, peewee.Query)
query
return query.get(**cursor) | Retrieve items from query |
383,525 | def listurl_get(self, q, **kwargs):
    # NOTE: string literals were lost in extraction; reconstructed from the
    # Taobao TOP API conventions (best guess)
    request = TOPRequest('taobao.taobaoke.listurl.get')
    request['q'] = q
    for k, v in kwargs.iteritems():
        if k not in ('nick', 'outer_code', 'pid') and v == None: continue
        request[k] = v
    self.create(self.execute(request), fields=['taobaoke_item'], models={'taobaoke_item': TaobaokeItem})
    return self.taobaoke_item | taobao.taobaoke.listurl.get — Taobaoke (Taobao affiliate) keyword-search URL. |
383,526 | def request(self, location, fragment_enc=False):
_l = as_unicode(location)
_qp = as_unicode(self.to_urlencoded())
if fragment_enc:
return "%s
else:
if "?" in location:
return "%s&%s" % (_l, _qp)
else:
return "%s?%s" % (_l, _qp) | Given a URL this method will add a fragment, a query part or extend
a query part if it already exists with the information in this instance.
:param location: A URL
:param fragment_enc: Whether the information should be placed in a
fragment (True) or in a query part (False)
:return: The extended URL |
383,527 | def fetch(self):
    params = values.of({})
    payload = self._version.fetch(
        'GET',  # HTTP method literal reconstructed (best guess)
        self._uri,
        params=params,
    )
    return VariableInstance(
        self._version,
        payload,
        # solution key names reconstructed from the keyword arguments
        service_sid=self._solution['service_sid'],
        environment_sid=self._solution['environment_sid'],
        sid=self._solution['sid'],
) | Fetch a VariableInstance
:returns: Fetched VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance |
383,528 | def size(config, accounts=(), day=None, group=None, human=True, region=None):
    # NOTE: string literals were lost in extraction; reconstructed from the
    # surrounding boto3/tabulate idioms (best guess, including the sort key).
    config = validate.callback(config)
    destination = config.get('destination')
    client = boto3.Session().client('s3')
    day = parse(day)

    def export_size(client, account):
        paginator = client.get_paginator('list_objects_v2')
        count = 0
        size = 0
        session = get_session(account['role'], region)
        account_id = session.client('sts').get_caller_identity()['Account']
        prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id
        prefix = "%s/%s/%s" % (prefix, group, day.strftime("%Y/%m/%d"))
        account['account_id'] = account_id
        for page in paginator.paginate(
                Bucket=destination['bucket'],
                Prefix=prefix):
            for k in page.get('Contents', ()):
                size += k['Size']
                count += 1
        return (count, size)

    total_size = 0
    accounts_report = []
    logging.getLogger().setLevel(logging.ERROR)
    with ThreadPoolExecutor(max_workers=16) as w:
        futures = {}
        for account in config.get('accounts'):
            if accounts and account['name'] not in accounts:
                continue
            futures[w.submit(export_size, client, account)] = account
        for f in as_completed(futures):
            account = futures[f]
            count, size = f.result()
            account.pop('role')
            account.pop('groups')
            total_size += size
            if human:
                account['size'] = GetHumanSize(size)
            else:
                account['size'] = size
            account['count'] = count
            accounts_report.append(account)
    accounts_report.sort(key=operator.itemgetter('count'), reverse=True)
    print(tabulate(accounts_report, headers='keys'))
    log.info("total size:%s", GetHumanSize(total_size)) | size of exported records for a given day. |
383,529 | def request(self, url, method='GET', params=None, data=None,
            expected_response_code=200):
    # default method and auth key names reconstructed (best guess); the
    # legacy InfluxDB API passes credentials as 'u' and 'p' parameters
    url = "{0}/{1}".format(self._baseurl, url)
    if params is None:
        params = {}
    auth = {
        'u': self._username,
        'p': self._password
    }
params.update(auth)
if data is not None and not isinstance(data, str):
data = json.dumps(data)
retry = True
_try = 0
while retry:
try:
response = session.request(
method=method,
url=url,
params=params,
data=data,
headers=self._headers,
verify=self._verify_ssl,
timeout=self._timeout
)
break
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout):
_try += 1
if self._retries != 0:
retry = _try < self._retries
else:
raise requests.exceptions.ConnectionError
if response.status_code == expected_response_code:
return response
else:
raise InfluxDBClientError(response.content, response.status_code) | Make a http request to API. |
383,530 | def get_parent_tag(mention):
span = _to_span(mention)
i = _get_node(span.sentence)
return str(i.getparent().tag) if i.getparent() is not None else None | Return the HTML tag of the Mention's parent.
These may be tags such as 'p', 'h2', 'table', 'div', etc.
If a candidate is passed in, only the tag of its first Mention is returned.
:param mention: The Mention to evaluate
:rtype: string |
383,531 | def _item_list(profile=None, **connection_args):
    kstone = auth(profile, **connection_args)
    ret = []
    for item in kstone.items.list():
        ret.append(item.__dict__)
return ret | Template for writing list functions
Return a list of available items (keystone items-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.item_list |
383,532 | def endswith(self, name: str) -> List[str]:
return sorted(keyword for keyword in self if keyword.endswith(name)) | Return a list of all keywords ending with the given string.
>>> from hydpy.core.devicetools import Keywords
>>> keywords = Keywords('first_keyword', 'second_keyword',
... 'keyword_3', 'keyword_4',
... 'keyboard')
>>> keywords.endswith('keyword')
['first_keyword', 'second_keyword'] |
383,533 | def parse_clubs(self, clubs_page):
    # NOTE: tag names, attribute keys and regexes were lost in extraction;
    # reconstructed from the MAL page structure (best guess).
    character_info = self.parse_sidebar(clubs_page)
    second_col = clubs_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]
    try:
        clubs_header = second_col.find(u'div', text=u'Related Clubs')
        character_info[u'clubs'] = []
        if clubs_header:
            curr_elt = clubs_header.nextSibling
            while curr_elt is not None:
                if curr_elt.name == u'div':
                    link = curr_elt.find(u'a')
                    club_id = int(re.match(r'/clubs\.php\?cid=(?P<id>[0-9]+)', link.get(u'href')).group(u'id'))
                    num_members = int(re.match(r'(?P<num>[0-9]+) members', curr_elt.find(u'small').text).group(u'num'))
                    character_info[u'clubs'].append(self.session.club(club_id).set({u'name': link.text, u'num_members': num_members}))
                curr_elt = curr_elt.nextSibling
    except:
        if not self.session.suppress_parse_exceptions:
            raise
return character_info | Parses the DOM and returns character clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL character clubs page's DOM
:rtype: dict
:return: character clubs attributes. |
383,534 | def deserialize(self,
node: SchemaNode,
cstruct: Union[str, ColanderNullType]) \
-> Optional[Pendulum]:
if not cstruct:
return colander.null
try:
result = coerce_to_pendulum(cstruct,
assume_local=self.use_local_tz)
except (ValueError, ParserError) as e:
raise Invalid(node, "Invalid date/time: value={!r}, error="
"{!r}".format(cstruct, e))
return result | Deserializes string representation to Python object. |
383,535 | def content_recommendations(access_token, content_item_id):
    # header and dict-key literals reconstructed (best guess)
    headers = {'Authorization': 'Bearer ' + str(access_token)}
    recommendations_url =\
        construct_content_recommendations_url(enrichment_url, content_item_id)
    request = requests.get(recommendations_url, headers=headers)
    if request.status_code == 200:
        recommendations = request.json()
        return recommendations
    return {'status': request.status_code, "message": request.text} | Name: content_recommendations
Parameters: access_token, content_item_id
Return: dictionary |
383,536 | def restore(self, value, context=None):
    value = super(DatetimeWithTimezoneColumn, self).restore(value, context)
    if value in ('today', 'now'):  # keyword literals reconstructed (best guess)
        value = datetime.datetime.now()  # original read `datetime.date.now()`, which does not exist
    if isinstance(value, datetime.datetime):
        tz = pytz.timezone(context.timezone)
        if tz is not None:
            if value.tzinfo is None:
                base_tz = pytz.timezone(orb.system.settings().server_timezone)
                if base_tz == tz:
                    value = tz.fromutc(value)
                else:
                    value = base_tz.fromutc(value).astimezone(tz)
            else:
                value = value.astimezone(tz)
        else:
            log.warning('No timezone defined in the context.')  # message reconstructed
return value | Restores the value from a table cache for usage.
:param value | <variant>
context | <orb.Context> || None |
383,537 | def v_unique_name_children(ctx, stmt):
def sort_pos(p1, p2):
if p1.line < p2.line:
return (p1,p2)
else:
return (p2,p1)
dict = {}
chs = stmt.i_children
def check(c):
key = (c.i_module.i_modulename, c.arg)
if key in dict:
dup = dict[key]
(minpos, maxpos) = sort_pos(c.pos, dup.pos)
pos = chk_uses_pos(c, maxpos)
            err_add(ctx.errors, pos,
                    'DUPLICATE_CHILD_NAME',  # error tag reconstructed (best guess)
                    (stmt.arg, stmt.pos, c.arg, minpos))
        else:
            dict[key] = c
        if c.keyword == 'choice':  # keyword literal reconstructed
for case in c.i_children:
for cc in case.i_children:
check(cc)
for c in chs:
check(c) | Make sure that each child of stmt has a unique name |
383,538 | def print_partlist(input, timeout=20, showgui=False):
print raw_partlist(input=input, timeout=timeout, showgui=showgui) | print partlist text delivered by eagle
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None |
383,539 | def c_join(self, other, psi=-40.76, omega=-178.25, phi=-65.07,
o_c_n_angle=None, c_n_ca_angle=None, c_n_length=None,
relabel=True):
if isinstance(other, Residue):
other = Polypeptide([other])
if not isinstance(other, Polypeptide):
raise TypeError(
)
if abs(omega) >= 90:
peptide_conformation =
else:
peptide_conformation =
if o_c_n_angle is None:
o_c_n_angle = ideal_backbone_bond_angles[peptide_conformation][]
if c_n_ca_angle is None:
c_n_ca_angle = ideal_backbone_bond_angles[peptide_conformation][]
if c_n_length is None:
c_n_length = ideal_backbone_bond_lengths[]
r1 = self[-1]
r1_ca = r1[]._vector
r1_c = r1[]._vector
r1_o = r1[]._vector
p1 = r1_o[:]
axis = numpy.cross((r1_ca - r1_c), (r1_o - r1_c))
q = Quaternion.angle_and_axis(angle=o_c_n_angle, axis=axis)
p1 = q.rotate_vector(v=p1, point=r1_c)
p1 = r1_c + (c_n_length * unit_vector(p1 - r1_c))
measured_psi = dihedral(r1[], r1[], r1[], p1)
q = Quaternion.angle_and_axis(
angle=(psi - measured_psi), axis=(r1_c - r1_ca))
p1 = q.rotate_vector(v=p1, point=r1_c)
r1[]._vector = q.rotate_vector(v=r1_o, point=r1_c)
other.translate(vector=(p1 - other[0][]._vector))
v1 = r1_c - other[0][]._vector
v2 = other[0][]._vector - other[0][]._vector
measured_c_n_ca = angle_between_vectors(v1, v2)
axis = numpy.cross(v1, v2)
other.rotate(angle=(c_n_ca_angle - measured_c_n_ca),
axis=axis, point=other[0][]._vector)
measured_omega = dihedral(
r1[], r1[], other[0][], other[0][])
other.rotate(angle=(omega - measured_omega),
axis=(other[0][] - r1[]), point=other[0][]._vector)
measured_phi = dihedral(
r1[], other[0][], other[0][], other[0][])
other.rotate(angle=(phi - measured_phi),
axis=(other[0][] - other[0][]), point=other[0][]._vector)
self.extend(other)
if relabel:
self.relabel_all()
self.tags[] = False
return | Joins other to self at the C-terminus via a peptide bond.
Notes
-----
This function directly modifies self. It does not return a new object.
Parameters
----------
other: Residue or Polypeptide
psi: float, optional
Psi torsion angle (degrees) between final `Residue` of self
and first `Residue` of other.
omega: float, optional
Omega torsion angle (degrees) between final `Residue` of
self and first `Residue` of other.
phi: float, optional
Phi torsion angle (degrees) between final `Residue` of self
and first `Residue` of other.
o_c_n_angle: float or None, optional
Desired angle between O, C (final `Residue` of self) and N
(first `Residue` of other) atoms. If `None`, default value is
taken from `ideal_backbone_bond_angles`.
c_n_ca_angle: float or None, optional
Desired angle between C (final `Residue` of self) and N, CA
(first `Residue` of other) atoms. If `None`, default value is
taken from `ideal_backbone_bond_angles`.
c_n_length: float or None, optional
Desired peptide bond length between final `Residue` of self
and first `Residue` of other. If `None`, default value is taken
from `ideal_backbone_bond_lengths`.
relabel: bool, optional
If `True`, `relabel_all` is run on self before returning.
Raises
------
TypeError:
If other is not a `Residue` or a Polypeptide. |
383,540 | def initializeSessionAsBob(sessionState, sessionVersion, parameters):
sessionState.setSessionVersion(sessionVersion)
sessionState.setRemoteIdentityKey(parameters.getTheirIdentityKey())
sessionState.setLocalIdentityKey(parameters.getOurIdentityKey().getPublicKey())
secrets = bytearray()
if sessionVersion >= 3:
secrets.extend(RatchetingSession.getDiscontinuityBytes())
secrets.extend(Curve.calculateAgreement(parameters.getTheirIdentityKey().getPublicKey(),
parameters.getOurSignedPreKey().getPrivateKey()))
secrets.extend(Curve.calculateAgreement(parameters.getTheirBaseKey(),
parameters.getOurIdentityKey().getPrivateKey()))
secrets.extend(Curve.calculateAgreement(parameters.getTheirBaseKey(),
parameters.getOurSignedPreKey().getPrivateKey()))
if sessionVersion >= 3 and parameters.getOurOneTimePreKey() is not None:
secrets.extend(Curve.calculateAgreement(parameters.getTheirBaseKey(),
parameters.getOurOneTimePreKey().getPrivateKey()))
derivedKeys = RatchetingSession.calculateDerivedKeys(sessionVersion, secrets)
sessionState.setSenderChain(parameters.getOurRatchetKey(), derivedKeys.getChainKey())
sessionState.setRootKey(derivedKeys.getRootKey()) | :type sessionState: SessionState
:type sessionVersion: int
:type parameters: BobAxolotlParameters |
383,541 | def a_alpha_and_derivatives(self, T, full=True, quick=True):
    if not full:
        return self.a
    else:
        a_alpha = self.a
        da_alpha_dT = 0.0
        d2a_alpha_dT2 = 0.0
return a_alpha, da_alpha_dT, d2a_alpha_dT2 | r'''Method to calculate `a_alpha` and its first and second
derivatives for this EOS. Returns `a_alpha`, `da_alpha_dT`, and
`d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
documentation. Uses the set values of `a`.
.. math::
a\alpha = a
\frac{d a\alpha}{dT} = 0
\frac{d^2 a\alpha}{dT^2} = 0 |
383,542 | def team(self):
    team_dict = self._json_data.get('team')  # key literals reconstructed (best guess)
    if team_dict and team_dict.get('id'):
        return self._client.team(id=team_dict.get('id'))
else:
return None | Team to which the scope is assigned. |
383,543 | def doc2md(docstr, title, min_level=1, more_info=False, toc=True, maxdepth=0):
text = doctrim(docstr)
    lines = text.split('\n')  # split into lines; separator literal lost in extraction
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
shiftlevel = 0
if level < min_level:
shiftlevel = min_level - level
level = min_level
sections = [(lev+shiftlevel, tit) for lev,tit in sections]
head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
md = [
make_heading(level, title),
"",
] + lines[:head]
if toc:
md += make_toc(sections, maxdepth)
        md += ['']  # blank line after the ToC (reconstructed, best guess)
md += _doc2md(lines[head:], shiftlevel)
if more_info:
return (md, sections)
else:
return "\n".join(md) | Convert a docstring to a markdown text. |
383,544 | def getTableMisnestedNodePosition(self):
lastTable = None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore | Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node |
383,545 | def _ParseValueData(self, knowledge_base, value_data):
    if not isinstance(value_data, py2to3.UNICODE_TYPE):
        raise errors.PreProcessFail(
            # message literal reconstructed (best guess)
            'Unsupported Windows Registry value type: {0!s} for artifact: {1:s}.'.format(
                type(value_data), self.ARTIFACT_DEFINITION_NAME))
if not knowledge_base.GetHostname():
hostname_artifact = artifacts.HostnameArtifact(name=value_data)
knowledge_base.SetHostname(hostname_artifact) | Parses Windows Registry value data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
value_data (object): Windows Registry value data.
Raises:
errors.PreProcessFail: if the preprocessing fails. |
383,546 | def remove_request(self, uuid):
for key in list(self._request):
if self._request[key] == uuid:
del self._request[key] | Remove any RPC request(s) using this uuid.
:param str uuid: Rpc Identifier.
:return: |
383,547 | def fix(self, *args, **kwargs):
self.set(**kwargs)
pnames = list(args) + list(kwargs.keys())
for pname in pnames:
if not pname in self._pnames:
self._error("Naughty. is not a valid fit parameter name.")
else:
n = self._pnames.index(pname)
if self.results: value = self.results[0][n]
else: value = self._pguess[n]
if type(self._pnames) is not list: self._pnames = list(self._pnames)
if type(self._pguess) is not list: self._pguess = list(self._pguess)
if type(self._cnames) is not list: self._cnames = list(self._cnames)
if type(self._constants) is not list: self._constants = list(self._constants)
self._pnames.pop(n)
self._pguess.pop(n)
self._cnames.append(pname)
self._constants.append(value)
self._update_functions()
return self | Turns parameters to constants. As arguments, parameters must be strings.
As keyword arguments, they can be set at the same time.
Note this will NOT work when specifying a non-string fit function,
because there is no flexibility in the number of arguments. To get
around this, suppose you've defined a function stuff(x,a,b). Instead
of sending the stuff object to self.set_functions() directly, make it
a string function, e.g.:
self.set_functions('stuff(x,a,b)', 'a,b', stuff=stuff) |
383,548 | def parent_link_record_exists(self):
if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')  # message reconstructed (best guess)
return self.dr_entries.pl_record is not None or self.ce_entries.pl_record is not None | Determine whether this Rock Ridge entry has a parent link record (used
for relocating deep directory records).
Parameters:
None:
Returns:
True if this Rock Ridge entry has a parent link record, False otherwise. |
383,549 | def monitor_experiment(args):
    if args.time <= 0:
        print_error('please input a positive integer as the time interval')  # message reconstructed (best guess)
        exit(1)
    while True:
        try:
            os.system('clear')  # command literal reconstructed (best guess)
update_experiment()
show_experiment_info()
time.sleep(args.time)
except KeyboardInterrupt:
exit(0)
except Exception as exception:
print_error(exception)
exit(1) | monitor the experiment |
383,550 | def to_cartesian(r, theta, theta_units="radians"):
    assert theta_units in ['radians', 'degrees'],\
        "kwarg theta_units must be specified in radians or degrees"
if theta_units == "degrees":
theta = to_radians(theta)
theta = to_proper_radians(theta)
x = r * cos(theta)
y = r * sin(theta)
return x, y | Converts polar r, theta to cartesian x, y. |
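A quick sanity check of the row above. `to_radians` and `to_proper_radians` are stubbed here with plausible behaviour (degree conversion and wrapping into [-π, π]), since only their names appear in the source:

from math import cos, sin, pi

def to_radians(theta):
    return theta * pi / 180.0  # plausible stub

def to_proper_radians(theta):
    # plausible stub: wrap the angle into [-pi, pi]
    while theta > pi:
        theta -= 2 * pi
    while theta < -pi:
        theta += 2 * pi
    return theta

def to_cartesian(r, theta, theta_units="radians"):
    assert theta_units in ['radians', 'degrees']
    if theta_units == "degrees":
        theta = to_radians(theta)
    theta = to_proper_radians(theta)
    return r * cos(theta), r * sin(theta)

print(to_cartesian(2.0, 90, theta_units="degrees"))  # ~(0.0, 2.0)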
383,551 | def cpp_app_builder(build_context, target):
    # NOTE: message and path literals were lost in extraction;
    # reconstructed (best guess)
    yprint(build_context.conf, 'Building C++ app', target)
    if target.props.executable and target.props.main:
        raise KeyError(
            '`executable` and `main` are mutually exclusive')
    if target.props.executable:
        if target.props.executable not in target.artifacts.get(AT.app):
            target.artifacts.add(AT.app, target.props.executable)
        entrypoint = [target.props.executable]
    elif target.props.main:
        prog = build_context.targets[target.props.main]
        binary = list(prog.artifacts.get(AT.binary).keys())[0]
        entrypoint = ['/usr/src/app/' + binary]  # path prefix is a guess
    else:
        raise KeyError('Must specify either `executable` or `main`')
    build_app_docker_and_bin(
        build_context, target, entrypoint=entrypoint) | Pack a C++ binary as a Docker image with its runtime dependencies.
TODO(itamar): Dynamically analyze the binary and copy shared objects
from its buildenv image to the runtime image, unless they're installed. |
383,552 | def _carregar(self):
    if self._convencao is None:
        # extension literals reconstructed (best guess): Windows DLLs
        if self._caminho.endswith(('.dll', '.DLL')):
            self._convencao = constantes.WINDOWS_STDCALL
        else:
            self._convencao = constantes.STANDARD_C
    if self._convencao == constantes.STANDARD_C:
        loader = ctypes.CDLL
    elif self._convencao == constantes.WINDOWS_STDCALL:
        loader = ctypes.WinDLL
    else:
        raise ValueError('Unknown calling convention: {!r}'.format(  # message reconstructed
            self._convencao))
    self._libsat = loader(self._caminho) | Loads (or reloads) the SAT library. If the calling convention
has not yet been set, it is determined from the extension of
the library file.
:raises ValueError: If the calling convention cannot be determined
    or is not a valid value. |
383,553 | def fa(a, b, alpha=2):
return np.sum((a > b / alpha) & (a < b * alpha), dtype=float) / len(a) * 100 | Returns the factor of 'alpha' (2 or 5 normally) |
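A short worked example for the row above, using illustrative arrays; the function counts how many elements of `a` lie within a factor of `alpha` of the paired element of `b`:

import numpy as np

def fa(a, b, alpha=2):
    return np.sum((a > b / alpha) & (a < b * alpha), dtype=float) / len(a) * 100

a = np.array([1.0, 2.0, 4.0, 9.0])  # e.g. model predictions
b = np.array([1.5, 5.0, 4.2, 8.0])  # e.g. observations
# 2.0 falls outside [5.0/2, 5.0*2]; the other three pairs agree within 2x
print(fa(a, b, alpha=2))  # 75.0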
383,554 | def map(self, f_list: List[Callable[[np.ndarray], int]], *, axis: int = 0, chunksize: int = 1000, selection: np.ndarray = None) -> List[np.ndarray]:
return self.layers[""].map(f_list, axis, chunksize, selection) | Apply a function along an axis without loading the entire dataset in memory.
Args:
f: Function(s) that takes a numpy ndarray as argument
axis: Axis along which to apply the function (0 = rows, 1 = columns)
chunksize: Number of rows (columns) to load per chunk
selection: Columns (rows) to include
Returns:
numpy.ndarray result of function application
The result is a list of numpy arrays, one per supplied function in f_list.
This is more efficient than repeatedly calling map() one function at a time. |
383,555 | async def close(self):
if self._server:
self._server.close()
self._server = None
        self.event('stop').fire() | Stop serving the :attr:`.Server.sockets` and close all
concurrent connections. |
383,556 | def unzip(self, directory):
    if not os.path.exists(directory):
        os.makedirs(directory)
    # NOTE: shutil.copytree requires the destination not to exist, so the
    # makedirs above will make it raise; kept as in the source dump
    shutil.copytree(self.src_dir, directory) | Write contents of zipfile to directory |
383,557 | def get_all_item_data(items, conn, graph=None, output=, **kwargs):
    # NOTE: kwarg/key literals were lost in extraction; reconstructed (best guess)
    if kwargs.get('template'):
        template = kwargs.pop('template')
    else:
        template = "sparqlAllItemDataTemplate.rq"
    template_kwargs = {"prefix": NSM.prefix(), "output": output}
    if isinstance(items, list):
        template_kwargs['uri_list'] = items
    else:
        template_kwargs['item_uri'] = Uri(items).sparql
    if kwargs.get("special_union"):
        template_kwargs['special_union'] = kwargs.get("special_union")
    if kwargs.get('rdfclass'):
        template_kwargs.update(kwargs['rdfclass'].query_kwargs)
    if kwargs.get("filters"):
        template_kwargs['filters'] = make_sparql_filter(kwargs.get('filters'))
sparql = render_without_request(template, **template_kwargs)
return conn.query(sparql, **kwargs) | queries a triplestore with the provided template or uses a generic
template that returns triples 3 edges out in either direction from the
provided item_uri
args:
items: the starting uri or list of uris to the query
conn: the rdfframework triplestore connection to query against
output: 'json' or 'rdf'
kwargs:
template: template to use in place of the generic template
rdfclass: rdfclass the items are based on.
filters: list of filters to apply |
383,558 | def _worker_fn(samples, batchify_fn, dataset=None):
global _worker_dataset
batch = batchify_fn([_worker_dataset[i] for i in samples])
buf = io.BytesIO()
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(batch)
return buf.getvalue() | Function for processing data in worker process. |
383,559 | def is_up(coordinate, current_time):
    # NOTE: replace() literals reconstructed (best guess): pyephem expects
    # 'YYYY/MM/DD' dates while astropy Time uses 'YYYY-MM-DD'
    cfht.date = current_time.iso.replace('-', '/')
    cfht.horizon = math.radians(-7)
    sun.compute(cfht)
    sun_rise = Time(str(sun.rise_time).replace('/', '-'))
    sun_set = Time(str(sun.set_time).replace('/', '-'))
    if current_time < sun_set or current_time > sun_rise:
        return False
    fb._ra = coordinate.ra.radian
    fb._dec = coordinate.dec.radian
    cfht.horizon = math.radians(40)
    fb.compute(cfht)
    fb_rise_time = Time(str(fb.rise_time).replace('/', '-'))
    fb_set_time = Time(str(fb.set_time).replace('/', '-'))
    # NOTE: `current_time > fb_set_time > fb_set_time` is kept from the source
    # dump; it likely garbles an intended rise/set comparison
    if (current_time > fb_set_time > fb_set_time or
            fb_rise_time > current_time > fb_set_time):
        return False
    return True | Given the position and time, determine if the given target is up.
@param coordinate: the J2000 location of the source
@param current_time: The time of the observations
@return: True/False |
383,560 | def create_comment_commit(self, body, commit_id, path, position, pr_id):
comments_url = f"{self.GITHUB_API_URL}/repos/{self.user}/{self.repo}/pulls/{pr_id}/comments"
    data = {'body': body, 'commit_id': commit_id, 'path': path, 'position': position}  # keys per the GitHub API docs
return requests.post(comments_url, json=data, headers=self.auth_header) | Posts a comment to a given commit at a certain pull request.
Check https://developer.github.com/v3/pulls/comments/#create-a-comment
param body: str -> Comment text
param commit_id: str -> SHA of the commit
param path: str -> Relative path of the file to be commented
param position: int -> The position in the diff to add a review comment
param pr_id: int -> Github pull request id |
383,561 | def library(self):
if not self._library:
try:
data = self.query(Library.key)
self._library = Library(self, data)
except BadRequest:
            data = self.query('/library/sections/')  # fallback path reconstructed (best guess)
return Library(self, data)
return self._library | Library to browse or search your media. |
383,562 | def is_consonant(note1, note2, include_fourths=True):
return (is_perfect_consonant(note1, note2, include_fourths) or
is_imperfect_consonant(note1, note2)) | Return True if the interval is consonant.
A consonance is a harmony, chord, or interval considered stable, as
opposed to a dissonance.
This function tests whether the given interval is consonant. This
basically means that it checks whether the interval is (or sounds like)
a unison, third, sixth, perfect fourth or perfect fifth.
In classical music the fourth is considered dissonant when used
contrapuntal, which is why you can choose to exclude it. |
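A usage sketch for the row above. This reads like mingus' `intervals` module, so the import path below is an assumption:

from mingus.core.intervals import is_consonant  # assumed import path

print(is_consonant("C", "E"))   # True: a major third is an imperfect consonance
print(is_consonant("C", "B"))   # False: a major seventh is dissonant
print(is_consonant("C", "F"))                         # True: perfect fourth counted
print(is_consonant("C", "F", include_fourths=False))  # False: fourth excluded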
383,563 | def unique_items(seq):
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))] | Return the unique items from iterable *seq* (in order). |
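The row above leans on a small trick worth a demonstration: `set.add()` returns `None` (falsy), so only first occurrences pass the filter. A self-contained check:

def unique_items(seq):
    seen = set()
    # set.add() returns None (falsy), so first occurrences pass the filter
    return [x for x in seq if not (x in seen or seen.add(x))]

print(unique_items([3, 1, 3, 2, 1]))  # [3, 1, 2]
print(unique_items("mississippi"))    # ['m', 'i', 's', 'p']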
383,564 | def remote_image_request(self, image_url, params=None):
    data = self._init_data(params)
    # key/header literals reconstructed from the CloudSight API (best guess)
    data['image_request[remote_image_url]'] = image_url
    response = requests.post(REQUESTS_URL, headers={
        'Authorization': self.auth.authorize('POST', REQUESTS_URL, data),
        'User-Agent': USER_AGENT,
    }, data=data)
    return self._unwrap_error(response) | Send an image for classification. The image will be retrieved from the
URL specified. The params parameter is optional.
On success this method will immediately return a job information. Its
status will initially be :py:data:`cloudsight.STATUS_NOT_COMPLETED` as
it usually takes 6-12 seconds for the server to process an image. In
order to retrieve the annotation data, you need to keep updating the job
status using the :py:meth:`cloudsight.API.image_response` method until
the status changes. You may also use the :py:meth:`cloudsight.API.wait`
method which does this automatically.
:param image_url: Image URL.
:param params: Additional parameters for CloudSight API. |
383,565 | def doi_input(doi_string, download=True):
    # NOTE: log messages and the publisher-check literal were lost in
    # extraction; reconstructed (best guess)
    log.debug('doi_input - {0}'.format(doi_string))
    doi_string = doi_string[4:]  # strip the leading 'doi:'
    if '10.1371' in doi_string:  # PLoS DOI prefix
        log.debug('DOI string refers to PLoS')
        xml_url = plos_doi_to_xmlurl(doi_string)
    else:
        log.critical('DOI input for this publisher is not supported')
        sys.exit('This publisher is not yet supported')
    return url_input(xml_url, download) | This method accepts a DOI string and attempts to download the appropriate
xml file. If successful, it returns a path to that file. As with all URL
input types, the success of this method depends on supporting per-publisher
conventions and will fail on unsupported publishers |
383,566 | def check_folder_exists(project, path, folder_name):
    # NOTE: a stray 'ResolutionError/' fragment and several string literals
    # were lost in extraction; reconstructed (best guess)
    if folder_name is None or path is None:
        return False
    try:
        folder_list = dxpy.api.container_list_folder(project, {"folder": path, "only": "folders"})
    except dxpy.exceptions.DXAPIError as e:
        if e.name == 'ResourceNotFound':
            raise ResolutionError(str(e.msg))
        else:
            raise e
    target_folder = path + '/' + folder_name
    # sanitize the folder path before the membership test
    target_folder, _skip = clean_folder_path(target_folder, 'folder')
    return target_folder in folder_list['folders'] | :param project: project id
:type project: string
:param path: path to where we should look for the folder in question
:type path: string
:param folder_name: name of the folder in question
:type folder_name: string
:returns: A boolean True or False whether the folder exists at the specified path
:type: boolean
:raises: :exc:'ResolutionError' if dxpy.api.container_list_folder raises an exception
This function returns a boolean value that indicates whether a folder of the
specified name exists at the specified path
Note: this function will NOT work on the root folder case, i.e. '/' |
383,567 | def apply(self, builder):
    if 'theme' in self.attributes:  # key literal reconstructed (best guess)
        builder.apply_theme(
            self.attributes['theme'],
            builder.theme_options,
        ) | Apply the Slide Configuration to a Builder. |
383,568 | def write(self, buf, url):
(store_name, path) = self._split_url(url)
adapter = self._create_adapter(store_name)
        with adapter.open(path, 'wb') as f:  # mode literal reconstructed (best guess)
f.write(buf.encode()) | Write buffer to storage at a given url |
383,569 | def __extract_tags(self):
tags = list()
current = None
        for line in self._comment:
            # tag regex and blank-line sentinel reconstructed (best guess)
            parts = re.match(r'^\s*@(\w+)', line)
            if parts:
                current = (parts.group(1), list())
                tags.append(current)
            if current:
                if line == '':
                    current = None
else:
current[1].append(line)
for tag in tags:
self._tags.append((tag[0], os.linesep.join(tag[1]))) | Extract tags from the DocBlock. |
383,570 | def subroutine(*effects):
def subroutine(value, context, *args, **kwargs):
d = defer.succeed(value)
for effect in effects:
d.addCallback(effect, context, *args, **kwargs)
return d
return subroutine | Returns an effect performing a list of effects. The value passed to each
effect is a result of the previous effect. |
383,571 | def scatter_plot(data, index_x, index_y, percent=100.0, seed=1, size=50, title=None, outfile=None, wait=True):
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
data = plot.create_subsample(data, percent=percent, seed=seed)
x = []
y = []
if data.class_index == -1:
c = None
else:
c = []
for i in range(data.num_instances):
inst = data.get_instance(i)
x.append(inst.get_value(index_x))
y.append(inst.get_value(index_y))
if c is not None:
c.append(inst.get_value(inst.class_index))
fig, ax = plt.subplots()
if c is None:
ax.scatter(x, y, s=size, alpha=0.5)
else:
ax.scatter(x, y, c=c, s=size, alpha=0.5)
ax.set_xlabel(data.attribute(index_x).name)
ax.set_ylabel(data.attribute(index_y).name)
if title is None:
title = "Attribute scatter plot"
if percent != 100:
title += " (%0.1f%%)" % percent
ax.set_title(title)
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
ax.grid(True)
fig.canvas.set_window_title(data.relationname)
plt.draw()
if outfile is not None:
plt.savefig(outfile)
if wait:
plt.show() | Plots two attributes against each other.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param data: the dataset
:type data: Instances
:param index_x: the 0-based index of the attribute on the x axis
:type index_x: int
:param index_y: the 0-based index of the attribute on the y axis
:type index_y: int
:param percent: the percentage of the dataset to use for plotting
:type percent: float
:param seed: the seed value to use for subsampling
:type seed: int
:param size: the size of the circles in point
:type size: int
:param title: an optional title
:type title: str
:param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool |
383,572 | def in_template_path(fn):
return os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"../templates",
fn,
) | Return `fn` in template context, or in other words add `fn` to template
path, so you don't need to write absolute path of `fn` in template
directory manually.
Args:
fn (str): Name of the file in template dir.
Return:
str: Absolute path to the file. |
383,573 | def validatefeatures(self,features):
validatedfeatures = []
for feature in features:
if isinstance(feature, int) or isinstance(feature, float):
validatedfeatures.append( str(feature) )
elif self.delimiter in feature and not self.sklearn:
raise ValueError("Feature contains delimiter: " + feature)
elif self.sklearn and isinstance(feature, str):
validatedfeatures.append(feature)
else:
validatedfeatures.append(feature)
return validatedfeatures | Returns features in validated form, or raises an Exception. Mostly for internal use |
383,574 | def _CompileTemplate(
        template_str, builder, meta='{}', format_char='|', default_formatter='str',
        whitespace='smart'):
    # default literals reconstructed from the docstring and json-template
    # conventions (best guess)
    meta_left, meta_right = SplitMeta(meta)
    balance_counter = 0
    comment_counter = 0
    has_defines = False
    for token_type, token in _Tokenize(template_str, meta_left, meta_right,
                                       whitespace):
        if token_type == COMMENT_BEGIN_TOKEN:
            comment_counter += 1
            continue
        if token_type == COMMENT_END_TOKEN:
            comment_counter -= 1
            if comment_counter < 0:
                raise CompilationError('Got too many comment ends')  # message reconstructed
            continue
        # NOTE: the rest of the token dispatch (directives, substitutions,
        # literal text) appears to have been lost in extraction.
return builder.Root(), has_defines | Compile the template string, calling methods on the 'program builder'.
Args:
template_str: The template string. It should not have any compilation
options in the header -- those are parsed by FromString/FromFile
builder: The interface of _ProgramBuilder isn't fixed. Use at your own
risk.
meta: The metacharacters to use, e.g. '{}', '[]'.
default_formatter: The formatter to use for substitutions that are missing a
formatter. The 'str' formatter the "default default" -- it just tries
to convert the context value to a string in some unspecified manner.
whitespace: 'smart' or 'strip-line'. In smart mode, if a directive is alone
on a line, with only whitespace on either side, then the whitespace is
removed. In 'strip-line' mode, every line is stripped of its
leading and trailing whitespace.
Returns:
The compiled program (obtained from the builder)
Raises:
The various subclasses of CompilationError. For example, if
default_formatter=None, and a variable is missing a formatter, then
MissingFormatter is raised.
This function is public so it can be used by other tools, e.g. a syntax
checking tool run before submitting a template to source control. |
383,575 | def color(self):
    # status key names reconstructed (best guess)
    return (self.get_value(CONST.STATUSES_KEY).get('hue'),
            self.get_value(CONST.STATUSES_KEY).get('saturation')) | Get light color. |
383,576 | def resolve_metric_as_tuple(metric):
if "." in metric:
_, metric = metric.split(".")
r = [
(operator, match) for operator, match in ALL_METRICS if match[0] == metric
]
if not r or len(r) == 0:
raise ValueError(f"Metric {metric} not recognised.")
else:
return r[0] | Resolve metric key to a given target.
:param metric: the metric name.
:type metric: ``str``
:rtype: :class:`Metric` |
383,577 | def fingerprint(self, word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG):
if n_bits % 2:
n_bits += 1
w_len = len(word) // 2
w_1 = set(word[:w_len])
w_2 = set(word[w_len:])
fingerprint = 0
for letter in most_common:
if n_bits:
fingerprint <<= 1
if letter in w_1:
fingerprint += 1
fingerprint <<= 1
if letter in w_2:
fingerprint += 1
n_bits -= 2
else:
break
if n_bits > 0:
fingerprint <<= n_bits
return fingerprint | Return the occurrence halved fingerprint.
Based on the occurrence halved fingerprint from :cite:`Cislak:2017`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
Returns
-------
int
The occurrence halved fingerprint
Examples
--------
>>> ohf = OccurrenceHalved()
>>> bin(ohf.fingerprint('hat'))
'0b1010000000010'
>>> bin(ohf.fingerprint('niall'))
'0b10010100000'
>>> bin(ohf.fingerprint('colin'))
'0b1001010000'
>>> bin(ohf.fingerprint('atcg'))
'0b10100000000000'
>>> bin(ohf.fingerprint('entreatment'))
'0b1111010000110000' |
383,578 | def coerce(self, value):
if self._coerce is not None:
value = self._coerce(value)
return value | Coerce a cleaned value. |
383,579 | def _install_one(
        repo_url, branch, destination, commit='', patches=None,
        exclude_modules=None, include_modules=None, base=False, work_directory=''
):
    # default string literals and the patch dict key reconstructed (best guess)
    patches = patches or []
    patches = [
        core.FilePatch(file=patch['file'], work_directory=work_directory)
        if 'file' in patch else core.Patch(**patch)
        for patch in patches
    ]
    addon_cls = core.Base if base else core.Addon
    addon = addon_cls(
        repo_url, branch, commit=commit, patches=patches,
        exclude_modules=exclude_modules, include_modules=include_modules)
    addon.install(destination) | Install a third party odoo add-on
:param string repo_url: url of the repo that contains the patch.
:param string branch: name of the branch to checkout.
:param string destination: the folder where the add-on should end up at.
:param string commit: Optional commit rev to checkout to. If mentioned, that take over the branch
:param string work_directory: the path to the directory of the yaml file.
:param list patches: Optional list of patches to apply. |
383,580 | def identical_signature_wrapper(original_function, wrapped_function):
    # NOTE: this row was heavily garbled in extraction; the compile() call is
    # reconstructed from the salt.utils.decorators source (best guess)
    context = {'__wrapped__': wrapped_function}
    function_def = compile(
        'def {0}({1}):\n'
        '    return __wrapped__({2})'.format(
            original_function.__name__,
            inspect.formatargspec(
                *salt.utils.args.get_function_argspec(original_function)
            )[1:-1],
            inspect.formatargspec(
                formatvalue=lambda val: '',
                *salt.utils.args.get_function_argspec(original_function)
            )[1:-1]
        ),
        '<string>',
        'exec'
    )
    six.exec_(function_def, context)
    return wraps(original_function)(context[original_function.__name__]) | Return a function with identical signature as ``original_function``'s which
will call the ``wrapped_function``. |
383,581 | def mount(directory, lower_dir, upper_dir, mount_table=None):
return OverlayFS.mount(directory, lower_dir, upper_dir,
mount_table=mount_table) | Creates a mount |
383,582 | def aget(dct, key):
key = iter(key)
try:
head = next(key)
except StopIteration:
return dct
if isinstance(dct, list):
try:
idx = int(head)
except ValueError:
raise IndexNotIntegerError(
"non-integer index %r provided on a list."
% head)
try:
value = dct[idx]
except IndexError:
raise IndexOutOfRange(
"index %d is out of range (%d elements in list)."
% (idx, len(dct)))
else:
try:
value = dct[head]
except KeyError:
raise MissingKeyError(
"missing key %r in dict."
% (head, ))
except Exception:
raise NonDictLikeTypeError(
"can't query subvalue %r of a leaf%s."
% (head,
(" (leaf value is %r)" % dct)
if len(repr(dct)) < 15 else ""))
return aget(value, key) | r"""Allow to get values deep in a dict with iterable keys
Accessing leaf values is quite straightforward:
>>> dct = {'a': {'x': 1, 'b': {'c': 2}}}
>>> aget(dct, ('a', 'x'))
1
>>> aget(dct, ('a', 'b', 'c'))
2
If key is empty, it returns unchanged the ``dct`` value.
>>> aget({'x': 1}, ())
{'x': 1} |
383,583 | def env(var_name, default=False):
    try:
        value = os.environ[var_name]
        # falsy-string list reconstructed (best guess); the original eight
        # literals were lost in extraction
        if str(value).strip().lower() in ['', 'none', 'null', 'nil', 'false', 'f', 'no', '0']:
            return None
        return value
    except:
        from traceback import format_exc
        msg = "Unable to find the %s environment variable.\nUsing the value %s (the default) instead.\n" % (var_name, default)
        sys.stderr.write(format_exc())
        sys.stderr.write(msg)
return default | Get the environment variable. If not found use a default or False, but print to stderr a warning about the missing env variable. |
383,584 | def parse_template(self, tmp, reset=False, only_body=False):
logging.debug(, tmp)
if self.sent_time:
self.modified_since_sent = True
if only_body:
self.body = tmp
else:
m = re.match(r,
tmp)
assert m
d = m.groupdict()
headertext = d[]
self.body = d[]
if reset:
self.headers = {}
| parses a template or user edited string to fills this envelope.
:param tmp: the string to parse.
:type tmp: str
:param reset: remove previous envelope content
:type reset: bool |
383,585 | def _dcm_array_to_matrix3(self, dcm):
assert(dcm.shape == (3, 3))
a = Vector3(dcm[0][0], dcm[0][1], dcm[0][2])
b = Vector3(dcm[1][0], dcm[1][1], dcm[1][2])
c = Vector3(dcm[2][0], dcm[2][1], dcm[2][2])
return Matrix3(a, b, c) | Converts dcm array into Matrix3
:param dcm: 3x3 dcm array
:returns: Matrix3 |
383,586 | def update_by_example(cls, collection, example_data, new_value, keep_null=False, wait_for_sync=None, limit=None):
    # request-body key names reconstructed from the ArangoDB simple-query
    # API (best guess)
    kwargs = {
        'newValue': new_value,
        'options': {
            'keepNull': keep_null,
            'waitForSync': wait_for_sync,
            'limit': limit,
        }
    }
    return cls._construct_query(name='update-by-example',
                                collection=collection, example=example_data, result=False,
**kwargs) | This will find all documents in the collection that match the specified example object,
and partially update the document body with the new value specified. Note that document meta-attributes
such as _id, _key, _from, _to etc. cannot be replaced.
Note: the limit attribute is not supported on sharded collections. Using it will result in an error.
Returns result dict of the request.
:param collection Collection instance
:param example_data An example document that all collection documents are compared against.
:param new_value A document containing all the attributes to update in the found documents.
:param keep_null This parameter can be used to modify the behavior when handling null values.
Normally, null values are stored in the database. By setting the keepNull parameter to false,
this behavior can be changed so that all attributes in data with null values will be removed
from the updated document.
:param wait_for_sync if set to true, then all removal operations will instantly be synchronised to disk.
If this is not specified, then the collection's default sync behavior will be applied.
:param limit an optional value that determines how many documents to update at most. If limit is
specified but is less than the number of documents in the collection, it is undefined
which of the documents will be updated.
:returns dict |
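A hypothetical call; the owning Query class and the users Collection instance are illustrative, not part of the source:

result = Query.update_by_example(
    users,                              # a Collection instance
    example_data={'status': 'expired'},
    new_value={'active': False},
    keep_null=False,
    limit=100)
print(result)                           # server response dict, e.g. {'updated': ...}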
383,587 | async def websocket_disconnect(self, message):
self.closing = True
await self.send_upstream(message)
await super().websocket_disconnect(message) | Handle the disconnect message.
This is propagated to all upstream applications. |
383,588 | def commit(func):
def wrap(**kwarg):
with session_withcommit() as session:
a = func(**kwarg)
session.add(a)
return session.query(songs).order_by(
songs.song_id.desc()).first().song_id
return wrap | Used as a decorator for automatically making session commits |
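A usage sketch, assuming the songs model and session_withcommit referenced above are importable:

@commit
def add_song(**kwarg):
    return songs(**kwarg)        # build the ORM row; the decorator adds and commits it

new_id = add_song(title='Blue in Green')   # returns the newest song_id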
383,589 | def utterances_from_tier(eafob: Eaf, tier_name: str) -> List[Utterance]:
try:
speaker = eafob.tiers[tier_name][2]["PARTICIPANT"]
except KeyError:
speaker = None
tier_utterances = []
annotations = sort_annotations(
list(eafob.get_annotation_data_for_tier(tier_name)))
for i, annotation in enumerate(annotations):
eaf_stem = eafob.eaf_path.stem
utter_id = "{}.{}.{}".format(eaf_stem, tier_name, i)
start_time = eafob.time_origin + annotation[0]
end_time = eafob.time_origin + annotation[1]
text = annotation[2]
utterance = Utterance(eafob.media_path, eafob.eaf_path, utter_id,
start_time, end_time, text, speaker)
tier_utterances.append(utterance)
return tier_utterances | Returns utterances found in the given Eaf object in the given tier. |
383,590 | def Dirname(self):
result = self.Copy()
    while True:
last_directory = posixpath.dirname(result.last.path)
if last_directory != "/" or len(result) <= 1:
result.last.path = last_directory
result.last.inode = None
break
result.Pop(-1)
return result | Get a new copied object with only the directory path. |
383,591 | def fetchone(self, query, *args):
cursor = self.connection.cursor()
try:
cursor.execute(query, args)
return cursor.fetchone()
finally:
cursor.close() | Returns the first result of the given query.
:param query: The query to be executed as a `str`.
:param args: A `tuple` of parameters that will be replaced for
placeholders in the query.
:return: The retrieved row with each field being one element in a
`tuple`. |
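The same pattern, sketched standalone against sqlite3 (the qmark placeholder style is driver-specific):

import sqlite3

connection = sqlite3.connect(':memory:')
connection.execute('CREATE TABLE users (id INTEGER, name TEXT)')
connection.execute("INSERT INTO users VALUES (1, 'ada')")

cursor = connection.cursor()
try:
    cursor.execute('SELECT name FROM users WHERE id = ?', (1,))
    print(cursor.fetchone())   # ('ada',)
finally:
    cursor.close()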
383,592 | def writelines_nl(fileobj: TextIO, lines: Iterable[str]) -> None:
    fileobj.write('\n'.join(lines) + '\n') | Writes lines, plus terminating newline characters, to the file.
(Since :func:`fileobj.writelines` doesn't add newlines...
http://stackoverflow.com/questions/13730107/writelines-writes-lines-without-newline-just-fills-the-file) |
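Unlike fileobj.writelines, every line, including the last, gets its own terminator; assuming the helper above is in scope:

import io

buf = io.StringIO()
writelines_nl(buf, ['alpha', 'beta'])
assert buf.getvalue() == 'alpha\nbeta\n'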
383,593 | def load_file(self, app, pathname, relpath, pypath):
try:
view_class = self.get_file_view_cls(relpath)
return create_view_from_file(pathname, source_template=relpath, view_class=view_class)
except DeclarativeViewError:
pass | Loads a file and creates a View from it. Files are split
between a YAML front-matter and the content (unless it is a .yml file). |
383,594 | def _ensure_frames(cls, documents):
frames = []
for document in documents:
if not isinstance(document, Frame):
frames.append(cls(document))
else:
frames.append(document)
return frames | Ensure all items in a list are frames by converting those that aren't. |
383,595 | def ParseMessage(descriptor, byte_str):
result_class = MakeClass(descriptor)
new_msg = result_class()
new_msg.ParseFromString(byte_str)
return new_msg | Generate a new Message instance from this Descriptor and a byte string.
Args:
descriptor: Protobuf Descriptor object
byte_str: Serialized protocol buffer byte string
Returns:
Newly created protobuf Message object. |
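A round-trip sketch, assuming an existing protobuf message instance (original_msg is hypothetical):

serialized = original_msg.SerializeToString()
clone = ParseMessage(original_msg.DESCRIPTOR, serialized)
assert clone.SerializeToString() == serialized   # same wire bytes, new dynamic class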
383,596 | def infer_datetime_units(dates):
dates = np.asarray(dates).ravel()
    if np.asarray(dates).dtype == 'datetime64[ns]':
        dates = pd.to_datetime(dates, box=False)
        dates = dates[pd.notnull(dates)]
        reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
        reference_date = pd.Timestamp(reference_date)
    else:
        reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
        reference_date = format_cftime_datetime(reference_date)
    unique_timedeltas = np.unique(np.diff(dates))
    if unique_timedeltas.dtype == np.dtype('O'):
        unique_timedeltas = pd.to_timedelta(unique_timedeltas, box=False)
    units = _infer_time_units_from_diff(unique_timedeltas)
    return '%s since %s' % (units, reference_date) | Given an array of datetimes, returns a CF compatible time-unit string of
the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
unique time deltas in `dates`) |
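Assuming the function and its private helpers are in scope (and an era-appropriate pandas, since box= is used internally), evenly day-spaced datetimes yield a days-based unit string:

import numpy as np

dates = np.array(['2000-01-01', '2000-01-02', '2000-01-03'],
                 dtype='datetime64[ns]')
print(infer_datetime_units(dates))   # 'days since 2000-01-01 00:00:00'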
383,597 | @contextlib.contextmanager  # decorator implied by the docstring; assumed lost in extraction
def flush_stream_threads(process, out_formatter=None,
                         err_formatter=terminal.fg.red, size=1):
out = FlushStreamThread(process=process, stream_name="stdout",
formatter=out_formatter, size=size)
err = FlushStreamThread(process=process, stream_name="stderr",
formatter=err_formatter, size=size)
out.start()
err.start()
yield out, err
out.join()
err.join() | Context manager that creates 2 threads, one for each standard
stream (stdout/stderr), updating in realtime the piped data.
The formatters are callables that manipulate the data,
e.g. coloring it before writing to a ``sys`` stream. See
``FlushStreamThread`` for more information. |
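A usage sketch, assuming the context manager above and its FlushStreamThread dependency are importable:

import subprocess

proc = subprocess.Popen(['echo', 'hello'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with flush_stream_threads(proc) as (out_thread, err_thread):
    proc.wait()   # both threads mirror the pipes to the sys streams until EOF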
383,598 | def get_item_query_session(self):
if not self.supports_item_query():
raise errors.Unimplemented()
return sessions.ItemQuerySession(runtime=self._runtime) | Gets the ``OsidSession`` associated with the item query service.
return: (osid.assessment.ItemQuerySession) - an
``ItemQuerySession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_query()`` is ``true``.* |
383,599 | def prepare_inputs(self, times=None, weather=None):
if weather is not None:
self.weather = weather
if self.weather is None:
self.weather = pd.DataFrame(index=times)
if times is not None:
self.times = times
self.solar_position = self.location.get_solarposition(
self.times, method=self.solar_position_method)
self.airmass = self.location.get_airmass(
solar_position=self.solar_position, model=self.airmass_model)
        if not any([x in ['ghi', 'dni', 'dhi'] for x in self.weather.columns]):
            # string keys below reconstructed from the docstring and pvlib's API
            self.weather[['ghi', 'dni', 'dhi']] = self.location.get_clearsky(
                self.solar_position.index, self.clearsky_model,
                solar_position=self.solar_position,
                airmass_absolute=self.airmass['airmass_absolute'])
        if not {'ghi', 'dni', 'dhi'} <= set(self.weather.columns):
            raise ValueError(
                "Uncompleted irradiance data set. Please check your input " +
                "data.\nData set needs to have 'ghi', 'dni' and 'dhi'.\n" +
                "Detected data: {0}".format(list(self.weather.columns)))
        if isinstance(self.system, SingleAxisTracker):
            self.tracking = self.system.singleaxis(
                self.solar_position['apparent_zenith'],
                self.solar_position['azimuth'])
            self.tracking['surface_tilt'] = (
                self.tracking['surface_tilt']
                .fillna(self.system.axis_tilt))
            self.tracking['surface_azimuth'] = (
                self.tracking['surface_azimuth']
                .fillna(self.system.axis_azimuth))
            self.aoi = self.tracking['aoi']
            get_irradiance = partial(
                self.system.get_irradiance,
                self.tracking['surface_tilt'],
                self.tracking['surface_azimuth'],
                self.solar_position['apparent_zenith'],
                self.solar_position['azimuth'])
        else:
            self.aoi = self.system.get_aoi(
                self.solar_position['apparent_zenith'],
                self.solar_position['azimuth'])
            get_irradiance = partial(
                self.system.get_irradiance,
                self.solar_position['apparent_zenith'],
                self.solar_position['azimuth'])
        self.total_irrad = get_irradiance(
            self.weather['dni'],
            self.weather['ghi'],
            self.weather['dhi'],
            airmass=self.airmass['airmass_relative'],
            model=self.transposition_model)
        if self.weather.get('wind_speed') is None:
            self.weather['wind_speed'] = 0
        if self.weather.get('temp_air') is None:
            self.weather['temp_air'] = 20
return self | Prepare the solar position, irradiance, and weather inputs to
the model.
Parameters
----------
times : None or DatetimeIndex, default None
Times at which to evaluate the model. Can be None if
attribute `times` is already set.
weather : None or DataFrame, default None
If ``None``, the weather attribute is used. If the weather
attribute is also ``None`` assumes air temperature is 20 C, wind
speed is 0 m/s and irradiation calculated from clear sky
data. Column names must be ``'wind_speed'``, ``'temp_air'``,
``'dni'``, ``'ghi'``, ``'dhi'``. Do not pass incomplete irradiation
data. Use method
:py:meth:`~pvlib.modelchain.ModelChain.complete_irradiance`
instead.
Notes
-----
Assigns attributes: ``times``, ``solar_position``, ``airmass``,
``total_irrad``, `aoi` |
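A version-sensitive usage sketch; mc is assumed to be an already-constructed ModelChain for a Location/PVSystem pair:

import pandas as pd

times = pd.date_range('2019-06-01 06:00', '2019-06-01 18:00',
                      freq='1h', tz='US/Arizona')
mc.prepare_inputs(times=times)   # weather=None -> clear-sky irradiance fallback
print(mc.weather[['temp_air', 'wind_speed']].head())   # defaults: 20 C, 0 m/s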