Unnamed: 0 (int64, 0 to 389k) | code (string, 26 to 79.6k chars) | docstring (string, 1 to 46.9k chars)
---|---|---|
4,800 | def connect_ssh(*args, **kwargs):
client = SSHClient()
client.connect(*args, **kwargs)
return client | Create a new connected :class:`SSHClient` instance. All arguments
are passed to :meth:`SSHClient.connect`. |
4,801 | def _iflat_tasks_wti(self, status=None, op="==", nids=None, with_wti=True):
nids = as_set(nids)
if status is None:
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if with_wti:
yield task, wi, ti
else:
yield task
else:
op = operator_from_str(op)
status = Status.as_status(status)
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status):
if with_wti:
yield task, wi, ti
else:
yield task | Generator that produces a flat sequence of tasks.
If status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
(task, work_index, task_index) if with_wti is True else task |
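A minimal usage sketch of the generator above; `flow` is assumed to be an object exposing this method, and the status value "S_OK" is only an illustrative assumption.

```python
# Illustrative only: `flow` and the "S_OK" status value are assumptions.
for task, work_index, task_index in flow._iflat_tasks_wti(status="S_OK"):
    print(work_index, task_index, task)

# Same filter, but yielding the bare tasks:
for task in flow._iflat_tasks_wti(status="S_OK", with_wti=False):
    print(task)
```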
4,802 | def check_network_health(self):
health = HealthDict()
# dict keys named after the checks listed in the docstring
health['disconnected_clusters'] = []
health['isolated_pores'] = []
health['trim_pores'] = []
health['duplicate_throats'] = []
health['bidirectional_throats'] = []
health['headless_throats'] = []
health['looped_throats'] = []
# Check for headless throats (connections pointing past the last pore)
hits = sp.where(self['throat.conns'] > self.Np - 1)[0]
if sp.size(hits) > 0:
    health['headless_throats'] = sp.unique(hits)
    return health
# Check for throats that loop back onto the same pore
P12 = self['throat.conns']
hits = sp.where(P12[:, 0] == P12[:, 1])[0]
if sp.size(hits) > 0:
    health['looped_throats'] = hits
# Check for individual isolated pores
Ps = self.num_neighbors(self.pores())
if sp.sum(Ps == 0) > 0:
    health['isolated_pores'] = sp.where(Ps == 0)[0]
# Check for islands of isolated pores (disconnected clusters)
temp = []
am = self.create_adjacency_matrix(fmt='coo', triu=True)
Cs = csg.connected_components(am, directed=False)[1]
if sp.unique(Cs).size > 1:
    for i in sp.unique(Cs):
        temp.append(sp.where(Cs == i)[0])
    b = sp.array([len(item) for item in temp])
    c = sp.argsort(b)[::-1]
    for i in range(0, len(c)):
        health['disconnected_clusters'].append(temp[c[i]])
        if i > 0:
            health['trim_pores'].extend(temp[c[i]])
# Check for duplicate throats
am = self.create_adjacency_matrix(fmt='coo', triu=True).tocoo()
hits = sp.where(am.data > 1)[0]
if len(hits):
    mergeTs = []
    hits = sp.vstack((am.row[hits], am.col[hits])).T
    ihits = hits[:, 0] + 1j*hits[:, 1]
    conns = self['throat.conns']
    iconns = conns[:, 0] + 1j*conns[:, 1]
    for item in ihits:
        mergeTs.append(sp.where(iconns == item)[0])
    health['duplicate_throats'] = mergeTs
# Check for bidirectional throats (symmetric entries in the adjacency matrix)
adjmat = self.create_adjacency_matrix(fmt='coo')
num_full = adjmat.sum()
temp = sprs.triu(adjmat, k=1)
num_upper = temp.sum()
if num_full > num_upper:
    biTs = sp.where(self['throat.conns'][:, 0] >
                    self['throat.conns'][:, 1])[0]
    health['bidirectional_throats'] = biTs.tolist()
return health | r"""
This method checks the network's topological health by checking for:
(1) Isolated pores
(2) Islands or isolated clusters of pores
(3) Duplicate throats
(4) Bidirectional throats (i.e. a symmetrical adjacency matrix)
(5) Headless throats
Returns
-------
A dictionary containing the offending pores or throat numbers under
each named key.
It also returns a list of which pores and throats should be trimmed
from the network to restore health. This list is a suggestion only,
and is based on keeping the largest cluster and trimming the others.
Notes
-----
- Does not yet check for duplicate pores
- Does not yet suggest which throats to remove
- This is just a 'check' and does not 'fix' the problems it finds |
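A short usage sketch of the health check; the network object `pn` and the `trim` helper are assumed names, and the dictionary keys are the ones initialised in the code above.

```python
# Illustrative only: `pn` and `trim` are assumed names.
health = pn.check_network_health()
if len(health['isolated_pores']) > 0:
    print('Isolated pores:', health['isolated_pores'])
if len(health['disconnected_clusters']) > 1:
    # keep the largest cluster; 'trim_pores' is the suggested removal list
    trim(network=pn, pores=health['trim_pores'])
```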
4,803 | def cli(ctx, timeout, proxy, output, quiet, lyric, again):
ctx.obj = NetEase(timeout, proxy, output, quiet, lyric, again) | A command-line tool to download NetEase-Music's songs. |
4,804 | def flush_to_index(self):
assert self._smref is not None
assert not isinstance(self._file_or_files, BytesIO)
sm = self._smref()
if sm is not None:
index = self._index
if index is None:
index = sm.repo.index
index.add([sm.k_modules_file], write=self._auto_write)
sm._clear_cache() | Flush changes in our configuration file to the index |
4,805 | def _getphoto_location(self,pid):
logger.debug(%(pid))
lat=None
lon=None
accuracy=None
resp=self.fb.photos_geo_getLocation(photo_id=pid)
if resp.attrib[]!=:
logger.error("%s - fb: photos_geo_getLocation failed with status: %s",\
resp.attrib[]);
return (None,None,None)
for location in resp.find():
lat=location.attrib[]
lon=location.attrib[]
accuracy=location.attrib[]
return (lat,lon,accuracy) | Asks fb for photo location information
returns tuple with lat,lon,accuracy |
4,806 | def view_page(name=None):
if request.method == :
if name is None:
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, )
file_handle.write(request.forms.content.encode())
file_handle.close()
add_file_to_repo(filename)
commit(filename)
response.set_header(, )
response.set_header(, )
if name is None:
index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
if len(index_files) == 0:
return view_meta_index()
else:
name = index_files[0][2:-4]
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], )
html_body = publish_parts(file_handle.read(),
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)[]
history = commit_history("{0}.rst".format(name))
return template(,
type="view",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=None,
content=html_body)
else:
return static_file(name, ) | Serve a page name.
.. note:: this is a bottle view
* if the view is called with the POST method, write the new page
content to the file, commit the modification and then display the
html rendering of the restructured text file
* if the view is called with the GET method, directly display the html
rendering of the restructured text file
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
OPTIONAL
if no filename is given, first try to find a "index.rst" file in the
directory and serve it. If not found, serve the meta page __index__
Returns:
bottle response object |
4,807 | def send(node_name):
my_data = nago.core.get_my_info()
if not node_name:
node_name = nago.settings.get()
node = nago.core.get_node(node_name)
json_params = {}
json_params[] = node_name
json_params[] = "node_info"
for k, v in my_data.items():
nago.core.log("sending %s to %s" % (k, node[]), level="notice")
json_params[k] = v
return node.send_command(, , node_name=node.token, key="node_info", **my_data) | Send our information to a remote nago instance
Arguments:
node -- node_name or token for the node this data belongs to |
4,808 | def logger_init(level):
levellist = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
handler = logging.StreamHandler()
fmt = (
)
handler.setFormatter(logging.Formatter(fmt))
logger = logging.root
logger.addHandler(handler)
logger.setLevel(levellist[level]) | Initialize the logger for this thread.
Sets the log level to ERROR (0), WARNING (1), INFO (2), or DEBUG (3),
depending on the argument `level`. |
4,809 | def serialize(self):
result = self.to_project_config(with_packages=True)
result.update(self.to_profile_info(serialize_credentials=True))
result[] = deepcopy(self.cli_vars)
return result | Serialize the full configuration to a single dictionary. For any
instance that has passed validate() (which happens in __init__), it
matches the Configuration contract.
Note that args are not serialized.
:returns dict: The serialized configuration. |
4,810 | def traverse(self, id_=None):
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details[] == :
children = r_client.smembers(_children_key(details[]))
if children is not None:
nodes.update(children)
yield details | Traverse groups and yield info dicts for jobs |
4,811 | async def connect(
self,
hostname: str = None,
port: int = None,
source_address: DefaultStrType = _default,
timeout: DefaultNumType = _default,
loop: asyncio.AbstractEventLoop = None,
use_tls: bool = None,
validate_certs: bool = None,
client_cert: DefaultStrType = _default,
client_key: DefaultStrType = _default,
tls_context: DefaultSSLContextType = _default,
cert_bundle: DefaultStrType = _default,
) -> SMTPResponse:
await self._connect_lock.acquire()
if hostname is not None:
self.hostname = hostname
if loop is not None:
self.loop = loop
if use_tls is not None:
self.use_tls = use_tls
if validate_certs is not None:
self.validate_certs = validate_certs
if port is not None:
self.port = port
if self.port is None:
self.port = SMTP_TLS_PORT if self.use_tls else SMTP_PORT
if timeout is not _default:
self.timeout = timeout
if source_address is not _default:
self._source_address = source_address
if client_cert is not _default:
self.client_cert = client_cert
if client_key is not _default:
self.client_key = client_key
if tls_context is not _default:
self.tls_context = tls_context
if cert_bundle is not _default:
self.cert_bundle = cert_bundle
if self.tls_context is not None and self.client_cert is not None:
raise ValueError(
"Either a TLS context or a certificate/key must be provided"
)
response = await self._create_connection()
return response | Initialize a connection to the server. Options provided to
:meth:`.connect` take precedence over those used to initialize the
class.
:keyword hostname: Server name (or IP) to connect to
:keyword port: Server port. Defaults to 25 if ``use_tls`` is
False, 465 if ``use_tls`` is True.
:keyword source_address: The hostname of the client. Defaults to the
result of :func:`socket.getfqdn`. Note that this call blocks.
:keyword timeout: Default timeout value for the connection, in seconds.
Defaults to 60.
:keyword loop: event loop to run on. If not set, uses
:func:`asyncio.get_event_loop()`.
:keyword use_tls: If True, make the initial connection to the server
over TLS/SSL. Note that if the server supports STARTTLS only, this
should be False.
:keyword validate_certs: Determines if server certificates are
validated. Defaults to True.
:keyword client_cert: Path to client side certificate, for TLS.
:keyword client_key: Path to client side key, for TLS.
:keyword tls_context: An existing :class:`ssl.SSLContext`, for TLS.
Mutually exclusive with ``client_cert``/``client_key``.
:keyword cert_bundle: Path to certificate bundle, for TLS verification.
:raises ValueError: mutually exclusive options provided |
4,812 | def update(self, other=None, **kwargs):
if other is None:
other = ()
if hasattr(other, 'keys'):
for key in other:
self._update(key, other[key])
else:
for key,value in other:
self._update(key, value)
for key,value in six.iteritems(kwargs):
self._update(key, value) | x.update(E, **F) -> None. update x from trie/dict/iterable E or F.
If E has a .keys() method, does: for k in E: x[k] = E[k]
If E lacks .keys() method, does: for (k, v) in E: x[k] = v
In either case, this is followed by: for k in F: x[k] = F[k] |
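A small illustration of the two update paths described in the docstring, using a plain dict subclass as a stand-in for the trie; only the documented update contract is assumed.

```python
# Stand-in for the trie: any mapping with an _update(key, value) primitive.
class Trie(dict):
    def _update(self, key, value):
        self[key] = value

    def update(self, other=None, **kwargs):
        if other is None:
            other = ()
        if hasattr(other, 'keys'):          # mapping path: for k in E: x[k] = E[k]
            for key in other:
                self._update(key, other[key])
        else:                               # iterable path: for (k, v) in E: x[k] = v
            for key, value in other:
                self._update(key, value)
        for key, value in kwargs.items():   # finally: for k in F: x[k] = F[k]
            self._update(key, value)

t = Trie()
t.update({'ab': 1}, cd=2)           # mapping plus keyword arguments
t.update([('ef', 3), ('gh', 4)])    # iterable of (key, value) pairs
print(t)  # {'ab': 1, 'cd': 2, 'ef': 3, 'gh': 4}
```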
4,813 | def formatted(text, *args, **kwargs):
if not text or "{" not in text:
return text
strict = kwargs.pop("strict", True)
max_depth = kwargs.pop("max_depth", 3)
objects = list(args) + [kwargs] if kwargs else args[0] if len(args) == 1 else args
if not objects:
return text
definitions = {}
markers = RE_FORMAT_MARKERS.findall(text)
while markers:
key = markers.pop()
if key in definitions:
continue
val = _find_value(key, objects)
if strict and val is None:
return None
val = str(val) if val is not None else "{%s}" % key
markers.extend(m for m in RE_FORMAT_MARKERS.findall(val) if m not in definitions)
definitions[key] = val
if not max_depth or not isinstance(max_depth, int) or max_depth <= 0:
return text
expanded = dict((k, _rformat(k, v, definitions, max_depth)) for k, v in definitions.items())
return text.format(**expanded) | Args:
text (str | unicode): Text to format
*args: Objects to extract values from (as attributes)
**kwargs: Optional values provided as named args
Returns:
(str): Attributes from this class are expanded if mentioned |
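A hedged usage sketch of the recursive expansion performed above; the `Config` class, its attributes, and the expected output are illustrative assumptions about how marker lookup behaves.

```python
# Illustrative only: assumes formatted() resolves {name} markers via attribute
# lookup on the passed object, expanding nested markers up to max_depth.
class Config:
    env = 'prod'
    bucket = 'logs-{env}'        # markers may reference other attributes
    path = 's3://{bucket}/app'   # expansion is applied recursively

print(formatted("uploading to {path}", Config))
# expected (under these assumptions): "uploading to s3://logs-prod/app"
```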
4,814 | def _isstring(dtype):
return dtype.type == numpy.unicode_ or dtype.type == numpy.string_ | Given a numpy dtype, determines whether it is a string. Returns True
if the dtype is string or unicode. |
4,815 | def parse(self) -> Statement:
self.opt_separator()
start = self.offset
res = self.statement()
if res.keyword not in ["module", "submodule"]:
self.offset = start
raise UnexpectedInput(self, " or ")
if self.name is not None and res.argument != self.name:
raise ModuleNameMismatch(res.argument, self.name)
if self.rev:
revst = res.find1("revision")
if revst is None or revst.argument != self.rev:
raise ModuleRevisionMismatch(revst.argument, self.rev)
try:
self.opt_separator()
except EndOfInput:
return res
raise UnexpectedInput(self, "end of input") | Parse a complete YANG module or submodule.
Args:
mtext: YANG module text.
Raises:
EndOfInput: If past the end of input.
ModuleNameMismatch: If parsed module name doesn't match `self.name`.
ModuleRevisionMismatch: If parsed revision date doesn't match `self.rev`.
UnexpectedInput: If top-level statement isn't ``(sub)module``. |
4,816 | def username(anon, obj, field, val):
return anon.faker.user_name(field=field) | Generates a random username |
4,817 | def max_brightness(self):
self._max_brightness, value = self.get_cached_attr_int(self._max_brightness, )
return value | Returns the maximum allowable brightness value. |
4,818 | def snyder_opt(self, structure):
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return 1.66914e-23 * \
(self.long_v(structure) + 2.*self.trans_v(structure))/3. \
/ num_density ** (-2./3.) * (1 - nsites ** (-1./3.)) | Calculates Snyder's optical sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's optical sound velocity (in SI units) |
4,819 | def set_gss_host(self, gss_host, trust_dns=True, gssapi_requested=True):
if not gssapi_requested:
return
if gss_host is None:
gss_host = self.hostname
if trust_dns and gss_host is not None:
gss_host = socket.getfqdn(gss_host)
self.gss_host = gss_host | Normalize/canonicalize ``self.gss_host`` depending on various factors.
:param str gss_host:
The explicitly requested GSS-oriented hostname to connect to (i.e.
what the host's name is in the Kerberos database.) Defaults to
``self.hostname`` (which will be the 'real' target hostname and/or
host portion of given socket object.)
:param bool trust_dns:
Indicates whether or not DNS is trusted; if true, DNS will be used
to canonicalize the GSS hostname (which again will either be
``gss_host`` or the transport's default hostname.)
(Defaults to True due to backwards compatibility.)
:param bool gssapi_requested:
Whether GSSAPI key exchange or authentication was even requested.
If not, this is a no-op and nothing happens
(and ``self.gss_host`` is not set.)
(Defaults to True due to backwards compatibility.)
:returns: ``None``. |
4,820 | def rnaseq2ga(quantificationFilename, sqlFilename, localName, rnaType,
dataset=None, featureType="gene",
description="", programs="", featureSetNames="",
readGroupSetNames="", biosampleId=""):
readGroupSetName = ""
if readGroupSetNames:
readGroupSetName = readGroupSetNames.strip().split(",")[0]
featureSetIds = ""
readGroupIds = ""
if dataset:
featureSetIdList = []
if featureSetNames:
for annotationName in featureSetNames.split(","):
featureSet = dataset.getFeatureSetByName(annotationName)
featureSetIdList.append(featureSet.getId())
featureSetIds = ",".join(featureSetIdList)
if readGroupSetName:
readGroupSet = dataset.getReadGroupSetByName(readGroupSetName)
readGroupIds = ",".join(
[x.getId() for x in readGroupSet.getReadGroups()])
if rnaType not in SUPPORTED_RNA_INPUT_FORMATS:
raise exceptions.UnsupportedFormatException(rnaType)
rnaDB = RnaSqliteStore(sqlFilename)
if rnaType == "cufflinks":
writer = CufflinksWriter(rnaDB, featureType, dataset=dataset)
elif rnaType == "kallisto":
writer = KallistoWriter(rnaDB, featureType, dataset=dataset)
elif rnaType == "rsem":
writer = RsemWriter(rnaDB, featureType, dataset=dataset)
writeRnaseqTable(rnaDB, [localName], description, featureSetIds,
readGroupId=readGroupIds, programs=programs,
biosampleId=biosampleId)
writeExpressionTable(writer, [(localName, quantificationFilename)]) | Reads RNA Quantification data in one of several formats and stores the data
in a sqlite database for use by the GA4GH reference server.
Supports the following quantification output types:
Cufflinks, kallisto, RSEM. |
4,821 | def get_links(self, recall, timeout):
for _ in range(recall):
try:
soup = BeautifulSoup(self.source)
out_links = []
for tag in soup.findAll(["a", "link"], href=True):
tag["href"] = urljoin(self.url, tag["href"])
out_links.append(tag["href"])
return sorted(out_links)
except:
time.sleep(timeout) | Gets links in page
:param recall: max times to attempt to fetch url
:param timeout: seconds to sleep between attempts
:return: array of out_links |
4,822 | def disable_signing(self):
self.mav.signing.secret_key = None
self.mav.signing.sign_outgoing = False
self.mav.signing.allow_unsigned_callback = None
self.mav.signing.link_id = 0
self.mav.signing.timestamp = 0 | disable MAVLink2 signing |
4,823 | def view_hmap(token, dstore):
try:
poe = valid.probability(token.split()[1])
except IndexError:
poe = 0.1
mean = dict(extract(dstore, ))[]
oq = dstore[]
hmap = calc.make_hmap_array(mean, oq.imtls, [poe], len(mean))
dt = numpy.dtype([(, U32)] + [(imt, F32) for imt in oq.imtls])
array = numpy.zeros(len(hmap), dt)
for i, vals in enumerate(hmap):
array[i] = (i, ) + tuple(vals)
array.sort(order=list(oq.imtls)[0])
return rst_table(array[:20]) | Display the highest 20 points of the mean hazard map. Called as
$ oq show hmap:0.1 # 10% PoE |
4,824 | def _popup(self):
res = ()
for child in self.formulas:
if type(child) == type(self):
superchilds = child.formulas
res += superchilds
else:
res += (child, )
return tuple(res) | recursively find commutative binary operator
among child formulas and pop them up to the same level |
4,825 | def load_config(path):
path = os.path.abspath(path)
if os.path.isdir(path):
config, wordlists = _load_data(path)
elif os.path.isfile(path):
config = _load_config(path)
wordlists = {}
else:
raise InitializationError(.format(path))
for name, wordlist in wordlists.items():
if name in config:
raise InitializationError("Conflict: list {!r} is defined both in config "
"and in *.txt file. If it's a {!r} list, "
"you should remove it from config."
.format(name, _CONF.TYPE.WORDS))
config[name] = wordlist
return config | Loads configuration from a path.
Path can be a json file, or a directory containing config.json
and zero or more *.txt files with word lists or phrase lists.
Returns config dict.
Raises InitializationError when something is wrong. |
4,826 | def to_json(self):
return json.dumps({
"statistics": self.get_statistics()
, "authors": [json.loads(author.to_json()) for author in self.get_authors()]
}, indent=2) | Serialises the content of the KnowledgeBase as JSON.
:return: TODO |
4,827 | def start(self, *args, **kwargs):
if self.is_running():
raise RuntimeError()
self._running = self.run(*args, **kwargs)
try:
yielded = next(self._running)
except StopIteration:
raise TypeError()
if yielded is not None:
raise TypeError() | Starts the instance.
:raises RuntimeError: has been already started.
:raises TypeError: :meth:`run` is not canonical. |
4,828 | def multchoicebox(message=, title=, choices=[]):
return psidialogs.multi_choice(message=message, title=title, choices=choices) | Original doc: Present the user with a list of choices.
allow him to select multiple items and return them in a list.
if the user doesn't choose anything from the list, return the empty list.
return None if he cancelled selection. |
4,829 | def count(self):
return functools.reduce(lambda x, y: x * y, (x.count for x in self.bounds)) | Total number of array cells |
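A small illustration of what `count` computes: the product of the per-dimension counts. `Bound` is a stand-in for whatever objects populate `self.bounds`; only the `.count` attribute used above is assumed.

```python
import functools
from collections import namedtuple

Bound = namedtuple('Bound', 'count')         # stand-in for the real bound objects
bounds = [Bound(3), Bound(4), Bound(5)]      # a 3 x 4 x 5 array
total = functools.reduce(lambda x, y: x * y, (b.count for b in bounds))
print(total)  # 60 cells
```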
4,830 | def get_resource_siblings(raml_resource):
path = raml_resource.path
return [res for res in raml_resource.root.resources
if res.path == path] | Get siblings of :raml_resource:.
:param raml_resource: Instance of ramlfications.raml.ResourceNode. |
4,831 | def nsx_controller_connection_addr_port(self, **kwargs):
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop()
connection_addr = ET.SubElement(nsx_controller, "connection-addr")
port = ET.SubElement(connection_addr, "port")
port.text = kwargs.pop()
callback = kwargs.pop(, self._callback)
return callback(config) | Auto Generated Code |
4,832 | def mag_to_fnu(self, band, mag):
if len(band) != 2 or band[1] != "'":
raise ValueError(band)
return abmag_to_fnu_cgs(mag) | SDSS *primed* magnitudes to F_ν. The primed magnitudes are the "USNO"
standard-star system defined in Smith+ (2002AJ....123.2121S) and
Fukugita+ (1996AJ....111.1748F). This system is anchored to the AB
magnitude system, and as far as I can tell it is not known to have
measurable offsets from that system. (As of DR10, the *unprimed* SDSS
system is known to have small offsets from AB, but I do not believe
that that necessarily has implications for u'g'r'i'z'.)
However, as far as I can tell the filter responses of the USNO
telescope are not published -- only those of the main SDSS 2.5m
telescope. The whole reason for the existence of both the primed and
unprimed ugriz systems is that their responses do not quite match. For
my current application, which involves a completely different
telescope anyway, the difference shouldn't matter. |
4,833 | def loads(string, triples=False, cls=PENMANCodec, **kwargs):
codec = cls(**kwargs)
return list(codec.iterdecode(string, triples=triples)) | Deserialize a list of PENMAN-encoded graphs from *string*.
Args:
string: a string containing graph data
triples: if True, read graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
a list of Graph objects |
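A small usage sketch, assuming this is the `loads` entry point of a PENMAN-style codec as described above; the graph string is illustrative.

```python
# Illustrative usage of the loads() helper above.
graphs = loads('(b / bark-01 :ARG0 (d / dog))')
print(len(graphs))   # 1: one Graph object per PENMAN-encoded graph in the string
print(graphs[0])     # a Graph; its triples/metadata depend on the codec class used
```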
4,834 | def search_dashboard_deleted_for_facet(self, facet, **kwargs):
kwargs[] = True
if kwargs.get():
return self.search_dashboard_deleted_for_facet_with_http_info(facet, **kwargs)
else:
(data) = self.search_dashboard_deleted_for_facet_with_http_info(facet, **kwargs)
return data | Lists the values of a specific facet over the customer's deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread. |
4,835 | def analyze_text( self, text, **kwargs ):
# (body garbled in extraction: the handling of the keyword arguments
#  cutPhrases, force_parsing, syntax_layer, cutMaxThreshold and return_type,
#  and the NP labelling step that fills np_labels, are elided here)
all_np_labels.extend( np_labels )
if annotate_text:
self.annotateText( text, NOUN_CHUNKS, all_np_labels )
if return_type == "text":
return text
elif return_type == "labels":
return all_np_labels
elif return_type == "tokens":
return self.get_phrases(text, all_np_labels)
else:
return self.get_phrase_texts(text, all_np_labels) | Analyzes given Text for noun phrase chunks.
As result of analysis, a layer NOUN_CHUNKS will be attached to the input
Text object, containing a noun phrases detected from the Text;
Note: for preprocessing the Text, MaltParser is used by default. In order
to obtain a decent performance with MaltParser, it is advisable to analyse
texts at their full extent with this method. Splitting a text into smaller
chunks, such as clauses or sentences, and analysing one-small-chunk-at-time
may be rather demanding in terms of performance, because a file-based
preprocessing is used for obtaining the dependency relations.
Parameters
----------
text: estnltk.text.Text
The input text that should be analysed for noun phrases;
force_parsing : bool
If True, uses the *self.parser* to parse the given *text*, and overrides
the syntactic annotations in *text* with the new layer obtained from the
parser;
(default: False)
syntax_layer : str
Specifies which layer of syntactic annotations should be used as a
basis for NP chunking; If the *syntax_layer* exists within the *text*
(and force_parsing==False), uses the syntactic annotations from
*text[syntax_layer]*;
(default: LAYER_CONLL)
cutPhrases: bool
If True, all phrases exceeding the cutMaxThreshold will be
cut into single word phrases, consisting only of part-of-speech
categories 'S', 'Y', 'H';
(default: True)
cutMaxThreshold: int
Threshold indicating the maximum number of words allowed in a
phrase.
If cutPhrases is set, all phrases exceeding the threshold will be
cut into single word phrases, consisting only of part-of-speech
categories 'S', 'Y', 'H';
Automatic analysis of the Balanced Corpus of Estonian suggests
that 97% of all NP chunks are likely chunks of length 1-3, thus
the default threshold is set to 3;
(default value: 3)
return_type: string
If return_type=="text" (Default),
returns the input Text object;
If return_type=="labels",
returns a list of NP labels (strings), containing a label for
each word token in Text, indicating whether the word is at the
beginning of a phrase ('B'), inside a phrase ('I') or does
not belong to any phrase ('O').
If return_type=="tokens",
returns a list of phrases, where each phrase is a list of
tokens, and each token is a dictionary representing word;
If return_type=="strings",
returns a list of text strings, where each string is phrase's
text;
Regardless the return type, a layer named NOUN_CHUNKS will be added
to the input Text containing noun phrase annotations; |
4,836 | def output_to_table(obj, olist=, oformat=, table_ends=False, prefix=""):
para = ""
property_list = []
if olist == :
property_list = obj.inputs
elif olist == :
for item in obj.__dict__:
if "_" != item[0]:
property_list.append(item)
for item in property_list:
if hasattr(obj, item):
value = getattr(obj, item)
value_str = format_value(value)
if oformat == "latex":
delimeter = " & "
else:
delimeter = ","
para += "{0}{1}{2}\\\\\n".format(prefix + format_name(item), delimeter, value_str)
if table_ends:
para = add_table_ends(para, oformat)
return para | Compile the properties to a table.
:param olist: list, Names of the parameters to be in the output table
:param oformat: str, The type of table to be output
:param table_ends: bool, Add ends to the table
:param prefix: str, A string to be added to the start of each parameter name
:return: para, str, table as a string |
4,837 | def main():
parser = OptionParser()
parser.add_option(, ,
help=,
dest=,
type=,
default=)
parser.add_option(, ,
help=,
dest=,
type=,
default=)
parser.add_option(, ,
help=,
dest=,
type=,
default=)
parser.add_option(, ,
help=,
dest=,
type=,
default=)
parser.add_option(, ,
help=,
dest=,
type=,
default=)
parser.add_option(, ,
help=,
dest=,
type=,
default=)
parser.add_option(, ,
help=,
dest=,
type=,
default=)
parser.add_option(, ,
help=,
dest=,
action=,
default=False)
options, _ = parser.parse_args()
try:
lockfile = os.path.join(options.lockdir, )
with open(lockfile, ) as lock:
fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
work(options)
except IOError:
info("=> Another instance is already running")
sys.exit(254) | Main entry point |
4,838 | def get_filepath(self, filename):
return os.path.join(self.parent_folder, self.product_id, self.add_file_extension(filename)).replace(, ) | Creates file path for the file.
:param filename: name of the file
:type filename: str
:return: filename with path on disk
:rtype: str |
4,839 | async def _connect(self, connection_lost_callbk=None):
self.connection_lost_callbk = connection_lost_callbk
url = self._config[]
LOG.info("Connecting to ElkM1 at %s", url)
scheme, dest, param, ssl_context = parse_url(url)
conn = partial(Connection, self.loop, self._connected,
self._disconnected, self._got_data, self._timeout)
try:
if scheme == :
await serial_asyncio.create_serial_connection(
self.loop, conn, dest, baudrate=param)
else:
await asyncio.wait_for(self.loop.create_connection(
conn, host=dest, port=param, ssl=ssl_context), timeout=30)
except (ValueError, OSError, asyncio.TimeoutError) as err:
LOG.warning("Could not connect to ElkM1 (%s). Retrying in %d seconds",
err, self._connection_retry_timer)
self.loop.call_later(self._connection_retry_timer, self.connect)
self._connection_retry_timer = 2 * self._connection_retry_timer \
if self._connection_retry_timer < 32 else 60 | Asyncio connection to Elk. |
4,840 | def add_install_defaults(args):
if attr == "genomes" and len(args.genomes) > 0:
continue
for x in default_args.get(attr, []):
x = str(x)
new_val = getattr(args, attr)
if x not in getattr(args, attr):
new_val.append(x)
setattr(args, attr, new_val)
args = _datatarget_defaults(args, default_args)
if "isolate" in default_args and args.isolate is not True:
args.isolate = default_args["isolate"]
return args | Add any saved installation defaults to the upgrade. |
4,841 | def set_line_join(self, line_join):
cairo.cairo_set_line_join(self._pointer, line_join)
self._check_status() | Set the current :ref:`LINE_JOIN` within the cairo context.
As with the other stroke parameters,
the current line join style is examined by
:meth:`stroke`, :meth:`stroke_extents`, and :meth:`stroke_to_path`,
but does not have any effect during path construction.
The default line join is :obj:`MITER <LINE_JOIN_MITER>`.
:param line_join: A :ref:`LINE_JOIN` string. |
4,842 | def get_blank_row(self, filler="-", splitter="+"):
return self.get_pretty_row(
["" for _ in self.widths],
filler,
splitter,
) | Gets blank row
:param filler: Fill empty columns with this char
:param splitter: Separate columns with this char
:return: Pretty formatted blank row (with no meaningful data in it) |
4,843 | def filter_by_analysis_period(self, analysis_period):
self._check_analysis_period(analysis_period)
_filtered_data = self.filter_by_moys(analysis_period.moys)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data | Filter a Data Collection based on an analysis period.
Args:
analysis period: A Ladybug analysis period
Return:
A new Data Collection with filtered data |
4,844 | def hardware_custom_profile_kap_custom_profile_xstp_xstp_hello_interval(self, **kwargs):
config = ET.Element("config")
hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
custom_profile = ET.SubElement(hardware, "custom-profile")
kap_custom_profile = ET.SubElement(custom_profile, "kap-custom-profile")
name_key = ET.SubElement(kap_custom_profile, "name")
name_key.text = kwargs.pop()
xstp = ET.SubElement(kap_custom_profile, "xstp")
xstp_hello_interval = ET.SubElement(xstp, "xstp_hello_interval")
xstp_hello_interval.text = kwargs.pop()
callback = kwargs.pop(, self._callback)
return callback(config) | Auto Generated Code |
4,845 | def setSignals(self, vehID, signals):
self._connection._sendIntCmd(
tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_SIGNALS, vehID, signals) | setSignals(string, integer) -> None
Sets an integer encoding the state of the vehicle's signals. |
4,846 | def pix2vec(nside, ipix, nest=False):
lon, lat = healpix_to_lonlat(ipix, nside, order='nested' if nest else 'ring')
return ang2vec(*_lonlat_to_healpy(lon, lat)) | Drop-in replacement for healpy `~healpy.pixelfunc.pix2vec`. |
4,847 | def getSegmentOnCell(self, c, i, segIdx):
segList = self.cells4.getNonEmptySegList(c,i)
seg = self.cells4.getSegment(c, i, segList[segIdx])
numSyn = seg.size()
assert numSyn != 0
result = []
result.append([int(segIdx), bool(seg.isSequenceSegment()),
seg.getPositiveActivations(),
seg.getTotalActivations(), seg.getLastActiveIteration(),
seg.getLastPosDutyCycle(),
seg.getLastPosDutyCycleIteration()])
for s in xrange(numSyn):
sc, si = self.getColCellIdx(seg.getSrcCellIdx(s))
result.append([int(sc), int(si), seg.getPermanence(s)])
return result | Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`. |
4,848 | def append(self, data: Union[bytes, bytearray, memoryview]) -> None:
size = len(data)
if size > self._large_buf_threshold:
if not isinstance(data, memoryview):
data = memoryview(data)
self._buffers.append((True, data))
elif size > 0:
if self._buffers:
is_memview, b = self._buffers[-1]
new_buf = is_memview or len(b) >= self._large_buf_threshold
else:
new_buf = True
if new_buf:
self._buffers.append((False, bytearray(data)))
else:
b += data
self._size += size | Append the given piece of data (should be a buffer-compatible object). |
4,849 | def preprocess(self):
self.processed_tables = []
self.flags_by_table = []
self.units_by_table = []
for worksheet, rtable in enumerate(self.raw_tables):
ptable, flags, units = self.preprocess_worksheet(rtable, worksheet)
self.processed_tables.append(ptable)
self.flags_by_table.append(flags)
self.units_by_table.append(units)
return self.processed_tables | Performs initial cell conversions to standard types. This will strip units, scale numbers,
and identify numeric data where it's convertible. |
4,850 | def p_sigtypes(self, p):
p[0] = p[1] + (p[2],)
p.set_lineno(0, p.lineno(1)) | sigtypes : sigtypes sigtype |
4,851 | def document_from_string(self, schema, request_string):
key = self.get_key_for_schema_and_document_string(schema, request_string)
if key not in self.cache_map:
self.cache_map[key] = self.fallback_backend.document_from_string(
schema, request_string
)
self.get_worker().queue(self.queue_backend, key, schema, request_string)
return self.cache_map[key] | This method returns a GraphQLQuery (from cache if present) |
4,852 | def get_bins_by_resource(self, resource_id):
mgr = self._get_provider_manager(, local=True)
lookup_session = mgr.get_bin_lookup_session(proxy=self._proxy)
return lookup_session.get_bins_by_ids(
self.get_bin_ids_by_resource(resource_id)) | Gets the list of ``Bin`` objects mapped to a ``Resource``.
arg: resource_id (osid.id.Id): ``Id`` of a ``Resource``
return: (osid.resource.BinList) - list of bins
raise: NotFound - ``resource_id`` is not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
4,853 | def calc_acceleration(xdata, dt):
acceleration = _np.diff(_np.diff(xdata))/dt**2
return acceleration | Calculates the acceleration from the position
Parameters
----------
xdata : ndarray
Position data
dt : float
time between measurements
Returns
-------
acceleration : ndarray
values of acceleration from position
2 to N. |
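A quick check of the finite-difference formula above with synthetic data; numpy is imported as `_np` to match the source.

```python
import numpy as _np

# Constant acceleration of 2 units/s^2: x(t) = t**2, sampled at dt = 0.1 s.
dt = 0.1
t = _np.arange(0, 1, dt)
xdata = t**2
acceleration = _np.diff(_np.diff(xdata)) / dt**2
print(acceleration)  # ~2.0 for every interior sample (length N - 2)
```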
4,854 | def _get_layer_converter_fn(layer, add_custom_layers = False):
layer_type = type(layer)
if layer_type in _KERAS_LAYER_REGISTRY:
convert_func = _KERAS_LAYER_REGISTRY[layer_type]
if convert_func is _layers2.convert_activation:
act_name = _layers2._get_activation_name_from_keras_layer(layer)
if act_name == :
return None
return convert_func
elif add_custom_layers:
return None
else:
raise TypeError("Keras layer of type %s is not supported." % type(layer)) | Get the right converter function for Keras |
4,855 | def text(what="sentence", *args, **kwargs):
if what == "character":
return character(*args, **kwargs)
elif what == "characters":
return characters(*args, **kwargs)
elif what == "word":
return word(*args, **kwargs)
elif what == "words":
return words(*args, **kwargs)
elif what == "sentence":
return sentence(*args, **kwargs)
elif what == "sentences":
return sentences(*args, **kwargs)
elif what == "paragraph":
return paragraph(*args, **kwargs)
elif what == "paragraphs":
return paragraphs(*args, **kwargs)
elif what == "title":
return title(*args, **kwargs)
else:
raise NameError() | An aggregator for all above defined public methods. |
4,856 | def reference_to_greatcircle(reference_frame, greatcircle_frame):
pole = greatcircle_frame.pole.transform_to(coord.ICRS)
ra0 = greatcircle_frame.ra0
center = greatcircle_frame.center
R_rot = rotation_matrix(greatcircle_frame.rotation, )
if not np.isnan(ra0):
xaxis = np.array([np.cos(ra0), np.sin(ra0), 0.])
zaxis = pole.cartesian.xyz.value
if np.abs(zaxis[2]) >= 1e-15:
xaxis[2] = -(zaxis[0]*xaxis[0] + zaxis[1]*xaxis[1]) / zaxis[2]
else:
xaxis[2] = 0.
xaxis = xaxis / np.sqrt(np.sum(xaxis**2))
yaxis = np.cross(zaxis, xaxis)
R = np.stack((xaxis, yaxis, zaxis))
elif center is not None:
R1 = rotation_matrix(pole.ra, )
R2 = rotation_matrix(90*u.deg - pole.dec, )
Rtmp = matrix_product(R2, R1)
rot = center.cartesian.transform(Rtmp)
rot_lon = rot.represent_as(coord.UnitSphericalRepresentation).lon
R3 = rotation_matrix(rot_lon, )
R = matrix_product(R3, R2, R1)
else:
R1 = rotation_matrix(pole.ra, )
R2 = rotation_matrix(pole.dec, )
R = matrix_product(R2, R1)
return matrix_product(R_rot, R) | Convert a reference coordinate to a great circle frame. |
4,857 | def QA_util_code_tolist(code, auto_fill=True):
if isinstance(code, str):
if auto_fill:
return [QA_util_code_tostr(code)]
else:
return [code]
elif isinstance(code, list):
if auto_fill:
return [QA_util_code_tostr(item) for item in code]
else:
return [item for item in code] | Convert a code (or codes) into a list.
Arguments:
code {[type]} -- [description]
Keyword Arguments:
auto_fill {bool} -- whether to auto-pad codes to 6 digits (generally for stocks/indexes/ETFs; not applicable to futures) (default: {True})
Returns:
[list] -- [description] |
4,858 | def _compute_dependencies(self):
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
for req in self._parsed_pkg_info.get_all() or []:
distvers, mark = self._preparse_requirement(req)
parsed = parse_requirements(distvers).next()
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={:extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all() or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm | Recompute this distribution's dependencies. |
4,859 | def previous(self, cli):
if len(self.focus_stack) > 1:
try:
return self[self.focus_stack[-2]]
except KeyError:
pass | Return the previously focussed :class:`.Buffer` or `None`. |
4,860 | def foreach(self, f):
from pyspark.rdd import _wrap_function
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.taskcontext import TaskContext
if callable(f):
def func_without_process(_, iterator):
for x in iterator:
f(x)
return iter([])
func = func_without_process
else:
if not hasattr(f, ):
raise Exception("Provided object does not have a method")
if not callable(getattr(f, )):
raise Exception("Attribute in provided object is not callable")
def doesMethodExist(method_name):
exists = hasattr(f, method_name)
if exists and not callable(getattr(f, method_name)):
raise Exception(
"Attribute in provided object is not callable" % method_name)
return exists
open_exists = doesMethodExist()
close_exists = doesMethodExist()
def func_with_open_process_close(partition_id, iterator):
epoch_id = TaskContext.get().getLocalProperty()
if epoch_id:
epoch_id = int(epoch_id)
else:
raise Exception("Could not get batch id from TaskContext")
should_process = True
if open_exists:
should_process = f.open(partition_id, epoch_id)
error = None
try:
if should_process:
for x in iterator:
f.process(x)
except Exception as ex:
error = ex
finally:
if close_exists:
f.close(error)
if error:
raise error
return iter([])
func = func_with_open_process_close
serializer = AutoBatchedSerializer(PickleSerializer())
wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer)
jForeachWriter = \
self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter(
wrapped_func, self._df._jdf.schema())
self._jwrite.foreach(jForeachWriter)
return self | Sets the output of the streaming query to be processed using the provided writer ``f``.
This is often used to write the output of a streaming query to arbitrary storage systems.
The processing logic can be specified in two ways.
#. A **function** that takes a row as input.
This is a simple way to express your processing logic. Note that this does
not allow you to deduplicate generated data when failures cause reprocessing of
some input data. That would require you to specify the processing logic in the next
way.
#. An **object** with a ``process`` method and optional ``open`` and ``close`` methods.
The object can have the following methods.
* ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing
(for example, open a connection, start a transaction, etc). Additionally, you can
use the `partition_id` and `epoch_id` to deduplicate regenerated data
(discussed later).
* ``process(row)``: *Non-optional* method that processes each :class:`Row`.
* ``close(error)``: *Optional* method that finalizes and cleans up (for example,
close connection, commit transaction, etc.) after all rows have been processed.
The object will be used by Spark in the following way.
* A single copy of this object is responsible of all the data generated by a
single task in a query. In other words, one instance is responsible for
processing one partition of the data generated in a distributed manner.
* This object must be serializable because each task will get a fresh
serialized-deserialized copy of the provided object. Hence, it is strongly
recommended that any initialization for writing data (e.g. opening a
connection or starting a transaction) is done after the `open(...)`
method has been called, which signifies that the task is ready to generate data.
* The lifecycle of the methods are as follows.
For each partition with ``partition_id``:
... For each batch/epoch of streaming data with ``epoch_id``:
....... Method ``open(partitionId, epochId)`` is called.
....... If ``open(...)`` returns true, for each row in the partition and
batch/epoch, method ``process(row)`` is called.
....... Method ``close(errorOrNull)`` is called with error (if any) seen while
processing rows.
Important points to note:
* The `partitionId` and `epochId` can be used to deduplicate generated data when
failures cause reprocessing of some input data. This depends on the execution
mode of the query. If the streaming query is being executed in the micro-batch
mode, then every partition represented by a unique tuple (partition_id, epoch_id)
is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used
to deduplicate and/or transactionally commit data and achieve exactly-once
guarantees. However, if the streaming query is being executed in the continuous
mode, then this guarantee does not hold and therefore should not be used for
deduplication.
* The ``close()`` method (if exists) will be called if `open()` method exists and
returns successfully (irrespective of the return value), except if the Python
crashes in the middle.
.. note:: Evolving.
>>> # Print every row using a function
>>> def print_row(row):
... print(row)
...
>>> writer = sdf.writeStream.foreach(print_row)
>>> # Print every row using a object with process() method
>>> class RowPrinter:
... def open(self, partition_id, epoch_id):
... print("Opened %d, %d" % (partition_id, epoch_id))
... return True
... def process(self, row):
... print(row)
... def close(self, error):
... print("Closed with error: %s" % str(error))
...
>>> writer = sdf.writeStream.foreach(RowPrinter()) |
4,861 | def mu(self):
mu = self._models[0].mu
assert all([mu == model.mu for model in self._models])
return mu | See docs for `Model` abstract base class. |
4,862 | def set_url_part(url, **kwargs):
d = parse_url_to_dict(url)
d.update(kwargs)
return unparse_url_dict(d) | Change one or more parts of a URL |
4,863 | def add_oxidation_state_by_site_fraction(structure, oxidation_states):
try:
for i, site in enumerate(structure):
new_sp = collections.defaultdict(float)
for j, (el, occu) in enumerate(get_z_ordered_elmap(site
.species)):
specie = Specie(el.symbol, oxidation_states[i][j])
new_sp[specie] += occu
structure[i] = new_sp
return structure
except IndexError:
raise ValueError("Oxidation state of all sites must be "
"specified in the list.") | Add oxidation states to a structure by fractional site.
Args:
oxidation_states (list): List of list of oxidation states for each
site fraction for each site.
E.g., [[2, 4], [3], [-2], [-2], [-2]] |
4,864 | def clear_cached_values(self):
self._prof_interp = None
self._prof_y = None
self._prof_z = None
self._marg_interp = None
self._marg_z = None
self._post = None
self._post_interp = None
self._interp = None
self._ret_type = None | Removes all of the cached values and interpolators |
4,865 | def filter_data(data, kernel, mode='constant', fill_value=0.0,
check_normalization=False):
from scipy import ndimage
if kernel is not None:
if isinstance(kernel, Kernel2D):
kernel_array = kernel.array
else:
kernel_array = kernel
if check_normalization:
if not np.allclose(np.sum(kernel_array), 1.0):
warnings.warn(,
AstropyUserWarning)
return ndimage.convolve(data.astype(float), kernel_array, mode=mode,
cval=fill_value)
else:
return data | Convolve a 2D image with a 2D kernel.
The kernel may either be a 2D `~numpy.ndarray` or a
`~astropy.convolution.Kernel2D` object.
Parameters
----------
data : array_like
The 2D array of the image.
kernel : array-like (2D) or `~astropy.convolution.Kernel2D`
The 2D kernel used to filter the input ``data``. Filtering the
``data`` will smooth the noise and maximize detectability of
objects with a shape similar to the kernel.
mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional
The ``mode`` determines how the array borders are handled. For
the ``'constant'`` mode, values outside the array borders are
set to ``fill_value``. The default is ``'constant'``.
fill_value : scalar, optional
Value to fill data values beyond the array borders if ``mode``
is ``'constant'``. The default is ``0.0``.
check_normalization : bool, optional
If `True` then a warning will be issued if the kernel is not
normalized to 1. |
4,866 | def register_surrogateescape():
if six.PY3:
return
try:
codecs.lookup_error(FS_ERRORS)
except LookupError:
codecs.register_error(FS_ERRORS, surrogateescape_handler) | Registers the surrogateescape error handler on Python 2 (only) |
4,867 | def get_clients(self, limit=None, offset=None):
data = {}
if limit:
data[] = limit
if offset:
data[] = offset
result = self._request(, , data=json.dumps(data))
return result.json() | Returns a list of clients. |
4,868 | def hmget(self, name, keys, *args):
"Returns a list of values ordered identically to ``keys``"
args = list_or_args(keys, args)
return self.execute_command('HMGET', name, *args) | Returns a list of values ordered identically to ``keys`` |
4,869 | def p_subidentifiers(self, p):
n = len(p)
if n == 3:
p[0] = p[1] + [p[2]]
elif n == 2:
p[0] = [p[1]] | subidentifiers : subidentifiers subidentifier
| subidentifier |
4,870 | def run(path, code=None, params=None, ignore=None, select=None, **meta):
complexity = params.get(, 10)
no_assert = params.get(, False)
show_closures = params.get(, False)
visitor = ComplexityVisitor.from_code(code, no_assert=no_assert)
blocks = visitor.blocks
if show_closures:
blocks = add_inner_blocks(blocks)
return [
{: block.lineno, : block.col_offset, : , : ,
: % (block.name, block.complexity)}
for block in visitor.blocks if block.complexity > complexity
] | Check code with Radon.
:return list: List of errors. |
4,871 | def obj_to_json(self, file_path=None, indent=2, sort_keys=False,
quote_numbers=True):
data = [row.obj_to_ordered_dict(self.columns) for row in self]
if not quote_numbers:
for row in data:
for k, v in row.items():
if isinstance(v, (bool, int, float)):
row[k] = str(row[k])
ret = json.dumps(data, indent=indent, sort_keys=sort_keys)
if sys.version_info[0] == 2:
ret = ret.replace(, )
self._save_file(file_path, ret)
return ret | This will return a str of a json list.
:param file_path: path to data file, defaults to
self's contents if left alone
:param indent: int if set to 2 will indent to spaces and include
line breaks.
:param sort_keys: sorts columns as oppose to column order.
:param quote_numbers: bool if True will quote numbers that are strings
:return: string representing the grid formation
of the relevant data |
4,872 | def hr_size(num, suffix='B') -> str:
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit if unit != else , suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
From https://stackoverflow.com/a/1094933
:param num: number of bytes
:param suffix: Optional size specifier
:return: Formatted string |
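A quick usage sketch, assuming the elided conditional in the first return reduces to the plain unit and that the defaults follow the cited Stack Overflow answer.

```python
print(hr_size(0))            # '0.0B'
print(hr_size(2048))         # '2.0KiB'
print(hr_size(5 * 1024**3))  # '5.0GiB'
```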
4,873 | def output(self, stream, disabletransferencoding = None):
if self._sendHeaders:
raise HttpProtocolException()
self.outputstream = stream
try:
content_length = len(stream)
except Exception:
pass
else:
self.header(b, str(content_length).encode())
if disabletransferencoding is not None:
self.disabledeflate = disabletransferencoding
self._startResponse() | Set output stream and send response immediately |
4,874 | def encrypt(self, plaintext):
if not isinstance(plaintext, int):
raise ValueError()
if not self.in_range.contains(plaintext):
raise OutOfRangeError()
return self.encrypt_recursive(plaintext, self.in_range, self.out_range) | Encrypt the given plaintext value |
4,875 | def add_access_policy_filter(request, query, column_name):
q = d1_gmn.app.models.Subject.objects.filter(
subject__in=request.all_subjects_set
).values()
filter_arg = .format(column_name)
return query.filter(**{filter_arg: q}) | Filter records that do not have ``read`` or better access for one or more of the
active subjects.
Since ``read`` is the lowest access level that a subject can have, this method only
has to filter on the presence of the subject. |
4,876 | def xml_endtag (self, name):
self.level -= 1
assert self.level >= 0
self.write(self.indent*self.level)
self.writeln(u"</%s>" % xmlquote(name)) | Write XML end tag. |
4,877 | def get_angle(self, verify = False):
LSB = self.bus.read_byte_data(self.address, self.angle_LSB)
MSB = self.bus.read_byte_data(self.address, self.angle_MSB)
DATA = (MSB << 6) + LSB
if not verify:
return (360.0 / 2**14) * DATA
else:
status = self.get_diagnostics()
if not (status[]) and not(status[]) and not(status[]):
return (360.0 / 2**14) * DATA
else:
return None | Returns the measured angle in degrees in the range 0-360. |
4,878 | def interconnect_link_topologies(self):
if not self.__interconnect_link_topologies:
self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection)
return self.__interconnect_link_topologies | Gets the InterconnectLinkTopologies API client.
Returns:
InterconnectLinkTopologies: |
4,879 | def DiscreteUniform(n=10,LB=1,UB=99,B=100):
B = 100
s = [0]*n
for i in range(n):
s[i] = random.randint(LB,UB)
return s,B | DiscreteUniform: create random, uniform instance for the bin packing problem. |
4,880 | def iflatten(seq, isSeq=isSeq):
for elt in seq:
if isSeq(elt):
for x in iflatten(elt, isSeq):
yield x
else:
yield elt | r"""Like `flatten` but lazy. |
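A small usage sketch, assuming the default `isSeq` treats lists and tuples (but not strings) as sequences.

```python
nested = [1, [2, [3, 4]], (5,)]
print(list(iflatten(nested)))  # [1, 2, 3, 4, 5]
```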
4,881 | def _mark_started(self):
log = self._params.get(, self._discard)
now = time.time()
self._started = now
limit = self._config_running.get()
try:
limit = float(_fmt_context(self._get(limit, default=), self._context))
if limit > 0:
log.debug("Applying task time limit of %s", self._name, deltafmt(limit))
self._limit = now + limit
except Exception as e:
log.warn("Task time_limit value invalid -- %s",
self._name, limit, e, exc_info=log.isEnabledFor(logging.DEBUG)) | Set the state information for a task once it has completely started.
In particular, the time limit is applied as of this time (i.e. after
any start delay has been taken). |
4,882 | def get(key, value=None, conf_file=_DEFAULT_CONF):
current_conf = _parse_conf(conf_file)
stanza = current_conf.get(key, False)
if value:
if stanza:
return stanza.get(value, False)
_LOG.warning("Block not present or empty.", key)
return stanza | Get the value for a specific configuration line.
:param str key: The command or stanza block to configure.
:param str value: The command value or command of the block specified by the key parameter.
:param str conf_file: The logrotate configuration file.
:return: The value for a specific configuration line.
:rtype: bool|int|str
CLI Example:
.. code-block:: bash
salt '*' logrotate.get rotate
salt '*' logrotate.get /var/log/wtmp rotate /etc/logrotate.conf |
4,883 | def parse_string(self, timestr, subfmts):
components = (, , )
defaults = (None, 1, 1, 0)
try:
idot = timestr.rindex()
except:
fracday = 0.0
else:
timestr, fracday = timestr[:idot], timestr[idot:]
fracday = float(fracday)
for _, strptime_fmt_or_regex, _ in subfmts:
vals = []
if isinstance(strptime_fmt_or_regex, six.string_types):
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
tm.tm_hour += int(24 * fracday)
tm.tm_min += int(60 * (24 * fracday - tm.tm_hour))
tm.tm_sec += 60 * (60 * (24 * fracday - tm.tm_hour) - tm.tm_min)
except ValueError as ex:
print ex
continue
else:
vals = [getattr(tm, + component)
for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [int(tm.get(component, default)) for component, default
in six.moves.zip(components, defaults)]
hrprt = int(24 * fracday)
vals.append(hrprt)
mnprt = int(60 * (24 * fracday - hrprt))
vals.append(mnprt)
scprt = 60 * (60 * (24 * fracday - hrprt) - mnprt)
vals.append(scprt)
return vals
else:
raise ValueError(
.format(timestr, self.name)) | Read time from a single string, using a set of possible formats. |
4,884 | def classify_tangent_intersection(
intersection, nodes1, tangent1, nodes2, tangent2
):
dot_prod = np.vdot(tangent1[:, 0], tangent2[:, 0])
curvature1 = _curve_helpers.get_curvature(nodes1, tangent1, intersection.s)
curvature2 = _curve_helpers.get_curvature(nodes2, tangent2, intersection.t)
if dot_prod < 0:
sign1, sign2 = _SIGN([curvature1, curvature2])
if sign1 == sign2:
if sign1 == 1.0:
return CLASSIFICATION_T.OPPOSED
else:
return CLASSIFICATION_T.TANGENT_BOTH
else:
delta_c = abs(curvature1) - abs(curvature2)
if delta_c == 0.0:
raise NotImplementedError(_SAME_CURVATURE)
elif sign1 == _SIGN(delta_c):
return CLASSIFICATION_T.OPPOSED
else:
return CLASSIFICATION_T.TANGENT_BOTH
else:
if curvature1 > curvature2:
return CLASSIFICATION_T.TANGENT_FIRST
elif curvature1 < curvature2:
return CLASSIFICATION_T.TANGENT_SECOND
else:
raise NotImplementedError(_SAME_CURVATURE) | Helper for func:`classify_intersection` at tangencies.
.. note::
This is a helper used only by :func:`classify_intersection`.
Args:
intersection (.Intersection): An intersection object.
nodes1 (numpy.ndarray): Control points for the first curve at
the intersection.
tangent1 (numpy.ndarray): The tangent vector to the first curve
at the intersection (``2 x 1`` array).
nodes2 (numpy.ndarray): Control points for the second curve at
the intersection.
tangent2 (numpy.ndarray): The tangent vector to the second curve
at the intersection (``2 x 1`` array).
Returns:
IntersectionClassification: The "inside" curve type, based on
the classification enum. Will either be ``opposed`` or one
of the ``tangent`` values.
Raises:
NotImplementedError: If the curves are tangent at the intersection
and have the same curvature. |
4,885 | def get_flight_rules(vis: Number, ceiling: Cloud) -> int:
if not vis:
return 2
if vis.repr == or vis.repr.startswith():
vis = 10
elif vis.repr.startswith():
vis = 0
elif len(vis.repr) == 4:
vis = vis.value * 0.000621371
else:
vis = vis.value
cld = ceiling.altitude if ceiling else 99
if (vis <= 5) or (cld <= 30):
if (vis < 3) or (cld < 10):
if (vis < 1) or (cld < 5):
return 3
return 2
return 1
return 0 | Returns int based on current flight rules from parsed METAR data
0=VFR, 1=MVFR, 2=IFR, 3=LIFR
Note: Common practice is to report IFR if visibility unavailable |
4,886 | def calc_avr_uvr_v1(self):
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
for i in range(2):
if flu.h <= (con.hm+der.hv[i]):
flu.avr[i] = 0.
flu.uvr[i] = 0.
else:
flu.avr[i] = (flu.h-(con.hm+der.hv[i]))**2*con.bnvr[i]/2.
flu.uvr[i] = (flu.h-(con.hm+der.hv[i]))*(1.+con.bnvr[i]**2)**.5 | Calculate the flown through area and the wetted perimeter of both
outer embankments.
Note that each outer embankment lies beyond its foreland and that all
water flowing exactly above an embankment is added to |AVR|.
The theoretical surface separating water above the foreland from water
above its embankment is not contributing to |UVR|.
Required control parameters:
|HM|
|BNVR|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AVR|
|UVR|
Examples:
Generally, right trapezoids are assumed for the forelands. Here, for
simplicity, both forelands are assumed to be symmetrical. Their smaller
bases (bottoms) have a length of 2 meters, their non-vertical legs show
an inclination of 1 meter per 4 meters, and their height (depth) is
1 meter. Both forelands lie 1 meter above the main channel's bottom.
For the outer embankments, triangles are assumed, with the vertical side
separating the foreland from its outer embankment. Here, for
simplicity, both embankments are assumed to be symmetrical. Their
inclinations are 1 meter per 4 meters and their lowest point is
1 meter above the forelands' bottom and 2 meters above the main
channel's bottom:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bnvr(4.0)
>>> derived.hv(1.0)
The first example deals with moderate high flow conditions, where
water flows over the forelands, but not over their outer embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.0, 0.0)
>>> fluxes.uvr
uvr(0.0, 0.0)
The second example deals with extreme high flow conditions, where
water flows over both the forelands and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.5, 0.5)
>>> fluxes.uvr
uvr(2.061553, 2.061553) |
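The doctest numbers above can be reproduced without the hydpy model objects; this minimal numpy check applies the same formulas with the example's parameter values.

import numpy as np

# Second example: water level 2.5 m, channel depth hm = 1 m,
# foreland height hv = 1 m, embankment slope bnvr = 4.
h, hm, hv, bnvr = 2.5, 1.0, 1.0, 4.0
depth = h - (hm + hv)                      # water depth above the embankment toe
avr = depth ** 2 * bnvr / 2.0              # triangular flown-through area
uvr = depth * np.sqrt(1.0 + bnvr ** 2)     # wetted sloping leg only
print(avr, round(uvr, 6))                  # 0.5 2.061553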
4,887 | def energy_ratio_by_chunks(x, param):
res_data = []
res_index = []
full_series_energy = np.sum(x ** 2)
for parameter_combination in param:
num_segments = parameter_combination["num_segments"]
segment_focus = parameter_combination["segment_focus"]
assert segment_focus < num_segments
assert num_segments > 0
res_data.append(np.sum(np.array_split(x, num_segments)[segment_focus] ** 2.0)/full_series_energy)
res_index.append("num_segments_{}__segment_focus_{}".format(num_segments, segment_focus))
return list(zip(res_index, res_data)) | Calculates the sum of squares of chunk i out of N chunks expressed as a ratio with the sum of squares over the whole
series.
Takes as input parameters the number num_segments of segments to divide the series into and segment_focus
which is the segment number (starting at zero) to return a feature on.
If the length of the time series is not a multiple of the number of segments, the remaining data points are
distributed over the first bins. For example, if your time series consists of 8 entries split into 3 segments, the
first two bins will contain 3 values each and the last one only 2, e.g. `[ 0., 1., 2.], [ 3., 4., 5.]` and `[ 6., 7.]`.
Note that the answer for `num_segments = 1` is a trivial "1" but we handle this scenario
in case somebody calls it. Sum of the ratios should be 1.0.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"num_segments": N, "segment_focus": i} with N, i both ints
:return: the feature values
:return type: list of tuples (index, data) |
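A self-contained numpy sketch of the same chunking idea, showing the 3/3/2 split from the docstring example and that the ratios sum to one (it bypasses the param/return-index machinery of the function itself).

import numpy as np

x = np.arange(8, dtype=float)              # 8 points -> segments of sizes 3, 3, 2
full_energy = np.sum(x ** 2)
ratios = [np.sum(seg ** 2) / full_energy for seg in np.array_split(x, 3)]
print([round(r, 4) for r in ratios])       # [0.0357, 0.3571, 0.6071]
print(round(sum(ratios), 10))              # 1.0 -- the ratios sum to one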
4,888 | def connect(self, port=None, baud_rate=115200):
if isinstance(port, types.StringTypes):
ports = [port]
else:
ports = port
if not ports:
ports = serial_ports().index.tolist()
if not ports:
raise IOError("Arduino Mega2560 not found on any port.")
for comport_i in ports:
if self.connected():
self.disconnect()
self.port = None
self._i2c_devices = {}
try:
logger.debug('Trying to connect on port %s', comport_i)  # log message reconstructed; original literal was stripped
Base.connect(self, str(comport_i), baud_rate)
self.port = comport_i
break
except BadVGND, exception:
logger.warning(exception)
break
except RuntimeError, exception:
continue
else:
raise RuntimeError(  # error message reconstructed; original literal was stripped
'Could not connect to control board on any of the ports: %s' % ports)
name = self.name()
version = self.hardware_version()
firmware = self.software_version()
serial_number_string = ""
try:
serial_number_string = ", S/N %03d" % self.serial_number
except:
pass
logger.info("Connected to %s v%s (Firmware: %s%s)" %
(name, version, firmware, serial_number_string))
logger.info("Poll control board for series resistors and "
"capacitance values.")
self._read_calibration_data()
try:
self.__aref__ = self._aref()
logger.info("Analog reference = %.2f V" % self.__aref__)
except:
pass
expected = 2 ** 10/2
v = {}
channels = [0, 1]
damaged = []
for channel in channels:
try:
v[channel] = np.mean(self.analog_reads(channel, 10))
logger.info("A%d VGND = %.2f V (%.2f%% of Aref)", channel,
self.__aref__ * v[channel] / (2 ** 10), 100.0 *
v[channel] / (2 ** 10))
if np.abs(v[channel] - expected) / expected > .1:
damaged.append(channel)
except:
break
self._i2c_scan()
if damaged:
if len(damaged) == 1:
msg = "Analog channel %d appears" % damaged[0]
else:
msg = "Analog channels %s appear" % damaged
raise BadVGND(msg + " to be damaged. You may need to replace the "
"op-amp on the control board.")
return self.RETURN_OK | Parameters
----------
port : str or list-like, optional
Port (or list of ports) to try to connect to as a DMF Control
Board.
baud_rate : int, optional
Returns
-------
str
Port DMF control board was connected on.
Raises
------
RuntimeError
If connection could not be established.
IOError
If no ports were specified and Arduino Mega2560 not found on any
port. |
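The loop above interleaves board-specific calls with a generic "try each candidate port" pattern; below is a minimal pyserial-only sketch of that pattern (the function name and error messages are made up, and no control-board protocol is spoken).

import serial
from serial.tools import list_ports

def connect_first_available(ports=None, baud_rate=115200):
    # Try each candidate port until one opens, mirroring the retry loop above.
    candidates = ports or [p.device for p in list_ports.comports()]
    if not candidates:
        raise IOError("no serial ports found")
    for name in candidates:
        try:
            return serial.Serial(name, baudrate=baud_rate, timeout=1)
        except serial.SerialException:
            continue  # this port failed; keep trying the rest
    raise RuntimeError("could not open any of: %s" % candidates)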
4,889 | def standard_parsing_functions(Block, Tx):
def stream_block(f, block):
assert isinstance(block, Block)
block.stream(f)
def stream_blockheader(f, blockheader):
assert isinstance(blockheader, Block)
blockheader.stream_header(f)
def stream_tx(f, tx):
assert isinstance(tx, Tx)
tx.stream(f)
def parse_int_6(f):
b = f.read(6) + b'\0\0'  # zero-pad the 6 bytes to 8 so "<Q" can decode them
return struct.unpack("<Q", b)[0]  # format string and argument order corrected
def stream_int_6(f, v):
f.write(struct.pack("<Q", v)[:6])  # keep only the low 6 bytes of the little-endian value
more_parsing = [
("A", (PeerAddress.parse, lambda f, peer_addr: peer_addr.stream(f))),
("v", (InvItem.parse, lambda f, inv_item: inv_item.stream(f))),
("T", (Tx.parse, stream_tx)),
("B", (Block.parse, stream_block)),
("z", (Block.parse_as_header, stream_blockheader)),
("1", (lambda f: struct.unpack("B", f.read(1))[0], lambda f, v: f.write(struct.pack("B", v)))),
("6", (parse_int_6, stream_int_6)),
("O", (lambda f: True if f.read(1) else False,
lambda f, v: f.write(b if v is None else struct.pack("B", v)))),
]
all_items = list(STREAMER_FUNCTIONS.items())
all_items.extend(more_parsing)
return all_items | Return the standard parsing functions for a given Block and Tx class.
The return value is expected to be used with the standard_streamer function. |
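To see why parse_int_6 and stream_int_6 are inverses, here is a stand-alone round trip using the corrected helpers (zero-padding 6 bytes to 8 so a little-endian "<Q" can decode them).

import io
import struct

def stream_int_6(f, v):
    # Write the low 6 bytes of a little-endian unsigned 64-bit integer.
    f.write(struct.pack("<Q", v)[:6])

def parse_int_6(f):
    # Read the 6 bytes back and zero-pad to 8 so "<Q" can decode them.
    return struct.unpack("<Q", f.read(6) + b"\0\0")[0]

buf = io.BytesIO()
stream_int_6(buf, 1234567890123)
buf.seek(0)
assert parse_int_6(buf) == 1234567890123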
4,890 | def save(self, index=None, force=False):
editorstack = self.get_current_editorstack()
return editorstack.save(index=index, force=force) | Save file |
4,891 | def pverb(self, *args, **kwargs):
if not self.verbose:
return
self.pstd(*args, **kwargs) | Console verbose message to STDOUT |
4,892 | def createPREMISEventXML(eventType, agentIdentifier, eventDetail, eventOutcome,
outcomeDetail=None, eventIdentifier=None,
linkObjectList=[], eventDate=None):
eventXML = etree.Element(PREMIS + "event", nsmap=PREMIS_NSMAP)
eventIDXML = etree.SubElement(eventXML, PREMIS + "eventIdentifier")
eventTypeXML = etree.SubElement(eventXML, PREMIS + "eventType")
eventTypeXML.text = eventType
eventIDTypeXML = etree.SubElement(
eventIDXML, PREMIS + "eventIdentifierType"
)
eventIDTypeXML.text = \
"http://purl.org/net/untl/vocabularies/identifier-qualifiers/"  # closing quote restored; trailing fragment truncated in the source dump
eventIDValueXML = etree.SubElement(
eventIDXML, PREMIS + "eventIdentifierValue"
)
if eventIdentifier:
eventIDValueXML.text = eventIdentifier
else:
eventIDValueXML.text = uuid.uuid4().hex
eventDateTimeXML = etree.SubElement(eventXML, PREMIS + "eventDateTime")
if eventDate is None:
eventDateTimeXML.text = xsDateTime_format(datetime.utcnow())
else:
eventDateTimeXML.text = xsDateTime_format(eventDate)
eventDetailXML = etree.SubElement(eventXML, PREMIS + "eventDetail")
eventDetailXML.text = eventDetail
eventOutcomeInfoXML = etree.SubElement(
eventXML, PREMIS + "eventOutcomeInformation"
)
eventOutcomeXML = etree.SubElement(
eventOutcomeInfoXML, PREMIS + "eventOutcome"
)
eventOutcomeXML.text = eventOutcome
if outcomeDetail:
eventOutcomeDetailXML = etree.SubElement(
eventOutcomeInfoXML, PREMIS + "eventOutcomeDetail"
)
eventOutcomeDetailNoteXML = etree.SubElement(
eventOutcomeDetailXML, PREMIS + "eventOutcomeDetailNote"
)
eventOutcomeDetailNoteXML.text = outcomeDetail
linkAgentIDXML = etree.SubElement(
eventXML, PREMIS + "linkingAgentIdentifier")
linkAgentIDTypeXML = etree.SubElement(
linkAgentIDXML, PREMIS + "linkingAgentIdentifierType"
)
linkAgentIDTypeXML.text = \
"http://purl.org/net/untl/vocabularies/identifier-qualifiers/"  # closing quote restored; trailing fragment truncated in the source dump
linkAgentIDValueXML = etree.SubElement(
linkAgentIDXML, PREMIS + "linkingAgentIdentifierValue"
)
linkAgentIDValueXML.text = agentIdentifier
linkAgentIDRoleXML = etree.SubElement(
linkAgentIDXML, PREMIS + "linkingAgentRole"
)
linkAgentIDRoleXML.text = \
"http://purl.org/net/untl/vocabularies/linkingAgentRoles/"  # closing quote restored; trailing fragment truncated in the source dump
for linkObject in linkObjectList:
linkObjectIDXML = etree.SubElement(
eventXML, PREMIS + "linkingObjectIdentifier"
)
linkObjectIDTypeXML = etree.SubElement(
linkObjectIDXML, PREMIS + "linkingObjectIdentifierType"
)
linkObjectIDTypeXML.text = linkObject[1]
linkObjectIDValueXML = etree.SubElement(
linkObjectIDXML, PREMIS + "linkingObjectIdentifierValue"
)
linkObjectIDValueXML.text = linkObject[0]
if linkObject[2]:
linkObjectRoleXML = etree.SubElement(
linkObjectIDXML, PREMIS + "linkingObjectRole"
)
linkObjectRoleXML.text = linkObject[2]
return eventXML | Actually create our PREMIS Event XML |
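A minimal lxml sketch of the namespace-qualified element building used above; the PREMIS namespace URI and tag values shown here are assumptions, so treat this only as an illustration of Element/SubElement with an nsmap.

from lxml import etree
import uuid

PREMIS_NS = "info:lc/xmlns/premis-v2"      # assumed namespace URI, not taken from the function above
PREMIS = "{%s}" % PREMIS_NS
NSMAP = {"premis": PREMIS_NS}

event = etree.Element(PREMIS + "event", nsmap=NSMAP)
ident = etree.SubElement(event, PREMIS + "eventIdentifier")
etree.SubElement(ident, PREMIS + "eventIdentifierValue").text = uuid.uuid4().hex
etree.SubElement(event, PREMIS + "eventType").text = "fixity check"  # example value only
print(etree.tostring(event, pretty_print=True).decode())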
4,893 | def get_instance(self, payload):
return NotificationInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Build an instance of NotificationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.notification.NotificationInstance
:rtype: twilio.rest.api.v2010.account.notification.NotificationInstance |
4,894 | def _update_port_locations(self, initial_coordinates):
particles = list(self.particles())
for port in self.all_ports():
if port.anchor:
idx = particles.index(port.anchor)
shift = particles[idx].pos - initial_coordinates[idx]
port.translate(shift) | Adjust port locations after particles have moved
Compares the locations of Particles between 'self' and an array of
reference coordinates. Shifts Ports in accordance with how far anchors
have been moved. This conserves the location of Ports with respect to
their anchor Particles, but does not conserve the orientation of Ports
with respect to the molecule as a whole.
Parameters
----------
initial_coordinates : np.ndarray, shape=(n, 3), dtype=float
Reference coordinates to use for comparing how far anchor Particles
have shifted. |
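The shift computation reduces to simple vector arithmetic; this toy numpy example mirrors it without mbuild's Compound/Port objects (all coordinates are invented).

import numpy as np

initial = np.array([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0]])          # reference particle positions
current = np.array([[0.5, 0.0, 0.0],
                    [1.5, 0.5, 0.0]])          # positions after some move
anchor_idx = 1                                 # port anchored to the second particle
shift = current[anchor_idx] - initial[anchor_idx]
port_pos = np.array([1.0, 0.2, 0.0]) + shift   # translate the port by the anchor's shift
print(shift, port_pos)                         # [0.5 0.5 0. ] [1.5 0.7 0. ]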
4,895 | def remove_outcome_hook(self, outcome_id):
for transition_id in list(self.transitions.keys()):
transition = self.transitions[transition_id]
if transition.to_outcome == outcome_id and transition.to_state == self.state_id:
self.remove_transition(transition_id) | Removes internal transition going to the outcome |
4,896 | def _metric_when_multiplied_with_sig_vec(self, sig):
return dot((self.B * self.D**-1.).T * sig, self.B * self.D) | return D^-1 B^T diag(sig) B D as a measure for
C^-1/2 diag(sig) C^1/2
:param sig: a vector "used" as diagonal matrix
:return: |
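The broadcasting in the one-liner above is easy to misread; this numpy check confirms it equals the explicit matrix product D^-1 B^T diag(sig) B D.

import numpy as np

rng = np.random.default_rng(0)
n = 4
B, _ = np.linalg.qr(rng.standard_normal((n, n)))    # orthogonal eigenbasis
D = rng.uniform(0.5, 2.0, n)                        # scaling vector (sqrt of eigenvalues)
sig = rng.uniform(0.1, 1.0, n)

lhs = np.dot((B * D**-1.).T * sig, B * D)           # expression used in the method
rhs = np.diag(D**-1.) @ B.T @ np.diag(sig) @ B @ np.diag(D)
print(np.allclose(lhs, rhs))                        # True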
4,897 | def collect_modules(self):
try:
res = {}
m = sys.modules
for k in m:
# skip dotted submodule names; the guard literal here was stripped in this dump and is a plausible reconstruction
if "." in k:
continue
if m[k]:
try:
d = m[k].__dict__
if "version" in d and d["version"]:
res[k] = self.jsonable(d["version"])
elif "__version__" in d and d["__version__"]:
res[k] = self.jsonable(d["__version__"])
else:
res[k] = get_distribution(k).version
except DistributionNotFound:
pass
except Exception:
logger.debug("collect_modules: could not process module: %s" % k)
except Exception:
logger.debug("collect_modules", exc_info=True)
else:
return res | Collect up the list of modules in use |
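A modernized sketch of the same version-collection logic using importlib.metadata instead of pkg_resources; the submodule filter and error handling here are assumptions, not the instana implementation.

import sys
from importlib.metadata import version, PackageNotFoundError

def module_versions_sketch():
    # Prefer the module's own __version__, fall back to installed
    # distribution metadata, and silently skip everything else.
    found = {}
    for name, mod in list(sys.modules.items()):
        if mod is None or "." in name:
            continue
        ver = getattr(mod, "__version__", None)
        if ver is None:
            try:
                ver = version(name)
            except PackageNotFoundError:
                continue
        found[name] = str(ver)
    return found

print(list(module_versions_sketch().items())[:5])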
4,898 | def cross_validate(self, ax):
cdpp_opt = self.get_cdpp_arr()
for b, brkpt in enumerate(self.breakpoints):
log.info("Cross-validating chunk %d/%d..." %
(b + 1, len(self.breakpoints)))
m = self.get_masked_chunk(b)
time = self.time[m]
flux = self.fraw[m]
ferr = self.fraw_err[m]
med = np.nanmedian(self.fraw)
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(time, ferr)
masks = list(Chunks(np.arange(0, len(time)),
len(time) // self.cdivs))
pre_v = [self.cv_precompute(mask, b) for mask in masks]
log_lam_opt = np.log10(self.lam[b])
scatter_opt = self.validation_scatter(
log_lam_opt, b, masks, pre_v, gp, flux, time, med)
log.info("Iter 0/%d: " % (self.piter) +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam_opt]),
scatter_opt))
for p in range(self.piter):
log_lam = np.array(
np.log10(self.lam[b])) * \
(1 + self.ppert * np.random.randn(len(self.lam[b])))
scatter = self.validation_scatter(
log_lam, b, masks, pre_v, gp, flux, time, med)
log.info("Initializing at: " +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam]), scatter))
log_lam, scatter, _, _, _, _ = \
fmin_powell(self.validation_scatter, log_lam,
args=(b, masks, pre_v, gp, flux, time, med),
maxfun=self.pmaxf, disp=False,
full_output=True)
tmp = np.array(self.lam[b])
self.lam[b] = 10 ** log_lam
self.compute()
cdpp = self.get_cdpp_arr()[b]
self.lam[b] = tmp
if cdpp < cdpp_opt[b]:
cdpp_opt[b] = cdpp
log_lam_opt = log_lam
log.info("Iter %d/%d: " % (p + 1, self.piter) +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam]), scatter))
log.info("Found minimum: logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam_opt]),
scatter_opt))
self.lam[b] = 10 ** log_lam_opt
ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1)) | Performs the cross-validation step. |
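The Chunks helper used for the validation masks is not shown here; numpy's array_split produces an equivalent partition of the time indices, as this small sketch illustrates.

import numpy as np

time_len, cdivs = 20, 4
indices = np.arange(time_len)
masks = np.array_split(indices, cdivs)      # one held-out mask per division
for m in masks:
    train = np.setdiff1d(indices, m)        # points used to fit this fold
    print(m, len(train))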
4,899 | def get_products(self):
products = set()
for _, _, _, react, _ in self.get_kinks():
products = products.union(set([k.reduced_formula
for k in react.products]))
return list(products) | List of formulas of potential products. E.g., ['Li','O2','Mn']. |