_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 31
13.1k
| language
stringclasses 1
value | meta_information
dict |
---|---|---|---|---|---|
q2900
|
Asset.new_withdraw_ong_transaction
|
train
|
def new_withdraw_ong_transaction(self, b58_claimer_address: str, b58_recv_address: str, amount: int,
b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction:
"""
This interface is used to generate a Transaction object that
allow one account to withdraw an amount of ong and transfer them to receive address.
:param b58_claimer_address: a base58 encode address which is used to indicate who is the claimer.
:param b58_recv_address: a base58 encode address which is used to indicate who receive the claimed ong.
:param amount: the amount of asset that will be claimed.
:param b58_payer_address: a base58 encode address which indicate who will pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: a Transaction object which can be used for withdraw ong.
"""
if not isinstance(b58_claimer_address, str) or not isinstance(b58_recv_address, str) or not isinstance(
b58_payer_address, str):
raise SDKException(ErrorCode.param_err('the data type of base58 encode address should be the string.'))
if len(b58_claimer_address) != 34 or len(b58_recv_address) != 34 or len(b58_payer_address) != 34:
raise SDKException(ErrorCode.param_err('the length of base58 encode address should be 34 bytes.'))
if amount <= 0:
raise SDKException(ErrorCode.other_error('the amount should be greater than than zero.'))
|
python
|
{
"resource": ""
}
|
q2901
|
Asset.transfer
|
train
|
def transfer(self, asset: str, from_acct: Account, b58_to_address: str, amount: int, payer: Account,
gas_limit: int, gas_price: int):
"""
This interface is used to send a transfer transaction that only for ONT or ONG.
:param asset: a string which is used to indicate which asset we want to transfer.
:param from_acct: an Account object which indicates where the asset comes from.
:param b58_to_address: a base58 encode address which indicate where the asset to.
:param amount: the amount of asset that will be transferred.
:param payer: an Account object which indicates who will pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
|
python
|
{
"resource": ""
}
|
q2902
|
Asset.withdraw_ong
|
train
|
def withdraw_ong(self, claimer: Account, b58_recv_address: str, amount: int, payer: Account,
gas_limit: int, gas_price: int) -> str:
"""
This interface is used to withdraw an amount of ong and transfer it to the receive address.
:param claimer: the owner of ong that remained to claim.
:param b58_recv_address: the address that received the ong.
:param amount: the amount of ong want to claim.
:param payer: an Account class that used to pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: hexadecimal transaction hash value.
"""
if claimer is None:
raise SDKException(ErrorCode.param_err('the claimer should not be None.'))
if payer is None:
raise SDKException(ErrorCode.param_err('the payer should not be None.'))
if amount <= 0:
raise SDKException(ErrorCode.other_error('the amount should be greater than than zero.'))
if gas_price < 0:
raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.'))
|
python
|
{
"resource": ""
}
|
q2903
|
Asset.approve
|
train
|
def approve(self, asset, sender: Account, b58_recv_address: str, amount: int, payer: Account, gas_limit: int,
gas_price: int) -> str:
"""
This is an interface used to send an approve transaction
which allows the receiver to spend an amount of ONT or ONG asset in the sender's account.
:param asset: a string which is used to indicate what asset we want to approve.
:param sender: an Account class that send the approve transaction.
:param b58_recv_address: a base58 encode address which indicate where the approve to.
:param amount: the amount of asset want to approve.
:param payer: an Account class that used to pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: hexadecimal transaction hash value.
"""
if sender is None:
raise SDKException(ErrorCode.param_err('the sender should not be None.'))
if payer is None:
raise SDKException(ErrorCode.param_err('the payer should not be None.'))
if amount <= 0:
raise SDKException(ErrorCode.other_error('the amount should be greater than than zero.'))
if gas_price < 0:
|
python
|
{
"resource": ""
}
|
q2904
|
WalletData.remove_account
|
train
|
def remove_account(self, address: str):
"""
This interface is used to remove account from WalletData.
:param address: a string address.
|
python
|
{
"resource": ""
}
|
q2905
|
WalletData.set_default_account_by_index
|
train
|
def set_default_account_by_index(self, index: int):
"""
This interface is used to set default account by given index.
:param index: an int value that indicate the account object in account list.
"""
if index >= len(self.accounts):
raise SDKException(ErrorCode.param_error)
|
python
|
{
"resource": ""
}
|
q2906
|
WalletData.set_default_account_by_address
|
train
|
def set_default_account_by_address(self, b58_address: str):
"""
This interface is used to set default account by given base58 encode address.
:param b58_address: a base58 encode address.
"""
flag = True
index = -1
for acct in self.accounts:
index += 1
if acct.b58_address == b58_address:
flag = False
break
|
python
|
{
"resource": ""
}
|
q2907
|
WalletData.set_default_identity_by_index
|
train
|
def set_default_identity_by_index(self, index: int):
"""
This interface is used to set the default identity by a given index value.
:param index: an int value that indicates the position of an identity object in the identity list.
"""
identities_len = len(self.identities)
if index >= identities_len:
|
python
|
{
"resource": ""
}
|
q2908
|
read_gtf
|
train
|
def read_gtf(
filepath_or_buffer,
expand_attribute_column=True,
infer_biotype_column=False,
column_converters={},
usecols=None,
features=None,
chunksize=1024 * 1024):
"""
Parse a GTF into a dictionary mapping column names to sequences of values.
Parameters
----------
filepath_or_buffer : str or buffer object
Path to GTF file (may be gzip compressed) or buffer object
such as StringIO
expand_attribute_column : bool
Replace strings of semi-colon separated key-value values in the
'attribute' column with one column per distinct key, with a list of
values for each row (using None for rows where key didn't occur).
infer_biotype_column : bool
Due to the annoying ambiguity of the second GTF column across multiple
Ensembl releases, figure out if an older GTF's source column is actually
the gene_biotype or transcript_biotype.
column_converters : dict, optional
Dictionary mapping column names to conversion functions. Will replace
empty strings with None and otherwise passes them to given conversion
function.
usecols : list of str or None
Restrict which columns are loaded to the given set. If None, then
load all columns.
features : set of str or None
Drop rows which aren't one of the features in the supplied set
|
python
|
{
"resource": ""
}
|
q2909
|
create_missing_features
|
train
|
def create_missing_features(
dataframe,
unique_keys={},
extra_columns={},
missing_value=None):
"""
Helper function used to construct a missing feature such as 'transcript'
or 'gene'. Some GTF files only have 'exon' and 'CDS' entries, but have
transcript_id and gene_id annotations which allow us to construct those
missing features.
Parameters
----------
dataframe : pandas.DataFrame
Should contain at least the core GTF columns, such as "seqname",
"start", and "end"
unique_keys : dict
Mapping from feature names to the name of the column which should
act as a unique key for that feature. Example: {"gene": "gene_id"}
extra_columns : dict
By default the constructed feature row will include only the 8
core columns and its unique key. Any other columns that should
be included should be associated with the feature name in this
dict.
missing_value : any
Which value to fill in for columns that we don't infer values for.
Returns original dataframe along with all extra rows created for missing
features.
"""
extra_dataframes = []
existing_features = set(dataframe["feature"])
existing_columns = set(dataframe.keys())
for (feature_name, groupby_key) in unique_keys.items():
if feature_name in existing_features:
logging.info(
"Feature '%s' already exists in GTF data" % feature_name)
continue
logging.info("Creating rows for missing feature '%s'" % feature_name)
# don't include rows where the groupby key was missing
empty_key_values = dataframe[groupby_key].map(
lambda x: x == "" or x is None)
row_groups = dataframe[~empty_key_values].groupby(groupby_key)
# Each group corresponds to a unique feature entry for which the
# other columns may or may not be uniquely defined. Start off by
# assuming the values for every column are missing and fill them in
# where possible.
feature_values = OrderedDict([
(column_name, [missing_value] * row_groups.ngroups)
for column_name in dataframe.keys()
])
# User specifies which non-required columns should we try to infer
# values for
feature_columns = list(extra_columns.get(feature_name, []))
for i, (feature_id, group) in enumerate(row_groups):
# fill in the required columns by assuming that this feature
# is the union of all intervals of other features that were
# tagged with its unique ID (e.g. union of exons which had a
|
python
|
{
"resource": ""
}
|
q2910
|
SyncClient.get_action
|
train
|
def get_action(self, key):
"""
returns the action to perform on this key based on its
state before the last sync.
"""
index_local_timestamp = self.get_index_local_timestamp(key)
real_local_timestamp = self.get_real_local_timestamp(key)
|
python
|
{
"resource": ""
}
|
q2911
|
LocalSyncClient.lock
|
train
|
def lock(self, timeout=10):
"""
Advisory lock.
Use to ensure that only one LocalSyncClient is working on the Target at the same time.
"""
logger.debug("Locking %s", self.lock_file)
if not os.path.exists(self.lock_file):
|
python
|
{
"resource": ""
}
|
q2912
|
LocalSyncClient.unlock
|
train
|
def unlock(self):
"""
Unlock the active advisory lock.
"""
logger.debug("Releasing lock %s", self.lock_file)
self._lock.release()
|
python
|
{
"resource": ""
}
|
q2913
|
ProxyMetaClass._no_proxy
|
train
|
def _no_proxy(method):
"""
Returns a wrapped version of `method`, such that proxying is turned off
during the method call.
"""
@wraps(method)
def wrapper(self, *args, **kwargs):
notproxied = _oga(self, "__notproxied__")
_osa(self, "__notproxied__", True)
|
python
|
{
"resource": ""
}
|
q2914
|
Proxy._should_proxy
|
train
|
def _should_proxy(self, attr):
"""
Determines whether `attr` should be looked up on the proxied object, or
the proxy itself.
"""
if attr in type(self).__notproxied__:
|
python
|
{
"resource": ""
}
|
q2915
|
Proxy.add_proxy_meth
|
train
|
def add_proxy_meth(cls, name, func, arg_pos=0):
"""
Add a method `name` to the class, which returns the value of `func`,
called with the proxied value inserted at `arg_pos`
"""
@wraps(func)
def proxied(self, *args, **kwargs):
|
python
|
{
"resource": ""
}
|
q2916
|
load_uri
|
train
|
def load_uri(uri, base_uri=None, loader=None, jsonschema=False, load_on_repr=True):
"""
Load JSON data from ``uri`` with JSON references proxied to their referent
data.
:param uri: URI to fetch the JSON from
:param kwargs: This function takes any of the keyword arguments from
:meth:`JsonRef.replace_refs`
"""
if loader is None:
|
python
|
{
"resource": ""
}
|
q2917
|
JsonRef.resolve_pointer
|
train
|
def resolve_pointer(self, document, pointer):
"""
Resolve a json pointer ``pointer`` within the referenced ``document``.
:argument document: the referent document
:argument str pointer: a json pointer URI fragment to resolve within it
"""
# Do only split at single forward slashes which are not prefixed by a caret
parts = re.split(r"(?<!\^)/", unquote(pointer.lstrip("/"))) if pointer else []
for part in parts:
# Restore escaped slashes and carets
replacements = {r"^/": r"/", r"^^": r"^"}
part = re.sub(
"|".join(re.escape(key) for key in replacements.keys()),
lambda k: replacements[k.group(0)],
part,
)
if isinstance(document, Sequence):
|
python
|
{
"resource": ""
}
|
q2918
|
dump
|
train
|
def dump(obj, fp, container_count=False, sort_keys=False, no_float32=True, default=None):
"""Writes the given object as UBJSON to the provided file-like object
Args:
obj: The object to encode
fp: write([size])-able object
container_count (bool): Specify length for container types (including
for empty ones). This can aid decoding speed
depending on implementation but requires a bit
more space and encoding speed could be reduced
if getting length of any of the containers is
expensive.
sort_keys (bool): Sort keys of mappings
no_float32 (bool): Never use float32 to store float numbers (other than
for zero). Disabling this might save space at the
loss of precision.
default (callable): Called for objects which cannot be serialised.
Should return a UBJSON-encodable version of the
object or raise an EncoderException.
Raises:
EncoderException: If an encoding failure occurred.
The following Python types and interfaces (ABCs) are supported (as are any
subclasses):
+------------------------------+-----------------------------------+
| Python | UBJSON |
+==============================+===================================+
| (3) str | string |
| (2) unicode | |
+------------------------------+-----------------------------------+
| None | null |
+------------------------------+-----------------------------------+
| bool | true, false |
+------------------------------+-----------------------------------+
| (3) int | uint8, int8, int16, int32, int64, |
| (2) int, long | high_precision |
+------------------------------+-----------------------------------+
| float | float32, float64, high_precision |
+------------------------------+-----------------------------------+
| Decimal | high_precision
|
python
|
{
"resource": ""
}
|
q2919
|
_resolve_version
|
train
|
def _resolve_version(version):
"""
Resolve LATEST version
"""
if version is not LATEST:
return version
resp = urlopen('https://pypi.python.org/pypi/setuptools/json')
with contextlib.closing(resp):
try:
charset = resp.info().get_content_charset()
except Exception:
|
python
|
{
"resource": ""
}
|
q2920
|
load
|
train
|
def load(fp, no_bytes=False, object_hook=None, object_pairs_hook=None, intern_object_keys=False):
"""Decodes and returns UBJSON from the given file-like object
Args:
fp: read([size])-able object
no_bytes (bool): If set, typed UBJSON arrays (uint8) will not be
converted to a bytes instance and instead treated like
any other array (i.e. result in a list).
object_hook (callable): Called with the result of any object literal
decoded (instead of dict).
object_pairs_hook (callable): Called with the result of any object
literal decoded with an ordered list of
pairs (instead of dict). Takes precedence
over object_hook.
intern_object_keys (bool): If set, object keys are interned which can
provide a memory saving when many repeated
keys are used. NOTE: This is not supported
in Python2 (since interning does not apply
to unicode) and will be ignored.
Returns:
Decoded object
Raises:
DecoderException: If a decoding failure occurred.
UBJSON types are mapped to Python types as follows. Numbers in brackets
denote Python version.
+----------------------------------+---------------+
| UBJSON | Python |
|
python
|
{
"resource": ""
}
|
q2921
|
TSRequest.check_error_code
|
train
|
def check_error_code(self):
"""
For CredSSP version of 3 or newer, the server can respond with an
NtStatus error code with details of what error occurred. This method
will check if the error code exists and throws an NTStatusException
if it is not STATUS_SUCCESS.
"""
# start off with STATUS_SUCCESS as a baseline
status = NtStatusCodes.STATUS_SUCCESS
error_code = self['errorCode']
if error_code.isValue:
# ASN.1 Integer
|
python
|
{
"resource": ""
}
|
q2922
|
GSSAPIContext.get_mechs_available
|
train
|
def get_mechs_available():
"""
Returns a list of auth mechanisms that are available to the local
GSSAPI instance. Because we are interacting with Windows, we only
care if SPNEGO, Kerberos and NTLM are available where NTLM is the
only wildcard that may not be available by default.
The only NTLM implementation that works properly is gss-ntlmssp and
part of this test is to verify the gss-ntlmssp OID
GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH is implemented which is required
for SPNEGO and NTLM to work properly.
:return: list - A list of supported mechs available in the installed
version of GSSAPI
"""
ntlm_oid = GSSAPIContext._AUTH_MECHANISMS['ntlm']
ntlm_mech = gssapi.OID.from_int_seq(ntlm_oid)
# GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH
# github.com/simo5/gss-ntlmssp/blob/master/src/gssapi_ntlmssp.h#L68
reset_mech = gssapi.OID.from_int_seq("1.3.6.1.4.1.7165.655.1.3")
try:
# we don't actually care about the account used here so just use
# a random username and password
ntlm_context = GSSAPIContext._get_security_context(
gssapi.NameType.user,
ntlm_mech,
"http@server",
|
python
|
{
"resource": ""
}
|
q2923
|
CredSSPContext.wrap
|
train
|
def wrap(self, data):
"""
Encrypts the data in preparation for sending to the server. The data is
encrypted using the TLS channel negotiated between the client and the
server.
:param data: a byte string of data to encrypt
:return: a byte string of the encrypted data
"""
length = self.tls_connection.send(data)
encrypted_data = b''
counter = 0
|
python
|
{
"resource": ""
}
|
q2924
|
CredSSPContext.unwrap
|
train
|
def unwrap(self, encrypted_data):
"""
Decrypts the data send by the server using the TLS channel negotiated
between the client and the server.
:param encrypted_data: the byte string of the encrypted data
:return: a byte string of the decrypted data
"""
length = self.tls_connection.bio_write(encrypted_data)
data = b''
counter = 0
while True:
try:
|
python
|
{
"resource": ""
}
|
q2925
|
CredSSPContext._get_subject_public_key
|
train
|
def _get_subject_public_key(cert):
"""
Returns the SubjectPublicKey asn.1 field of the SubjectPublicKeyInfo
field of the server's certificate. This is used in the server
verification steps to thwart MitM attacks.
:param cert: X509 certificate from pyOpenSSL .get_peer_certificate()
:return: byte string of the asn.1 DER encoded SubjectPublicKey field
"""
|
python
|
{
"resource": ""
}
|
q2926
|
_KindleCloudReaderBrowser._to_reader_home
|
train
|
def _to_reader_home(self):
"""Navigate to the Cloud Reader library page.
Raises:
BrowserError: If the KCR homepage could not be loaded.
ConnectionError: If there was a connection error.
"""
# NOTE: Prevents QueryInterface error caused by getting a URL
# while switched to an iframe
self.switch_to_default_content()
self.get(_KindleCloudReaderBrowser._CLOUD_READER_URL)
|
python
|
{
"resource": ""
}
|
q2927
|
_KindleCloudReaderBrowser._login
|
train
|
def _login(self, max_tries=2):
"""Logs in to Kindle Cloud Reader.
Args:
max_tries: The maximum number of login attempts that will be made.
Raises:
BrowserError: If method called when browser not at a signin URL.
LoginError: If login unsuccessful after `max_tries` attempts.
"""
if not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL):
raise BrowserError(
'Current url "%s" is not a signin url ("%s")' %
(self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL))
email_field_loaded = lambda br: br.find_elements_by_id('ap_email')
self._wait().until(email_field_loaded)
tries = 0
while tries < max_tries:
# Enter the username
email_elem = self.find_element_by_id('ap_email')
email_elem.clear()
email_elem.send_keys(self._uname)
# Enter the password
pword_elem = self.find_element_by_id('ap_password')
|
python
|
{
"resource": ""
}
|
q2928
|
_KindleCloudReaderBrowser._to_reader_frame
|
train
|
def _to_reader_frame(self):
"""Navigate to the KindleReader iframe."""
reader_frame = 'KindleReaderIFrame'
frame_loaded = lambda br: br.find_elements_by_id(reader_frame)
self._wait().until(frame_loaded)
|
python
|
{
"resource": ""
}
|
q2929
|
_KindleCloudReaderBrowser._wait_for_js
|
train
|
def _wait_for_js(self):
"""Wait for the Kindle Cloud Reader JS modules to initialize.
These modules provide the interface used to execute API queries.
"""
# Wait for the Module Manager to load
mod_mgr_script = ur"return window.hasOwnProperty('KindleModuleManager');"
mod_mgr_loaded = lambda br: br.execute_script(mod_mgr_script)
self._wait(5).until(mod_mgr_loaded)
# Wait for the DB Client to load
db_client_script = dedent(ur"""
var done = arguments[0];
if (!window.hasOwnProperty('KindleModuleManager') ||
!KindleModuleManager
.isModuleInitialized(Kindle.MODULE.DB_CLIENT)) {
|
python
|
{
"resource": ""
}
|
q2930
|
KindleCloudReaderAPI._get_api_call
|
train
|
def _get_api_call(self, function_name, *args):
"""Runs an api call with javascript-formatted arguments.
Args:
function_name: The name of the KindleAPI call to run.
*args: Javascript-formatted arguments to pass to the API call.
Returns:
The result of the API call.
Raises:
APIError: If the API call fails or times out.
"""
api_call = dedent("""
var done = arguments[0];
KindleAPI.%(api_call)s(%(args)s).always(function(a) {
done(a);
});
""") % {
|
python
|
{
"resource": ""
}
|
q2931
|
KindleCloudReaderAPI.get_book_metadata
|
train
|
def get_book_metadata(self, asin):
"""Returns a book's metadata.
Args:
asin: The ASIN of the book to be queried.
Returns:
|
python
|
{
"resource": ""
}
|
q2932
|
KindleCloudReaderAPI.get_book_progress
|
train
|
def get_book_progress(self, asin):
"""Returns the progress data available for a book.
NOTE: A summary of the two progress formats can be found in the
docstring for `ReadingProgress`.
Args:
asin: The asin of the book to be queried.
Returns:
A `ReadingProgress` instance
|
python
|
{
"resource": ""
}
|
q2933
|
KindleCloudReaderAPI.get_library_progress
|
train
|
def get_library_progress(self):
"""Returns the reading progress for all books in the kindle library.
Returns:
A mapping of ASINs to `ReadingProgress` instances corresponding to the
books in the current user's library.
|
python
|
{
"resource": ""
}
|
q2934
|
KindleCloudReaderAPI.get_instance
|
train
|
def get_instance(*args, **kwargs):
"""Context manager for an instance of `KindleCloudReaderAPI`."""
inst = KindleCloudReaderAPI(*args, **kwargs)
try:
|
python
|
{
"resource": ""
}
|
q2935
|
load_config
|
train
|
def load_config(path):
"""
Loads configuration from a path.
Path can be a json file, or a directory containing config.json
and zero or more *.txt files with word lists or phrase lists.
Returns config dict.
Raises InitializationError when something is wrong.
"""
path = os.path.abspath(path)
if os.path.isdir(path):
config, wordlists = _load_data(path)
elif os.path.isfile(path):
config = _load_config(path)
wordlists = {}
else:
raise InitializationError('File or directory not found: {0}'.format(path))
for name, wordlist in wordlists.items():
if name in config:
|
python
|
{
"resource": ""
}
|
q2936
|
_load_wordlist
|
train
|
def _load_wordlist(name, stream):
"""
Loads list of words or phrases from file.
Returns "words" or "phrases" dictionary, the same as used in config.
Raises Exception if file is missing or invalid.
"""
items = []
max_length = None
multiword = False
multiword_start = None
number_of_words = None
for i, line in enumerate(stream, start=1):
line = line.strip()
if not line or line.startswith('#'):
continue
# Is it an option line, e.g. 'max_length = 10'?
if '=' in line:
if items:
raise ConfigurationError('Invalid assignment at list {!r} line {}: {!r} '
'(options must be defined before words)'
.format(name, i, line))
try:
option, option_value = _parse_option(line)
except ValueError as ex:
raise ConfigurationError('Invalid assignment at list {!r} line {}: {!r} '
'({})'
.format(name, i, line, ex))
if option == _CONF.FIELD.MAX_LENGTH:
max_length = option_value
elif option == _CONF.FIELD.NUMBER_OF_WORDS:
number_of_words = option_value
continue # pragma: no cover
# Parse words
if not multiword and _WORD_REGEX.match(line):
if max_length is not None and len(line) > max_length:
raise ConfigurationError('Word is too long at list {!r} line {}: {!r}'
.format(name, i, line))
items.append(line)
elif _PHRASE_REGEX.match(line):
if not multiword:
multiword = True
multiword_start = len(items)
phrase = tuple(line.split(' '))
if number_of_words is not None and len(phrase) != number_of_words:
raise ConfigurationError('Phrase has {} word(s) (while number_of_words={}) '
|
python
|
{
"resource": ""
}
|
q2937
|
_create_lists
|
train
|
def _create_lists(config, results, current, stack, inside_cartesian=None):
"""
An ugly recursive method to transform config dict
into a tree of AbstractNestedList.
"""
# Have we done it already?
try:
return results[current]
except KeyError:
pass
# Check recursion depth and detect loops
if current in stack:
raise ConfigurationError('Rule {!r} is recursive: {!r}'.format(stack[0], stack))
if len(stack) > 99:
raise ConfigurationError('Rule {!r} is too deep'.format(stack[0]))
# Track recursion depth
stack.append(current)
try:
# Check what kind of list we have
listdef = config[current]
list_type = listdef[_CONF.FIELD.TYPE]
# 1. List of words
if list_type == _CONF.TYPE.WORDS:
results[current] = WordList(listdef['words'])
# List of phrases
elif list_type == _CONF.TYPE.PHRASES:
results[current] = PhraseList(listdef['phrases'])
|
python
|
{
"resource": ""
}
|
q2938
|
RandomGenerator.generate
|
train
|
def generate(self, pattern=None):
"""
Generates and returns random name as a list of strings.
"""
lst = self._lists[pattern]
while True:
result = lst[self._randrange(lst.length)]
# 1. Check that there are no duplicates
# 2. Check that there are no duplicate prefixes
# 3. Check max slug length
n = len(result)
if (self._ensure_unique and len(set(result)) != n or
|
python
|
{
"resource": ""
}
|
q2939
|
RandomGenerator._dump
|
train
|
def _dump(self, stream, pattern=None, object_ids=False):
"""Dumps current tree into a text stream."""
|
python
|
{
"resource": ""
}
|
q2940
|
alter_column
|
train
|
def alter_column(conn, table, column_name, func, schema=None):
"""
Run given callable against given table and given column in activity table
jsonb data columns. This function is useful when you want to reflect type
changes in your schema to activity table.
In the following example we change the data type of User's age column from
string to integer.
::
from alembic import op
from postgresql_audit import alter_column
def upgrade():
op.alter_column(
'user',
'age',
type_=sa.Integer
)
alter_column(
op,
'user',
'age',
lambda value, activity_table: sa.cast(value, sa.Integer)
)
:param conn:
An object that is able to execute SQL (either SQLAlchemy Connection,
Engine or Alembic Operations object)
:param table:
The table to run the column name changes against
:param column_name:
Name of the column to run callable against
:param func:
A callable to run against specific column in activity table jsonb data
columns. The callable should take two parameters the jsonb value
corresponding to given column_name and activity table object.
:param schema:
Optional name of schema to use.
"""
activity_table = get_activity_table(schema=schema)
query = (
|
python
|
{
"resource": ""
}
|
q2941
|
change_column_name
|
train
|
def change_column_name(
conn,
table,
old_column_name,
new_column_name,
schema=None
):
"""
Changes given `activity` jsonb data column key. This function is useful
when you want to reflect column name changes to activity table.
::
from alembic import op
from postgresql_audit import change_column_name
def upgrade():
op.alter_column(
'my_table',
'my_column',
new_column_name='some_column'
)
change_column_name(op, 'my_table', 'my_column', 'some_column')
:param conn:
An object that is able to execute
|
python
|
{
"resource": ""
}
|
q2942
|
add_column
|
train
|
def add_column(conn, table, column_name, default_value=None, schema=None):
"""
Adds given column to `activity` table jsonb data columns.
In the following example we reflect the changes made to our schema to
activity table.
::
import sqlalchemy as sa
from alembic import op
from postgresql_audit import add_column
def upgrade():
op.add_column('article', sa.Column('created_at', sa.DateTime()))
add_column(op, 'article', 'created_at')
:param conn:
An object that is able to execute SQL (either SQLAlchemy Connection,
Engine or Alembic Operations object)
:param table:
The table to remove the column from
:param column_name:
Name of the column to add
:param default_value:
The default value of the column
:param schema:
|
python
|
{
"resource": ""
}
|
q2943
|
remove_column
|
train
|
def remove_column(conn, table, column_name, schema=None):
"""
Removes given `activity` jsonb data column key. This function is useful
when you are doing schema changes that require removing a column.
Let's say you've been using PostgreSQL-Audit for a while for a table called
article. Now you want to remove one audited column called 'created_at' from
this table.
::
from alembic import op
from postgresql_audit import remove_column
def upgrade():
op.remove_column('article', 'created_at')
remove_column(op, 'article', 'created_at')
:param conn:
An object that is able to execute SQL (either SQLAlchemy Connection,
Engine or Alembic Operations object)
:param table:
The table to
|
python
|
{
"resource": ""
}
|
q2944
|
rename_table
|
train
|
def rename_table(conn, old_table_name, new_table_name, schema=None):
"""
Renames given table in activity table. You should remember to call this
function whenever you rename a versioned table.
::
from alembic import op
from postgresql_audit import rename_table
def upgrade():
op.rename_table('article', 'article_v2')
rename_table(op, 'article', 'article_v2')
:param conn:
An object that is able to execute SQL (either SQLAlchemy Connection,
Engine or Alembic Operations object)
:param old_table_name:
The name of table to rename
:param new_table_name:
|
python
|
{
"resource": ""
}
|
q2945
|
VersioningManager.instrument_versioned_classes
|
train
|
def instrument_versioned_classes(self, mapper, cls):
"""
Collect versioned class and add it to pending_classes list.
:mapper mapper: SQLAlchemy
|
python
|
{
"resource": ""
}
|
q2946
|
VersioningManager.configure_versioned_classes
|
train
|
def configure_versioned_classes(self):
"""
Configures all versioned classes that were collected during
instrumentation process.
|
python
|
{
"resource": ""
}
|
q2947
|
pubmed_citation
|
train
|
def pubmed_citation(args=sys.argv[1:], out=sys.stdout):
"""Get a citation via the command line using a PubMed ID or PubMed URL"""
parser = argparse.ArgumentParser(
description='Get a citation using a PubMed ID or PubMed URL')
parser.add_argument('query', help='PubMed ID or PubMed URL')
parser.add_argument(
'-m', '--mini', action='store_true', help='get mini citation')
parser.add_argument(
'-e', '--email', action='store', help='set user email', default='')
|
python
|
{
"resource": ""
}
|
q2948
|
pubmed_url
|
train
|
def pubmed_url(args=sys.argv[1:], resolve_doi=True, out=sys.stdout):
"""
Get a publication URL via the command line using a PubMed ID or PubMed URL
"""
parser = argparse.ArgumentParser(
description='Get a publication URL using a PubMed ID or PubMed URL')
parser.add_argument('query', help='PubMed ID or PubMed URL')
parser.add_argument(
'-d', '--doi', action='store_false', help='get DOI URL')
parser.add_argument(
|
python
|
{
"resource": ""
}
|
q2949
|
Publication.authors_et_al
|
train
|
def authors_et_al(self, max_authors=5):
"""
Return string with a truncated author list followed by 'et al.'
"""
author_list = self._author_list
if len(author_list) <= max_authors:
authors_et_al = self.authors
else:
|
python
|
{
"resource": ""
}
|
q2950
|
Publication.parse_abstract
|
train
|
def parse_abstract(xml_dict):
"""
Parse PubMed XML dictionary to retrieve abstract.
"""
key_path = ['PubmedArticleSet', 'PubmedArticle', 'MedlineCitation',
'Article', 'Abstract', 'AbstractText']
abstract_xml = reduce(dict.get, key_path, xml_dict)
abstract_paragraphs = []
if isinstance(abstract_xml, str):
abstract_paragraphs.append(abstract_xml)
elif isinstance(abstract_xml, dict):
abstract_text = abstract_xml.get('#text')
try:
abstract_label = abstract_xml['@Label']
except KeyError:
abstract_paragraphs.append(abstract_text)
else:
abstract_paragraphs.append(
"{}: {}".format(abstract_label, abstract_text))
elif isinstance(abstract_xml, list):
for abstract_section in abstract_xml:
try:
abstract_text =
|
python
|
{
"resource": ""
}
|
q2951
|
Publication.get_pubmed_xml
|
train
|
def get_pubmed_xml(self):
"""
Use a PubMed ID to retrieve PubMed metadata in XML form.
"""
url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/' \
'efetch.fcgi?db=pubmed&rettype=abstract&id={}' \
.format(self.pmid)
try:
|
python
|
{
"resource": ""
}
|
q2952
|
Publication.set_abstract
|
train
|
def set_abstract(self, xml_dict):
"""
If record has an abstract, extract it from PubMed's XML data
|
python
|
{
"resource": ""
}
|
q2953
|
Publication.set_article_url
|
train
|
def set_article_url(self, resolve_doi=True):
"""
If record has a DOI, set article URL based on where the DOI points.
"""
if 'DOI' in self.record:
doi_url = "/".join(['http://dx.doi.org', self.record['DOI']])
if resolve_doi:
try:
response = urlopen(doi_url)
except URLError:
|
python
|
{
"resource": ""
}
|
q2954
|
Publication.set_pub_year_month_day
|
train
|
def set_pub_year_month_day(self, xml_dict):
"""
Set publication year, month, day from PubMed's XML data
"""
key_path = ['PubmedArticleSet', 'PubmedArticle', 'MedlineCitation',
'Article', 'Journal', 'JournalIssue', 'PubDate']
pubdate_xml = reduce(dict.get, key_path, xml_dict)
if isinstance(pubdate_xml, dict):
self.year = pubdate_xml.get('Year')
month_short = pubdate_xml.get('Month')
self.day = pubdate_xml.get('Day')
try:
|
python
|
{
"resource": ""
}
|
q2955
|
PubMedLookup.get_pubmed_record
|
train
|
def get_pubmed_record(pmid):
"""Get PubMed record from PubMed ID."""
handle
|
python
|
{
"resource": ""
}
|
q2956
|
Simulation._initialize
|
train
|
def _initialize(self, register_value_map=None, memory_value_map=None, default_value=None):
""" Sets the wire, register, and memory values to default or as specified.
:param register_value_map: is a map of {Register: value}.
:param memory_value_map: is a map of maps {Memory: {address: Value}}.
:param default_value: is the value that all unspecified registers and memories will
default to. If no default_value is specified, it will use the value stored in the
object (default to 0)
"""
if default_value is None:
default_value = self.default_value
# set registers to their values
reg_set = self.block.wirevector_subset(Register)
if register_value_map is not None:
for r in reg_set:
self.value[r] = self.regvalue[r] = register_value_map.get(r, default_value)
# set constants to their set values
for w in self.block.wirevector_subset(Const):
self.value[w] = w.val
assert isinstance(w.val, numbers.Integral) # for now
# set memories
|
python
|
{
"resource": ""
}
|
q2957
|
Simulation.step
|
train
|
def step(self, provided_inputs):
""" Take the simulation forward one cycle
:param provided_inputs: a dictionary mapping wirevectors to their values for this step
All input wires must be in the provided_inputs in order for the simulation
to accept these values
Example: if we have inputs named 'a' and 'x', we can call:
sim.step({'a': 1, 'x': 23}) to simulate a cycle with values 1 and 23
respectively
"""
# Check that all Input have a corresponding provided_input
input_set = self.block.wirevector_subset(Input)
supplied_inputs = set()
for i in provided_inputs:
if isinstance(i, WireVector):
name = i.name
else:
name = i
sim_wire = self.block.wirevector_by_name[name]
if sim_wire not in input_set:
raise PyrtlError(
'step provided a value for input for "%s" which is '
'not a known input ' % name)
if not isinstance(provided_inputs[i], numbers.Integral) or provided_inputs[i] < 0:
raise PyrtlError(
'step provided an input "%s" which is not a valid '
'positive integer' % provided_inputs[i])
if len(bin(provided_inputs[i]))-2 > sim_wire.bitwidth:
raise PyrtlError(
'the bitwidth for "%s" is %d, but the provided input '
'%d requires %d bits to represent'
% (name, sim_wire.bitwidth,
provided_inputs[i], len(bin(provided_inputs[i]))-2))
self.value[sim_wire] = provided_inputs[i]
supplied_inputs.add(sim_wire)
# Check that only inputs are specified, and set the values
if input_set != supplied_inputs:
for i in input_set.difference(supplied_inputs):
raise PyrtlError('Input "%s" has no
|
python
|
{
"resource": ""
}
|
q2958
|
Simulation._execute
|
train
|
def _execute(self, net):
"""Handle the combinational logic update rules for the given net.
This function, along with edge_update, defined the semantics
of the primitive ops. Function updates self.value accordingly.
"""
if net.op in 'r@':
return # registers and memory write ports have no logic function
elif net.op in self.simple_func:
argvals = (self.value[arg] for arg in net.args)
result = self.simple_func[net.op](*argvals)
elif net.op == 'c':
result = 0
for arg in net.args:
result = result << len(arg)
result = result | self.value[arg]
elif net.op == 's':
result = 0
source = self.value[net.args[0]]
for b in net.op_param[::-1]:
result = (result << 1) | (0x1 & (source >> b))
elif
|
python
|
{
"resource": ""
}
|
q2959
|
FastSimulation.step
|
train
|
def step(self, provided_inputs):
""" Run the simulation for a cycle
:param provided_inputs: a dictionary mapping WireVectors (or their names)
to their values for this step
eg: {wire: 3, "wire_name": 17}
"""
# validate_inputs
for wire, value in provided_inputs.items():
wire = self.block.get_wirevector_by_name(wire) if isinstance(wire, str) else wire
if value > wire.bitmask or value < 0:
raise PyrtlError("Wire {} has value {} which cannot be represented"
" using its bitwidth".format(wire, value))
# building the simulation data
ins = {self._to_name(wire): value for wire, value
|
python
|
{
"resource": ""
}
|
q2960
|
FastSimulation.inspect_mem
|
train
|
def inspect_mem(self, mem):
""" Get the values in a map during the current simulation cycle.
:param mem: the memory to inspect
:return: {address: value}
Note that this returns the current memory state. Modifying the dictonary
will also modify the state in the simulator
"""
|
python
|
{
"resource": ""
}
|
q2961
|
FastSimulation._arg_varname
|
train
|
def _arg_varname(self, wire):
"""
Input, Const, and Registers have special input values
"""
if isinstance(wire, (Input, Register)):
|
python
|
{
"resource": ""
}
|
q2962
|
_WaveRendererBase._render_val_with_prev
|
train
|
def _render_val_with_prev(self, w, n, current_val, symbol_len):
"""Return a string encoding the given value in a waveform.
:param w: The WireVector we are rendering to a waveform
:param n: An integer from 0 to segment_len-1
:param current_val: the value to be rendered
:param symbol_len: and integer for how big to draw the current value
Returns a string of printed length symbol_len that will draw the
representation of current_val. The input prior_val is used to
render transitions.
"""
sl = symbol_len-1
if len(w) > 1:
out = self._revstart
if current_val != self.prior_val:
out += self._x + hex(current_val).rstrip('L').ljust(sl)[:sl]
elif n == 0:
out += hex(current_val).rstrip('L').ljust(symbol_len)[:symbol_len]
else:
|
python
|
{
"resource": ""
}
|
q2963
|
SimulationTrace.add_step
|
train
|
def add_step(self, value_map):
""" Add the values in value_map to the end of the trace. """
if len(self.trace) == 0:
raise PyrtlError('error, simulation trace needs at least 1 signal to track '
|
python
|
{
"resource": ""
}
|
q2964
|
SimulationTrace.add_fast_step
|
train
|
def add_fast_step(self, fastsim):
""" Add the fastsim context to the trace. """
for wire_name in self.trace:
|
python
|
{
"resource": ""
}
|
q2965
|
SimulationTrace.print_vcd
|
train
|
def print_vcd(self, file=sys.stdout, include_clock=False):
""" Print the trace out as a VCD File for use in other tools.
:param file: file to open and output vcd dump to.
:param include_clock: boolean specifying if the implicit clk should be included.
Dumps the current trace to file as a "value change dump" file. The file parameter
defaults to _stdout_ and the include_clock defaults to True.
Examples ::
sim_trace.print_vcd()
sim_trace.print_vcd("my_waveform.vcd", include_clock=False)
"""
# dump header info
# file_timestamp = time.strftime("%a, %d %b %Y %H:%M:%S (UTC/GMT)", time.gmtime())
# print >>file, " ".join(["$date", file_timestamp, "$end"])
self.internal_names = _VerilogSanitizer('_vcd_tmp_')
for wire in self.wires_to_track:
self.internal_names.make_valid_string(wire.name)
def _varname(wireName):
""" Converts WireVector names to internal names """
return self.internal_names[wireName]
print(' '.join(['$timescale', '1ns', '$end']), file=file)
print(' '.join(['$scope', 'module logic', '$end']), file=file)
def print_trace_strs(time):
for wn in sorted(self.trace, key=_trace_sort_key):
print(' '.join([str(bin(self.trace[wn][time]))[1:], _varname(wn)]), file=file)
# dump variables
if include_clock:
print(' '.join(['$var', 'wire', '1', 'clk', 'clk', '$end']), file=file)
for wn in sorted(self.trace, key=_trace_sort_key):
|
python
|
{
"resource": ""
}
|
q2966
|
SimulationTrace.render_trace
|
train
|
def render_trace(
self, trace_list=None, file=sys.stdout, render_cls=default_renderer(),
symbol_len=5, segment_size=5, segment_delim=' ', extra_line=True):
""" Render the trace to a file using unicode and ASCII escape sequences.
:param trace_list: A list of signals to be output in the specified order.
:param file: The place to write output, default to stdout.
:param render_cls: A class that translates traces into output bytes.
:param symbol_len: The "length" of each rendered cycle in characters.
:param segment_size: Traces are broken in the segments of this number of cycles.
:param segment_delim: The character to be output between segments.
:param extra_line: A Boolean to determin if we should print a blank line between signals.
The resulting output can be viewed directly on the terminal or looked
at with "more" or "less -R" which both should handle the ASCII escape
sequences used in rendering. render_trace takes the following optional
arguments.
"""
if _currently_in_ipython():
from IPython.display import display, HTML, Javascript # pylint: disable=import-error
from .inputoutput import trace_to_html
htmlstring = trace_to_html(self, trace_list=trace_list, sortkey=_trace_sort_key)
html_elem = HTML(htmlstring)
|
python
|
{
"resource": ""
}
|
q2967
|
prioritized_mux
|
train
|
def prioritized_mux(selects, vals):
"""
Returns the value in the first wire for which its select bit is 1
:param [WireVector] selects: a list of WireVectors signaling whether
a wire should be chosen
:param [WireVector] vals: values to return when the corresponding select
value is 1
:return: WireVector
If none of the items are high, the last val is returned
"""
if len(selects) != len(vals):
raise pyrtl.PyrtlError("Number of select and val signals must match")
if len(vals) == 0:
raise pyrtl.PyrtlError("Must have a signal to mux")
if len(vals) == 1:
|
python
|
{
"resource": ""
}
|
q2968
|
demux
|
train
|
def demux(select):
"""
Demultiplexes a wire of arbitrary bitwidth
:param WireVector select: indicates which wire to set on
:return (WireVector, ...): a tuple of wires corresponding to each demultiplexed wire
"""
if len(select) == 1:
return _demux_2(select)
wires = demux(select[:-1])
|
python
|
{
"resource": ""
}
|
q2969
|
MultiSelector.finalize
|
train
|
def finalize(self):
"""
Connects the wires.
"""
self._check_finalized()
self._final = True
for dest_w, values in self.dest_instrs_info.items():
|
python
|
{
"resource": ""
}
|
q2970
|
WireVector.bitmask
|
train
|
def bitmask(self):
""" A property holding a bitmask of the same length as this WireVector.
Specifically it is an integer with a number of bits set to 1 equal to the
bitwidth of the WireVector.
It is often times useful to "mask" an integer such that it fits in the
the number of bits of a WireVector. As a convenience for this, the
|
python
|
{
"resource": ""
}
|
q2971
|
log_middleware
|
train
|
def log_middleware(store):
"""log all actions to console as they are dispatched"""
def wrapper(next_):
def log_dispatch(action):
|
python
|
{
"resource": ""
}
|
q2972
|
CompiledSimulation.inspect
|
train
|
def inspect(self, w):
"""Get the latest value of the wire given, if possible."""
if isinstance(w, WireVector):
w = w.name
try:
vals = self.tracer.trace[w]
|
python
|
{
"resource": ""
}
|
q2973
|
CompiledSimulation.run
|
train
|
def run(self, inputs):
"""Run many steps of the simulation.
The argument is a list of input mappings for each step,
and its length is the number of steps to be executed.
"""
steps = len(inputs)
# create i/o arrays of the appropriate length
ibuf_type = ctypes.c_uint64*(steps*self._ibufsz)
obuf_type = ctypes.c_uint64*(steps*self._obufsz)
ibuf = ibuf_type()
obuf = obuf_type()
# these array will be passed to _crun
self._crun.argtypes = [ctypes.c_uint64, ibuf_type, obuf_type]
# build the input array
for n, inmap in enumerate(inputs):
for w in inmap:
if isinstance(w, WireVector):
name = w.name
else:
name = w
start, count = self._inputpos[name]
start += n*self._ibufsz
val = inmap[w]
if val >= 1 << self._inputbw[name]:
raise PyrtlError(
'Wire {} has value {} which cannot be represented '
'using its bitwidth'.format(name, val))
# pack input
for pos in range(start, start+count):
ibuf[pos] = val & ((1 << 64)-1)
val >>= 64
# run the simulation
self._crun(steps, ibuf, obuf)
# save traced wires
for name in self.tracer.trace:
rname = self._probe_mapping.get(name, name)
|
python
|
{
"resource": ""
}
|
q2974
|
CompiledSimulation._traceable
|
train
|
def _traceable(self, wv):
"""Check if wv is able to be traced
If it is traceable due to a probe, record that probe in _probe_mapping.
"""
if isinstance(wv, (Input, Output)):
return True
for net in self.block.logic:
|
python
|
{
"resource": ""
}
|
q2975
|
CompiledSimulation._remove_untraceable
|
train
|
def _remove_untraceable(self):
"""Remove from the tracer those wires that CompiledSimulation cannot track.
Create _probe_mapping for wires only traceable via probes.
"""
self._probe_mapping = {}
wvs = {wv for wv in self.tracer.wires_to_track if
|
python
|
{
"resource": ""
}
|
q2976
|
CompiledSimulation._create_dll
|
train
|
def _create_dll(self):
"""Create a dynamically-linked library implementing the simulation logic."""
self._dir = tempfile.mkdtemp()
with open(path.join(self._dir, 'pyrtlsim.c'), 'w') as f:
self._create_code(lambda s: f.write(s+'\n'))
if platform.system() == 'Darwin':
shared = '-dynamiclib'
else:
shared = '-shared'
subprocess.check_call([
'gcc', '-O0', '-march=native', '-std=c99', '-m64',
shared, '-fPIC',
|
python
|
{
"resource": ""
}
|
q2977
|
CompiledSimulation._makeini
|
train
|
def _makeini(self, w, v):
"""C initializer string for a wire with a given value."""
pieces = []
for n in range(self._limbs(w)):
|
python
|
{
"resource": ""
}
|
q2978
|
CompiledSimulation._makemask
|
train
|
def _makemask(self, dest, res, pos):
"""Create a bitmask.
The value being masked is of width `res`.
Limb number `pos` of `dest` is
|
python
|
{
"resource": ""
}
|
q2979
|
CompiledSimulation._getarglimb
|
train
|
def _getarglimb(self, arg, n):
"""Get the nth limb of the given wire.
Returns '0' when the wire does not have sufficient limbs.
"""
|
python
|
{
"resource": ""
}
|
q2980
|
CompiledSimulation._clean_name
|
train
|
def _clean_name(self, prefix, obj):
"""Create a C variable name with the given prefix based
|
python
|
{
"resource": ""
}
|
q2981
|
_trivial_mult
|
train
|
def _trivial_mult(A, B):
"""
turns a multiplication into an And gate if one of the
wires is a bitwidth of 1
:param A:
:param B:
:return:
"""
if len(B) == 1:
A, B = B, A # so that we can reuse the code below :)
|
python
|
{
"resource": ""
}
|
q2982
|
tree_multiplier
|
train
|
def tree_multiplier(A, B, reducer=adders.wallace_reducer, adder_func=adders.kogge_stone):
""" Build an fast unclocked multiplier for inputs A and B using a Wallace or Dada Tree.
:param WireVector A, B: two input wires for the multiplication
:param function reducer: Reduce the tree using either a Dada recuder or a Wallace reducer
determines whether it is a Wallace tree multiplier or a Dada tree multiplier
:param function adder_func: an adder function that will be used to do the last addition
:return WireVector: The multiplied result
Delay is order logN, while area is order N^2.
"""
"""
The two tree multipliers basically works by splitting the multiplication
into a series of many additions, and it works by applying 'reductions'.
"""
|
python
|
{
"resource": ""
}
|
q2983
|
signed_tree_multiplier
|
train
|
def signed_tree_multiplier(A, B, reducer=adders.wallace_reducer, adder_func=adders.kogge_stone):
"""Same as tree_multiplier, but uses two's-complement signed integers"""
if len(A) == 1 or len(B) == 1:
raise pyrtl.PyrtlError("sign bit required, one or both wires too small")
aneg, bneg = A[-1], B[-1]
a = _twos_comp_conditional(A, aneg)
|
python
|
{
"resource": ""
}
|
q2984
|
generalized_fma
|
train
|
def generalized_fma(mult_pairs, add_wires, signed=False, reducer=adders.wallace_reducer,
adder_func=adders.kogge_stone):
"""Generated an opimitized fused multiply adder.
A generalized FMA unit that multiplies each pair of numbers in mult_pairs,
then adds the resulting numbers and and the values of the add wires all
together to form an answer. This is faster than separate adders and
multipliers because you avoid unnecessary adder structures for intermediate
representations.
:param mult_pairs: Either None (if there are no pairs to multiply) or
a list of pairs of wires to multiply:
[(mult1_1, mult1_2), ...]
:param add_wires: Either None (if there are no individual
items to add other than the mult_pairs), or a list of wires for adding on
top of the result of the pair multiplication.
:param Bool signed: Currently not supported (will be added in the future)
The default will likely be changed to True, so if you want the smallest
set of wires in the future, specify this as False
:param reducer: (advanced) The tree reducer to use
:param adder_func: (advanced) The adder to use to add the two results at the end
:return WireVector: The result WireVector
"""
# first need to figure out the max length
if mult_pairs: # Need to deal with the case when it is empty
mult_max = max(len(m[0]) + len(m[1]) - 1 for m
|
python
|
{
"resource": ""
}
|
q2985
|
net_transform
|
train
|
def net_transform(transform_func, block=None, **kwargs):
"""
Maps nets to new sets of nets according to a custom function
:param transform_func:
Function signature: func(orig_net (logicnet)) -> keep_orig_net (bool)
:return:
"""
block = working_block(block)
with set_working_block(block, True):
for
|
python
|
{
"resource": ""
}
|
q2986
|
all_nets
|
train
|
def all_nets(transform_func):
"""Decorator that wraps a net transform function"""
@functools.wraps(transform_func)
|
python
|
{
"resource": ""
}
|
q2987
|
wire_transform
|
train
|
def wire_transform(transform_func, select_types=WireVector,
exclude_types=(Input, Output, Register, Const), block=None):
"""
Maps Wires to new sets of nets and wires according to a custom function
:param transform_func: The function you want to run on all wires
Function signature: func(orig_wire (WireVector)) -> src_wire, dst_wire
src_wire is the src for the stuff you made in the transform func
and dst_wire is the sink
to indicate that the wire has not been changed, make src_wire and dst_wire both
the original wire
:param select_types: Type or Tuple of types of WireVectors to replace
:param
|
python
|
{
"resource": ""
}
|
q2988
|
all_wires
|
train
|
def all_wires(transform_func):
"""Decorator that wraps a wire transform function"""
@functools.wraps(transform_func)
|
python
|
{
"resource": ""
}
|
q2989
|
replace_wires
|
train
|
def replace_wires(wire_map, block=None):
"""
Quickly replace all wires in a block
:param {old_wire: new_wire} wire_map: mapping of old wires to
new wires
"""
block = working_block(block)
|
python
|
{
"resource": ""
}
|
q2990
|
clone_wire
|
train
|
def clone_wire(old_wire, name=None):
"""
Makes a copy of any existing wire
:param old_wire: The wire to clone
:param name: a name fo rhte new wire
Note that this function is mainly intended to be used when the
two wires are from different blocks. Making two wires with the
same name in the same block is not allowed
"""
if isinstance(old_wire, Const):
|
python
|
{
"resource": ""
}
|
q2991
|
copy_block
|
train
|
def copy_block(block=None, update_working_block=True):
"""
Makes a copy of an existing block
:param block: The block to clone. (defaults to the working block)
:return: The resulting block
"""
block_in = working_block(block)
block_out, temp_wv_map = _clone_block_and_wires(block_in)
mems = {}
for net in block_in.logic:
|
python
|
{
"resource": ""
}
|
q2992
|
_clone_block_and_wires
|
train
|
def _clone_block_and_wires(block_in):
"""
This is a generic function to copy the WireVectors for another round of
synthesis This does not split a WireVector with multiple wires.
:param block_in: The block to change
:param synth_name: a name to prepend to all new copies of a wire
:return: the resulting block and a WireVector map
"""
block_in.sanity_check() # make sure that everything is valid
|
python
|
{
"resource": ""
}
|
q2993
|
_copy_net
|
train
|
def _copy_net(block_out, net, temp_wv_net, mem_map):
"""This function makes a copy of all nets passed to it for synth uses
"""
new_args = tuple(temp_wv_net[a_arg] for a_arg in net.args)
new_dests = tuple(temp_wv_net[a_dest] for a_dest in net.dests)
if net.op in "m@": # special stuff for copying
|
python
|
{
"resource": ""
}
|
q2994
|
_get_new_block_mem_instance
|
train
|
def _get_new_block_mem_instance(op_param, mem_map, block_out):
""" gets the instance of the memory in the new block that is
associated with a memory in a old block
"""
memid, old_mem = op_param
if old_mem not in mem_map:
new_mem =
|
python
|
{
"resource": ""
}
|
q2995
|
probe
|
train
|
def probe(w, name=None):
""" Print useful information about a WireVector when in debug mode.
:param w: WireVector from which to get info
:param name: optional name for probe (defaults to an autogenerated name)
:return: original WireVector w
Probe can be inserted into a existing design easily as it returns the
original wire unmodified. For example ``y <<= x[0:3] + 4`` could be turned
into ``y <<= probe(x)[0:3] + 4`` to give visibility into both the origin of
``x`` (including the line that WireVector was originally created) and
the run-time values of ``x`` (which will be named and thus show up by
default in a trace. Likewise ``y <<= probe(x[0:3]) + 4``,
``y <<= probe(x[0:3] + 4)``, and ``probe(y) <<= x[0:3] + 4`` are all
valid uses of `probe`.
Note: `probe` does actually
|
python
|
{
"resource": ""
}
|
q2996
|
rtl_assert
|
train
|
def rtl_assert(w, exp, block=None):
""" Add hardware assertions to be checked on the RTL design.
:param w: should be a WireVector
:param Exception exp: Exception to throw when assertion fails
:param Block block: block to which the assertion should be added (default to working block)
:return: the Output wire for the assertion (can be ignored in most cases)
If at any time during execution the wire w is not `true` (i.e. asserted low)
then simulation will raise exp.
"""
block = working_block(block)
if not isinstance(w, WireVector):
raise PyrtlError('Only WireVectors can be asserted with rtl_assert')
if len(w) != 1:
raise PyrtlError('rtl_assert checks only a WireVector of bitwidth 1')
if not isinstance(exp, Exception):
raise PyrtlError('the second argument to rtl_assert must be an instance of Exception')
if isinstance(exp, KeyError):
raise
|
python
|
{
"resource": ""
}
|
q2997
|
check_rtl_assertions
|
train
|
def check_rtl_assertions(sim):
""" Checks the values in sim to see if any registers assertions fail.
:param sim: Simulation in which to check the assertions
:return: None
"""
for (w, exp) in sim.block.rtl_assert_dict.items():
try:
|
python
|
{
"resource": ""
}
|
q2998
|
wirevector_list
|
train
|
def wirevector_list(names, bitwidth=None, wvtype=WireVector):
""" Allocate and return a list of WireVectors.
:param names: Names for the WireVectors. Can be a list or single comma/space-separated string
:param bitwidth: The desired bitwidth for the resulting WireVectors.
:param WireVector wvtype: Which WireVector type to create.
:return: List of WireVectors.
Additionally, the ``names`` string can also contain an additional bitwidth specification
separated by a ``/`` in the name. This cannot be used in combination with a ``bitwidth``
value other than ``1``.
Examples: ::
wirevector_list(['name1', 'name2', 'name3'])
wirevector_list('name1, name2, name3')
wirevector_list('input1 input2 input3', bitwidth=8, wvtype=pyrtl.wire.Input)
wirevector_list('output1, output2 output3', bitwidth=3, wvtype=pyrtl.wire.Output)
wirevector_list('two_bits/2, four_bits/4, eight_bits/8')
wirevector_list(['name1', 'name2', 'name3'], bitwidth=[2, 4, 8])
"""
if isinstance(names, str):
names = names.replace(',', ' ').split()
|
python
|
{
"resource": ""
}
|
q2999
|
val_to_signed_integer
|
train
|
def val_to_signed_integer(value, bitwidth):
""" Return value as intrepreted as a signed integer under twos complement.
:param value: a python integer holding the value to convert
:param bitwidth: the length of the integer in bits to assume for conversion
Given an unsigned integer (not a wirevector!) covert that to a signed
integer. This is useful for printing and interpreting values which are
negative numbers in twos complement. ::
val_to_signed_integer(0xff, 8) == -1
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.