content (string, lengths 22–815k) | id (int64, 0–4.91M) |
---|---|
def getOrc(orcName):
"""Get an orchestra stored in the user namespace.
One can store an orchestra in the user namespace with the %%orc magic.
"""
ip = get_ipython()
return ip.user_ns["__orc"][orcName] | 5,357,500 |
def check_dwi_volume(in_dwi, in_bvec, in_bval):
"""
Check that # DWI = # B-val = # B-vec.
Raises
IOError
"""
# TODO: return a str instead of raising an error, so that the user can be informed of which file is causing the problem
import nibabel as nib
import numpy as np
bvals = np.loadtxt(in_bval)
num_b_vals = len(bvals)
bvecs = np.loadtxt(in_bvec)
_, num_b_vecs = bvecs.shape
img = nib.load(in_dwi)
_, _, _, num_dwis = img.shape
if not (num_b_vals == num_b_vecs == num_dwis):
raise IOError(
f"Number of DWIs, b-vals and b-vecs mismatch "
f"(# DWI = {num_dwis}, # B-vec = {num_b_vecs}, #B-val = {num_b_vals}) "
) | 5,357,501 |
def get_prompt_data_from_batse(grb: str, **kwargs: None) -> pd.DataFrame:
"""Get prompt emission data from BATSE. Creates a directory structure and saves the data.
Returns the data, though no further action needs to be taken by the user.
:param grb: GRB identifier, e.g., 'GRB140903A' or '140903A' are valid inputs.
:type grb: str
:param kwargs: Placeholder to prevent TypeErrors.
:type kwargs: None
:return: The processed data.
:rtype: pandas.DataFrame
"""
getter = BATSEDataGetter(grb=grb)
return getter.get_data() | 5,357,502 |
def angular_error(a, b):
"""Calculate angular error (via cosine similarity)."""
a = pitchyaw_to_vector(a) if a.shape[1] == 2 else a
b = pitchyaw_to_vector(b) if b.shape[1] == 2 else b
ab = np.sum(np.multiply(a, b), axis=1)
a_norm = np.linalg.norm(a, axis=1)
b_norm = np.linalg.norm(b, axis=1)
# Avoid zero-values (to avoid NaNs)
a_norm = np.clip(a_norm, a_min=1e-8, a_max=None)
b_norm = np.clip(b_norm, a_min=1e-8, a_max=None)
similarity = np.divide(ab, np.multiply(a_norm, b_norm))
similarity = np.clip(similarity, a_min=-1.+1e-8, a_max=1.-1e-8)
return np.degrees(np.arccos(similarity)) | 5,357,503 |
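# Editor-added usage sketch (not part of the original snippet): with 3-column
# inputs the pitchyaw conversion above is skipped, so plain unit gaze vectors
# can be passed directly; two orthogonal vectors give an error of ~90 degrees.
import numpy as np
a = np.array([[1.0, 0.0, 0.0]])
b = np.array([[0.0, 1.0, 0.0]])
print(angular_error(a, b))  # approximately [90.]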
def includeme(config):
"""
Get build Git repository directory and make it accessible
to all requests generated via Cornice
"""
# Make DB connection accessible as a request property
def _get_repos(request):
_settings = request.registry.settings
repo_dir = _settings['repo_basedir']
return repo_dir
config.add_request_method(_get_repos, 'repo_dir', reify=True) | 5,357,504 |
def add_dot_csv(filename: Union[Path, str]) -> str:
"""Adds a .csv extension to filename."""
return add_extension(filename, '.csv') | 5,357,505 |
def main():
"""
Main function.
"""
app_sid = detect_edge_sid()
if app_sid is None:
print('Edge SID not found in the registry')
return
if enable_loopback(app_sid):
print('Activation succeeded!')
else:
print('Activation failed...') | 5,357,506 |
def load_settings_from_file(filename: str) -> Dict[str, Any]:
"""Load amset configuration settings from a yaml file.
If the settings file does not contain a required parameter, the default
value will be added to the configuration.
An example file is given in *amset/examples/example_settings.yaml*.
Args:
filename: Path to settings file.
Returns:
The settings, with any missing values set according to the amset
defaults.
"""
logger.info("Loading settings from: {}".format(filename))
settings = loadfn(filename)
return validate_settings(settings) | 5,357,507 |
def test_main():
"""Executes the list of steps in parallel
Computes and returns the testcase status"""
tree = ET.parse(os.path.join(os.path.split(__file__)[0], "testcase_custom_par.xml"))
timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
with open(result_dir+'/'+'resultfile.txt', 'w'):
pass
wt_resultfile = os.path.join(result_dir, 'resultfile.txt')
step_list = tree.findall('Steps/step')
tc_status = False
data_repository = {'wt_junit_object':None, 'wt_tc_timestamp':timestamp,\
'wt_resultfile':wt_resultfile}
result = custom_parallel_kw_driver.main(step_list, data_repository, tc_status,\
system_name=None)
assert result == False | 5,357,508 |
def execute_query(query, *arguments):
"""Execute a query on the DB with given arguments."""
_db = labpals.model.get_db()
cursor = _db.execute(query, arguments)
rows = cursor.fetchall()
return rows | 5,357,509 |
def CreateClientPool(n):
"""Create n clients to run in a pool."""
clients = []
# Load previously stored clients.
try:
certificates = []
with open(flags.FLAGS.cert_file, "rb") as fd:
# Certificates are base64-encoded, so that we can use new-lines as
# separators.
for l in fd:
cert = rdf_crypto.RSAPrivateKey(initializer=base64.b64decode(l))
certificates.append(cert)
for certificate in certificates[:n]:
clients.append(
PoolGRRClient(
private_key=certificate,
ca_cert=config.CONFIG["CA.certificate"],
fast_poll=flags.FLAGS.fast_poll,
send_foreman_request=flags.FLAGS.send_foreman_request,
))
clients_loaded = True
except (IOError, EOFError):
clients_loaded = False
if clients_loaded and len(clients) < n:
raise RuntimeError(
"Loaded %d clients, but expected %d." % (len(clients), n))
while len(clients) < n:
# Generate a new RSA key pair for each client.
bits = config.CONFIG["Client.rsa_key_length"]
key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=bits)
clients.append(
PoolGRRClient(private_key=key, ca_cert=config.CONFIG["CA.certificate"]))
# Start all the clients now.
for c in clients:
c.start()
start_time = rdfvalue.RDFDatetime.Now()
try:
if flags.FLAGS.enroll_only:
while True:
time.sleep(1)
enrolled = len([x for x in clients if x.enrolled])
if enrolled == n:
logging.info("All clients enrolled, exiting.")
break
else:
logging.info("%s: Enrolled %d/%d clients.", int(time.time()),
enrolled, n)
else:
try:
while True:
time.sleep(100)
except KeyboardInterrupt:
pass
finally:
# Stop all pool clients.
for cl in clients:
cl.Stop()
# Note: code below is going to be executed after SIGTERM is sent to this
# process.
duration = rdfvalue.RDFDatetime.Now() - start_time
logging.info("Pool done in %s seconds.",
duration.ToFractional(rdfvalue.SECONDS))
# The way benchmarking is supposed to work is that we execute poolclient with
# --enroll_only flag, it dumps the certificates to the flags.FLAGS.cert_file.
# Then, all further poolclient invocations just read private keys back
# from that file. Therefore if private keys were loaded from
# flags.FLAGS.cert_file, then there's no need to rewrite it again with the
# same data.
if not clients_loaded:
logging.info("Saving certificates.")
with open(flags.FLAGS.cert_file, "wb") as fd:
# We're base64-encoding certificates so that we can use new-lines
# as separators.
b64_certs = [
base64.b64encode(x.private_key.SerializeToBytes()) for x in clients
]
fd.write(b"\n".join(b64_certs)) | 5,357,510 |
def format_search_events_results(response: Dict[str, Any], limit: int) -> tuple:
"""
Format the output of the search events results command.
Args:
response (Dict[str,Any]): API response from FortiSIEM.
limit (int):Maximum number of results to retrieve.
Returns:
tuple: The formatted events and the total number of result pages.
"""
outputs = []
events = dict_safe_get(response, ['queryResult', 'events', 'event'])
if isinstance(events, dict):
events = [events]
total_count = arg_to_number(dict_safe_get(response, ['queryResult', '@totalCount']))
total_pages = total_count // limit + (total_count % limit != 0) if total_count else 0
if events:
for event in events:
formatted_event = copy.deepcopy(event)
formatted_attributes = {}
attributes = dict_safe_get(event, ['attributes', 'attribute'])
formatted_event['receiveTime'] = FormatIso8601(arg_to_datetime(event['receiveTime']))
for attribute in attributes:
formatted_attributes[attribute['@name']] = attribute['#text']
formatted_event['attributes'] = formatted_attributes
outputs.append(formatted_event)
return outputs, total_pages | 5,357,511 |
def radec_obs_vec_mpc(inds, mpc_object_data):
"""Compute vector of observed ra,dec values for MPC tracking data.
Args:
inds (int array): line numbers of data in file
mpc_object_data (ndarray): MPC observation data for object
Returns:
rov (1xlen(inds) array): vector of ra/dec observed values
"""
rov = np.zeros((2*len(inds)))
for i in range(0,len(inds)):
indm1 = inds[i]-1
# extract observations data
timeobs = Time( datetime(mpc_object_data['yr'][indm1],
mpc_object_data['month'][indm1],
mpc_object_data['day'][indm1]) + timedelta(days=mpc_object_data['utc'][indm1]) )
obs_t_ra_dec = SkyCoord(mpc_object_data['radec'][indm1], unit=(uts.hourangle, uts.deg), obstime=timeobs)
rov[2*i], rov[2*i+1] = obs_t_ra_dec.ra.rad, obs_t_ra_dec.dec.rad
return rov | 5,357,512 |
def mod(x, y) -> ProcessBuilder:
"""
Modulo
:param x: A number to be used as the dividend.
:param y: A number to be used as the divisor.
:return: The remainder after division.
"""
return _process('mod', x=x, y=y) | 5,357,513 |
def test_person_info_photosdb_v5(photosdb5):
""" Test PersonInfo object """
import json
test_key = "katie_5"
katie = [p for p in photosdb5.person_info if p.uuid == UUID_DICT[test_key]][0]
assert katie.facecount == 3
assert katie.name == "Katie"
assert katie.display_name == "Katie"
photos = katie.photos
assert len(photos) == 3
uuid = [p.uuid for p in photos]
assert sorted(uuid) == sorted(PHOTO_DICT[test_key])
assert str(katie) == STR_DICT[test_key]
assert json.loads(katie.json()) == JSON_DICT[test_key] | 5,357,514 |
def find_test_file_loc(test_data_dir):
"""
Return a new, unique and non-existing base name location suitable to create
a new copyright test.
"""
template = "copyright_{}.txt"
idx = 1
while True:
test_file_loc = path.join(test_data_dir, template.format(idx))
if not path.exists(test_file_loc):
return test_file_loc
idx += 1 | 5,357,515 |
def get_list_of_encodings() -> list:
"""
Get a list of all implemented encodings.
! Adapt if new encoding is added !
:return: List of all possible encodings
"""
return ['raw', '012', 'onehot', '101'] | 5,357,516 |
def region_to_bin(chr_start_bin, bin_size, chr, start):
"""Translate genomic region to Cooler bin idx.
Parameters:
----------
chr_start_bin : dict
Dictionary translating chromosome id to bin start index
bin_size : int
Size of the bin
chr : str
Chromosome
start : int
Start of the genomic region
"""
return chr_start_bin[chr] + start // bin_size | 5,357,517 |
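# Editor-added worked example (illustrative values, not from the original
# source): chr_start_bin maps each chromosome to the index of its first bin,
# so a genomic position is translated by integer-dividing by the bin size.
chr_start_bin = {"chr1": 0, "chr2": 2473}  # chr2 starts at bin 2473
bin_size = 100_000
print(region_to_bin(chr_start_bin, bin_size, "chr1", 1_250_000))  # 12
print(region_to_bin(chr_start_bin, bin_size, "chr2", 50_000))     # 2473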
def loop_invariant_statement_but_name_while():
"""Catch basic loop-invariant function call."""
i = 6
for _ in range(10_000):
i | 5,357,518 |
def Wspan_plot(rv, ccf, rvunits='km/s'):
""" Make a pretty plot of the line profile and the Wspan """
pass | 5,357,519 |
def test_get_profile_topics(
api_client, enable_premium_requirement, profile_topic_factory, user_factory
):
"""
Premium users should be able to list their own profile topics.
"""
password = "password"
user = user_factory(has_premium=True, password=password)
api_client.log_in(user.primary_email.email, password)
topic = profile_topic_factory(profile__km_user__user=user)
url = f"/know-me/profile/profiles/{topic.profile.pk}/topics/"
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.json() == [
{
"id": topic.pk,
"url": api_client.build_full_url(
f"/know-me/profile/profile-topics/{topic.pk}/"
),
"created_at": serialized_time(topic.created_at),
"updated_at": serialized_time(topic.updated_at),
"is_detailed": topic.is_detailed,
"items_url": api_client.build_full_url(
f"/know-me/profile/profile-topics/{topic.pk}/items/"
),
"name": topic.name,
"permissions": {"read": True, "write": True},
"profile_id": topic.profile.pk,
}
] | 5,357,520 |
def test_get_object_not_in_indexd(client):
"""
Test the GET object endpoint when the provided key does NOT exist
in indexd, or when indexd errors.
If the key exists in MDS, the metadata should be returned regardless
of a non-200 response from indexd.
"""
guid_or_alias = "dg.hello/test_guid"
# mock the request to indexd: GUID or alias NOT found in indexd
indexd_url = f"{config.INDEXING_SERVICE_ENDPOINT}/{guid_or_alias}"
indexd_get_mocked_request = respx.get(indexd_url, status_code=404)
# GET an object that exists in NEITHER indexd NOR MDS
get_object_url = f"/objects/{guid_or_alias}"
resp = client.get(get_object_url)
assert indexd_get_mocked_request.called
assert resp.status_code == 404, resp.text
# create metadata for this object
mds_data = dict(a=1, b=2)
client.post("/metadata/" + guid_or_alias, json=mds_data).raise_for_status()
try:
# GET an object that exists in MDS but NOT in indexd
resp = client.get(get_object_url)
assert indexd_get_mocked_request.called
assert resp.status_code == 200, resp.text
assert resp.json() == {"record": {}, "metadata": mds_data}
# mock the request to indexd: 500 error from indexd
respx.clear()
indexd_get_mocked_request = respx.get(indexd_url, status_code=500)
# GET an object that exists in MDS, even if indexd failed
resp = client.get(get_object_url)
assert indexd_get_mocked_request.called
assert resp.status_code == 200, resp.text
assert resp.json() == {"record": {}, "metadata": mds_data}
finally:
client.delete("/metadata/" + guid_or_alias) | 5,357,521 |
def get_img_name(img_path: str):
"""
Get the name from the image path.
Args:
img_path (str): a/b.jpg or a/b.png ...
Returns:
name (str): a/b.jpg -> b
"""
image_name = os.path.split(img_path)[-1].split('.')[0]
return image_name | 5,357,522 |
def cvCalcProbDensity(*args):
"""
cvCalcProbDensity(CvHistogram hist1, CvHistogram hist2, CvHistogram dst_hist,
double scale=255)
"""
return _cv.cvCalcProbDensity(*args) | 5,357,523 |
def get_filter_para(node_element):
"""Return paragraph containing the used filter description"""
para = nodes.paragraph()
filter_text = "Used filter:"
filter_text += " status(%s)" % " OR ".join(node_element["status"]) if len(
node_element["status"]) > 0 else ""
if len(node_element["status"]) > 0 and len(node_element["tags"]) > 0:
filter_text += " AND "
filter_text += " tags(%s)" % " OR ".join(node_element["tags"]) if len(
node_element["tags"]) > 0 else ""
if (len(node_element["status"]) > 0 or len(node_element["tags"]) > 0) and len(
node_element["types"]) > 0:
filter_text += " AND "
filter_text += " types(%s)" % " OR ".join(node_element["types"]) if len(
node_element["types"]) > 0 else ""
filter_node = nodes.emphasis(filter_text, filter_text)
para += filter_node
return para | 5,357,524 |
def mk_multi_line_figax(nrows, ncols, xlabel='time (s)', ylabel='signal (a.u.)'):
"""
Create the figure and axes for a
multipanel 2d-line plot
"""
# ncols and nrows get
# restricted via the plotting frontend
x_size = ncols * pltConfig['mXSize']
y_size = nrows * pltConfig['mYSize']
fig, axs = ppl.subplots(nrows, ncols, figsize=(x_size, y_size),
sharex=True, sharey=True, squeeze=False)
# Hide the right and top spines
# and remove all tick labels
for ax in axs.flatten():
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelsize=0)
# determine axis layout
y_left = axs[:, 0]
x_bottom = axs[-1, :]
# write tick and axis labels only on outer axes to save space
for ax in y_left:
ax.tick_params(labelsize=pltConfig['mTickSize'])
ax.set_ylabel(ylabel, fontsize=pltConfig['mLabelSize'])
for ax in x_bottom:
ax.tick_params(labelsize=pltConfig['mTickSize'])
ax.set_xlabel(xlabel, fontsize=pltConfig['mLabelSize'])
return fig, axs | 5,357,525 |
def execute_test(test_function=None,
supported_fmts=[], supported_oses=['linux'],
supported_cache_modes=[], unsupported_fmts=[],
supported_protocols=[], unsupported_protocols=[]):
"""Run either unittest or script-style tests."""
# We are using TEST_DIR and QEMU_DEFAULT_MACHINE as proxies to
# indicate that we're not being run via "check". There may be
# other things set up by "check" that individual test cases rely
# on.
if test_dir is None or qemu_default_machine is None:
sys.stderr.write('Please run this test via the "check" script\n')
sys.exit(os.EX_USAGE)
debug = '-d' in sys.argv
verbosity = 1
verify_image_format(supported_fmts, unsupported_fmts)
verify_protocol(supported_protocols, unsupported_protocols)
verify_platform(supported_oses)
verify_cache_mode(supported_cache_modes)
if debug:
output = sys.stdout
verbosity = 2
sys.argv.remove('-d')
else:
# We need to filter out the time taken from the output so that
# qemu-iotest can reliably diff the results against master output.
output = io.StringIO()
logging.basicConfig(level=(logging.DEBUG if debug else logging.WARN))
if not test_function:
execute_unittest(output, verbosity, debug)
else:
test_function() | 5,357,526 |
def load_sizes(infile_path: str, header: int = None):
"""
Load and parse a chromosome sizes file: a tab-separated file with the
chromosome name in the first column and its length in the second, e.g.:
chr1 247249719
chr2 242951149
...
Arguments:
(REQUIRED) infile_path: path to the sizes file
(OPTIONAL) header: row number to use as the column header (DEFAULT: None)
"""
return pd.read_csv(infile_path, sep="\t", header=header, index_col=0) | 5,357,527 |
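# Editor-added usage sketch (not from the original source): the expected input
# is a two-column, tab-separated chromosome-sizes file, which pandas indexes
# by chromosome name; a StringIO stands in for the file here.
import io
import pandas as pd
sizes_txt = "chr1\t247249719\nchr2\t242951149\n"
sizes = pd.read_csv(io.StringIO(sizes_txt), sep="\t", header=None, index_col=0)
print(sizes.loc["chr1", 1])  # 247249719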
def latent_tree_mutate(g):
"""Produce an offspring genome by mutation through dict
manipulation. Choose a random key in the dict, and overwrite its
value with a random int. Later, repair must make sure the
offspring is valid, including using the mod rule to map from a
(possibly) large int to the corresponding small one (ie the one
giving the same production choice) in the range of possible
choices."""
# FIXME We don't rely on g being a copy, in case the search
# algorithm sometimes mutates individuals which are original
# members of the population.
# See https://github.com/PonyGE/PonyGE2/issues/89.
g = g.copy()
k = random.choice(list(g.keys()))
g[k] = random.randrange(1000000) # there is no true maxint on py 3
return g | 5,357,528 |
def AddResourceUsageExportFlags(parser, is_update=False, hidden=False):
"""Adds flags about exporting cluster resource usage to BigQuery."""
group = parser.add_group(
"Exports cluster's usage of cloud resources", hidden=hidden)
if is_update:
group.is_mutex = True
group.add_argument(
'--clear-resource-usage-bigquery-dataset',
action='store_true',
hidden=hidden,
default=None,
help='Disables exporting cluster resource usage to BigQuery.')
group = group.add_group()
dataset_help_text = """\
The name of the BigQuery dataset to which the cluster's usage of cloud
resources is exported. A table will be created in the specified dataset to
store cluster resource usage. The resulting table can be joined with BigQuery
Billing Export to produce a fine-grained cost breakdown.
Examples:
$ {command} example-cluster --resource-usage-bigquery-dataset=example_bigquery_dataset_name
"""
group.add_argument(
'--resource-usage-bigquery-dataset',
default=None,
hidden=hidden,
help=dataset_help_text)
network_egress_help_text = """\
Enable network egress metering on this cluster.
When enabled, a DaemonSet is deployed into the cluster. Each DaemonSet pod
meters network egress traffic by collecting data from the conntrack table, and
exports the metered metrics to the specified destination.
Network egress metering is disabled if this flag is omitted, or when
`--no-enable-network-egress-metering` is set.
"""
group.add_argument(
'--enable-network-egress-metering',
action='store_true',
default=None,
hidden=hidden,
help=network_egress_help_text)
resource_consumption_help_text = """\
Enable resource consumption metering on this cluster.
When enabled, a table will be created in the specified BigQuery dataset to store
resource consumption data. The resulting table can be joined with the resource
usage table or with BigQuery billing export.
Resource consumption metering is enabled unless `--no-enable-resource-
consumption-metering` is set.
"""
if is_update:
resource_consumption_help_text = """\
Enable resource consumption metering on this cluster.
When enabled, a table will be created in the specified BigQuery dataset to store
resource consumption data. The resulting table can be joined with the resource
usage table or with BigQuery billing export.
To disable resource consumption metering, set `--no-enable-resource-consumption-
metering`. If this flag is omitted, then resource consumption metering will
remain enabled or disabled depending on what is already configured for this
cluster.
"""
group.add_argument(
'--enable-resource-consumption-metering',
action='store_true',
default=None,
hidden=hidden,
help=resource_consumption_help_text) | 5,357,529 |
def store_restore_example():
"""Connects to the first scanned drive and store and restores the
current configuration."""
net = CanopenNetwork(device=CAN_DEVICE.IXXAT,
channel=0,
baudrate=CAN_BAUDRATE.Baudrate_1M)
nodes = net.scan_slaves()
print(nodes)
if len(nodes) > 0:
servo = net.connect_to_slave(
target=nodes[0],
dictionary='../../resources/dictionaries/eve-net-c_can_1.8.1.xdf',
eds='../../resources/dictionaries/eve-net-c_1.8.1.eds')
fw_version = servo.read('DRV_ID_SOFTWARE_VERSION')
print(fw_version)
# Store all
try:
servo.store_parameters()
print('Stored all parameters successfully')
except Exception as e:
print('Error storing all parameters')
# Store axis 1
try:
servo.store_parameters(subnode=1)
print('Stored axis 1 parameters successfully')
except Exception as e:
print('Error storing parameters axis 1')
# Restore all
try:
servo.restore_parameters()
print('Restored all parameters successfully')
except Exception as e:
print('Error restoring all parameters')
net.disconnect_from_slave(servo)
else:
print('Could not find any nodes') | 5,357,530 |
def load_model(file_path: str):
"""
Used to de-serialize and load a previously trained and saved model, so it can be reused later on.
-----------------------------------------------------------------------------------
Parameters:
-----------------------------------------------------------------------------------
file_path: str
Path to a stored model from prior running save_model().
Returns:
-----------------------------------------------------------------------------------
fcm_model: List (ndarray, float)
The de-serialized model.
"""
with open(file_path, 'rb') as model_file:
fcm_model = pickle.load(model_file)
return fcm_model | 5,357,531 |
def clear_response_status(response):
"""
Clear the 'status_type" and 'status_msg' attributes of the given response's cookies.
:param response: the response being processed
"""
response.delete_cookie(key='status_type')
response.delete_cookie(key='status_msg') | 5,357,532 |
def gfs_mos_forecast(stid, forecast_date):
"""
Do the data retrieval.
"""
# Generate a Forecast object
forecast = Forecast(stid, default_model_name, forecast_date)
import numpy as np
forecast.daily.high = np.round(np.random.rand() * 100.)
forecast.daily.low = np.round(np.random.rand() * 100.)
forecast.daily.wind = np.round(np.random.rand() * 40.)
forecast.daily.rain = np.round(np.random.rand() * 3., 2)
# Create a dummy pd dataframe to test
forecast.timeseries.data['DateTime'] = [forecast_date, forecast_date +
timedelta(hours=3)]
forecast.timeseries.data['temperature'] = [56., 55.]
forecast.timeseries.data['dewpoint'] = [51., 51.]
return forecast | 5,357,533 |
def print_mem_info(title = "Memory Info", device_id = None):
"""
Prints the memory used for the specified device.
Parameters
----------
title: optional. Default: "Memory Info"
- Title to display before printing the memory info.
device_id: optional. Default: None
- Specifies the device for which the memory info should be displayed.
- If None, uses the current device.
Examples
--------
>>> a = af.randu(5,5)
>>> af.print_mem_info()
Memory Info
---------------------------------------------------------
| POINTER | SIZE | AF LOCK | USER LOCK |
---------------------------------------------------------
| 0x706400000 | 1 KB | Yes | No |
---------------------------------------------------------
>>> b = af.randu(5,5)
>>> af.print_mem_info()
Memory Info
---------------------------------------------------------
| POINTER | SIZE | AF LOCK | USER LOCK |
---------------------------------------------------------
| 0x706400400 | 1 KB | Yes | No |
| 0x706400000 | 1 KB | Yes | No |
---------------------------------------------------------
>>> a = af.randu(1000,1000)
>>> af.print_mem_info()
Memory Info
---------------------------------------------------------
| POINTER | SIZE | AF LOCK | USER LOCK |
---------------------------------------------------------
| 0x706500000 | 3.815 MB | Yes | No |
| 0x706400400 | 1 KB | Yes | No |
| 0x706400000 | 1 KB | No | No |
---------------------------------------------------------
"""
device_id = device_id if device_id else get_device()
safe_call(backend.get().af_print_mem_info(title.encode('utf-8'), device_id)) | 5,357,534 |
def evaluate_accuracy_score(preprocessing, prediction_binary):
"""
Evaluates the accuracy score
:param preprocessing: prepared DataPreprocess instance
:param prediction_binary: boolean expression for the predicted classes
"""
accuracy = []
for j in range(len(DETECTION_CLASSES)):
acc = accuracy_score(preprocessing.target_classes[:, j], prediction_binary[:, j])
accuracy.append(acc)
return np.mean(accuracy) | 5,357,535 |
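# Editor-added illustration of the per-class averaging above (hypothetical
# two-class data, not from the original source): accuracy is computed
# column-by-column over a multi-label target matrix and then averaged.
import numpy as np
from sklearn.metrics import accuracy_score
target = np.array([[1, 0], [0, 1], [1, 1]])
pred = np.array([[1, 0], [0, 0], [1, 1]])
per_class = [accuracy_score(target[:, j], pred[:, j]) for j in range(target.shape[1])]
print(np.mean(per_class))  # 0.833... (class 0: 3/3 correct, class 1: 2/3 correct)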
def test_domain_visualize(case, visu_case):
"""
test the domain visualization
"""
dom = pylbm.Domain(case)
views = dom.visualize(**visu_case)
return views.fig | 5,357,536 |
def create_supervised_evaluator(model, metrics,
device=None):
"""
Factory function for creating an evaluator for supervised models
Args:
model (`torch.nn.Module`): the model to train
metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics
device (str, optional): device type specification (default: None).
Applies to both model and batches.
Returns:
Engine: an evaluator engine with supervised inference function
"""
if device:
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model.to(device)
def _inference(engine, batch):
model.eval()
with torch.no_grad():
data, language, pids, camids = batch
batchsize = language.size(0)
wordclass_feed = np.zeros((batchsize, max_tokens), dtype='int64')
wordclass_feed[:,0] = wordlist_final.index('<S>')
outcaps = np.empty((batchsize, 0)).tolist()
data = data.to(device) if torch.cuda.device_count() >= 1 else data
# language = language.to(device) if torch.cuda.device_count() >= 1 else language
for j in range(max_tokens-1):
wordclass = Variable(torch.from_numpy(wordclass_feed)).cuda()
features, wordact, _= model(data, wordclass)
wordact = wordact[:,:,:-1]
wordact_t = wordact.permute(0, 2, 1).contiguous().view(batchsize*(max_tokens-1), -1)
wordprobs = F.softmax(wordact_t, dim=1).cpu().data.numpy()
wordids = np.argmax(wordprobs, axis=1)
for k in range(batchsize):
word = wordlist_final[wordids[j+k*(max_tokens-1)]]
outcaps[k].append(word)
if(j < max_tokens-1):
wordclass_feed[k, j+1] = wordids[j+k*(max_tokens-1)]
for j in range(batchsize):
num_words = len(outcaps[j])
if 'EOS' in outcaps[j]:
num_words = outcaps[j].index('EOS')
outcap = ' '.join(outcaps[j][:num_words])
feat, _, _ = model(data, wordclass)
print (outcap)
return feat, pids, camids
engine = Engine(_inference)
for name, metric in metrics.items():
metric.attach(engine, name)
return engine | 5,357,537 |
def get_available_operations():
""" Return a dict of available operations """
return True, runtime.get_available_operations() | 5,357,538 |
def word_distance(word1, word2):
"""Computes the number of differences between two words.
word1, word2: strings
Returns: integer
"""
assert len(word1) == len(word2)
count = 0
for c1, c2 in zip(word1, word2):
if c1 != c2:
count += 1
return count | 5,357,539 |
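# Editor-added usage sketch (not from the original source): word_distance
# counts the positions at which two equal-length strings differ, i.e. the
# Hamming distance.
print(word_distance("kitten", "sitten"))  # 1
print(word_distance("aaaa", "bbbb"))      # 4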
def rmLineno(node):
"""Strip lineno attributes from a code tree."""
if 'lineno' in node.__dict__:
del node.lineno
for child in node.getChildren():
if isinstance(child, ast.Node):
rmLineno(child) | 5,357,540 |
def get_parent_child(root: dict,
path: str) -> Union[Tuple[Tuple[None, None],
Tuple[None, None]],
Tuple[Tuple[dict, None],
Tuple[Any, str]],
Tuple[Tuple[Any, str],
Tuple[Any, str]]]:
""" Get first and second level node
:param root: The root node.
:param path: The path to identify the leaf node.
:return: (
(
parent node: The first level node in the hierarchy of the path
parent path: The path based on the root node
)
(
child node: The second level node in the hierarchy of the path
child path: The path based on the parent node
)
)
"""
res = Ddict.search(root, path)
if res is None:
if '.' not in path:
return (None, None), (None, None)
else:
child = Ddict.get(root, path)
return (root, None), (child, path)
parent_name, parent_value, child_name = res
if child_name:
child_value = Ddict.get(parent_value, child_name)
return (parent_value, parent_name), (child_value, child_name)
else:
return (root, None), (parent_value, parent_name) | 5,357,541 |
def get_data_loaders(personachat, tokenizer, args_num_candidates=1, args_personality_permutations=1, args_max_history=2):
""" Prepare the dataset for training and evaluation """
print("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
for dataset_name, dataset in personachat.items():
num_candidates = len(dataset[0]["utterances"][0]["candidates"])
if args_num_candidates > 0 and dataset_name == 'train':
num_candidates = min(args_num_candidates, num_candidates)
for dialog in dataset:
persona = dialog["personality"].copy()
for _ in range(args_personality_permutations):
for utterance in dialog["utterances"]:
history = utterance["history"][-(2*args_max_history+1):]
for j, candidate in enumerate(utterance["candidates"][-num_candidates:]):
lm_labels = bool(j == num_candidates-1)
instance, _ = build_input_from_segments(persona, history, candidate, tokenizer, lm_labels)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
persona = [persona[-1]] + persona[:-1] # permuted personalities
print("Pad inputs and convert to Tensor")
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids('<pad>'))
for input_name in MODEL_INPUTS:
tensor = dataset[input_name]
dataset[input_name] = np.array(tensor)
return datasets | 5,357,542 |
def remote(ctx, verbose: bool = False, debug: bool = False):
"""List remotes if no additional command is provided"""
if not ctx.invoked_subcommand:
try:
with fdp_session.FAIR(os.getcwd(), debug=debug) as fair_session:
fair_session.list_remotes(verbose)
except fdp_exc.FAIRCLIException as e:
if debug:
raise e
e.err_print()
if e.level.lower() == "error":
sys.exit(e.exit_code) | 5,357,543 |
def main(inargs):
"""
Main routine for evaluation. Saves the CRPS for all experiments
and plots a histogram.
"""
assert inargs.date_start == '2016-01-01' and inargs.date_stop == '2017-01-01', \
'Flexible dates not implemented.'
# Get observation data
raw_crps = prepare_obs_df_and_compute_raw_crps(inargs)
# Compute scores
crps_list = evaluate(inargs)
# Print and save results
print('Raw CRPS', raw_crps)
for i in range(len(inargs.eval_files)):
print(inargs.eval_files[i], crps_list[i])
crps_df = pd.DataFrame({
'name': ['raw_ensemble'] + [e.split('/')[-1].split('.')[0]
for e in inargs.eval_files],
'crps': [raw_crps] + crps_list
})
if inargs.sort_by_score:
crps_df = crps_df.sort_values('crps')
crps_df.to_csv('./crps.csv')
# Plot results
plot_results(crps_df) | 5,357,544 |
def polyMergeUV(q=1,e=1,cch=1,ch=1,d="float",n="string",nds="int",uvs="string",ws=1):
"""
http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/polyMergeUV.html
-----------------------------------------
polyMergeUV is undoable, queryable, and editable.
Merge UVs of an object based on their distance. UVs are merge only if they
belong to the same 3D vertex.
-----------------------------------------
Return Value:
string The node name.
In query mode, return type is based on queried flag.
-----------------------------------------
Flags:
-----------------------------------------
cch : caching [boolean] ['query', 'edit']
Toggle caching for all attributes so that no recomputation is needed
-----------------------------------------
ch : constructionHistory [boolean] ['query']
Turn the construction history on or off (where applicable). If construction history is on then the corresponding node will be inserted into the history chain for the mesh. If construction history is off then the operation will be performed directly on the object. Note: If the object already has construction history then this flag is ignored and the node will always be inserted into the history chain.
-----------------------------------------
d : distance [float] ['query', 'edit']
This flag specifies the maximum distance to merge UVs. C: Default is 0.0. Q: When queried, this flag returns a double.
-----------------------------------------
n : name [string] []
Give a name to the resulting node.
-----------------------------------------
nds : nodeState [int] ['query', 'edit']
Maya dependency nodes have 6 possible states. The Normal (0), HasNoEffect (1), and Blocking (2) states can be used to alter how the graph is evaluated. The Waiting-Normal (3), Waiting-HasNoEffect (4), Waiting-Blocking (5) are for internal use only. They temporarily shut off parts of the graph during interaction (e.g., manipulation). The understanding is that once the operation is done, the state will be reset appropriately, e.g. Waiting-Blocking will reset back to Blocking. The Normal and Blocking cases apply to all nodes, while HasNoEffect is node specific; many nodes do not support this option. Plug-ins store state in the MPxNode::state attribute. Anyone can set it or check this attribute.
-----------------------------------------
uvs : uvSetName [string] []
Name of the UV set to be created
-----------------------------------------
ws : worldSpace [boolean]
This flag specifies which reference to use. If "on" : all geometrical values are taken in world reference. If "off" : all geometrical values are taken in object reference. C: Default is off. Q: When queried, this flag returns an int.
""" | 5,357,545 |
def unescape_strict(s):
"""
Re-implements html.unescape to use our own definition of `_charref`
"""
if '&' not in s:
return s
return _charref.sub(_replace_charref, s) | 5,357,546 |
def CheckVPythonSpec(input_api, output_api, file_filter=None):
"""Validates any changed .vpython files with vpython verification tool.
Args:
input_api: Bag of input related interfaces.
output_api: Bag of output related interfaces.
file_filter: Custom function that takes a path (relative to client root) and
returns boolean, which is used to filter files for which to apply the
verification to. Defaults to any path ending with .vpython, which captures
both global .vpython and <script>.vpython files.
Returns:
A list of input_api.Command objects containing verification commands.
"""
file_filter = file_filter or (lambda f: f.LocalPath().endswith('.vpython'))
affected_files = input_api.AffectedTestableFiles(file_filter=file_filter)
affected_files = map(lambda f: f.AbsoluteLocalPath(), affected_files)
commands = []
for f in affected_files:
commands.append(input_api.Command(
'Verify %s' % f,
['vpython', '-vpython-spec', f, '-vpython-tool', 'verify'],
{'stderr': input_api.subprocess.STDOUT},
output_api.PresubmitError))
return commands | 5,357,547 |
def assert_is_valid_notebook(nb):
"""These are the current assumptions on notebooks in these tests. Loosen on demand."""
assert nb["nbformat"] == 4
# assert nb["nbformat_minor"] == 0
assert isinstance(nb["metadata"], dict)
assert isinstance(nb["cells"], list)
assert all(isinstance(cell, dict) for cell in nb["cells"]) | 5,357,548 |
def _read_from_file(paramfile):
"""
Code to load parameter data from a YAML file, moved out of
check_metadata_format to allow different inputs to that function.
:param paramfile: The parameter file created by 'precheck_data_format' and
'select_data_templates'.
:type paramfile: str
"""
# Read in the parameter file.
if os.path.isfile(paramfile):
with open(paramfile, 'r') as istream:
param_data = yaml.load(istream, Loader=yaml.SafeLoader)
else:
raise OSError('Input parameter file not found. Looking for "' +
paramfile + '".')
return param_data | 5,357,549 |
def test_infer_errs() -> None:
"""Test inference applied to functions."""
with f.Fun(MockServer()):
a = f.put(b"bla bla")
b = f.put(3)
with pytest.raises(TypeError):
f.py(lambda x, y, z: (x, y), a, a, b)
# should NOT raise
f.py(
lambda x, y, z: (x, y),
a,
a,
b,
out=[types.Encoding.blob, types.Encoding.blob],
)
def i1o2(x: bytes) -> Tuple[bytes, bytes]:
return x, x
def i2o1(x: bytes, y: bytes) -> bytes:
return x
with pytest.raises(TypeError):
out = f.morph(i1o2, a) # type:ignore # noqa:F841
with pytest.raises(TypeError):
out = f.reduce(i1o2, a) # type:ignore # noqa:F841
with pytest.raises(TypeError):
out = f.reduce(lambda x, y: x, a, b) # type:ignore # noqa:F841
# If we pass out= then the inference is skipped
out = f.morph(i1o2, a, out=types.Encoding.blob) # type:ignore # noqa:F841
out = f.reduce(i1o2, a, out=types.Encoding.blob) | 5,357,550 |
def decode_captions(captions, idx_to_word):
""" Decode text captions from index in vocabulary to words.
"""
if captions.ndim == 1:
T = captions.shape[0]
N = 1
else:
N, T = captions.shape
decoded = []
for i in range(N):
words = []
for t in range(T):
if captions.ndim == 1:
word = idx_to_word[captions[t]]
else:
word = idx_to_word[captions[i, t]]
if word == '<END>':
words.append('.')
break
if word != '<NULL>':
words.append(word)
decoded.append(' '.join(words))
return decoded | 5,357,551 |
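# Editor-added usage sketch with a hypothetical vocabulary (not from the
# original source): index sequences are mapped back to words, stopping at
# '<END>' and skipping '<NULL>' padding.
import numpy as np
idx_to_word = {0: '<NULL>', 2: '<END>', 3: 'a', 4: 'cat', 5: 'sits'}
captions = np.array([[3, 4, 5, 2, 0, 0]])
print(decode_captions(captions, idx_to_word))  # ['a cat sits .']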
def save_kdeplot(df: DataFrame,
output_plot: str,
x_name: str,
title: str,
color: str,
x_label: str = None,
y_label: str = None,
normalize_x: bool = True,
fig_size: Tuple[int] = (24, 12),
):
"""This function helps for computing automated kdeplots using seaborn.
It sets up somewhat standardized figure output for a harmonized rendering.
:param df: the DataFrame with data to plot
:param output_plot: the output plot full file name
:param x_name: DF column name to use for x-axis
:param x_label: the name to display on the plot for x-axis
:param y_label: the name to display on the plot for y-axis
:param color: color to use for bars, theoretically could also be a list of colors
:param fig_size: tuple of integers defining the plot dimensions (x, y)
:return: the figure in seaborn format
"""
# detect format from file extension
format = Path(output_plot).suffix[1:].lower()
if format != 'svg' and format != 'png':
raise ValueError(f"ERROR! UNKNOWN PLOT FORMAT! ('{format}')")
logging.debug(f"FORMAT FOR PLOT: '{format}'")
# delete existing file for preventing stacking of plots
p = Path(output_plot)
if p.exists():
p.unlink()
# general style for plotting
sns.set(rc={'figure.figsize': fig_size})
sns.set_style('whitegrid', {'axes.edgecolor': '0.2'})
sns.set_context("paper", font_scale=2)
ax = sns.kdeplot(df[x_name], shade=True, label='', color=color)
ax.set_title(title, fontsize=24, y=1.02)
ax.tick_params(labelsize=20)
ax.tick_params(axis='x', rotation=0)
ax.set_xlim(0, 1)
# ax.set_xticklabels(df[x_name])
label_format = '{:,.0%}'
ticks_loc = ax.get_xticks().tolist()
ax.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc))
ax.set_xticklabels([label_format.format(x) for x in ticks_loc])
#ax.set_xticklabels(['{:,.0%}'.format(x) for x in ax.get_xticks()])
ax.set_xlabel(x_label, fontsize=25, labelpad=20)
ax.set_ylabel(y_label, fontsize=25, labelpad=20)
# save
figure = ax.get_figure()
figure.savefig(output_plot, dpi=600)
plt.clf()
plt.close()
return figure | 5,357,552 |
def make_loc(caller):
"""
turn caller location into a string
"""
# return caller["file"] + ":" + caller["func"] + ":" + caller["line"]
return caller["file"] + ":" + str(caller["line"]) | 5,357,553 |
def diff(file1, file2):
"""
Compare two files, ignoring line end differences
If there are differences, print them to stderr in unified diff format.
@param file1 The full pathname of the first file to compare
@param file2 The full pathname of the second file to compare
@return True if the files are the same, o
"""
with open(file1, 'r') as input1:
with open(file2, 'r') as input2:
diffs = difflib.unified_diff(
input1.read().splitlines(),
input2.read().splitlines()
)
no_diffs = True
for diff in diffs:
no_diffs = False
print(diff, file=sys.stderr)
return no_diffs | 5,357,554 |
def run_part2(file_content):
"""Implmentation for Part 2."""
numbers = (int(number) for number in file_content.split())
root = _build_tree(numbers)
return _node_value(root) | 5,357,555 |
def get_cap_selected_frame(cap, show_frame):
"""
Gets a frame from an opencv video capture object to a specific frame
"""
cap_set_frame(cap, show_frame)
ret, frame = cap.read()
if not ret:
return None
else:
return frame | 5,357,556 |
def rotate_coords_x(pos, angle):
""" Rotate a set of coordinates about the x-axis
:param pos: (n, 3) xyz coordinates to be rotated
:param angle: angle to rotate them by w.r.t origin
:type pos: numpy.ndarray
:type angle: float
:return: array of rotated coordinates
:rtype: numpy.ndarray
"""
xyz = np.copy(pos)
angle *= (np.pi / 180) # convert to radians
R = rotate_x(angle)
for i in range(np.shape(xyz)[0]):
xyz[i, :] = np.dot(R, xyz[i, :])
return xyz | 5,357,557 |
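# Editor-added sketch of the rotate_x helper used above (an assumption; the
# original helper is not shown in this snippet): the standard right-handed
# rotation matrix about the x-axis, taking the angle in radians.
import numpy as np
def rotate_x(angle):
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[1, 0, 0],
                     [0, c, -s],
                     [0, s, c]])
# Rotating the y unit vector by 90 degrees about x yields the z unit vector:
print(rotate_coords_x(np.array([[0.0, 1.0, 0.0]]), 90.0))  # approx [[0. 0. 1.]]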
def q_learning(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1):
"""
Q-Learning algorithm: Off-policy TD control. Finds the optimal greedy policy
while following an epsilon-greedy policy
Args:
env: OpenAI environment.
num_episodes: Number of episodes to run for.
discount_factor: Gamma discount factor.
alpha: TD learning rate.
epsilon: Chance the sample a random action. Float betwen 0 and 1.
Returns:
A tuple (Q, episode_lengths).
Q is the optimal action-value function, a dictionary mapping state -> action values.
stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
"""
# The final action-value function.
# A nested dictionary that maps state -> (action -> action-value).
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
for i_episode in range(num_episodes):
current_state = env.reset()
# keep track number of time-step per episode only for plotting
for t in itertools.count():
# choose the action based on epsilon greedy policy
action_probs = policy(current_state)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
next_state, reward, done, _ = env.step(action)
# use the greedy action to evaluate Q, not the one we actually follow
greedy_next_action = Q[next_state].argmax()
# evaluate Q using estimated action value of (next_state, greedy_next_action)
td_target = reward + discount_factor * Q[next_state][greedy_next_action]
td_error = td_target - Q[current_state][action]
Q[current_state][action] += alpha * td_error
# improve epsilon greedy policy using new evaluate Q
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
# update statistics
stats.episode_rewards[i_episode] += reward
stats.episode_lengths[i_episode] = t
if done:
break
else:
current_state = next_state
return Q, stats | 5,357,558 |
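# Editor-added sketch of the make_epsilon_greedy_policy helper referenced
# above (an assumption; the original helper is defined elsewhere): it returns
# a function mapping a state to action probabilities, spreading mass epsilon
# uniformly and putting the remaining 1 - epsilon on the greedy action.
import numpy as np
def make_epsilon_greedy_policy(Q, epsilon, nA):
    def policy_fn(state):
        probs = np.ones(nA, dtype=float) * epsilon / nA
        best_action = np.argmax(Q[state])
        probs[best_action] += 1.0 - epsilon
        return probs
    return policy_fn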
def test_empty_document(text_window):
"""Empty text document can be rendered."""
text_window.dispatch_events()
text_window.close() | 5,357,559 |
def write_quality_stats(input_files, output_file):
"""
Iterate over a list of fastqc output files and generate a dataframe
containing summary statistics for each file, then write the result
to disk.
"""
quality_df = get_quality_stats(input_files)
quality_df.to_csv(output_file, sep='\t', index=False) | 5,357,560 |
def test_delete_contact_no_token_returns_401(client, session): # pylint:disable=unused-argument, invalid-name
"""Assert that deleting a contact without a token returns a 401."""
rv = client.delete('/api/v1/users/contacts', headers=None, content_type='application/json')
assert rv.status_code == http_status.HTTP_401_UNAUTHORIZED | 5,357,561 |
def run_experiment(hparams):
"""Run the training and evaluate using the high level API"""
train_input = model._make_training_input_fn(
hparams.tft_working_dir,
hparams.train_filebase,
num_epochs=hparams.num_epochs,
batch_size=hparams.train_batch_size,
buffer_size=hparams.train_buffer_size,
prefetch_buffer_size=hparams.train_prefetch_buffer_size)
# Don't shuffle evaluation data
eval_input = model._make_training_input_fn(
hparams.tft_working_dir,
hparams.eval_filebase,
shuffle=False,
batch_size=hparams.eval_batch_size,
buffer_size=1,
prefetch_buffer_size=hparams.eval_prefetch_buffer_size)
train_spec = tf.estimator.TrainSpec(train_input,
max_steps=hparams.train_steps
)
exporter = tf.estimator.FinalExporter('tft_classifier',
model._make_serving_input_fn(hparams.tft_working_dir))
eval_spec = tf.estimator.EvalSpec(eval_input,
steps=hparams.eval_steps,
exporters=[exporter],
name='tft_classifier-eval'
)
run_config = tf.estimator.RunConfig()
run_config = run_config.replace(model_dir=hparams.job_dir)
print('model dir {}'.format(run_config.model_dir))
estimator = model.build_estimator(
config=run_config,
tft_working_dir=hparams.tft_working_dir,
embedding_size=hparams.embedding_size,
# Construct layer sizes with exponential decay
hidden_units=[
max(2, int(hparams.first_layer_size *
hparams.scale_factor**i))
for i in range(hparams.num_layers)
],
)
tf.estimator.train_and_evaluate(estimator,
train_spec,
eval_spec) | 5,357,562 |
def find_rand_source_reg():
"""Find random source register based on readAfterWrite probability"""
prob=random.uniform(0,1)
while len(previousIntegerSources)>numberOfPreviousRegistersToConsider:
previousIntegerSources.popleft()
if prob<readAfterWrite and previousIntegerDestinations:
num=random.choice(previousIntegerDestinations)
else:
num=random.randint(1,31)
previousIntegerSources.append(num)
return num | 5,357,563 |
def _get_qnode_class(device, interface, diff_method):
"""Returns the class for the specified QNode.
Args:
device (~.Device): a PennyLane-compatible device
interface (str): the interface that will be used for classical backpropagation
diff_method (str, None): the method of differentiation to use in the created QNode
Raises:
ValueError: if an unrecognized ``diff_method`` is provided
Returns:
~.BaseQNode: the QNode class object that is compatible with the provided device and
differentiation method
"""
# pylint: disable=too-many-return-statements,too-many-branches
model = device.capabilities().get("model", "qubit")
passthru_interface = device.capabilities().get("passthru_interface", None)
device_provides_jacobian = device.capabilities().get("provides_jacobian", False)
allows_passthru = passthru_interface is not None
if diff_method is None:
# QNode is not differentiable
return BaseQNode
if diff_method == "best":
if allows_passthru and interface == passthru_interface:
# hand off differentiation to the device without type conversion
return PassthruQNode
if device_provides_jacobian:
# hand off differentiation to the device
return DeviceJacobianQNode
if model in PARAMETER_SHIFT_QNODES:
# parameter-shift analytic differentiation
return PARAMETER_SHIFT_QNODES[model]
if diff_method == "backprop":
if allows_passthru:
if interface != passthru_interface:
raise ValueError(
"Device {} only supports diff_method='backprop' when using the "
"{} interface.".format(device.short_name, passthru_interface)
)
return PassthruQNode
raise ValueError(
"The {} device does not support native computations with "
"autodifferentiation frameworks.".format(device.short_name)
)
if diff_method == "device":
if device_provides_jacobian:
return DeviceJacobianQNode
raise ValueError(
"The {} device does not provide a native method "
"for computing the jacobian.".format(device.short_name)
)
if diff_method == "parameter-shift":
if model in PARAMETER_SHIFT_QNODES:
# parameter-shift analytic differentiation
return PARAMETER_SHIFT_QNODES[model]
raise ValueError(
"The parameter shift rule is not available for devices with model {}.".format(model)
)
if diff_method == "reversible":
# pylint: disable=protected-access
if not device.capabilities().get("reversible_diff", False):
raise ValueError(
"Reversible differentiation method not supported on {}".format(device.short_name)
)
return ReversibleQNode
if diff_method in ALLOWED_DIFF_METHODS:
# finite differences
return JacobianQNode
raise ValueError(
"Differentiation method {} not recognized. Allowed "
"options are {}".format(diff_method, ALLOWED_DIFF_METHODS)
) | 5,357,564 |
def test_alamut_link(app, institute_obj, variant_obj):
"""Test to add a link to alamut browser"""
# GIVEN an institute with settings for Alamut Visual Plus
alamut_api_key = "test_alamut_key"
institute_obj["alamut_key"] = alamut_api_key
# GIVEN that genome build 38 is provided
build = 38
# GIVEN that the app settings contain parameter HIDE_ALAMUT_LINK = False
app.config["HIDE_ALAMUT_LINK"] = False
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
assert resp.status_code == 200
# WHEN the alamut link is created
link_to_alamut = alamut_link(institute_obj, variant_obj, build)
# THEN the link should contain genome build info
assert "GRCh38" in link_to_alamut
# As well as Alamut Visual Plus API key
assert alamut_api_key in link_to_alamut | 5,357,565 |
def evaluate_and_log_bleu(model, bleu_source, bleu_ref, vocab_file):
"""Calculate and record the BLEU score."""
subtokenizer = tokenizer.Subtokenizer(vocab_file)
uncased_score, cased_score = translate_and_compute_bleu(
model, subtokenizer, bleu_source, bleu_ref)
tf.compat.v1.logging.info("Bleu score (uncased): %s", uncased_score)
tf.compat.v1.logging.info("Bleu score (cased): %s", cased_score)
return uncased_score, cased_score | 5,357,566 |
def transform_call(red_node):
"""
Converts Python style function calls to VHDL style:
self.d(a) -> d(self, a)
If function owner is not exactly 'self' then 'type' is prepended.
self.next.moving_average.main(x) -> type.main(self.next.moving_average, x)
self.d(a) -> d(self, a)
self.next.d(a) -> d(self.next, a)
local.d() -> type.d(local)
self.local.d() -> type.d(self.local)
If return then:
b = self.a(arg) ->
variable pyha_ret_0: type;
a(self, arg, pyha_ret_0);
b := pyha_ret_0;
Handling call inside call is limited to depth 1.
"""
def find_line_node(red_obj):
line_node = red_obj
while True:
if type(line_node.next) == EndlNode:
break
if hasattr(line_node.parent, 'value') and type(line_node.parent.value) == LineProxyList:
if not (hasattr(line_node.parent, 'test') and (
line_node.parent.test == atom # if WE are the if condition, skip
or line_node.parent.test == atom.parent)): # if WE are the if condition (part of condition)
break
line_node = line_node.parent
return line_node
is_hack = False
# make sure each created variable is unique by appending this number and incrementing
tmp_var_count = 0
# loop over all atomtrailers, call is always a member of this
atomtrailers = red_node.find_all('atomtrailers')
for i, atom in enumerate(atomtrailers):
if is_hack: # when parsed out of order call
atom = atomtrailers[i - 1]
call = atom.call
is_hack = False
else:
call = atom.call # this actually points to the stuff between ()
if call is None: # this atomtrailer has no function call
continue
wat = call.call
if wat is not None: # one of the arguments is a call -> process it first (i expect it is next in the list)
call_index = wat.previous.index_on_parent
if call_index == 0: # input is something like x() -> len(), Sfix() ....
pass
else:
try:
atom = atomtrailers[i + 1]
call = atom.call
is_hack = True
except:
continue # no idea what is going on here...
if call is None: # this atomtrailer has no function call
continue
call_index = call.previous.index_on_parent
if call_index == 0: # input is something like x() -> len(), Sfix() ....
continue
# get the TARGET function object from datamodel
target_func_name = atom.copy()
del target_func_name[call_index + 1:]
try:
target_func_obj = super_getattr(convert_obj, str(target_func_name))
except: # happened for: (self.conjugate(complex_in) * complex_in).real
continue
if not target_func_obj.calls:
# function is not simulated...
line_node = find_line_node(atom)
line_node.replace(f'# comment out because not called in simulation: {line_node.dumps()}')
continue
prefix = atom.copy()
del prefix[call_index:]
del atom[:call_index]
tmp = prefix.copy()
if isinstance(tmp[0], AtomtrailersNode):
# this branch happens because of 'for transform'
tmp[0][0] = 'self_const'
call.insert(0, tmp)
else:
tmp[0] = 'self_const'
call.insert(0, tmp)
tmp = prefix.copy()
if isinstance(tmp[0], AtomtrailersNode):
tmp[0][0] = 'self_next'
call.insert(0, tmp)
else:
tmp[0] = 'self_next'
call.insert(0, tmp)
tmp = prefix.copy()
if isinstance(tmp[0], AtomtrailersNode):
tmp[0][0] = 'self'
call.insert(0, tmp)
else:
tmp[0] = 'self'
call.insert(0, tmp)
# get the SOURCE (where call is going on) function object from datamodel
def_parent = atom
while not isinstance(def_parent, DefNode):
def_parent = def_parent.parent
# def_parent = atom.parent_find('def')
source_func_name = f'self.{def_parent.name}'
source_func_obj = super_getattr(convert_obj, str(source_func_name))
# if call is not to local class function
# self.moving_average.main(x) -> MODULE_NAME.main(self.moving_average, x)
if str(prefix) != 'self':
var = super_getattr(convert_obj, str(prefix))
var = init_vhdl_type('-', var, var)
atom.insert(0, var._pyha_module_name())
if target_func_obj.get_output_types() is None:
continue # function is not returning stuff -> this is simple
else:
# add return variables to function locals, so that they will be converted to VHDL variables
ret_vars = []
for x in get_iterable(target_func_obj.get_output_types()):
name = f'pyha_ret_{tmp_var_count}'
ret_vars.append(name)
source_func_obj.add_local_type(name, x)
tmp_var_count += 1
# add return variable to arguments
call.append(name)
# call.value[-1].target = f'ret_{j}'
# need to add new source line before the CURRENT line..search for the node with linenodes
line_node = find_line_node(atom)
# add function call BEFORE the CURRENT line
if line_node != atom: # equality means that value is not assigned to anything
line_node.parent.insert(line_node.index_on_parent, atom.copy())
atom.replace(','.join(ret_vars)) | 5,357,567 |
def delta_next_time_to_send(G, u, v):
"""How long to wait before U should send a message to V under diffusion
spreading. Per the Bitcoin protocol, this depends on if we have an outgoing
connection or an incoming connection."""
is_outgoing = G[u][v][ORIGINATOR] == u
average_interval_seconds = 2 if is_outgoing else 5
delta = int(log1p(-random.random()) * average_interval_seconds * -1000000 + 0.5)
return delta if delta > 0 else 0 | 5,357,568 |
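# Editor-added note (not from the original source): -log1p(-U) * mean samples
# an exponential distribution with the given mean, so the delta is an
# exponential delay in microseconds. A quick empirical check of the 2 s mean:
import random
from math import log1p
samples = [int(log1p(-random.random()) * 2 * -1000000 + 0.5) for _ in range(100_000)]
print(sum(samples) / len(samples))  # close to 2,000,000 microseconds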
def processData(list_pc, imo):
"""
This function processes the data from getData to build a single string,
ready to be copied into the csv, that contains all the rows for one ship.
"""
str_pc = ''
for i in range(len(list_pc)):
if list_pc[i] == 'Arrival (UTC)':
tab = list_pc[i-1].split(',') # [Port, Country] (good) or [Port, Region, Country] (bad)
if len(tab) == 3:
tab = ['"' + tab[0] + ',' + tab[1].strip() + '"', tab[2]] # [Port+(Region), Country]
str_pc = str_pc + imo + ',' + tab[0] + ',' + tab[1] + ',"' + list_pc[i+1] + '","' + list_pc[i+3] + '","' + list_pc[i+5] + '"\n'
return str_pc | 5,357,569 |
def wrn(num_classes):
"""Constructs a wideres-28-10 model without dropout.
"""
return Wide_ResNet(28, 10, 0, num_classes) | 5,357,570 |
def info():
"""
Print application and environment version info
"""
print(f"PlexTraktSync Version: {get_version()}")
py_version = sys.version.replace("\n", "")
print(f"Python Version: {py_version}")
print(f"Plex API Version: {PLEX_API_VERSION}")
print(f"Trakt API Version: {TRAKT_API_VERSION}")
print(f"Cache Dir: {cache_dir}")
print(f"Config Dir: {config_dir}")
print(f"Log Dir: {log_dir}")
config = factory.config()
print(f"Plex username: {config['PLEX_USERNAME']}")
print(f"Trakt username: {config['TRAKT_USERNAME']}")
if has_plex_token():
plex = factory.plex_api()
print(f"Plex Server version: {plex.version}, updated at: {plex.updated_at}")
print(f"Enabled {len(plex.library_sections)} libraries in Plex Server: {plex.library_section_names}") | 5,357,571 |
def test_find_dup_timestamps_4():
"""Timestamps 500, 1200 should be detected as duplicate."""
assert logfile_check.find_duplicate_timestamps(
[0, 100, 200, 300, 500, 500, 600, 1200, 1200]) == [500, 1200] | 5,357,572 |
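# Editor-added sketch of a find_duplicate_timestamps implementation consistent
# with the test above (an assumption; the real logfile_check module is not
# shown): it returns each timestamp value that occurs more than once.
def find_duplicate_timestamps(timestamps):
    seen, dups = set(), []
    for ts in timestamps:
        if ts in seen and ts not in dups:
            dups.append(ts)
        seen.add(ts)
    return dups
print(find_duplicate_timestamps([0, 100, 200, 300, 500, 500, 600, 1200, 1200]))  # [500, 1200]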
def _state_worker_func(indices, programs, params):
"""Compute the wavefunction for each program in indices."""
x_np = _convert_complex_view_to_np(INFO_DICT['arr'], INFO_DICT['shape'])
simulator = INFO_DICT['sim']
for i, index in enumerate(indices):
result = simulator.simulate(programs[i], params[i])
final_array = INFO_DICT['post_process'](result).astype(np.complex64)
_update_complex_np(x_np, index, final_array) | 5,357,573 |
def run(
config: Dict[str, Any],
log_dir: str = "",
kernel_seed: int = 0,
kernel_random_state: Optional[np.random.RandomState] = None,
) -> Dict[str, Any]:
"""
Wrapper function that enables to run one simulation.
It does the following steps:
- instantiation of the kernel
- running of the simulation
- return the end_state object
Arguments:
config: configuration file for the specific simulation
log_dir: directory where log files are stored
kernel_seed: simulation seed
kernel_random_state: simulation random state
"""
coloredlogs.install(
level=config["stdout_log_level"],
fmt="[%(process)d] %(levelname)s %(name)s %(message)s",
)
kernel = Kernel(
random_state=kernel_random_state or np.random.RandomState(seed=kernel_seed),
log_dir=log_dir,
**subdict(
config,
[
"start_time",
"stop_time",
"agents",
"agent_latency_model",
"default_computation_delay",
"custom_properties",
],
),
)
sim_start_time = dt.datetime.now()
logger.info(f"Simulation Start Time: {sim_start_time}")
end_state = kernel.run()
sim_end_time = dt.datetime.now()
logger.info(f"Simulation End Time: {sim_end_time}")
logger.info(f"Time taken to run simulation: {sim_end_time - sim_start_time}")
return end_state | 5,357,574 |
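A hypothetical configuration sketch for `run`; the concrete `Kernel`, agent classes, latency model and `subdict` helper come from the surrounding simulation framework, so every value below is a placeholder.

import datetime as dt

config = {
    "stdout_log_level": "INFO",
    "start_time": dt.datetime(2022, 1, 3, 9, 30),
    "stop_time": dt.datetime(2022, 1, 3, 16, 0),
    "agents": [],                     # pre-built agent instances
    "agent_latency_model": None,      # e.g. a latency-model instance
    "default_computation_delay": 50,  # units are framework-dependent
    "custom_properties": {},
}

end_state = run(config, log_dir="logs/demo", kernel_seed=42)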
def _print_result(case, summary):
""" Show some statistics from the run """
for dof, data in summary.items():
print(" " + case + " " + dof)
print(" -------------------")
for header, val in data.items():
print(" " + header + " : " + str(val))
print("") | 5,357,575 |
def python_to_pydict(script_contents, namespace=None):
"""Load a Python script with dictionaries into a dictionary."""
if namespace is None:
namespace = {}
    exec(script_contents, {}, namespace)  # Python 3 call form of the old exec statement
return to_lower(namespace) | 5,357,576 |
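A usage sketch for `python_to_pydict`; `to_lower` is defined elsewhere in the module and is assumed to lower-case the top-level keys.

script = """
TIMEOUT = 30
RETRIES = {"network": 3, "disk": 1}
"""
settings = python_to_pydict(script)
# -> roughly {"timeout": 30, "retries": {...}}, depending on how to_lower normalises keys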
def check(atomTypes):
"""
Parameters
----------
atomTypes
Returns
-------
"""
N = len(atomTypes)
for a in atomTypes:
if len(a.matrix) != N:
print(a)
sys.exit('Matrix with wrong size\n') | 5,357,577 |
def convertInt(s):
"""Tells if a string can be converted to int and converts it
Args:
s : str
Returns:
s : str
Standardized token 'INT' if s can be turned to an int, s otherwise
"""
try:
int(s)
return "INT"
    except (ValueError, TypeError):
return s | 5,357,578 |
def client_blocking_handler(messages, socket_object):
"""
Applies for numbers: 1
"""
pass | 5,357,579 |
def test_dll_shift(dll_20):
"""Testing the shift() function of dll."""
for num in range(20):
new_tail = dll_20.tail.previous
dll_20.shift()
assert dll_20.tail == new_tail
with pytest.raises(IndexError):
dll_20.shift() | 5,357,580 |
def action2cls():
"""
Root
/home/ty/DB/mmaction2/data/classification_data
train
- normal
-foldername
-img_00001.jpg
-img_00002.jpg
- cancer
test
- normal
- cancer
To
train
- normal
foldername.jpg
- cancer
foldername.jpg
test
- normal
foldername.jpg
- cancer
foldername.jpg
Returns:
"""
RootDir = '/home/ty/DB/mmaction2/data/classification_data'
MoveDir = '/home/ty/Project/mmclassification/data/basbaiImgNet'
TrainDir = os.path.join(MoveDir, 'train')
TestDir = os.path.join(MoveDir, 'test')
mkdir(MoveDir)
TType = ['train', 'test']
mkdir([TrainDir, TestDir])
ClsType = ['normal', 'cancer']
for tt_type in TType :
for cls_type in ClsType :
PrevDir = os.path.join(RootDir, tt_type, cls_type)
SaveDir = os.path.join(MoveDir, tt_type, cls_type)
mkdir(SaveDir)
for folder_name in os.listdir(PrevDir) :
new_img_name = os.path.join(SaveDir, folder_name + '.jpg')
img1_path = os.path.join(PrevDir, folder_name, 'img_00001.jpg')
img2_path = os.path.join(PrevDir, folder_name, 'img_00002.jpg')
img1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)
# print(img1.shape, img2.shape)
# assert(img1.shape == img2.shape)
resize_img1 = cv2.resize(img1, (400, 600))
resize_img2 = cv2.resize(img2, (400, 600))
resize_shape = np.zeros((600, 400, 3), np.uint8)
resize_shape[:, :, 0] = resize_img1
resize_shape[:, :, 1] = resize_img2
cv2.imwrite(new_img_name, resize_shape) | 5,357,581 |
def pairwise_distance(A, B):
"""
    Compute the squared Euclidean distance between points in A and points in B
:param A: (m,n) -m points, each of n dimension. Every row vector is a point, denoted as A(i).
:param B: (k,n) -k points, each of n dimension. Every row vector is a point, denoted as B(j).
    :return: Matrix of shape (m, k) whose (i, j) entry is the squared distance between A(i) and B(j)
"""
A_square = torch.sum(A * A, dim=1, keepdim=True)
B_square = torch.sum(B * B, dim=1, keepdim=True)
distance = A_square + B_square.t() - 2 * torch.matmul(A, B.t())
return distance | 5,357,582 |
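A quick sanity check for `pairwise_distance`; note that the expansion A² + B² − 2AB yields squared Euclidean distances, so the reference below squares `torch.cdist`.

import torch

A = torch.randn(3, 5)
B = torch.randn(4, 5)
D = pairwise_distance(A, B)      # shape (3, 4)
ref = torch.cdist(A, B) ** 2     # reference squared Euclidean distances
assert torch.allclose(D, ref, atol=1e-4)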
def print_information(
option, option_string, value, parser, option_manager=None
):
"""Print debugging information used in bug reports.
:param option:
The optparse Option instance.
:type option:
optparse.Option
:param str option_string:
The option name
:param value:
The value passed to the callback parsed from the command-line
:param parser:
The optparse OptionParser instance
:type parser:
optparse.OptionParser
:param option_manager:
The Flake8 OptionManager instance.
:type option_manager:
flake8.options.manager.OptionManager
"""
if not option_manager.registered_plugins:
# NOTE(sigmavirus24): Flake8 parses options twice. The first time, we
# will not have any registered plugins. We can skip this one and only
# take action on the second time we're called.
return
print(json.dumps(information(option_manager), indent=2, sort_keys=True))
raise SystemExit(False) | 5,357,583 |
def _split_full_name(full_name: str) -> Tuple[str, str, str]:
"""Extracts the `(ds name, config, version)` from the full_name."""
if not tfds.core.registered.is_full_name(full_name):
raise ValueError(
f'Parsing builder name string {full_name} failed.'
'The builder name string must be of the following format:'
'`dataset_name[/config_name]/version`')
ds_name, *optional_config, version = full_name.split('/')
assert len(optional_config) <= 1
config = next(iter(optional_config)) if optional_config else ''
return ds_name, config, version | 5,357,584 |
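Illustrative splits, assuming both names pass the `tfds.core.registered.is_full_name` check:

assert _split_full_name("mnist/3.0.1") == ("mnist", "", "3.0.1")
assert _split_full_name("wmt14_translate/de-en/1.0.0") == ("wmt14_translate", "de-en", "1.0.0")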
def query_paginate(resources, arguments):
"""Return the resources paginated
Args:
resources(list): List to paginate
arguments(FormsDict): query arguments
Returns:
list: Paginated resource (asc or desc)
"""
if '_page' not in arguments:
return resources
page = int(arguments['_page'])
limit = 10 if '_limit' not in arguments else int(arguments['_limit'])
chunk_data = list(chunk_list(resources, limit))
results = chunk_data[page-1]
link_header = build_link_header(request, page, len(chunk_data))
response.set_header("Link", link_header)
return results | 5,357,585 |
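A standalone sketch of the chunking arithmetic used above; `chunk_list` is assumed to yield consecutive slices of the requested size, as its name suggests.

def chunk_list(items, size):
    for i in range(0, len(items), size):
        yield items[i:i + size]

resources = list(range(35))
page, limit = 2, 10                       # i.e. ?_page=2&_limit=10
chunk_data = list(chunk_list(resources, limit))
assert chunk_data[page - 1] == list(range(10, 20))
assert len(chunk_data) == 4               # page count used for the Link header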
def test_train(task_dataset, mocker):
"""Test train runs without crashing."""
wandb_log = mocker.patch.object(wandb, 'log')
probe = nn.Linear(N_DIMS_PER_REP, N_UNIQUE_FEATS)
before = probe.weight.data.clone()
learning.train(probe, task_dataset, epochs=EPOCHS, also_log_to_wandb=True)
after = probe.weight.data
assert not before.equal(after)
assert wandb_log.call_args_list == [
mocker.call({'train loss': mocker.ANY}),
] * len(task_dataset) * EPOCHS | 5,357,586 |
def convert_grad(graph):
"""Remove all instances of SymbolicKeyType in the graphs.
They will be replaced by globally-unique integers.
"""
mng = graph.manager
counter = 0
key_map = {}
for node in mng.all_nodes:
if node.is_constant(SymbolicKeyInstance):
if node.value not in key_map:
key_map[node.value] = counter
counter += 1
node.value = key_map[node.value]
node.abstract = to_abstract(node.value)
return graph | 5,357,587 |
def patch_marshmallow_fields():
"""
Patch marshmallow fields to look more like drf fields
"""
global _MA_FIELDS_PATCHED
if _MA_FIELDS_PATCHED:
return
ma_fields = {key: value
for key, value in globals().items()
if safe_issubclass(value, ma.fields.FieldABC)}
for field_name, field_cls in ma_fields.items():
cls_code = f"""class {field_cls.__name__}(field_cls):
_rf_patched = True
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", True)
kwargs.setdefault("dump_only", kwargs.pop("read_only", False))
kwargs.setdefault("load_only", kwargs.pop("write_only", False))
super(self.__class__, self).__init__(*args, **kwargs)
"""
exec(cls_code, globals(), locals())
# keep the name of original field
ma_fields[field_name] = locals()[field_cls.__name__]
globals().update(**ma_fields)
# also update mapping with patched classes
for key, value in sa_ma_pg_field_mapping.items():
if value.__name__ in ma_fields:
sa_ma_pg_field_mapping[key] = ma_fields[value.__name__] # noqa
_MA_FIELDS_PATCHED = True | 5,357,588 |
def site_data(db, settings):
"""Simple fake site data
"""
if organizations_support_sites():
settings.FEATURES['FIGURES_IS_MULTISITE'] = True
site_data = make_site_data()
ce = site_data['enrollments'][0]
lcgm = [
LearnerCourseGradeMetricsFactory(site=site_data['site'],
user=ce.user,
course_id=str(ce.course_id),
date_for='2020-10-01'),
]
site_data['lcgm'] = lcgm
return site_data | 5,357,589 |
def render_curve(name,
data,
x_range=None,
y_range=None,
x_label=None,
y_label=None,
legends=None,
legend_kwargs={},
img_height=None,
img_width=None,
dpi=300,
figsize=(2, 2),
**kwargs):
"""Plot 1D curves.
Args:
        name (str): rendering identifier
data (Tensor|np.ndarray): a rank-1 or rank-2 tensor/np.array. If rank-2,
then each row represents an individual curve.
x_range (tuple[float]): min/max for x values. If None, ``x`` is
the index sequence of curve points. If provided, ``x`` is
evenly spaced by ``(x_range[1] - x_range[0]) / (N - 1)``.
y_range (tuple[float]): a tuple of ``(min_y, max_y)`` for showing on
the figure. If None, then it will be decided according to the
``y`` values. Note that this range won't change ``y`` data; it's
only used by matplotlib for drawing ``y`` limits.
x_label (str): shown besides x-axis
y_label (str): shown besides y-axis
legends (list[str]): label for each curve. No legends are shown if
None.
legend_kwargs (dict): optional legend kwargs
img_height (int): height of the output image
img_width (int): width of the output image
dpi (int): resolution of each rendered image
figsize (tuple[int]): figure size. For the relationship between ``dpi``
and ``figsize``, please refer to `this post <https://stackoverflow.com/questions/47633546/relationship-between-dpi-and-figure-size>`_.
**kwargs: all other arguments to ``ax.plot()``.
Returns:
Image: an output image rendered for the tensor
"""
assert len(data.shape) in (1, 2), "Must be rank-1 or rank-2!"
if not isinstance(data, np.ndarray):
array = data.cpu().numpy()
else:
array = data
if len(array.shape) == 1:
array = np.expand_dims(array, 0)
fig, ax = plt.subplots(figsize=figsize)
M, N = array.shape
    # use an ndarray so the affine rescaling below works (range * float raises TypeError)
    x = np.arange(N)
    if x_range is not None:
        delta = (x_range[1] - x_range[0]) / float(N - 1)
        x = delta * x + x_range[0]
for i in range(M):
ax.plot(x, array[i], **kwargs)
if legends is not None:
ax.legend(legends, loc="best", **legend_kwargs)
if y_range:
ax.set_ylim(y_range)
if x_label:
ax.set_xlabel(x_label)
if y_label:
ax.set_ylabel(y_label)
return _convert_to_image(name, fig, dpi, img_height, img_width) | 5,357,590 |
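A usage sketch for `render_curve`; the `_convert_to_image` helper from the surrounding module turns the matplotlib figure into the library's image type.

import numpy as np

t = np.linspace(0.0, 6.28, 100)
curves = np.stack([np.sin(t), np.cos(t)])
img = render_curve("waves", curves,
                   x_range=(0.0, 6.28),
                   y_range=(-1.2, 1.2),
                   x_label="t", y_label="amplitude",
                   legends=["sin", "cos"])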
def CreateBooleanUnion1(breps, tolerance, manifoldOnly, multiple=False):
"""
Compute the Boolean Union of a set of Breps.
Args:
breps (IEnumerable<Brep>): Breps to union.
tolerance (double): Tolerance to use for union operation.
manifoldOnly (bool): If true, non-manifold input breps are ignored.
Returns:
Brep[]: An array of Brep results or None on failure.
"""
url = "rhino/geometry/brep/createbooleanunion-breparray_double_bool"
if multiple: url += "?multiple=true"
args = [breps, tolerance, manifoldOnly]
if multiple: args = list(zip(breps, tolerance, manifoldOnly))
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response | 5,357,591 |
def make_singleton_class(class_reference, *args, **kwargs):
"""
Make the given class a singleton class.
*class_reference* is a reference to a class type, not an instance of a class.
*args* and *kwargs* are parameters used to instantiate a singleton instance.
To use this, suppose we have a class called ``DummyClass`` and later instantiate
    a variable ``dummy_instance`` as an instance of class ``DummyClass``. ``class_reference``
will be ``DummyClass``, not ``dummy_instance``.
Note that this method is not for direct use. Always use ``@singleton`` or ``@singleton_with``.
"""
    # Name of the attribute that stores the singleton instance
    singleton_attr_name = '_singleton_instance'
    # The static method to get the singleton instance of the reference class
@staticmethod
def instance():
"""
Get a singleton instance.
.. note:: This class is capable to act as a singleton class by invoking this method.
"""
return class_reference._singleton_instance
# Intercept if the class has already been a singleton class.
if singleton_attr_name in dir(class_reference):
raise SingletonInitializationException(
'The attribute _singleton_instance is already assigned as instance of %s.'\
% type(class_reference._singleton_instance)
)
# Instantiate an instance for a singleton class.
class_reference._singleton_instance = class_reference(*args, **kwargs)
class_reference.instance = instance
return class_reference | 5,357,592 |
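A direct-use sketch; in the original library this is normally reached through the `@singleton` / `@singleton_with` decorators rather than being called by hand, and the path below is hypothetical.

class Config:
    def __init__(self, path):
        self.path = path

Config = make_singleton_class(Config, "/etc/app.conf")  # hypothetical argument
assert Config.instance() is Config.instance()
print(Config.instance().path)   # /etc/app.conf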
def get_coaches(soup):
"""
scrape head coaches
:param soup: html
:return: dict of coaches for game
"""
coaches = soup.find_all('tr', {'id': "HeadCoaches"})
# If it picks up nothing just return the empty list
if not coaches:
return coaches
coaches = coaches[0].find_all('td')
return {
'Away': coaches[1].get_text(),
'Home': coaches[3].get_text()
} | 5,357,593 |
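A minimal check with a hand-written HTML fragment; the real report pages may carry extra markup, but the row id and cell positions below match what the function reads.

from bs4 import BeautifulSoup

html = """
<table><tr id="HeadCoaches">
  <td>Away Coach</td><td>Joel Quenneville</td>
  <td>Home Coach</td><td>Mike Babcock</td>
</tr></table>
"""
soup = BeautifulSoup(html, "html.parser")
assert get_coaches(soup) == {"Away": "Joel Quenneville", "Home": "Mike Babcock"}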
def _check_base_parsers(
base_parsers: List[base.BaseParser], attrdict: dict
) -> None:
"""Check that the base parser list fulfills all requirements."""
if base.BaseParser.REPO_DISCOVERY in base_parsers:
# the REPO_DISCOVERY parser requires both the STUDENTS parser and
# the api argument to the command function, see
# https://github.com/repobee/repobee/issues/716 for details
if base.BaseParser.STUDENTS not in base_parsers:
raise exceptions.PlugError(
"REPO_DISCOVERY parser requires STUDENT parser"
)
elif "api" not in inspect.signature(attrdict["command"]).parameters:
raise exceptions.PlugError(
"REPO_DISCOVERY parser requires command function to use api "
"argument"
) | 5,357,594 |
async def test_sending_none_message(opp, events):
"""Test send with None as message."""
await setup_notify(opp)
with pytest.raises(vol.Invalid):
await opp.services.async_call(
notify.DOMAIN, notify.SERVICE_NOTIFY, {notify.ATTR_MESSAGE: None}
)
await opp.async_block_till_done()
assert len(events) == 0 | 5,357,595 |
def create_app(config_name):
"""
Factory to create Flask application context using config option found in
app.config
:param config_name: (string) name of the chosen config option
:return app: (Flask application context)
"""
logging.basicConfig(
filename="app.log",
filemode="w",
format="%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
app = Flask(__name__)
app.config.from_object(config[config_name])
logging.info("App initialized.")
register_extensions(app)
register_blueprints(app)
configure_database(app)
return app | 5,357,596 |
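Typical usage of the factory, assuming a "development" key exists in the module-level `config` mapping:

app = create_app("development")

if __name__ == "__main__":
    app.run(debug=True)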
def _rectify_countdown_or_bool(count_or_bool):
"""
    used by recursive functions to specify which level to turn a bool on in
    counting down yields True, True, ..., False
    counting up yields False, False, False, ... True
Args:
count_or_bool (bool or int): if positive will count down, if negative
            will count up, if bool will remain the same
Returns:
int or bool: count_or_bool_
CommandLine:
python -m utool.util_str --test-_rectify_countdown_or_bool
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_str import _rectify_countdown_or_bool # NOQA
>>> count_or_bool = True
>>> a1 = (_rectify_countdown_or_bool(2))
>>> a2 = (_rectify_countdown_or_bool(1))
>>> a3 = (_rectify_countdown_or_bool(0))
>>> a4 = (_rectify_countdown_or_bool(-1))
>>> a5 = (_rectify_countdown_or_bool(-2))
>>> a6 = (_rectify_countdown_or_bool(True))
>>> a7 = (_rectify_countdown_or_bool(False))
>>> result = [a1, a2, a3, a4, a5, a6, a7]
>>> print(result)
        [1, 0, 0, 0, -1, True, False]
"""
if count_or_bool is True or count_or_bool is False:
count_or_bool_ = count_or_bool
elif isinstance(count_or_bool, int):
if count_or_bool == 0:
return 0
sign_ = math.copysign(1, count_or_bool)
count_or_bool_ = int(count_or_bool - sign_)
#if count_or_bool_ == 0:
# return sign_ == 1
else:
count_or_bool_ = False
return count_or_bool_ | 5,357,597 |
def test_web_driver_available(testdir):
"""
`web_driver` fixture should be available for test run.
"""
result = testdir.runpytest('--fixtures')
assert ' Defines WebDriver instance with desired capabilities.' in result.stdout.lines | 5,357,598 |
def POpen (inUV, access, err):
""" Open an image persistent (disk) form
inUV = Python UV object
access = access 1=READONLY, 2=WRITEONLY, 3=READWRITE
err = Python Obit Error/message stack
"""
################################################################
if ('myClass' in inUV.__dict__) and (inUV.myClass=='AIPSUVData'):
raise TypeError("Function unavailable for "+inUV.myClass)
return inUV.Open(access, err)
# end POpen | 5,357,599 |