def greyscale(state):
"""
Preprocess state (210, 160, 3) image into
a (80, 80, 1) image in grey scale
"""
state = np.reshape(state, [210, 160, 3]).astype(np.float32)
# grey scale
state = state[:, :, 0] * 0.299 + state[:, :, 1] * 0.587 + state[:, :, 2] * 0.114
# karpathy
state = state[35:195] # crop
state = state[::2,::2] # downsample by factor of 2
state = state[:, :, np.newaxis]
return state.astype(np.uint8) | 5,352,500 |
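A minimal usage sketch (added for illustration, not part of the original snippet), assuming numpy is imported as np just as the function itself requires; the random frame stands in for a raw Atari observation:

import numpy as np

# Dummy 210x160 RGB frame standing in for a raw Atari observation.
frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)
processed = greyscale(frame)
print(processed.shape, processed.dtype)  # (80, 80, 1) uint8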
def main(
cl,
re,
ma,
n_ca,
n_th,
gen=100,
tolx=1e-8,
tolf=1e-8,
fix_te=True,
t_te_min=0.0,
t_c_min=0.01,
r_le_min=0.05,
A_cs_min=None,
A_bins_min=None,
Cm_max=None,
strategy="rand-to-best/1/exp/random",
f=None,
cr=None,
adaptivity=2,
repr_file="repr.yml",
dat_file="optimized.dat",
png_file="optimized.png",
):
"""
Create, analyze, optimize airfoil, and write optimized coordinates to a file. Then clean the problem up and exit.
Parameters
----------
cl : float
Design lift coefficient
re : float
Reynolds number
ma : float
Mach number
n_ca, n_th : int
Number of CST coefficients for the chord line and thickness distribution, respectively
gen : int, optional
Number of generations to use for the genetic algorithm. 100 by default
tolx : float, optional
Tolerance on the spread of the design vectors.
    tolf : float, optional
Tolerance on the spread of objective functions.
fix_te : bool, optional
True if the trailing edge thickness should be fixed. True by default
t_te_min : float, optional
Minimum TE thickness as fraction of chord length. Default is 0.0.
t_c_min : float or None, optional
        Minimum thickness over chord ratio. None if unconstrained. Default is 0.01.
r_le_min : float or None, optional
        Minimum leading edge radius. None if unconstrained. Default is 0.05.
A_cs_min : float or None, optional
Minimum cross sectional area. None if unconstrained. Default is None.
A_bins_min : float or None, optional
Minimum relative area of the airfoil in each bin along the chord. None if unconstrained. Default is None.
Cm_max : float or None, optional
Maximum absolute moment coefficient. None if unconstrained. Default is None.
strategy : string, optional
Evolution strategy to use. Default is 'rand-to-best/1/exp/random'.
f : float or None, optional
Mutation rate
cr : float or None, optional
Crossover rate
adaptivity : 0, 1, or 2
        Which kind of self-adaptivity to use (0: none, 1: simple, 2: complex)
repr_file, dat_file, png_file : str, optional
Paths where the final representation, optimized airfoil coordinates, and output image should be saved.
"""
# Construct the OpenMDAO Problem
kwargs = dict(
n_ca=n_ca,
n_th=n_th,
fix_te=fix_te,
t_te_min=t_te_min,
t_c_min=t_c_min,
r_le_min=r_le_min,
A_cs_min=A_cs_min,
A_bins_min=A_bins_min,
Cm_max=Cm_max,
)
prob = om.Problem()
prob.model = AfOptModel(**kwargs)
prob.driver = get_de_driver(gen, tolx, tolf, strategy, f, cr, adaptivity)
prob.driver.add_recorder(PopulationReporter())
prob.setup()
# Set reference values
prob["Cl_des"] = cl
prob["Re"] = re
prob["M"] = ma
# Optimize the problem using a genetic algorithm
t0 = time.time()
prob.run_driver()
dt = time.time() - t0
# Show and write final results
if rank == 0:
yaml = prob.model.__repr__()
print("Optimized airfoil:")
print(" " + yaml.replace("\n", "\n "))
print(f"Time Elapsed: {timedelta(seconds=dt)}")
        with open(repr_file, "w") as repr_fh:  # avoid shadowing the mutation-rate parameter f
            repr_fh.write(yaml)
write(prob, filename=dat_file)
fig = plot(prob)
fig.savefig(png_file)
# Clean up and exit
prob.cleanup()
del prob
sys.exit(0) | 5,352,501 |
def test_generate_notification_payload(mocker, mocker2):
"""Test generate_notification_payload function."""
es.FINAL_DATA = {
"xyz/xyz": {
"notify": "true",
"direct_updates": [{
"ecosystem": "maven",
"name": "io.vertx:vertx-web",
"latest_version": "3.4.2"
}
],
"transitive_updates": [{
"ecosystem": "maven",
"name": "io.vertx:vertx-core",
"latest_version": "3.4.2"
}
]
}
}
mocker.return_value = "abcd"
mocker2.return_value = "defg"
out = es.generate_notification_payload()
assert out == "success" | 5,352,502 |
def get_mnist(data_folder='./', chunk_size=128):
"""Retreives images of mnist digits and corresponding labels
Saves data within a folder called mnist.
    Optional inputs:
    data_folder - path to location where mnist is saved
chunk_size - rate at which bytes are downloaded
"""
folder_path = os.path.join(data_folder, "mnist")
if not os.path.exists(folder_path):
os.mkdir(folder_path)
img_url = "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz"
zipname = img_url.split('/')[-1]
extract_name = zipname.split('.')[0]
save_path = os.path.join(folder_path, zipname)
image_bytes = os.path.join(folder_path, extract_name)
pull_file(img_url, save_path, chunk_size)
print(f"Extract {save_path} to {image_bytes}\n")
with gzip.open(save_path, 'rb') as f_in:
with open(image_bytes, 'wb') as f_out:
f_out.write(f_in.read())
# remove zip file
print(f"Removing {save_path}\n")
os.remove(save_path)
label_url = "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"
zipname = label_url.split('/')[-1]
extract_name = zipname.split('.')[0]
save_path = os.path.join(folder_path, zipname)
label_bytes = os.path.join(folder_path, extract_name)
pull_file(label_url, save_path, chunk_size)
print(f"Extract {save_path} to {label_bytes}\n")
with gzip.open(save_path, 'rb') as f_in:
with open(label_bytes, 'wb') as f_out:
f_out.write(f_in.read())
# remove zip file
print(f"Removing {save_path}\n")
os.remove(save_path)
# Load mnist data
X, y = loadlocal_mnist(
images_path=image_bytes,
labels_path=label_bytes)
X_path = os.path.join(folder_path,"X_MNIST.npy")
y_path = os.path.join(folder_path,"y_MNIST.npy")
print(f"Saving MNIST images to {X_path}\n")
np.save(X_path, X)
print(f"Saving MNIST image labels to {y_path}\n")
np.save(y_path, y)
os.remove(image_bytes)
os.remove(label_bytes) | 5,352,503 |
def functional_common_information(dist, rvs=None, crvs=None, rv_mode=None):
"""
Compute the functional common information, F, of `dist`. It is the entropy
of the smallest random variable W such that all the variables in `rvs` are
rendered independent conditioned on W, and W is a function of `rvs`.
Parameters
----------
dist : Distribution
The distribution from which the functional common information is
computed.
rvs : list, None
        A list of lists. Each inner list specifies the indexes of the random
        variables used to compute the functional common information. If None,
        then it is computed over all random variables, which is equivalent to
        passing `rvs=dist.rvs`.
crvs : list, None
A single list of indexes specifying the random variables to condition
on. If None, then no variables are conditioned on.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If equal
        to 'names', then the elements are interpreted as random variable names.
If `None`, then the value of `dist._rv_mode` is consulted, which
defaults to 'indices'.
Returns
-------
F : float
The functional common information.
"""
rvs, crvs, rv_mode = normalize_rvs(dist, rvs, crvs, rv_mode)
dtc = dual_total_correlation(dist, rvs, crvs, rv_mode)
ent = entropy(dist, rvs, crvs, rv_mode)
if np.isclose(dtc, ent):
return dtc
d = functional_markov_chain(dist, rvs, crvs, rv_mode)
return entropy(d, [dist.outcome_length()]) | 5,352,504 |
def kubernetes_client() -> BatchV1Api:
"""
returns a kubernetes client
"""
config.load_config()
return BatchV1Api() | 5,352,505 |
def __test(priority_queue):
"""
Priority-Queue Test.
__test(priority_queue) -> None
@type priority_queue: basepriorityqueue
@param priority_queue: priority-queue instance.
"""
if not isinstance(priority_queue, basepriorityqueue):
raise TypeError("Expected type was PriorityQueue.")
print "### iPATH TEST DATA STRUCTURE"
print "### Data Type: Priority Queue ({})".format(str(priority_queue.__class__.__bases__[0].__name__))
print "### Implementation: {}".format(priority_queue.__class__.__name__)
print "\n*** INSERT ***\n"
for i in range(0, 10, 2):
print "insert({}, {})".format(str(i), str(float(i)))
priority_queue.insert(i, float(i))
for i in range(1, 10, 2):
print "insert({}, {})".format(str(i), str(float(i)))
priority_queue.insert(i, float(i))
print "\n{}\n".format(str(priority_queue))
print "\n*** FIND/DELETE MIN ***\n"
for i in range(2):
print "find_min: {}".format(str(priority_queue.find_min()))
print "delete_min: {}\n".format(str(priority_queue.delete_min()))
print "\n{}\n".format(str(priority_queue))
print "\n*** DECREASE KEY ***\n"
for i in range(5, 10):
print "decrease_key({}, {})".format(str(i), str(float(i / 2)))
priority_queue.decrease_key(i, float(i / 2))
print "\n{}\n".format(str(priority_queue))
print "\n*** INCREASE KEY ***\n"
for i in range(2, 5):
print "increase_key({}, {})".format(str(i), str(float(i * 2)))
priority_queue.increase_key(i, float(i * 2))
print "\n{}\n".format(str(priority_queue))
for i in range(2):
print "find_min: {}".format(str(priority_queue.find_min()))
print "delete_min: {}\n".format(str(priority_queue.delete_min()))
print "\n{}\n".format(str(priority_queue))
print "\n*** EMPTYING ***\n"
while priority_queue.is_empty() is False:
print "find_min: {}".format(str(priority_queue.find_min()))
print "delete_min: {}".format(str(priority_queue.delete_min()))
print "\n{}\n".format(str(priority_queue))
print "\n### END OF TEST ###\n" | 5,352,506 |
def get_replication_set_output(arn: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetReplicationSetResult]:
"""
Resource type definition for AWS::SSMIncidents::ReplicationSet
:param str arn: The ARN of the ReplicationSet.
"""
... | 5,352,507 |
def adjust_wire_offset(wire):
"""
Adjusts the wire's offset so that the two labels have a
distinct last bit.
:param wire: the wire in question
"""
false_label = get_last_bit(wire.false_label.label)
true_label = get_last_bit(wire.true_label.label)
while false_label == true_label:
wire.true_label.label = os.urandom(settings.NUM_BYTES)
true_label = get_last_bit(wire.true_label.label) | 5,352,508 |
def admin_only(func):
"""[TODO summary of func]
args:
[TODO insert arguments]
returns:
[TODO insert returns]
"""
def isadmin(invoker, chatadmins):
adminids = []
for admin in chatadmins:
adminids.append(admin.user.id)
return invoker.id in adminids
async def wrapper(message: types.Message):
invoker = message.from_user
chatadmins = await message.chat.get_administrators()
if isadmin(invoker, chatadmins):
await func(message)
# print('isadmin')
#TODO tell that an admin thing is performed
else:
# print('notadmin')
#TODO tell that an admin thing is denied
pass
return wrapper | 5,352,509 |
def create_simple():
"""Create an instance of the `Simple` class."""
return Simple() | 5,352,510 |
def contains_rep_info(line):
"""
    Checks whether the line contains a link to a GitHub repo (pretty simple 'algorithm' at the moment)
    :param line: string from a readme file
    :return: true if it has a link to a GitHub repository
:type line:string
:rtype: boolean
"""
    return "https://github.com/" in line
def GetAtomPairFingerprintAsBitVect(mol):
""" Returns the Atom-pair fingerprint for a molecule as
a SparseBitVect. Note that this doesn't match the standard
definition of atom pairs, which uses counts of the
pairs, not just their presence.
**Arguments**:
- mol: a molecule
**Returns**: a SparseBitVect
>>> from rdkit import Chem
>>> m = Chem.MolFromSmiles('CCC')
>>> v = [ pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(1),1),
... pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(2),2),
... ]
>>> v.sort()
>>> fp = GetAtomPairFingerprintAsBitVect(m)
>>> list(fp.GetOnBits())==v
True
"""
res = DataStructs.SparseBitVect(fpLen)
fp = rdMolDescriptors.GetAtomPairFingerprint(mol)
for val in fp.GetNonzeroElements():
res.SetBit(val)
return res | 5,352,512 |
def get_registrations_by_player_id(db_cursor: sqlite3.Cursor, player_id: int) -> list[registration.Registration]:
"""
Get a list of registrations by player id.
:param db_cursor: database object to interact with database
:param player_id: player id
:return: a list of registrations
"""
db_cursor.execute("""SELECT * FROM registrations WHERE user_id = ?""", [player_id])
registration_infos = db_cursor.fetchall()
registrations = []
for registration_info in registration_infos:
registrations.append(registration.Registration.from_sqlite_table(registration_info))
return registrations | 5,352,513 |
def main(argv=None, from_checkout=False):
"""Top-level script function to create a new Zope instance."""
if argv is None:
argv = sys.argv
try:
options = parse_args(argv, from_checkout)
except SystemExit as e:
if e.code:
return 2
else:
return 0
app = Application(options)
try:
return app.process()
except KeyboardInterrupt:
return 1
except SystemExit as e:
return e.code | 5,352,514 |
def process_twitter_outbox():
""" Send Pending Twitter Messages """
msg.process_outbox(contact_method = "TWITTER") | 5,352,515 |
def run_sim(alpha,db,m,DELTA,game,game_constants,i):
"""run a single simulation and save interaction data for each clone"""
rates = (DEATH_RATE,DEATH_RATE/db)
rand = np.random.RandomState()
data = [get_areas_and_fitnesses(tissue,DELTA,game,game_constants)
for tissue in lib.run_simulation(simulation,L,TIMESTEP,TIMEND,rand,progress_on=False,
init_time=INIT_TIME,til_fix='exclude_final',save_areas=True,return_events=False,save_cell_histories=False,
N_limit=MAX_POP_SIZE,DELTA=DELTA,game=game,game_constants=game_constants,
mutant_num=1,domain_size_multiplier=m,rates=rates,threshold_area_fraction=alpha,generator=True)]
return data | 5,352,516 |
def IMDB(*args, **kwargs):
""" Defines IMDB datasets.
The labels includes:
- 0 : Negative
- 1 : Positive
Create sentiment analysis dataset: IMDB
Separately returns the training and test dataset
Arguments:
root: Directory where the datasets are saved. Default: ".data"
        ngrams: a contiguous sequence of n items from a string of text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
removed_tokens: removed tokens from output dataset (Default: [])
tokenizer: the tokenizer used to preprocess raw text data.
The default one is basic_english tokenizer in fastText. spacy tokenizer
is supported as well. A custom tokenizer is callable
function with input of a string and output of a token list.
data_select: a string or tuple for the returned datasets
(Default: ('train', 'test'))
        By default, both datasets (train and test) are generated. Users
could also choose any one or two of them, for example ('train', 'test') or
just a string 'train'. If 'train' is not in the tuple or string, a vocab
object should be provided which will be used to process valid and/or test
data.
Examples:
>>> from torchtext.experimental.datasets import IMDB
>>> from torchtext.data.utils import get_tokenizer
>>> train, test = IMDB(ngrams=3)
>>> tokenizer = get_tokenizer("spacy")
>>> train, test = IMDB(tokenizer=tokenizer)
>>> train, = IMDB(tokenizer=tokenizer, data_select='train')
"""
return _setup_datasets(*(("IMDB",) + args), **kwargs) | 5,352,517 |
def load_multiples(image_file_list: List, method: str='mean', stretch: bool=True, **kwargs) -> ImageLike:
"""Combine multiple image files into one superimposed image.
Parameters
----------
image_file_list : list
A list of the files to be superimposed.
method : {'mean', 'max', 'sum'}
A string specifying how the image values should be combined.
stretch : bool
Whether to normalize the images being combined by stretching their high/low values to the same values across images.
kwargs :
Further keyword arguments are passed to the load function.
Examples
--------
Load multiple images::
>>> from pylinac.core.image import load_multiples
>>> paths = ['starshot1.tif', 'starshot2.tif']
>>> superimposed_img = load_multiples(paths)
"""
# load images
img_list = [load(path, **kwargs) for path in image_file_list]
first_img = img_list[0]
# check that all images are the same size and stretch if need be
for img in img_list:
if img.shape != first_img.shape:
raise ValueError("Images were not the same shape")
if stretch:
img.array = stretcharray(img.array, fill_dtype=first_img.array.dtype)
# stack and combine arrays
new_array = np.dstack(tuple(img.array for img in img_list))
if method == 'mean':
combined_arr = np.mean(new_array, axis=2)
elif method == 'max':
combined_arr = np.max(new_array, axis=2)
elif method == 'sum':
combined_arr = np.sum(new_array, axis=2)
# replace array of first object and return
first_img.array = combined_arr
first_img.check_inversion_by_histogram()
return first_img | 5,352,518 |
def select_seeds(
img: np.ndarray, clust_result: np.ndarray, FN: int = 500,
TN: int = 700, n_clust_object: int = 2
):
"""
    Sample seeds from the fluid and retina regions according to the procedure
described in Rashno et al. 2017
Args:
img (np.ndarray): Image from where to sample the seeds.
        clust_result (np.ndarray): Image with the clustering labels.
FN (int, optional): Number of fluid points to sample. Defaults to 500.
        TN (int, optional): Number of retina points to sample. Defaults to 700.
n_clust_object (int, optional): number of clusters assigned to fluid.
Returns:
fluid_seeds, retina_seeds
"""
n_clust = len(np.unique(clust_result)) - 1
clusters_centers = []
for i in range(1, n_clust+1):
clusters_centers.append(np.mean(img[clust_result == i]))
clusters_centers = np.array(clusters_centers)
indices = np.flip(np.argsort(clusters_centers)) + 1
# Fluid Seeds
fluid_condition = (clust_result == indices[0])
for i in range(n_clust_object):
fluid_condition = fluid_condition | (clust_result == indices[i])
potential_seeds = np.array(np.where(fluid_condition)).T
sample_indx = np.random.randint(0, potential_seeds.shape[0], FN)
fluid_seeds = potential_seeds[sample_indx]
# Retina Seeds:
# Get sampling probabilities and number of samples per cluster
pi = 1/(2**np.arange(1, n_clust-n_clust_object+1))
Npi = np.ones((n_clust-n_clust_object))*70
pre_asigned = (n_clust-n_clust_object)*70
Npi = Npi + np.round((pi/np.sum(pi))*(700-pre_asigned))
Npi = Npi.astype('int')
# Npi = (np.ones((n_clust-n_clust_object))*(700 / n_clust)).astype('int')
# Sample seeds
retina_seeds = []
for i in range(n_clust_object, len(indices)):
bkg_condition = (clust_result == indices[i])
potential_seeds = np.array(np.where(bkg_condition)).T
sample_indx = \
np.random.randint(0, potential_seeds.shape[0], Npi[i-n_clust_object])
retina_seeds.append(potential_seeds[sample_indx])
retina_seeds = np.concatenate(retina_seeds)
return fluid_seeds, retina_seeds, clusters_centers, indices | 5,352,519 |
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"coinbase.wallet.client.Client.get_current_user",
return_value=mock_get_current_user(),
), patch(
"coinbase.wallet.client.Client.get_accounts", new=mocked_get_accounts
), patch(
"coinbase.wallet.client.Client.get_exchange_rates",
return_value=mock_get_exchange_rates(),
), patch(
"homeassistant.components.coinbase.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.coinbase.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "123456",
CONF_API_TOKEN: "AbCDeF",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Test User"
assert result2["data"] == {CONF_API_KEY: "123456", CONF_API_TOKEN: "AbCDeF"}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1 | 5,352,520 |
def common(list1, list2):
"""
This function is passed two lists and returns a new list containing
those elements that appear in both of the lists passed in.
"""
common_list = []
temp_list = list1.copy()
temp_list.extend(list2)
temp_list = list(set(temp_list))
temp_list.sort()
for i in temp_list:
if (i in list1) and (i in list2):
common_list.append(i)
return common_list | 5,352,521 |
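A quick illustrative call (added here, not from the original source) showing that duplicates are collapsed and the result comes back sorted:

print(common([3, 1, 2, 2], [2, 5, 3]))  # [2, 3]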
def create_combobox(root, values, **kwargs):
"""Creates and Grids A Combobox"""
box = ttk.Combobox(root, values=values, **kwargs)
box.set(values[0])
return box | 5,352,522 |
def configure_logging_console(logger_type):
"""
    Configure a console logger.
    :param logger_type: Name of the logger to create and share across the app's modules
    :return: the configured logger instance
"""
_date_name = datetime.now().strftime('%Y-%m-%dT%H%M')
_importer_logger = logging.getLogger(logger_type)
_importer_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - Module: %(module)s - Line No: %(lineno)s : %(name)s : %(levelname)s - '
'%(message)s')
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.DEBUG)
sh.setFormatter(formatter)
_importer_logger.addHandler(sh)
return _importer_logger | 5,352,523 |
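A short usage sketch (added illustration), assuming the logging, sys and datetime imports the function relies on are present; "importer" is just a hypothetical logger name:

logger = configure_logging_console("importer")
logger.info("Console logging configured")  # emitted to stdout with the formatter above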
def chi_x2(samples,df):
"""
    Evaluate the central chi-squared probability density for a set of
    chi-squared distributed samples.
Parameters:
- - - - -
samples : chi-square random variables
df : degrees of freedom
"""
return chi2.pdf(samples,df) | 5,352,524 |
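An illustrative call (added here), assuming numpy and scipy.stats.chi2 are imported as the function requires:

import numpy as np
from scipy.stats import chi2  # the function above evaluates chi2.pdf

samples = np.array([0.5, 1.0, 2.0, 5.0])
print(chi_x2(samples, df=3))  # chi-squared density values at each sample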
def integrate(
pc2i,
eos,
initial_frac=DEFAULT_INITIAL_FRAC,
rtol=DEFAULT_RTOL,
):
"""integrate the TOV equations with central pressure "pc2i" and equation of state described by energy density "eps/c2" and pressure "p/c2"
expects eos = (logenthalpy, pressurec2, energy_densityc2, baryon_density, cs2c2)
"""
### define initial condition
logh, vec = initial_condition(pc2i, eos, frac=initial_frac)
m, r, eta, omega, mb = engine(
logh,
vec,
eos,
dvecdlogh,
rtol=rtol,
)
# compute tidal deformability
l = eta2lambda(r, m, eta)
# compute moment of inertia
i = omega2i(r, omega)
# convert to "standard" units
m /= Msun ### reported in units of solar masses, not grams
mb /= Msun
r *= 1e-5 ### convert from cm to km
i /= 1e45 ### normalize this to a common value but still in CGS
return m, r, l, i, mb | 5,352,525 |
def add_ingress_port_to_security_lists(**kwargs):
"""Checks if the given ingress port already is a security list,
if not it gets added.
Args:
**kwargs: Optional parameters
Keyword Args:
security_lists (list): A list of security_lists.
port (int): The port to check
description (str): A description for the ingress rule
compartment_id (str): The OCID of the compartment
config (object): An OCI config object or None.
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
raise_exceptions (bool): If true exceptions are raised
Returns:
True on success
"""
security_lists = kwargs.get("security_lists")
port = kwargs.get("port")
description = kwargs.get("description")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
raise_exceptions = kwargs.get("raise_exceptions", not interactive)
if security_lists is None:
raise ValueError("No security_lists given.")
try:
# Get the active config and compartment
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
for sec_list in security_lists:
for rule in sec_list.ingress_security_rules:
if rule.tcp_options is not None and \
port >= rule.tcp_options.destination_port_range.min and \
port <= rule.tcp_options.destination_port_range.max and \
rule.protocol == "6" and \
rule.source == "0.0.0.0/0":
return True
if len(security_lists) == 0:
raise Exception("No security list available for this network.")
sec_list = security_lists[0]
import oci.exceptions
try:
network_client = core.get_oci_virtual_network_client(
config=config)
sec_list.ingress_security_rules.append(
oci.core.models.IngressSecurityRule(
protocol="6",
source="0.0.0.0/0",
is_stateless=False,
source_type="CIDR_BLOCK",
tcp_options=oci.core.models.TcpOptions(
destination_port_range=oci.core.models.PortRange(
max=port,
min=port),
source_port_range=None),
udp_options=None,
description=description
)
)
details = oci.core.models.UpdateSecurityListDetails(
defined_tags=sec_list.defined_tags,
display_name=sec_list.display_name,
egress_security_rules=sec_list.egress_security_rules,
freeform_tags=sec_list.freeform_tags,
ingress_security_rules=sec_list.ingress_security_rules
)
network_client.update_security_list(
security_list_id=sec_list.id,
update_security_list_details=details)
return True
except oci.exceptions.ServiceError as e:
if raise_exceptions:
raise
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
except Exception as e:
if raise_exceptions:
raise
        print(f'Could not add the ingress rule to the security lists of this '
              f'network.\nERROR: {str(e)}')
def describe_cluster_instances(stack_name, node_type):
"""Return the cluster instances optionally filtered by tag."""
instances = _describe_cluster_instances(stack_name, filter_by_node_type=str(node_type))
if not instances:
# Support for cluster that do not have aws-parallelcluster-node-type tag
LOGGER.debug("Falling back to Name tag when describing cluster instances")
instances = _describe_cluster_instances(stack_name, filter_by_name=str(node_type))
return instances | 5,352,527 |
def region_root(data_dir):
"""Returns the path of test regions."""
return os.path.join(data_dir, 'regions') | 5,352,528 |
def list_statistics_keys():
"""ListStatistics definition"""
return ["list", "counts"] | 5,352,529 |
def forecast_handler(req, req_body, res, res_body, zip):
"""Handles forecast requests"""
return True | 5,352,530 |
def _configure_output(args):
"""
Configures the output. Loads templates and applies the specified formatter if any.
If none of these configurations are specified, it will return the default output
which is to print each value to standard out.
"""
writer = _get_writer(args)
if args.template:
log.debug('Using template: %s', args.template)
if '{{' in args.template:
engine = template_engines.string(args.template)
else:
engine = template_engines.for_file(args.template)
return outputs.RecordLevelOutput(engine, writer)
if args.format:
log.debug('Using %s formatter for output', args.format)
formatter = outputs.FormatProcessor(args.format)
return outputs.RecordLevelOutput(formatter, writer)
# default
return outputs.SingleFieldOutput(writer, args.printkey) | 5,352,531 |
def refToMastoidsNP(data, M1, M2):
"""
"""
mastoidsMean = np.mean([M1, M2], axis=0)
mastoidsMean = mastoidsMean.reshape(mastoidsMean.shape[0], 1)
newData = data - mastoidsMean
return newData | 5,352,532 |
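A small sketch (added for illustration) with dummy arrays; the shapes assume samples along the first axis of data, which is what the broadcast against the (n_samples, 1) mastoid mean requires:

import numpy as np

data = np.arange(20.0).reshape(5, 4)   # 5 samples x 4 channels (dummy values)
M1 = np.full(5, 2.0)                   # dummy mastoid recordings
M2 = np.full(5, 4.0)
rereferenced = refToMastoidsNP(data, M1, M2)
print(rereferenced[0])                 # first sample with the mastoid mean (3.0) subtracted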
def override_kwargs(
kwargs: Dict[str, str],
func: Callable[..., Any],
filter: Callable[..., Any] = lambda _: True,
) -> Dict[str, str]:
"""Override the kwargs of a function given a function to apply and an optional filter.
Parameters
----------
    kwargs : Dict
The function kwargs input.
func : Callable
A function to apply on the kwargs.
filter : Callable
An optional filter to apply the function only on some kwargs. (Default value = lambda _: True).
Returns
-------
Dict
The changed kwargs as a Dict.
"""
return {
key: func(value) if filter(value) else value for key, value in kwargs.items()
} | 5,352,533 |
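A brief illustration (added here) that upper-cases only the string-valued kwargs; the dict contents are made up:

kwargs = {"name": "alice", "retries": 3}
print(override_kwargs(kwargs, func=str.upper, filter=lambda v: isinstance(v, str)))
# {'name': 'ALICE', 'retries': 3}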
def prepare_for_revival(bucket, obj_prefix):
"""
Makes a manifest for reviving any deleted objects in the bucket. A deleted
object is one that has a delete marker as its latest version.
:param bucket: The bucket that contains the stanzas.
:param obj_prefix: The prefix of the uploaded stanzas.
:return: The manifest as a list of lines in CSV format.
"""
try:
response = s3.meta.client.list_object_versions(
Bucket=bucket.name, Prefix=f'{obj_prefix}stanza')
manifest_lines = [
f"{bucket.name},{parse.quote(marker['Key'])},{marker['VersionId']}"
for marker in response['DeleteMarkers']
if marker['IsLatest']
]
except ClientError:
logger.exception("Couldn't get object versions from %s.", bucket.name)
raise
return manifest_lines | 5,352,534 |
def map_to_orf(fastq, index_dir, ht_prefix, mapped_sam,
unmapped_fastq, log_file, run_config):
"""
Align remaining reads to ORF index files using ``hisat2``.
``hisat2 --version`` is also invoked as ``hisat2`` does not log
its own version when it is run.
:param fastq: FASTQ file (input)
:type fastq: str or unicode
:param index_dir: Index directory
:type index_dir: str or unicode
:param ht_prefix: Prefix of HT2 index files (input)
:type ht_prefix: str or unicode
:param mapped_sam: SAM file for mapped reads (output)
:type mapped_sam: str or unicode
:param unmapped_fastq: FASTQ file for unmapped reads (output)
:type unmapped_fastq: str or unicode
:param log_file: Log file (output)
:type log_file: str or unicode
:param run_config: Run-related configuration
:type run_config: RunConfigTuple
:raise FileNotFoundError: if ``hisat2`` cannot be found
:raise AssertionError: if ``hisat2`` returns a non-zero exit \
code
"""
LOGGER.info(
"Align remaining reads to ORFs index files using hisat2. Log: %s",
log_file)
cmd = ["hisat2", "--version"]
process_utils.run_logged_command(cmd, log_file,
run_config.cmd_file,
run_config.is_dry_run)
index_file_path = os.path.join(index_dir, ht_prefix)
cmd = ["hisat2", "-p", str(run_config.nprocesses), "-k", "2",
"--no-spliced-alignment", "--rna-strandness",
"F", "--no-unal", "--un", unmapped_fastq,
"-x", index_file_path, "-S", mapped_sam,
"-U", fastq]
process_utils.run_logged_command(cmd, log_file,
run_config.cmd_file,
run_config.is_dry_run) | 5,352,535 |
def run_blend_images(
network_pkl1: str,
network_pkl2: str,
seeds: Optional[List[int]] = [700, 701, 702, 703, 704, 705, 706, 707],
outdir: str = "./out_blend",
truncation_psi: float = 0.7,
noise_mode: str = "const",
blending_layers: List[int] = [4, 8, 16, 32, 64, 128, 256],
network_size: int = 512,
blend_width: float = None,
verbose: bool = False,
projected_w: Optional[str] = None,
):
"""Generate images using pretrained network pickle.
Examples:
# Generate curated MetFaces images without truncation (Fig.10 left)
python generate.py --outdir=out --trunc=1 --seeds=85,265,297,849 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
"""
print(f"Loading networks from {network_pkl1} and {network_pkl2} ...")
device = torch.device("cuda")
with dnnlib.util.open_url(network_pkl1) as f:
G1 = legacy.load_network_pkl(f)["G_ema"].to(device) # type: ignore
with dnnlib.util.open_url(network_pkl2) as f:
G2 = legacy.load_network_pkl(f)["G_ema"].to(device) # type: ignore
# print("G1", G1)
# print("G2", G2)
os.makedirs(outdir, exist_ok=True)
# blend_width = (
# None # # None = hard switch, float = smooth switch (logistic) with given width
# )
level = 0
images = []
blended_models = {}
for blending_layer in blending_layers:
resolution = f"b{blending_layer}" # blend at layer
blended_model = get_blended_model(
G1,
G2,
resolution,
level,
blend_width,
network_size=network_size,
verbose=verbose,
)
blended_models[blending_layer] = blended_model
    if projected_w is not None:
        w_filename = os.path.splitext(os.path.split(projected_w)[-1])[0]
        name = w_filename.split("_")[0]
if seeds is not None:
print("warn: --seeds is ignored when using --projected-w")
print(f'Generating images from projected W "{projected_w}"')
ws = np.load(projected_w)["w"]
ws = torch.tensor(ws, device=device).squeeze(0) # pylint: disable=not-callable
print(ws.shape, (G1.num_ws, G1.w_dim))
assert ws.shape[1:] == (G1.num_ws, G1.w_dim)
# for idx, w in enumerate(ws):
images = []
w = ws[-1]
orig1 = get_image(
G1,
None,
truncation_psi=truncation_psi,
noise_mode=noise_mode,
w=w,
is_w=True,
)
orig2 = get_image(
G2,
None,
truncation_psi=truncation_psi,
noise_mode=noise_mode,
w=w,
is_w=True,
)
orig1.save(f"{outdir}/seed_{name}_G1.png")
orig2.save(f"{outdir}/seed_{name}_G2.png")
images.append(orig1)
images.append(orig2)
for blending_layer in blending_layers:
blended_model = blended_models[blending_layer]
blended = get_image(
blended_model,
None,
truncation_psi=truncation_psi,
noise_mode=noise_mode,
w=w,
is_w=True,
)
fprefix = f"seed_{name}_layer_{resolution}"
blended.save(f"{outdir}/{fprefix}_blended.png")
images.append(blended)
make_and_save_grid(images, f"{outdir}/{name}_finalgrid.png")
return
else:
# seed = 279
for seed in seeds:
images = []
z_vector = z = torch.from_numpy(
np.random.RandomState(seed).randn(1, G1.z_dim)
).to(device)
orig1 = get_image(
G1, z_vector, truncation_psi=truncation_psi, noise_mode=noise_mode
)
orig2 = get_image(
G2, z_vector, truncation_psi=truncation_psi, noise_mode=noise_mode
)
orig1.save(f"{outdir}/seed_{seed}_G1.png")
orig2.save(f"{outdir}/seed_{seed}_G2.png")
images.append(orig1)
images.append(orig2)
for blending_layer in blending_layers:
blended_model = blended_models[blending_layer]
blended = get_image(
blended_model,
z_vector,
truncation_psi=truncation_psi,
noise_mode=noise_mode,
)
fprefix = f"seed_{seed}_layer_{resolution}"
blended.save(f"{outdir}/{fprefix}_blended.png")
images.append(blended)
make_and_save_grid(images, f"{outdir}/{seed}_finalgrid.png") | 5,352,536 |
def make_drive_resource() -> Resource:
"""
Authenticates and returns a google drive resource.
"""
google_oauth_creds = ast.literal_eval(
credstash.getSecret("IA_PIPELINE_GLOBAL_GOOGLE_SHEETS_API_KEY")
)
with open("key.json", "w") as fp:
json.dump(google_oauth_creds, fp)
creds = service_account.Credentials.from_service_account_file(
"key.json", scopes=SCOPES
)
os.remove("key.json")
scoped_creds = creds.with_subject(DELEGATE_EMAIL)
http = AuthorizedHttp(scoped_creds)
return discovery.build("drive", "v3", http=http) | 5,352,537 |
def make_subparser(sub, command_name, help, command_func=None, details=None, **kwargs):
"""
Create the "sub-parser" for our command-line parser.
This facilitates having multiple "commands" for a single script,
for example "norm_yaml", "make_rest", etc.
"""
if command_func is None:
command_func_name = "command_{0}".format(command_name)
command_func = globals()[command_func_name]
# Capitalize the first letter for the long description.
desc = help[0].upper() + help[1:]
if details is not None:
desc += "\n\n{0}".format(details)
desc = _wrap(desc)
parser = sub.add_parser(command_name, formatter_class=FORMATTER_CLASS,
help=help, description=desc, **kwargs)
parser.set_defaults(run_command=command_func)
return parser | 5,352,538 |
def _get_static_settings():
"""Configuration required for Galaxy static middleware.
Returns dictionary of the settings necessary for a galaxy App
to be wrapped in the static middleware.
This mainly consists of the filesystem locations of url-mapped
static resources.
"""
static_dir = os.path.join(galaxy_root, "static")
# TODO: these should be copied from config/galaxy.ini
return dict(
static_enabled=True,
static_cache_time=360,
static_dir=static_dir,
static_images_dir=os.path.join(static_dir, 'images', ''),
static_favicon_dir=os.path.join(static_dir, 'favicon.ico'),
static_scripts_dir=os.path.join(static_dir, 'scripts', ''),
static_style_dir=os.path.join(static_dir, 'style'),
static_robots_txt=os.path.join(static_dir, 'robots.txt'),
) | 5,352,539 |
def application():
""" Flask application fixture. """
def _view():
return 'OK', 200
application = Flask('test-application')
application.testing = True
application.add_url_rule('/', 'page', view_func=_view)
return application | 5,352,540 |
def test_emr_container_operator_execute_complete_fail(check_job_status):
"""Assert execute_complete throw AirflowException"""
check_job_status.return_value = JOB_ID
with pytest.raises(AirflowException):
_emr_emr_container_operator_init().execute_complete(
context=None, event={"status": "error", "message": "test failure message"}
) | 5,352,541 |
def get_qe_specific_fp_run_inputs(
configure, code_pw, code_wannier90, code_pw2wannier90,
get_repeated_pw_input, get_metadata_singlecore
):
"""
Creates the InSb inputs for the QE fp_run workflow. For the
higher-level workflows (fp_tb, optimize_*), these are passed
in the 'fp_run' namespace.
"""
def inner():
return {
'scf': get_repeated_pw_input(),
'bands': {
'pw': get_repeated_pw_input()
},
'to_wannier': {
'nscf': get_repeated_pw_input(),
'wannier': {
'code': code_wannier90,
'metadata': get_metadata_singlecore()
},
'pw2wannier': {
'code': code_pw2wannier90,
'metadata': get_metadata_singlecore()
}
}
}
return inner | 5,352,542 |
def _get_cohort_representation(cohort, course):
"""
Returns a JSON representation of a cohort.
"""
group_id, partition_id = cohorts.get_group_info_for_cohort(cohort)
assignment_type = cohorts.get_assignment_type(cohort)
return {
'name': cohort.name,
'id': cohort.id,
'user_count': cohort.users.filter(courseenrollment__course_id=course.location.course_key,
courseenrollment__is_active=1).count(),
'assignment_type': assignment_type,
'user_partition_id': partition_id,
'group_id': group_id,
} | 5,352,543 |
def config_string(cfg_dict):
""" Pretty-print cfg_dict with one-line queries """
upper_level = ["queries", "show_attributes", "priority", "gtf", "bed", "prefix", "outdir", "threads", "output_by_query"]
query_level = ["feature", "feature_anchor", "distance", "strand", "relative_location", "filter_attribute", "attribute_values", "internals", "name"]
upper_lines = []
for upper_key in upper_level:
if upper_key == "queries":
query_lines = "\"queries\":[\n"
#Convert sets to lists
for query in cfg_dict["queries"]:
for key in query:
if type(query[key]) == set:
query[key] = list(query[key])
query_strings = [json.dumps(query, sort_keys=True) for query in cfg_dict["queries"]]
query_lines += " " + ",\n ".join(query_strings) + "\n ]"
upper_lines.append(query_lines)
elif upper_key == "show_attributes" and upper_key in cfg_dict:
upper_lines.append("\"{0}\": {1}".format(upper_key, json.dumps(cfg_dict[upper_key])))
else:
if upper_key in cfg_dict:
upper_lines.append("\"{0}\": \"{1}\"".format(upper_key, cfg_dict[upper_key]))
config_string = "{\n" + ",\n".join(upper_lines) + "\n}\n"
return(config_string) | 5,352,544 |
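A minimal illustrative config (invented for this example) showing how each query is rendered on a single line while the other keys get their own lines:

cfg = {
    "queries": [{"feature": "gene", "distance": 1000}],
    "show_attributes": ["gene_id", "gene_name"],
    "gtf": "genes.gtf",
    "bed": "peaks.bed",
}
print(config_string(cfg))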
def area_km2_per_grid(infra_dataset, df_store):
"""Total area in km2 per assettype per grid, given in geographic coordinates
Arguments:
*infra_dataset* : a shapely object with WGS-84 coordinates
*df_store* : (empty) geopandas dataframe containing coordinates per grid for each grid
Returns:
area in km2 per assettype per grid in dataframe (with column = {asset}_km2 and row = the grid)
"""
asset_list = []
for asset in infra_dataset.asset.unique():
if not "{}_count".format(asset) in df_store.columns: df_store.insert(1, "{}_count".format(asset), "") #add assettype as column after first column for count calculations
if not "{}_km2".format(asset) in df_store.columns: df_store.insert(1, "{}_km2".format(asset), "") #add assettype as column after first column for area calculations
asset_list.append(asset)
for grid_row in df_store.itertuples():
grid_cell = grid_row.geometry #select grid
try:
asset_clip = gpd.clip(infra_dataset, grid_cell) #clip infra data using GeoPandas clip
#count per asset type
count = asset_clip.asset.value_counts() #count number of assets per asset type
for asset_type in asset_list:
if asset_type in count.index:
df_store.loc[grid_row.Index, "{}_count".format(asset_type)] = count.get(key = asset_type)
else:
df_store.loc[grid_row.Index, "{}_count".format(asset_type)] = 0
#calculate area for each asset in clipped infrastructure grid
asset_clip.insert(1, "area_km2", "") #add assettype as column after first column for length calculations
for polygon_object in asset_clip['index']:
asset_clip.loc[polygon_object, "area_km2"] = polygon_area((asset_clip.loc[asset_clip['index']==polygon_object].geometry.item())) #calculate area per object and put in dataframe
area_per_type = asset_clip.groupby(['asset'])['area_km2'].sum() #get total length per asset_type in grid
for asset_type in asset_list:
if asset_type in area_per_type.index:
df_store.loc[grid_row.Index, "{}_km2".format(asset_type)] = area_per_type.get(key = asset_type)
else:
df_store.loc[grid_row.Index, "{}_km2".format(asset_type)] = 0
except:
print("Grid number {} raises a ValueError, area has not been clipped".format(grid_row.index))
for asset_type in asset_list:
df_store.loc[grid_row.Index, "{}_count".format(asset_type)] = np.nan
df_store.loc[grid_row.Index, "{}_km2".format(asset_type)] = np.nan
return df_store | 5,352,545 |
def cli_runner(script_info):
"""Create a CLI runner for testing a CLI command.
Scope: module
.. code-block:: python
def test_cmd(cli_runner):
result = cli_runner(mycmd)
assert result.exit_code == 0
"""
from click.testing import CliRunner
def cli_invoke(command, input=None, *args):
return CliRunner().invoke(command, args, input=input, obj=script_info)
return cli_invoke | 5,352,546 |
def sgf_to_gamestate(sgf_string):
"""
Creates a GameState object from the first game in the given collection
"""
# Don't Repeat Yourself; parsing handled by sgf_iter_states
for (gs, move, player) in sgf_iter_states(sgf_string, True):
pass
# gs has been updated in-place to the final state by the time
# sgf_iter_states returns
return gs | 5,352,547 |
def register_email(email: str) -> None:
""" Stores an email in the mailing list. """
emails = load_file("emails")
if email not in emails["emails"]:
emails["emails"].append(email)
dump_file(emails, "emails") | 5,352,548 |
def get_titlebar_text():
"""Return (style, text) tuples for startup."""
return [
("class:title", "Hello World!"),
("class:title", " (Press <Exit> to quit.)"),
] | 5,352,549 |
def main(config_file_name):
"""
:param config_file_name: str, name of the configuration json file
:return:
"""
global push_notifier, settings
# read config file
settings = Settings(config_file_name)
# tell scraper about the settings
scraper.settings_ref = settings
# create notifier
push_notifier = PushBulletNotifier(settings.token)
# start timer
timer = Timer(settings.hours, unit_callback)
timer.start_counter() | 5,352,550 |
def on_message(client, userdata, msg):
"""
callback func
"""
print("got: "+msg.topic+" "+str(msg.payload)+"\n") | 5,352,551 |
def image_fnames_captions(captions_file, images_dir, partition):
"""
Loads annotations file and return lists with each image's path and caption
Arguments:
partition: string
either 'train' or 'val'
Returns:
all_captions: list of strings
list with each image caption
all_img_paths: list of paths as strings
list with each image's path to file
"""
with open(captions_file, 'r') as f:
annotations = json.load(f)
all_captions = []
all_img_paths = []
for annot in annotations['annotations']:
caption = '<start> ' + annot['caption'] + ' <end>'
image_id = annot['image_id']
full_coco_image_path = images_dir / ('COCO_{}2014_'.format(partition) + \
'{:012d}.jpg'.format(image_id))
all_img_paths.append(full_coco_image_path)
all_captions.append(caption)
return all_captions, all_img_paths | 5,352,552 |
def build_menu(
buttons: list,
columns: int = 3,
header_button=None,
footer_button=None,
resize_keyboard: bool = True
):
"""Хелпер для удобного построения меню."""
menu = [buttons[i:i + columns] for i in range(0, len(buttons), columns)]
if header_button:
menu.insert(0, [header_button])
if footer_button:
menu.append([footer_button])
return ReplyKeyboardMarkup(menu, resize_keyboard=resize_keyboard) | 5,352,553 |
def pandas_dataframe_to_unit_arrays(df, column_units=None):
"""Attach units to data in pandas dataframes and return united arrays.
Parameters
----------
df : `pandas.DataFrame`
Data in pandas dataframe.
column_units : dict
Dictionary of units to attach to columns of the dataframe. Overrides
the units attribute if it is attached to the dataframe.
Returns
-------
Dictionary containing united arrays with keys corresponding to the dataframe
column names.
"""
if not column_units:
try:
column_units = df.units
except AttributeError:
raise ValueError('No units attribute attached to pandas '
'dataframe and col_units not given.')
# Iterate through columns attaching units if we have them, if not, don't touch it
res = {}
for column in df:
if column in column_units and column_units[column]:
res[column] = df[column].values * units(column_units[column])
else:
res[column] = df[column].values
return res | 5,352,554 |
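A short usage sketch (added here), assuming `units` is the pint-based registry this function expects (e.g. metpy.units.units); the data frame contents are made up:

import pandas as pd

df = pd.DataFrame({"wind_speed": [5.0, 7.5], "station": ["KDEN", "KBOS"]})
arrays = pandas_dataframe_to_unit_arrays(df, column_units={"wind_speed": "knot", "station": None})
print(arrays["wind_speed"])  # united array in knots; 'station' stays a plain numpy array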
def is_empty(value: Any) -> bool:
"""
    Empty means the given value is one of: None, a zero-length string, an empty list, or an empty dict.
"""
if value is None:
return True
elif isinstance(value, str):
return len(value) == 0
elif isinstance(value, list):
return len(value) == 0
elif isinstance(value, dict):
return len(value) == 0
else:
return False | 5,352,555 |
def get_worksheets (path, **kwargs):
"""
    Gets all available worksheets within an xlsx file and returns a list
:param path: Path to excel file
:type path: str
:return: Returns a list with all worksheets within the excel-file
:rtype: list
"""
if not os.path.isabs(path):
path = os.path.join(_jinjamator.task_base_dir, path)
xlsx = XLSXReader(
path, "Sheet1", kwargs.get("cache", True)
)
return xlsx.get_worksheets() | 5,352,556 |
def blankScreen(disp, pix):
# pylint: disable=unused-argument
"""A blank screen used to hide any serial console output."""
if disp is None:
return
disp.show(Group()) | 5,352,557 |
def read_caffe_mean(caffe_mean_file):
"""
Reads caffe formatted mean file
:param caffe_mean_file: path to caffe mean file, presumably with 'binaryproto' suffix
:return: mean image, converted from BGR to RGB format
"""
import caffe_parser
import numpy as np
mean_blob = caffe_parser.caffe_pb2.BlobProto()
with open(caffe_mean_file, 'rb') as f:
mean_blob.ParseFromString(f.read())
img_mean_np = np.array(mean_blob.data)
img_mean_np = img_mean_np.reshape(mean_blob.channels, mean_blob.height, mean_blob.width)
# swap channels from Caffe BGR to RGB
img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :]
return img_mean_np | 5,352,558 |
def test_chunked_las_reading_gives_expected_points(las_file_path):
"""
Test chunked LAS reading
"""
with laspy.open(las_file_path) as las_reader:
with laspy.open(las_file_path) as reader:
las = las_reader.read()
check_chunked_reading_is_gives_expected_points(las, reader, iter_size=50) | 5,352,559 |
def estimate_pauli_sum(pauli_terms,
basis_transform_dict,
program,
variance_bound,
quantum_resource,
commutation_check=True,
symmetrize=True,
rand_samples=16):
"""
Estimate the mean of a sum of pauli terms to set variance
The sample variance is calculated by
.. math::
\begin{align}
\mathrm{Var}[\hat{\langle H \rangle}] = \sum_{i, j}h_{i}h_{j}
\mathrm{Cov}(\hat{\langle P_{i} \rangle}, \hat{\langle P_{j} \rangle})
\end{align}
The expectation value of each Pauli operator (term and coefficient) is
also returned. It can be accessed through the named-tuple field
`pauli_expectations'.
:param pauli_terms: list of pauli terms to measure simultaneously or a
PauliSum object
:param basis_transform_dict: basis transform dictionary where the key is
the qubit index and the value is the basis to
rotate into. Valid basis is [I, X, Y, Z].
:param program: program generating a state to sample from. The program
is deep copied to ensure no mutation of gates or program
is perceived by the user.
:param variance_bound: Bound on the variance of the estimator for the
PauliSum. Remember this is the SQUARE of the
standard error!
:param quantum_resource: quantum abstract machine object
:param Bool commutation_check: Optional flag toggling a safety check
ensuring all terms in `pauli_terms`
commute with each other
:param Bool symmetrize: Optional flag toggling symmetrization of readout
:param Int rand_samples: number of random realizations for readout symmetrization
:return: estimated expected value, expected value of each Pauli term in
the sum, covariance matrix, variance of the estimator, and the
number of shots taken. The objected returned is a named tuple with
field names as follows: expected_value, pauli_expectations,
covariance, variance, n_shots.
`expected_value' == coef_vec.dot(pauli_expectations)
:rtype: EstimationResult
"""
if not isinstance(pauli_terms, (list, PauliSum)):
raise TypeError("pauli_terms needs to be a list or a PauliSum")
if isinstance(pauli_terms, PauliSum):
pauli_terms = pauli_terms.terms
# check if each term commutes with everything
if commutation_check:
if len(commuting_sets(sum(pauli_terms))) != 1:
raise CommutationError("Not all terms commute in the expected way")
program = program.copy()
pauli_for_rotations = PauliTerm.from_list(
[(value, key) for key, value in basis_transform_dict.items()])
program += get_rotation_program(pauli_for_rotations)
qubits = sorted(list(basis_transform_dict.keys()))
if symmetrize:
theta = program.declare("ro_symmetrize", "REAL", len(qubits))
for (idx, q) in enumerate(qubits):
program += [RZ(np.pi/2, q), RY(theta[idx], q), RZ(-np.pi/2, q)]
ro = program.declare("ro", "BIT", memory_size=len(qubits))
for num, qubit in enumerate(qubits):
program.inst(MEASURE(qubit, ro[num]))
coeff_vec = np.array(
list(map(lambda x: x.coefficient, pauli_terms))).reshape((-1, 1))
# upper bound on samples given by IV of arXiv:1801.03524
num_sample_ubound = 10 * int(np.ceil(np.sum(np.abs(coeff_vec))**2 / variance_bound))
if num_sample_ubound <= 2:
raise ValueError("Something happened with our calculation of the max sample")
if symmetrize:
if min(STANDARD_NUMSHOTS, num_sample_ubound)//rand_samples == 0:
raise ValueError(f"The number of shots must be larger than {rand_samples}.")
program = program.wrap_in_numshots_loop(min(STANDARD_NUMSHOTS, num_sample_ubound)//rand_samples)
else:
program = program.wrap_in_numshots_loop(min(STANDARD_NUMSHOTS, num_sample_ubound))
binary = quantum_resource.compiler.native_quil_to_executable(basic_compile(program))
results = None
sample_variance = np.infty
number_of_samples = 0
tresults = np.zeros((0, len(qubits)))
while (sample_variance > variance_bound and number_of_samples < num_sample_ubound):
if symmetrize:
# for some number of times sample random bit string
for r in range(rand_samples):
rand_flips = np.random.randint(low=0, high=2, size=len(qubits))
temp_results = quantum_resource.run(binary, memory_map={'ro_symmetrize': np.pi * rand_flips})
tresults = np.vstack((tresults, rand_flips ^ temp_results))
else:
tresults = quantum_resource.run(binary)
number_of_samples += len(tresults)
parity_results = get_parity(pauli_terms, tresults)
# Note: easy improvement would be to update mean and variance on the fly
# instead of storing all these results.
if results is None:
results = parity_results
else:
results = np.hstack((results, parity_results))
# calculate the expected values....
covariance_mat = np.cov(results, ddof=1)
sample_variance = coeff_vec.T.dot(covariance_mat).dot(coeff_vec) / (results.shape[1] - 1)
return EstimationResult(expected_value=coeff_vec.T.dot(np.mean(results, axis=1)),
pauli_expectations=np.multiply(coeff_vec.flatten(), np.mean(results, axis=1).flatten()),
covariance=covariance_mat,
variance=sample_variance,
n_shots=results.shape[1]) | 5,352,560 |
def version(ctx, f):
"""Extract the assets of a local Minecraft version"""
extractor.pack(ctx.obj["v"], ctx.obj["o"], lambda s: print(s) if ctx.obj["d"] else '', f) | 5,352,561 |
def GetQuasiSequenceOrderp(ProteinSequence, maxlag=30, weight=0.1, distancematrix={}):
"""
###############################################################################
Computing quasi-sequence-order descriptors for a given protein.
[1]:Kuo-Chen Chou. Prediction of Protein Subcellar Locations by
Incorporating Quasi-Sequence-Order Effect. Biochemical and Biophysical
Research Communications 2000, 278, 477-483.
Usage:
result = GetQuasiSequenceOrderp(protein,maxlag,weight,distancematrix)
Input: protein is a pure protein sequence
maxlag is the maximum lag and the length of the protein should be larger
than maxlag. default is 30.
weight is a weight factor. please see reference 1 for its choice. default is 0.1.
distancematrix is a dict form containing 400 distance values
Output: result is a dict form containing all quasi-sequence-order descriptors
###############################################################################
"""
result = dict()
result.update(GetQuasiSequenceOrder1(ProteinSequence, maxlag, weight, distancematrix))
result.update(GetQuasiSequenceOrder2(ProteinSequence, maxlag, weight, distancematrix))
return result | 5,352,562 |
def check(lst: list, search_element: int) -> bool:
"""Check if the list contains the search_element."""
    return search_element in lst
def step_with_model(rhs, state, dt=.125, n=100):
"""Perform a number of time steps with a model"""
for t in range(n):
qt_dot = rhs(state)
new_qt = state['QT'] + dt * qt_dot['QT']
state = assoc(state, 'QT', new_qt)
yield state | 5,352,564 |
def halfcube(random_start=0,random_end=32,halfwidth0=1,pow=-1):
"""
Produce a halfcube with given dimension and decaying power
:param random_start: decay starting parameter
:param random_end: decay ending parameter
:param halfwidth0: base halfwidth
:param pow: decaying power
:return: A (random_end-random_start,) array
"""
ran=np.arange(random_start,random_end,dtype=float)
ran[0]=1.0
return ran**pow*halfwidth0 | 5,352,565 |
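An illustrative call (added here), assuming numpy is imported as np as the function requires; it shows the 1/n decay with the first halfwidth pinned to halfwidth0:

print(halfcube(random_start=0, random_end=6, halfwidth0=2.0, pow=-1))
# approximately [2.  2.  1.  0.6667  0.5  0.4]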
def db_connection():
"""Function for connecting, creating and
Returns
-------
"""
db_credentials = read_json('data/sql-connection.json')
conn = pyodbc.connect(
"Driver={};Server={};Database={};uid={};pwd={};".format(
db_credentials.get('driver'),
db_credentials.get('server'),
db_credentials.get('database'),
db_credentials.get('user'),
db_credentials.get('password')
)
)
conn.cursor().execute("""
CREATE TABLE RV.exp_acoes (
Cliente INTEGER PRIMARY KEY,
Financeiro FLOAT NOT NULL
);
INSERT INTO RV.exp_acoes (Cliente, FInanceiro) VALUES (1, 96000);
INSERT INTO RV.exp_acoes (Cliente, FInanceiro) VALUES (2, 250000);
INSERT INTO RV.exp_acoes (Cliente, FInanceiro) VALUES (3, 20500);
""")
conn.commit()
conn.close() | 5,352,566 |
def valid_passphrase(module, **kwargs):
"""Tests whether the given passphrase is valid for the specified device.
Return: <boolean> <error>"""
for req in ["device", "passphrase"]:
if req not in kwargs or kwargs[req] is None:
errmsg = "valid_passphrase: {0} is a required parameter".format(req)
return False, {"msg": errmsg}
is_keyfile = kwargs.get("is_keyfile", False)
slot = kwargs.get("slot", None)
args = ["cryptsetup", "open", "--test-passphrase", kwargs["device"]]
if slot is not None:
args.extend(["--key-slot", str(slot)])
_unused, err = run_cryptsetup(
module, args, passphrase=kwargs["passphrase"], is_keyfile=is_keyfile
)
if err:
errmsg = "valid_passphrase: We need a valid passphrase for {0}".format(
kwargs["device"]
)
return False, {"msg": errmsg, "err": err}
return True, None | 5,352,567 |
def contract_address(deploy_hash_base16: str, fn_store_id: int) -> bytes:
"""
Should match what the EE does (new_function_address)
//32 bytes for deploy hash + 4 bytes ID
blake2b256( [0;32] ++ [0;4] )
deploy_hash ++ fn_store_id
"""
def hash(data: bytes) -> bytes:
h = blake2b(digest_size=32)
h.update(data)
return h.digest()
deploy_hash_bytes = bytes.fromhex(deploy_hash_base16)
counter_bytes = fn_store_id.to_bytes(4, "little")
data = deploy_hash_bytes + counter_bytes
return hash(data) | 5,352,568 |
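A small consistency check (added here), reproducing the documented scheme by hand with hashlib's blake2b and a hypothetical deploy hash; it assumes the snippet's blake2b is the hashlib-compatible one:

import hashlib

deploy_hash = "ab" * 32  # hypothetical 32-byte deploy hash in hex
fn_store_id = 1
manual = hashlib.blake2b(
    bytes.fromhex(deploy_hash) + fn_store_id.to_bytes(4, "little"),
    digest_size=32,
).digest()
assert contract_address(deploy_hash, fn_store_id) == manual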
def security_safety(session):
"""Check for security issues in dependencies."""
# Include all extras here to check all is safe for ci.
session.install(".[dev,lint,tests,security]")
session.run("python", "-m", "safety", "check") | 5,352,569 |
def dump_js_escaped_json(obj, cls=EdxJSONEncoder):
"""
JSON dumps and escapes objects that are safe to be embedded in JavaScript.
Use this for anything but strings (e.g. dicts, tuples, lists, bools, and
numbers). For strings, use js_escaped_string.
The output of this method is also usable as plain-old JSON.
Usage:
Used as follows in a Mako template inside a <SCRIPT> tag::
var json_obj = ${obj | n, dump_js_escaped_json}
If you must use the cls argument, then use as follows::
var json_obj = ${dump_js_escaped_json(obj, cls) | n}
Use the "n" Mako filter above. It is possible that the default filter
may include html escaping in the future, and this ensures proper
escaping.
Ensure ascii in json.dumps (ensure_ascii=True) allows safe skipping of
Mako's default filter decode.utf8.
Arguments:
obj: The object soon to become a JavaScript escaped JSON string. The
object can be anything but strings (e.g. dicts, tuples, lists, bools, and
numbers).
cls (class): The JSON encoder class (defaults to EdxJSONEncoder).
Returns:
(string) Escaped encoded JSON.
"""
obj = list(obj) if isinstance(obj, type({}.values())) else obj # lint-amnesty, pylint: disable=isinstance-second-argument-not-valid-type, dict-values-not-iterating, line-too-long
json_string = json.dumps(obj, ensure_ascii=True, cls=cls)
json_string = _escape_json_for_js(json_string)
return json_string | 5,352,570 |
def test_execution_failure(tmp_path):
"""Test script error."""
script = "ls non-existing-file"
error_msg = "execution failure, see the stdout and stderr files in /"
runner = ScriptRunner(EXE_RUNNER_NAME, script, tmp_path)
with pytest.raises(ScriptExecutionError, match=error_msg):
runner.run()
_assertions(
tmp_path,
runner,
script,
"",
"ls: cannot access '?non-existing-file'?: No such file or directory",
) | 5,352,571 |
def scan_repositories(read_repofile_func=_read_repofile):
"""
Scan the repository mapping file and produce RepositoriesMap msg.
See the description of the actor for more details.
"""
# TODO: add filter based on the current arch
# TODO: deprecate the product type and introduce the "channels" ?.. more or less
# NOTE: product type is changed, now it's channel: eus,e4s,aus,tus,ga,beta
if os.path.exists(os.path.join('/etc/leapp/files', OLD_REPOMAP_FILE)):
# NOTE: what about creating the report (instead of warning)
api.current_logger().warning(
'The old repomap file /etc/leapp/files/repomap.csv is present.'
' The file has been replaced by the repomap.json file and it is'
' not used anymore.'
)
json_data = read_repofile_func(REPOMAP_FILE)
try:
repomap_data = RepoMapData.load_from_dict(json_data)
mapping = repomap_data.get_mappings(get_source_major_version(), get_target_major_version())
valid_major_versions = [get_source_major_version(), get_target_major_version()]
api.produce(RepositoriesMapping(
mapping=mapping,
repositories=repomap_data.get_repositories(valid_major_versions)
))
except ModelViolationError as err:
err_message = (
'The repository mapping file is invalid: '
'the JSON does not match required schema (wrong field type/value): {}'
.format(err)
)
_inhibit_upgrade(err_message)
except KeyError as err:
_inhibit_upgrade(
'The repository mapping file is invalid: the JSON is missing a required field: {}'.format(err))
except ValueError as err:
# The error should contain enough information, so we do not need to clarify it further
_inhibit_upgrade('The repository mapping file is invalid: {}'.format(err)) | 5,352,572 |
def RawTuple(num_fields, name_prefix='field'):
"""
Creates a tuple of `num_field` untyped scalars.
"""
assert isinstance(num_fields, int)
assert num_fields >= 0
return NamedTuple(name_prefix, *([np.void] * num_fields)) | 5,352,573 |
def uninitializePlugin(mobject):
""" Unitializes the plug-in. """
mplugin2 = om2.MFnPlugin(mobject, kAuthor, kVersion, kRequiredAPIVersion)
DEREGISTER_LOCATOR_NODE(n_DebugVector.DebugVector, mplugin2)
DEREGISTER_LOCATOR_NODE(n_DebugMatrix.DebugMatrix, mplugin2)
DEREGISTER_LOCATOR_NODE(n_MeshController.MeshController, mplugin2)
DEREGISTER_NODE(n_VectorAnglePSD.VectorAnglePSD, mplugin2)
DEREGISTER_NODE(n_IKVChainSolver.IKVChainSolver, mplugin2)
DEREGISTER_NODE(n_HelperJoint.HelperJoint, mplugin2)
DEREGISTER_NODE(n_DistributeAlongSurface.DistributeAlongSurface, mplugin2)
DEREGISTER_NODE(n_TwistExtractor.TwistExtractor, mplugin2)
DEREGISTER_NODE(n_QuadraticCurve.QuadraticCurve, mplugin2)
DEREGISTER_NODE(n_BlendTransform.BlendTransform, mplugin2)
DEREGISTER_NODE(n_AimConstraint.AimConstraint, mplugin2)
DEREGISTER_NODE(n_ParentConstraint.ParentConstraint, mplugin2)
DEREGISTER_NODE(n_PoleVectorConstraint.PoleVectorConstraint, mplugin2)
DEREGISTER_NODE(n_SpaceConstraint.SpaceConstraint, mplugin2, n_SpaceConstraint.SpaceConstraint.kCallbackIDs)
DEREGISTER_NODE(n_AngularMath.AngularMath, mplugin2)
DEREGISTER_NODE(n_AngularScalarMath.AngularScalarMath, mplugin2)
DEREGISTER_NODE(n_AngularTrigMath.AngularTrigMath, mplugin2)
DEREGISTER_NODE(n_AngleToDouble.AngleToDouble, mplugin2)
DEREGISTER_NODE(n_DoubleToAngle.DoubleToAngle, mplugin2)
DEREGISTER_NODE(n_EulerMath.EulerMath, mplugin2)
DEREGISTER_NODE(n_EulerScalarMath.EulerScalarMath, mplugin2)
DEREGISTER_NODE(n_EulerToVector.EulerToVector, mplugin2)
DEREGISTER_NODE(n_VectorToEuler.VectorToEuler, mplugin2)
DEREGISTER_NODE(n_DecomposeRowMatrix.DecomposeRowMatrix, mplugin2)
DEREGISTER_NODE(n_FindParamFromLength.FindParamFromLength, mplugin2)
DEREGISTER_SURFACESHAPE_NODE(n_DebugGeometry.DebugGeometry, mplugin2)
om2.MGlobal.displayInfo("[gfTools_P] Plugin unloaded successfully.")
# m_Menu.MainMenu.unloadMenu() | 5,352,574 |
def test_success_ignore_blank_program_activity_name(database):
""" Testing program activity name validation to ignore blanks if monetary sum is 0 """
op = ObjectClassProgramActivityFactory(row_number=1, beginning_period_of_availa=2016, agency_identifier='test',
main_account_code='test', program_activity_name='',
program_activity_code='test',
deobligations_recov_by_pro_cpe=0, gross_outlay_amount_by_pro_cpe=0,
gross_outlay_amount_by_pro_fyb=0, gross_outlays_delivered_or_cpe=0,
gross_outlays_delivered_or_fyb=0, gross_outlays_undelivered_cpe=0,
gross_outlays_undelivered_fyb=0, obligations_delivered_orde_cpe=0,
obligations_delivered_orde_fyb=0, obligations_incurred_by_pr_cpe=0,
obligations_undelivered_or_cpe=0, obligations_undelivered_or_fyb=0,
ussgl480100_undelivered_or_cpe=0, ussgl480100_undelivered_or_fyb=0,
ussgl480200_undelivered_or_cpe=0, ussgl480200_undelivered_or_fyb=0,
ussgl483100_undelivered_or_cpe=0, ussgl483200_undelivered_or_cpe=0,
ussgl487100_downward_adjus_cpe=0, ussgl487200_downward_adjus_cpe=0,
ussgl488100_upward_adjustm_cpe=0, ussgl488200_upward_adjustm_cpe=0,
ussgl490100_delivered_orde_cpe=0, ussgl490100_delivered_orde_fyb=0,
ussgl490200_delivered_orde_cpe=0, ussgl490800_authority_outl_cpe=0,
ussgl490800_authority_outl_fyb=0, ussgl493100_delivered_orde_cpe=0,
ussgl497100_downward_adjus_cpe=0, ussgl497200_downward_adjus_cpe=0,
ussgl498100_upward_adjustm_cpe=0, ussgl498200_upward_adjustm_cpe=0)
pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
account_number='test', program_activity_name='test', program_activity_code='test')
assert number_of_errors(_FILE, database, models=[op, pa]) == 0 | 5,352,575 |
def pose2mat(R, p):
""" convert pose to transformation matrix """
p0 = p.ravel()
H = np.block([
[R, p0[:, np.newaxis]],
[np.zeros(3), 1]
])
return H | 5,352,576 |
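# A quick numpy check of pose2mat: an identity rotation with translation
# (1, 2, 3) should yield a 4x4 homogeneous transform with that translation
# in the last column.
import numpy as np

R = np.eye(3)
p = np.array([1.0, 2.0, 3.0])
H = pose2mat(R, p)
assert H.shape == (4, 4)
assert np.allclose(H[:3, 3], p) and np.allclose(H[3], [0, 0, 0, 1])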
def _fill_missing_values(df=None):
"""replace missing values with NaN"""
# fills in rows where lake refroze in same season
df['WINTER'].replace(to_replace='"', method='ffill', inplace=True)
# use nan as the missing value
for headr in ['DAYS', 'OPENED', 'CLOSED']:
df[headr].replace(to_replace=['-', '--', '---'], value=_np.nan, inplace=True)
return df.sort_values(by=['WINTER']) | 5,352,577 |
def stop_tracking(
unsafe_password: str = None,
save_to_json: Union[str, Path] = None,
firestore_key_file: str = None,
firestore_collection_name: str = "counts",
verbose: bool = False,
):
"""
Stop tracking user inputs to a streamlit app.
Should be called after `streamlit-analytics.start_tracking()`. This method also
shows the analytics results below your app if you attach `?analytics=on` to the URL.
"""
if verbose:
print("Finished script execution. New counts:")
print(counts)
print("-" * 80)
# sess = get_session_state
# print(sess.state_dict)
# Reset streamlit functions.
st.button = _orig_button
st.checkbox = _orig_checkbox
st.radio = _orig_radio
st.selectbox = _orig_selectbox
st.multiselect = _orig_multiselect
st.slider = _orig_slider
st.select_slider = _orig_select_slider
st.text_input = _orig_text_input
st.number_input = _orig_number_input
st.text_area = _orig_text_area
st.date_input = _orig_date_input
st.time_input = _orig_time_input
st.file_uploader = _orig_file_uploader
st.color_picker = _orig_color_picker
st.sidebar.button = _orig_sidebar_button
st.sidebar.checkbox = _orig_sidebar_checkbox
st.sidebar.radio = _orig_sidebar_radio
st.sidebar.selectbox = _orig_sidebar_selectbox
st.sidebar.multiselect = _orig_sidebar_multiselect
st.sidebar.slider = _orig_sidebar_slider
st.sidebar.select_slider = _orig_sidebar_select_slider
st.sidebar.text_input = _orig_sidebar_text_input
st.sidebar.number_input = _orig_sidebar_number_input
st.sidebar.text_area = _orig_sidebar_text_area
st.sidebar.date_input = _orig_sidebar_date_input
st.sidebar.time_input = _orig_sidebar_time_input
st.sidebar.file_uploader = _orig_sidebar_file_uploader
st.sidebar.color_picker = _orig_sidebar_color_picker
# Save count data to firestore.
# TODO: Maybe don't save on every iteration but on regular intervals in a background
# thread.
if firestore_key_file:
if verbose:
print("Saving count data to firestore:")
print(counts)
print()
firestore.save(counts, firestore_key_file, firestore_collection_name)
# Dump the counts to json file if `save_to_json` is set.
# TODO: Make sure this is not locked if writing from multiple threads.
if save_to_json is not None:
with Path(save_to_json).open("w") as f:
json.dump(counts, f)
if verbose:
print("Storing results to file:", save_to_json)
# Show analytics results in the streamlit app if `?analytics=on` is set in the URL.
query_params = st.experimental_get_query_params()
if "analytics" in query_params and "on" in query_params["analytics"]:
st.write("---")
display.show_results(counts, reset_counts, unsafe_password) | 5,352,578 |
def csi_prelu(data, alpha, axis, out_dtype, q_params, layer_name=""):
"""Quantized activation relu.
Parameters
----------
data : relay.Expr
The quantized input data.
alpha : relay.Expr
The quantized alpha.
out_dtype : str
        Specifies the output data type; for mixed precision it can be uint8.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.CSIPRelu(data, alpha, axis, out_dtype, q_params, layer_name) | 5,352,579 |
def scale_image(in_fname, out_fname, max_width, max_height):
"""Scales an image with the same aspect ratio centered in an
image box with the given max_width and max_height
if in_fname == out_fname the image can only be scaled down
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = max_width / float(width_in)
scale_h = max_height / float(height_in)
if height_in * scale_w <= max_height:
scale = scale_w
else:
scale = scale_h
if scale >= 1.0 and in_fname == out_fname:
return
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image using resize; if using .thumbnail and the image is
# already smaller than max_width, max_height, then this won't scale up
# at all (maybe could be an option someday...)
img = img.resize((width_sc, height_sc), Image.BICUBIC)
# img.thumbnail((width_sc, height_sc), Image.BICUBIC)
# width_sc, height_sc = img.size # necessary if using thumbnail
# insert centered
thumb = Image.new('RGBA', (max_width, max_height), (255, 255, 255, 255))
pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname) | 5,352,580 |
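# A minimal usage sketch, assuming Pillow is installed and that "input.png"
# is a placeholder for an existing image; the output is always a
# max_width x max_height RGBA canvas with the scaled image centered on it.
scale_image("input.png", "thumbnail.png", max_width=400, max_height=280)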
def test_crps_ensemble_dim(o, f_prob, dim):
"""Check that crps_ensemble reduces only dim."""
actual = crps_ensemble(o, f_prob, dim=dim)
assert_only_dim_reduced(dim, actual, o) | 5,352,581 |
def json(filename):
"""Returns the parsed contents of the given JSON fixture file."""
content = contents(filename)
return json_.loads(content) | 5,352,582 |
def _parse_assayData(assayData, assay):
"""Parse Rpy2 assayData (Environment object)
assayData: Rpy2 Environment object.
assay: An assay name indicating the data to be loaded.
Return a parsed expression dataframe (Pandas).
"""
pandas2ri.activate()
mat = assayData[assay] # rpy2 expression matrix object
data = pandas2ri.ri2py(mat)
features = pandas2ri.ri2py(r.rownames(mat))
samples = pandas2ri.ri2py(r.colnames(mat))
return pd.DataFrame(data, index=features, columns=samples) | 5,352,583 |
def method_list():
""" list of available electronic structure methods
"""
return theory.METHOD_LST | 5,352,584 |
def test_init_as_for_groutmaterial():
"""Test that the init_as is working as expected for all materials."""
matnames = ['Grout', 'Ground', 'Pipe']
classes = [GroutMaterial, GroundMaterial, PipeMaterial]
for (matname, class_) in zip(matnames, classes):
predmats = PREDEFINED_MATERIALS[matname]
keys = list(predmats.keys())
mat = class_.init_as(1)
assert (mat.kth, mat.Cp) == predmats[keys[1]]
# Assert no error is raised when the material index is out of range.
mat = class_.init_as(-1)
assert (mat.kth, mat.Cp) == predmats[keys[0]]
mat = class_.init_as(len(keys)+1)
assert (mat.kth, mat.Cp) == predmats[keys[-1]]
assert mat._category == matname
assert mat._material == keys[-1] | 5,352,585 |
def readReadQualities(fastqfile):
"""
    Reads a fastq file and calculates a defined read quality score per read.
    input: fastq file
    output: dictionary with key = read id and value = computed read quality score
    @type fastqfile: string
    @param fastqfile: path to fastq file
    @rtype: dictionary
    @return: dictionary containing read ids and computed read quality scores.
"""
fastq_file = HTSeq.FastqReader(fastqfile , "phred")
readdictionary = {}
for read in fastq_file:
readdictionary[read.name.split()[0]] = ComputeRQScore(read.qualstr)
print("\tReading Fastq file done!")
return readdictionary | 5,352,586 |
def test_check_args_no_rules(base_add, a, b, expected):
"""Tests that check_args does nothing"""
add = check_args(base_add)
assert add(a, b) == expected | 5,352,587 |
async def default_field_resolver(
parent: Optional[Any],
args: Dict[str, Any],
ctx: Optional[Any],
info: "ResolveInfo",
) -> Any:
"""
Default callable to use as resolver for field which doesn't implement a
custom one.
:param parent: default root value or field parent value
:param args: computed arguments related to the resolved field
:param ctx: context passed to the query execution
:param info: information related to the execution and the resolved field
:type parent: Optional[Any]
:type args: Dict[str, Any]
:type ctx: Optional[Any]
:type info: ResolveInfo
:return: the computed field value
:rtype: Any
"""
# pylint: disable=unused-argument
try:
return getattr(parent, info.field_name)
except AttributeError:
pass
try:
return parent[info.field_name]
except (KeyError, TypeError):
pass
return None | 5,352,588 |
def revision_list_to_str(diffs: List[Dict]) -> str:
"""Convert list of diff ids to a comma separated list, prefixed with "D"."""
return ', '.join([diff_to_str(d['id']) for d in diffs]) | 5,352,589 |
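# A hedged example of revision_list_to_str; it assumes diff_to_str simply
# prefixes the numeric id with "D" (e.g. diff_to_str(123) -> "D123"), which
# the docstring implies but this snippet does not show.
diffs = [{"id": 123}, {"id": 456}]
print(revision_list_to_str(diffs))    # expected to print something like: D123, D456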
def find_clouds(images):
"""
    While very basic in principle, I found that by applying a blue layer to
    the reflectance instrument images ibands and mbands, I was able to see
    the clouds clearly.
    Method:
    - Get the blue layer out of the 3D images ibands and mbands
    - Add the two images to make sure that we capture the clouds' total
      reflectance.
"""
pass | 5,352,590 |
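# Because find_clouds above is only a stub, this is a minimal numpy sketch of
# the approach its docstring describes. It assumes `images` is a dict holding
# the 3-D "ibands" and "mbands" reflectance arrays, that the blue channel sits
# at index 2, and that both arrays share the same shape (real I-band and
# M-band data would first need resampling to a common grid).
import numpy as np

def find_clouds_sketch(images):
    """Combine the blue layers of ibands and mbands to highlight clouds."""
    iband_blue = images["ibands"][:, :, 2].astype(np.float64)
    mband_blue = images["mbands"][:, :, 2].astype(np.float64)
    # Summing the two blue layers emphasizes the high combined reflectance
    # of clouds relative to most surface features.
    return iband_blue + mband_blue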
def get_path_of_latest_file() -> Optional[Path]:
"""Gets the path of the latest produced file that contains weight information"""
path = Path(storage_folder)
latest_file = None
time_stamp_latest = -1
for entry in path.iterdir():
if entry.is_file():
            if latest_file is None:
latest_file = entry
time_stamp_latest = time.mktime(get_time_tuple_from_filename(entry.name))
else:
time_stamp_latest = time.mktime(get_time_tuple_from_filename(latest_file.name))
time_stamp_current = time.mktime(get_time_tuple_from_filename(entry.name))
if time_stamp_current > time_stamp_latest:
latest_file = entry
# print_d(f"Latest file: {latest_file}")
return latest_file | 5,352,591 |
def index():
"""
    A function that returns the home page when called upon
"""
#get all available news sources
news_sources = get_sources()
#get all news articles available
everything = get_everything()
print(everything)
# title = 'Home - Find all the current news at your convinience'
return render_template('index.html', news_sources = news_sources, everything = everything) | 5,352,592 |
def xclCopyBO(handle, dstBoHandle, srcBoHandle, size, dst_offset, src_offset):
"""
Copy device buffer contents to another buffer
:param handle: Device handle
:param dstBoHandle: Destination BO handle
:param srcBoHandle: Source BO handle
:param size: Size of data to synchronize
:param dst_offset: dst Offset within the BO
:param src_offset: src Offset within the BO
:return: 0 on success or standard errno
"""
libc.xclCopyBO.restype = ctypes.c_int
libc.xclCopyBO.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_uint, ctypes.c_size_t, ctypes.c_size_t,
ctypes.c_uint]
libc.xclCopyBO(handle, dstBoHandle, srcBoHandle, size, dst_offset, src_offset) | 5,352,593 |
def parse_secret_from_literal(literal):
"""Parse a literal string, into a secret dict.
:param literal: String containg a key and a value. (e.g. 'KEY=VALUE')
:returns secret: Dictionary in the format suitable for sending
via http request.
"""
try:
key, value = literal.split("=", 1)
secret = {
key: {
"value": base64.b64encode(value.encode("utf-8")).decode("utf-8"),
"type": "env",
}
}
return secret
except ValueError as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
'Option "{0}" is invalid: \n'
'For literal strings use "SECRET_NAME=VALUE" format'.format(literal),
msg_type="error",
)
sys.exit(1) | 5,352,594 |
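# A small usage example; base64, logging, and display_message are assumed to
# be imported at module level as in the surrounding file.
secret = parse_secret_from_literal("API_TOKEN=s3cr3t")
# -> {"API_TOKEN": {"value": "<base64 of 's3cr3t'>", "type": "env"}}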
def test_run_internal_median_filter_piped_input():
"""
    The median filter runner asserts that the input file exists.
This is to prevent specifying streams we can't rewind.
"""
check_run_raises(mod.run_internal_median_filter, fasta_tests[5])
with pytest.raises(AssertionError) as e:
mod.run_internal_median_filter(sys.stdin, '', '')
assert 'Cannot rewind' in str(e.value) | 5,352,595 |
def blur(img):
"""
:param img: SimpleImage, the input image
    :return: the processed (blurred) image
    For every position, the function sums the pixel colors of that position and
    its neighbors, averages them, and sets the result as the new pixel's RGB.
"""
sum_red = 0
sum_blue = 0
sum_green = 0
neighbors = 0
new_img = SimpleImage.blank(img.width, img.height)
for x in range(img.width):
for y in range(img.height):
new_pixel = new_img.get_pixel(x, y)
for i in range(-1, 2):
for j in range(-1, 2):
                    if 0 <= x + i <= img.width - 1 and 0 <= y + j <= img.height - 1:
sum_red += img.get_pixel(x + i, y + j).red
sum_blue += img.get_pixel(x + i, y + j).blue
sum_green += img.get_pixel(x + i, y + j).green
neighbors += 1
new_pixel.red = sum_red // neighbors
new_pixel.blue = sum_blue // neighbors
new_pixel.green = sum_green // neighbors
neighbors = 0
sum_red = 0
sum_blue = 0
sum_green = 0
return new_img | 5,352,596 |
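# A hedged usage sketch: SimpleImage is assumed to be the Stanford CS106A
# image helper this function is written against, and "cat.png" is only a
# placeholder file name.
original = SimpleImage("cat.png")
softened = blur(original)
softened.show()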
def delete_project_api_document_annotations_url(document_id: int, annotation_id: int) -> str:
"""
Delete the annotation of a document.
:param document_id: ID of the document as integer
:param annotation_id: ID of the annotation as integer
:return: URL to delete annotation of a document
"""
return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/' f'annotations/{annotation_id}/' | 5,352,597 |
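# With placeholder values KONFUZIO_HOST = "https://app.konfuzio.com" and
# KONFUZIO_PROJECT_ID = 1, the helper above would return
# "https://app.konfuzio.com/api/projects/1/docs/42/annotations/7/".
url = delete_project_api_document_annotations_url(document_id=42, annotation_id=7)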
def initiate_strategy_trader(strategy, strategy_trader):
"""
    Run asynchronously in the workers to avoid blocking the strategy thread for long.
"""
with strategy_trader._mutex:
if strategy_trader._initialized != 1:
# only if waiting for initialize
return
strategy_trader._initialized = 2
now = datetime.now()
instrument = strategy_trader.instrument
try:
watcher = instrument.watcher(Watcher.WATCHER_PRICE_AND_VOLUME)
if watcher:
# update from last ticks
watcher.subscribe(instrument.market_id, None, -1, None)
# initialization processed, waiting for data be ready
with strategy_trader._mutex:
strategy_trader._initialized = 0
# wake-up
strategy.send_update_strategy_trader(instrument.market_id)
except Exception as e:
logger.error(repr(e))
logger.debug(traceback.format_exc()) | 5,352,598 |
def subprocess_call_wrapper(lst, stdin=None):
"""Wrapper around the subprocess.call functions."""
print_debug('About to run "%s"' % ' '.join(lst))
try:
ret = subprocess.call(lst, stdin=stdin)
    except (OSError, IOError, IndexError, KeyboardInterrupt):
        ret = 127  # an error code
print_debug('Command "%s" returned %d' % (lst[0] if lst else '', ret))
return ret == 0 | 5,352,599 |
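# A small usage example; print_debug is assumed to be the module's own logging
# helper. The wrapper returns True only when the command exits with status 0.
if subprocess_call_wrapper(["ls", "-l"]):
    print("listing succeeded")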