| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def sigmoid(x: np.ndarray, derivative: bool = False) -> np.ndarray:
"""
The sigmoid function which is given by
1/(1+exp(-x))
where x is a number or a NumPy array. If derivative is True, it applies the
derivative of the sigmoid function instead.
Examples:
>>> sigmoid(0)
0.5
>>> abs(sigmoid(np.array([100, 30, 10])) - 1) < 0.001
array([ True, True, True])
>>> abs(sigmoid(-100) - 0) < 0.001
True
"""
if derivative:
return sigmoid(x) * (1 - sigmoid(x))
return 1 / (1 + np.exp(-x)) | 5,353,200 |
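# Illustrative usage sketch (not part of the original snippet): the derivative branch
# should match a numerical finite-difference estimate. Assumes the sigmoid function
# above is available and that numpy is imported as np.
import numpy as np

x = np.array([-2.0, 0.0, 3.0])
analytic = sigmoid(x, derivative=True)
eps = 1e-6
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
assert np.allclose(analytic, numeric, atol=1e-6)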
def recv_rectangle(data):
"""
Handles receiving a rectangle
Args:
data (list): the data sent over by the server
"""
global board_elements
global canvas
if len(data) == 5:
top_left = (int(data[1].split(" ")[0]), int(data[1].split(" ")[1]))
bottom_right = (int(data[2].split(" ")[0]), int(data[2].split(" ")[1]))
colour = (int(data[3].split(" ")[0]), int(
data[3].split(" ")[1]), int(data[3].split(" ")[2]))
fill = int(data[4])
rect = shapes.Rectangle(top_left, bottom_right, colour, fill)
board_elements.append(rect)
canvas = rect.mark(canvas) | 5,353,201 |
def create_hierarchy(
num_samples,
bundle_size,
directory_sizes=None,
root=".",
start_sample_id=0,
start_bundle_id=0,
address="",
n_digits=1,
):
"""
SampleIndex Hierarchy Factory method. Wraps
create_hierarchy_from_max_sample, which is a max_sample-based API, not a
numSample-based API like this method.
:param num_samples: The total number of samples.
:param bundle_size: The max number of samples a bundle file is responsible for.
:param directory_sizes: The number of samples each directory is responsible
for - a list, one value for each level in the directory hierarchy.
:param root: The root path of this index. Defaults to ".".
:param start_sample_id: The start of the sample count. Defaults to 0.
:param n_digits: The number of digits to pad the directories with.
"""
if directory_sizes is None:
directory_sizes = []
return create_hierarchy_from_max_sample(
num_samples + start_sample_id,
bundle_size,
directory_sizes=directory_sizes,
root=root,
start_bundle_id=start_bundle_id,
min_sample=start_sample_id,
address=address,
n_digits=n_digits,
) | 5,353,202 |
def run_process(grammar, commandline, tmpdir):
"""
'PROCESS' test command runner. It will call ``grammarinator-process`` with
the specified command line. Tests whether the processing of the grammar
(creating a fuzzer from it) is working properly.
:param grammar: file name of the grammar that contained the test command.
:param commandline: command line as specified in the test command.
:param tmpdir: path to a temporary directory (provided by the environment).
"""
run_subprocess(grammar,
'{python} -m grammarinator.process {commandline}'
.format(python=sys.executable, commandline=commandline),
tmpdir) | 5,353,203 |
def mock_config_file_with_auth_browser():
"""A pytest fixture that creates a temporary directory and a config file to match. Deletes directory after test"""
# Load auth config for testing
test_auth_file = os.path.join(resource_filename('gtmcore',
'auth{}tests'.format(os.path.sep)), 'auth_config.json')
if not os.path.exists(test_auth_file):
test_auth_file = f"{test_auth_file}.example"
with open(test_auth_file, 'rt') as conf:
auth_data = json.load(conf)
overrides = {
'auth': {
'provider_domain': 'gigantum.auth0.com',
'signing_algorithm': 'RS256',
'client_id': auth_data['client_id'],
'audience': auth_data['audience'],
'identity_manager': 'browser'
}
}
conf_file, working_dir = _create_temp_work_dir(override_dict=overrides)
# Go get a JWT for the test user from the dev auth client (real users are not in this DB)
response = requests.post("https://gigantum.auth0.com/oauth/token", json=auth_data)
token_data = response.json()
yield conf_file, working_dir, token_data
shutil.rmtree(working_dir) | 5,353,204 |
def eval_sysu(distmat, q_pids, g_pids, q_camids, g_camids, max_rank = 20):
"""Evaluation with sysu metric
Key: for each query identity, its gallery images from the same camera view are discarded, following the original setting in the dataset.
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print("Note: number of gallery samples is quite small, got {}".format(num_g))
indices = np.argsort(distmat, axis=1)
pred_label = g_pids[indices]
matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)
# compute cmc curve for each query
new_all_cmc = []
all_cmc = []
all_AP = []
all_INP = []
num_valid_q = 0. # number of valid query
for q_idx in range(num_q):
# get query pid and camid
q_pid = q_pids[q_idx]
q_camid = q_camids[q_idx]
# remove gallery samples captured in the same location as the query (SYSU protocol: query camera 3 overlaps gallery camera 2)
order = indices[q_idx]
remove = (q_camid == 3) & (g_camids[order] == 2)
keep = np.invert(remove)
# compute cmc curve
# the cmc calculation is different from standard protocol
# we follow the protocol of the author's released code
new_cmc = pred_label[q_idx][keep]
new_index = np.unique(new_cmc, return_index=True)[1]
new_cmc = [new_cmc[index] for index in sorted(new_index)]
new_match = (new_cmc == q_pid).astype(np.int32)
new_cmc = new_match.cumsum()
new_all_cmc.append(new_cmc[:max_rank])
orig_cmc = matches[q_idx][keep] # binary vector, positions with value 1 are correct matches
if not np.any(orig_cmc):
# this condition is true when query identity does not appear in gallery
continue
cmc = orig_cmc.cumsum()
# compute mINP
# reference: Deep Learning for Person Re-identification: A Survey and Outlook
pos_idx = np.where(orig_cmc == 1)
pos_max_idx = np.max(pos_idx)
inp = cmc[pos_max_idx]/ (pos_max_idx + 1.0)
all_INP.append(inp)
cmc[cmc > 1] = 1
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.
# compute average precision
# reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
num_rel = orig_cmc.sum()
tmp_cmc = orig_cmc.cumsum()
tmp_cmc = [x / (i+1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
AP = tmp_cmc.sum() / num_rel
all_AP.append(AP)
assert num_valid_q > 0, "Error: all query identities do not appear in gallery"
all_cmc = np.asarray(all_cmc).astype(np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q # standard CMC
new_all_cmc = np.asarray(new_all_cmc).astype(np.float32)
new_all_cmc = new_all_cmc.sum(0) / num_valid_q
mAP = np.mean(all_AP)
mINP = np.mean(all_INP)
return new_all_cmc, mAP, mINP | 5,353,205 |
def espa_login() -> str:
"""
Get ESPA password using command-line input
:return: the password entered at the prompt
"""
return getpass.getpass("Enter ESPA password: ") | 5,353,206 |
def search_hyperparameters(simulation_folder, method='RF', mode='classification'):
"""
Function for searching the best hyperparameters for the chosen algorithm
:param simulation_folder: str, name of subfolder for given data set
:param method: str, algorithm to tune: 'RF' (random forest), 'GB' (gradient boosting) or 'XGB' (XGBoost)
:param mode: str, 'classification' or 'regression'
:return: none, hyperparameters are saved down as side effect
"""
Start = datetime.now()
project_directory = os.path.dirname(os.getcwd())
path_to_save = os.path.join(project_directory, "Data")
path_to_characteristics_data = os.path.join(path_to_save, simulation_folder, "Characteristics")
path_to_hyperparameters = os.path.join(project_directory, "Models", simulation_folder, method, 'Model')
if not os.path.exists(path_to_hyperparameters):
os.makedirs(path_to_hyperparameters)
X_train = np.load(os.path.join(path_to_characteristics_data, "X_train.npy"), allow_pickle=True)
y_train = np.load(os.path.join(path_to_characteristics_data, "y_train.npy"), allow_pickle=True)
if method == 'RF':
if mode == 'classification':
random_params = {'n_estimators': [int(x) for x in range(100, 1001, 100)],
'criterion': ["gini", "entropy"],
'max_features': ['log2', 'sqrt', None],
'max_depth': [int(x) for x in np.linspace(10, 110, num=11)] + [None],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [1, 2, 4],
'bootstrap': [True, False]
}
box = RandomForestClassifier()
elif mode == 'regression':
random_params = {'n_estimators': [int(x) for x in range(100, 1001, 100)],
'criterion': ["mae"],
'max_depth': [int(x) for x in np.linspace(10, 110, num=11)] + [None],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [1, 2, 4],
}
box = RandomForestRegressor()
elif method == 'GB':
if mode == 'classification':
random_params = {'n_estimators': [int(x) for x in range(100, 1001, 100)],
'max_features': ['log2', 'sqrt', None],
'max_depth': [int(x) for x in np.linspace(10, 110, num=11)] + [None],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [1, 2, 4],
'criterion': ['friedman_mse']
}
box = GradientBoostingClassifier()
elif mode == 'regression':
random_params = {'n_estimators': [int(x) for x in range(100, 1001, 100)],
'max_features': ['log2', 'sqrt', None],
'max_depth': [int(x) for x in np.linspace(10, 110, num=11)] + [None],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [1, 2, 4],
'criterion': ['friedman_mse']
}
box = GradientBoostingRegressor()
elif method == 'XGB':
if mode == 'classification':
random_params = {'eta': list(np.arange(0.3, 1.1, 0.1)),
'max_depth': [int(x) for x in range(1, 11)],
'min_child_weight': [int(x) for x in range(1, 11)],
'gamma': [int(x) for x in range(0, 11)],
'base_score': [0.5, 1],
'eval_metric': ['merror', 'auc'],
}
box = XGBClassifier()
elif mode == 'regression':
random_params = {'eta': list(np.arange(0.3, 1.1, 0.1)),
'max_depth': [int(x) for x in range(1, 11)],
'min_child_weight': [int(x) for x in range(1, 11)],
'gamma': [int(x) for x in range(0, 11)],
'base_score': [1],
'eval_metric': ['mae'],
}
box = XGBRegressor()
# Random search of parameters, using 10 fold cross validation,
# search across 100 different combinations, and use all available cores
box_random = RandomizedSearchCV(estimator=box, param_distributions=random_params, n_iter=100, cv=10,
verbose=2, random_state=42, n_jobs=-1)
# Fit the random search model
box_random.fit(X_train, y_train)
print(box_random.best_params_)
with open(os.path.join(path_to_hyperparameters, "hyperparameters.json"), 'w') as fp:
json.dump(box_random.best_params_, fp)
End = datetime.now()
ExecutedTime = End - Start
df = pd.DataFrame({'ExecutedTime': [ExecutedTime]})
df.to_csv(os.path.join(path_to_hyperparameters, "time_for_searching.csv"))
print(ExecutedTime) | 5,353,207 |
def row_up1_array(row, col):
"""This function establishes an array that contains the index for the row above each entry"""
up1_array = np.zeros((row, col), dtype=np.uint8)
for i in range(row):
up1_array[i, :] = np.ones(col, dtype = np.uint8) * ((i - 1) % row)
return up1_array | 5,353,208 |
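# Worked example (added for illustration): for a 3x2 grid, each entry holds the index
# of the row above it, wrapping around at the top because of (i - 1) % row.
# Assumes numpy is imported as np and row_up1_array is defined as above.
print(row_up1_array(3, 2))
# [[2 2]
#  [0 0]
#  [1 1]]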
def lcm(a, b):
"""Return lowest common multiple."""
return a * b // gcd(a, b) | 5,353,209 |
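# Quick usage sketch (illustrative): lcm relies on a gcd function not shown in this
# snippet, e.g. the one from the standard library's math module.
from math import gcd

assert lcm(4, 6) == 12
assert lcm(21, 6) == 42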
def chl_mean_hsl(weights: np.ndarray) -> Callable[[np.ndarray], np.ndarray]:
"""
return a function that can calculate the channel-wise average
of the input picture in HSL color space
"""
return lambda img: np.average(cv2.cvtColor(img, cv2.COLOR_BGR2HLS), axis=(0, 1), weights=weights) | 5,353,210 |
def cosine_mrl_option(labels, predicts):
"""For a minibatch of image and sentences embeddings, computes the pairwise contrastive loss"""
#batch_size, double_n_emd = tensor.shape(predicts)
#res = tensor.split(predicts, [double_n_emd/2, double_n_emd/2], 2, axis=-1)
img = l2norm(labels)
text = l2norm(predicts)
scores = tensor.dot(img, text.T)
diagonal = scores.diagonal()
mrl_margin = 0.3
loss_max_violation = True
# caption retrieval (margin + neg - pos)
cost_s = tensor.maximum(0, mrl_margin + scores - diagonal.reshape((-1,1)))
# clear diagonals
cost_s = fill_diagonal(cost_s, 0)
# img retrieval
cost_im = tensor.maximum(0, mrl_margin + scores - diagonal)
cost_im = fill_diagonal(cost_im, 0)
if loss_max_violation:
if cost_s:
cost_s = tensor.max(cost_s, axis=1)
if cost_im:
cost_im = tensor.max(cost_im, axis=0)
loss = cost_s.mean() + cost_im.mean()
return loss | 5,353,211 |
def escape_cdata(cdata):
"""Escape a string for an XML CDATA section"""
return cdata.replace(']]>', ']]>]]><![CDATA[') | 5,353,212 |
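# Illustrative sketch (not from the original source): a literal "]]>" inside the payload
# is split across two CDATA sections so it cannot terminate the section early.
payload = 'data ]]> more'
xml_fragment = '<![CDATA[' + escape_cdata(payload) + ']]>'
print(xml_fragment)
# <![CDATA[data ]]>]]><![CDATA[ more]]>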
def insert_hosts(raw_data, to_site_id, to_taxon_id):
"""Insert hosts."""
log(f'Inserting {DATASET_ID} hosts')
raw_data['host_id'] = db.get_ids(raw_data, 'hosts')
raw_data['site_key'] = tuple(zip(
raw_data.Latitude_Original,
raw_data.Longitude_Original,
raw_data.verbatim_elevation))
raw_data['site_id'] = raw_data.site_key.map(to_site_id)
raw_data['host_taxon_id'] = raw_data.sci_name.map(to_taxon_id)
raw_data['mass'] = raw_data['mass (g)']
raw_data['age'] = None
raw_data['dataset_host_id'] = None
fields = """needs_confirmation""".split()
raw_data['host_json'] = json_object(raw_data, fields)
raw_data.loc[:, db.HOST_COLUMNS].to_sql(
'hosts', db.connect(), if_exists='append', index=False) | 5,353,213 |
def _collect_data_for_docstring(func, annotation):
"""
Collect data to be printed in docstring. The data is collected from
custom annotation (dictionary passed as a parameter for the decorator)
and standard Python annotations for the parameters (if any). Data from
custom annotation always overrides Python parameter annotations.
Parameters
----------
func: callable
Reference to the function.
annotation: dict
Custom annotation.
Returns
-------
Dictionary of the collected parameters
"""
signature = inspect.signature(func)
parameters = signature.parameters
return_annotation = signature.return_annotation
doc_params = dict()
# Description of the function
doc_params["description"] = annotation.get("description", "")
# Flag that tells if the function is a generator. Title for returning
# values for generator is 'Yields' and for regular functions it is 'Returns'
doc_params["is_generator"] = inspect.isgeneratorfunction(func)
doc_params["parameters"] = {}
if parameters: # The function may have no parameters
# We will print names of ALL parameters from the signature
for p_name, p in parameters.items():
# Select description, annotation and types from available sources.
# Annotation (parameter of the wrapper) always overrides Python annotation.
doc_params["parameters"][p_name] = {}
kind = p.kind.name
kind = kind.lower().replace("_", " ")
doc_params["parameters"][p_name]["kind"] = kind
desc, an, plans, devices, enums = "", "", {}, {}, {}
if ("parameters" in annotation) and (p_name in annotation["parameters"]):
p_an = annotation["parameters"][p_name]
desc = p_an.get("description", "")
if "annotation" in p_an:
an = p_an["annotation"]
# Ignore annotation if it is an empty string. Lists of plans
# and devices make no sense, so don't include them.
if an:
# Now save the lists of plans and devices if any
plans = p_an.get("plans", {})
devices = p_an.get("devices", {})
enums = p_an.get("enums", {})
if not an and parameters[p_name].annotation != inspect.Parameter.empty:
an = str(parameters[p_name].annotation)
doc_params["parameters"][p_name]["annotation"] = _convert_annotation_to_type(an)
doc_params["parameters"][p_name]["description"] = desc
doc_params["parameters"][p_name]["plans"] = plans
doc_params["parameters"][p_name]["devices"] = devices
doc_params["parameters"][p_name]["enums"] = enums
if p.default != inspect.Parameter.empty:
# Print will print strings in quotes (desired behavior)
v_default = pprint.pformat(p.default)
else:
v_default = None
# If 'v_default' is None, it is not specified, so it should not be printed
# in the docstring at all
doc_params["parameters"][p_name]["default"] = v_default
# Print return value annotation and description. Again the annotation from
# custom annotation overrides Python annotation.
doc_params["returns"] = {}
desc, an = "", ""
if "returns" in annotation or (return_annotation != inspect.Parameter.empty):
if "returns" in annotation:
desc = annotation["returns"].get("description", "")
an = annotation["returns"].get("annotation", "")
if not an:
if return_annotation != inspect.Signature.empty:
an = str(return_annotation)
doc_params["returns"]["description"] = desc
if doc_params["is_generator"]:
an = _extract_yield_type(an)
doc_params["returns"]["annotation"] = _convert_annotation_to_type(an)
return doc_params | 5,353,214 |
def _strip_unbalanced_punctuation(text, is_open_char, is_close_char):
"""Remove unbalanced punctuation (e.g parentheses or quotes) from text.
Removes each opening punctuation character for which it can't find
corresponding closing character, and vice versa.
It can only handle one type of punctuation
(e.g. it could strip quotes or parentheses but not both).
It takes functions (is_open_char, is_close_char),
instead of the characters themselves,
so that we can determine from nearby characters whether a straight quote is
an opening or closing quote.
Args:
text (string): the text to fix
is_open_char: a function that accepts the text and an index,
and returns true if the character at that index is
an opening punctuation mark.
is_close_char: same as is_open_char for closing punctuation mark.
Returns:
The text with unmatched punctuation removed.
"""
# lists of unmatched opening and closing characters
opening_chars = []
unmatched_closing_chars = []
for idx, c in enumerate(text):
if is_open_char(text, idx):
opening_chars.append(idx)
elif is_close_char(text, idx):
if opening_chars:
# this matches a character we found earlier
opening_chars.pop()
else:
# this doesn't match any opening character
unmatched_closing_chars.append(idx)
char_indices = [i for (i, _) in enumerate(text)
if not(i in opening_chars or i in unmatched_closing_chars)]
stripped_text = "".join([text[i] for i in char_indices])
return stripped_text | 5,353,215 |
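# Minimal usage sketch with hypothetical predicate functions (added for illustration):
# the unmatched opening parenthesis is stripped while the balanced pair is kept.
def _is_open_paren(text, idx):
    return text[idx] == '('

def _is_close_paren(text, idx):
    return text[idx] == ')'

print(_strip_unbalanced_punctuation('a (b (c) d', _is_open_paren, _is_close_paren))
# a b (c) d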
def read(fin, alphabet=None):
"""Read and parse a fasta file.
Args:
fin -- A stream or file to read
alphabet -- The expected alphabet of the data, if given
Returns:
SeqList -- A list of sequences
Raises:
ValueError -- If the file is unparsable
"""
seqs = [s for s in iterseq(fin, alphabet)]
name = names[0]
if hasattr(fin, "name"):
name = fin.name
return SeqList(seqs, name=name) | 5,353,216 |
def trip_destination(
trips,
tours_merged,
chunk_size, trace_hh_id):
"""
Choose a destination for all 'intermediate' trips based on trip purpose.
Final trips already have a destination (the primary tour destination for outbound trips,
and home for inbound trips.)
"""
trace_label = 'trip_destination'
model_settings_file_name = 'trip_destination.yaml'
model_settings = config.read_model_settings(model_settings_file_name)
CLEANUP = model_settings.get('CLEANUP', True)
fail_some_trips_for_testing = model_settings.get('fail_some_trips_for_testing', False)
trips_df = trips.to_frame()
tours_merged_df = tours_merged.to_frame()
estimator = estimation.manager.begin_estimation('trip_destination')
if estimator:
estimator.write_coefficients(model_settings=model_settings)
# estimator.write_spec(model_settings, tag='SAMPLE_SPEC')
estimator.write_spec(model_settings, tag='SPEC')
estimator.set_alt_id(model_settings["ALT_DEST_COL_NAME"])
estimator.write_table(inject.get_injectable('size_terms'), 'size_terms', append=False)
estimator.write_table(inject.get_table('land_use').to_frame(), 'landuse', append=False)
estimator.write_model_settings(model_settings, model_settings_file_name)
logger.info("Running %s with %d trips", trace_label, trips_df.shape[0])
trips_df, save_sample_df = run_trip_destination(
trips_df,
tours_merged_df,
estimator=estimator,
chunk_size=chunk_size,
trace_hh_id=trace_hh_id,
trace_label=trace_label,
fail_some_trips_for_testing=fail_some_trips_for_testing)
# testing feature to make sure at least one trip fails so trip_purpose_and_destination model is run
if config.setting('testing_fail_trip_destination', False) and not trips_df.failed.any():
if (trips_df.trip_num < trips_df.trip_count).sum() == 0:
raise RuntimeError(f"can't honor 'testing_fail_trip_destination' setting because no intermediate trips")
fail_o = trips_df[trips_df.trip_num < trips_df.trip_count].origin.max()
trips_df.failed = (trips_df.origin == fail_o) & \
(trips_df.trip_num < trips_df.trip_count)
if trips_df.failed.any():
logger.warning("%s %s failed trips", trace_label, trips_df.failed.sum())
if inject.get_injectable('pipeline_file_prefix', None):
file_name = f"{trace_label}_failed_trips_{inject.get_injectable('pipeline_file_prefix')}"
else:
file_name = f"{trace_label}_failed_trips"
logger.info("writing failed trips to %s", file_name)
tracing.write_csv(trips_df[trips_df.failed], file_name=file_name, transpose=False)
if estimator:
estimator.end_estimation()
# no trips should have failed, since we overwrite choices and the sample should not contain failed trips
assert not trips_df.failed.any()
if CLEANUP:
if trips_df.failed.any():
flag_failed_trip_leg_mates(trips_df, 'failed')
if save_sample_df is not None:
save_sample_df.drop(trips_df.index[trips_df.failed], level='trip_id', inplace=True)
trips_df = cleanup_failed_trips(trips_df)
trips_df.drop(columns='failed', inplace=True, errors='ignore')
pipeline.replace_table("trips", trips_df)
if trace_hh_id:
tracing.trace_df(trips_df,
label=trace_label,
slicer='trip_id',
index_label='trip_id',
warn_if_empty=True)
if save_sample_df is not None:
# might be none if want_sample_table but there are no intermediate trips
# expect samples only for intermediate trip destinations
assert len(save_sample_df.index.get_level_values(0).unique()) == \
len(trips_df[trips_df.trip_num < trips_df.trip_count])
sample_table_name = model_settings.get('DEST_CHOICE_SAMPLE_TABLE_NAME')
assert sample_table_name is not None
logger.info("adding %s samples to %s" % (len(save_sample_df), sample_table_name))
# lest they try to put tour samples into the same table
if pipeline.is_table(sample_table_name):
raise RuntimeError("sample table %s already exists" % sample_table_name)
pipeline.extend_table(sample_table_name, save_sample_df) | 5,353,217 |
def get_min_max_value(dfg):
"""
Gets min and max value assigned to edges
in DFG graph
Parameters
-----------
dfg
Directly follows graph
Returns
-----------
min_value
Minimum value in directly follows graph
max_value
Maximum value in directly follows graph
"""
min_value = 9999999999
max_value = -1
for edge in dfg:
if dfg[edge] < min_value:
min_value = dfg[edge]
if dfg[edge] > max_value:
max_value = dfg[edge]
return min_value, max_value | 5,353,218 |
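# Small usage sketch (illustrative): the DFG is assumed to be a mapping from
# (activity, activity) edges to their frequency counts.
dfg = {('register', 'check'): 12, ('check', 'decide'): 7, ('decide', 'pay'): 3}
print(get_min_max_value(dfg))  # (3, 12)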
def process_post(category_id, post_details, user):
"""Check topic is present in Discourse. IF exists then post, otherwise create new topic for category
"""
error = False
error_message = ''
# DISCOURSE_DEV_POST_SUFFIX is used to differentiate the same target name from different dev systems in Discourse
# It is not intended to be used for production when there is a dedicated Discourse.
post_details['title'] = post_details['title'] + settings.DISCOURSE_DEV_POST_SUFFIX
try:
topic = DiscourseTopic.objects.get(topic_title=post_details['title'])
topic_id = topic.discourse_topic_id
if post_details['content'] == '':
# No content - Return the URL for the topic
post_url = os.path.join(settings.DISCOURSE_HOST, 't', str(topic_id))
else:
# Create post for topic
error, error_message, null_id, post_url = create_post(user, post_details, topic_id=topic_id)
except DiscourseTopic.DoesNotExist:
# Create Topic for Category
error, error_message, topic_id, post_url = create_post(user, post_details, category_id=category_id)
if not error:
DiscourseTopic.objects.create(topic_title=post_details['title'],
author=user,
discourse_topic_id=topic_id)
return error, error_message, topic_id, post_url | 5,353,219 |
def vgg_fcn(num_classes=1000, pretrained=False, batch_norm=False, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
num_classes(int): the number of classes at dataset
pretrained (bool): If True, returns a model pre-trained on ImageNet
batch_norm: if you want to introduce batch normalization
"""
if pretrained:
kwargs['init_weights'] = True
model = VGG(make_layers(cfg['D'], batch_norm=batch_norm), num_classes, **kwargs)
if pretrained:
# loading weights
if batch_norm:
pretrained_weights = model_zoo.load_url(model_urls['vgg16_bn'])
else:
pretrained_weights = model_zoo.load_url(model_urls['vgg16'])
model.load_state_dict(pretrained_weights, strict=False)
return model | 5,353,220 |
def test_write(string: str) -> None:
"""Test illud.outputs.standard_output.StandardOutput.write."""
stdout_mock = MagicMock(sys.stdout)
with patch('sys.stdout', stdout_mock):
standard_output: StandardOutput = StandardOutput()
standard_output.write(string)
stdout_mock.write.assert_called_once_with(string) | 5,353,221 |
def apply_heatmap(
frame: npt.NDArray[np.uint8],
cmap: Union[str, Colormap] = "Pastel1",
normalize: bool = True,
) -> npt.NDArray[np.uint8]:
"""Apply heatmap to an input BGR image.
Args:
frame (npt.NDArray[np.uint8]) : Input image (BGR).
cmap (Union[str, Colormap], optional) : An identifier for color maps. Defaults to ``"Pastel1"``.
normalize (bool, optional) : Whether to perform :func:`min-max normalization <veditor.utils.image_utils.min_max_normalization>`. Defaults to ``True``.
Returns:
npt.NDArray[np.uint8]: The image with the heatmap applied (BGR).
.. plot::
:class: popup-img
>>> import cv2
>>> import matplotlib.pyplot as plt
>>> from veditor.utils import cv2plot, SampleData, apply_heatmap
>>> frame = cv2.imread(SampleData().IMAGE_PATH)
>>> colormaps = ["Pastel1", "Set1", "tab10", "hsv", "bwr", "Reds"]
>>> num_methods = len(colormaps)
>>> ncols = 3; nrows = num_methods//ncols
>>> fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=(6 * ncols, 4 * nrows))
>>> for i,cmap in enumerate(colormaps):
... ax = cv2plot(apply_heatmap(frame, cmap=cmap), ax=axes[i%2][i//2])
... ax.set_title(cmap)
>>> fig.show()
"""
cmap = plt.get_cmap(cmap)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if normalize:
gray = min_max_normalization(gray)
gray = gray.astype(float) / 255.0
frame = cv2.cvtColor((255 * cmap(gray)).astype(np.uint8)[:, :, :3], cv2.COLOR_RGB2BGR)
return frame | 5,353,222 |
def fmt_time(timestamp):
"""Return ISO formatted time from seconds from epoch."""
if timestamp:
return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(timestamp))
else:
return '-' | 5,353,223 |
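# Illustrative usage (added): formats an epoch timestamp in local time and falls back
# to '-' for falsy values. Assumes the time module is imported as the function requires.
import time

print(fmt_time(time.time()))  # e.g. 2021-06-01T12:00:00, depending on the local clock
print(fmt_time(None))         # -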
def lislice(iterable, *args):
""" (iterable, stop) or (iterable, start, stop[, step])
>>> lislice('ABCDEFG', 2)
['A', 'B']
>>> lislice('ABCDEFG', 2, 4)
['C', 'D']
>>> lislice('ABCDEFG', 2, None)
['C', 'D', 'E', 'F', 'G']
>>> lislice('ABCDEFG', 0, None, 2)
['A', 'C', 'E', 'G']
"""
return list(islice(iterable, *args)) | 5,353,224 |
async def up(ctx):
"""
A command to update the kebab cote.
"""
global cote
cote += 1
msg = "```"
msg += "+1 pour Tristan : ta cote est maintenant de "
msg += str(cote)
msg += "```"
await ctx.send(msg) | 5,353,225 |
def solve_with_duplicate_optional_items(data, max_height, max_width):
"""Solve the problem by building 2 optional items (rotated or not) for each item."""
# Derived data (expanded to individual items).
data_widths = data['width'].to_numpy()
data_heights = data['height'].to_numpy()
data_availability = data['available'].to_numpy()
data_values = data['value'].to_numpy()
# Non duplicated items data.
base_item_widths = np.repeat(data_widths, data_availability)
base_item_heights = np.repeat(data_heights, data_availability)
base_item_values = np.repeat(data_values, data_availability)
num_data_items = len(base_item_values)
# Create rotated items by duplicating.
item_widths = np.concatenate((base_item_widths, base_item_heights))
item_heights = np.concatenate((base_item_heights, base_item_widths))
item_values = np.concatenate((base_item_values, base_item_values))
num_items = len(item_values)
# OR-Tools model
model = cp_model.CpModel()
# Variables
x_starts = []
y_starts = []
is_used = []
x_intervals = []
y_intervals = []
for i in range(num_items):
## Is the item used?
is_used.append(model.NewBoolVar(f'is_used{i}'))
## Item coordinates.
x_starts.append(
model.NewIntVar(0, max_width - int(item_widths[i]), f'x_start{i}'))
y_starts.append(
model.NewIntVar(0, max_height - int(item_heights[i]),
f'y_start{i}'))
## Interval variables.
x_intervals.append(
model.NewOptionalFixedSizeIntervalVar(x_starts[i], item_widths[i],
is_used[i], f'x_interval{i}'))
y_intervals.append(
model.NewOptionalFixedSizeIntervalVar(y_starts[i], item_heights[i],
is_used[i], f'y_interval{i}'))
# Constraints.
## Only one of non-rotated/rotated pair can be used.
for i in range(num_data_items):
model.Add(is_used[i] + is_used[i + num_data_items] <= 1)
## 2D no overlap.
model.AddNoOverlap2D(x_intervals, y_intervals)
## Objective.
model.Maximize(cp_model.LinearExpr.WeightedSum(is_used, item_values))
# Output proto to file.
if FLAGS.output_proto:
print('Writing proto to %s' % FLAGS.output_proto)
with open(FLAGS.output_proto, 'w') as text_file:
text_file.write(str(model))
# Solve model.
solver = cp_model.CpSolver()
if FLAGS.params:
text_format.Parse(FLAGS.params, solver.parameters)
status = solver.Solve(model)
# Report solution.
if status == cp_model.OPTIMAL:
used = {i for i in range(num_items) if solver.BooleanValue(is_used[i])}
data = pd.DataFrame({
'x_start': [solver.Value(x_starts[i]) for i in used],
'y_start': [solver.Value(y_starts[i]) for i in used],
'item_width': [item_widths[i] for i in used],
'item_height': [item_heights[i] for i in used],
'x_end': [solver.Value(x_starts[i]) + item_widths[i] for i in used],
'y_end': [
solver.Value(y_starts[i]) + item_heights[i] for i in used
],
'item_value': [item_values[i] for i in used]
})
print(data) | 5,353,226 |
def calculate_transition_cost(number_objs: int, target_storage_class: str) -> float:
"""
Calculates the cost of transition data from one class to another
Args:
number_objs: the number of objects that are added on a monthly basis
target_storage_class: the storage class the objects will reside in after they are transitioned
Returns:
int, the cost of the transition
"""
target_storage_class_data = data[target_storage_class]
transition_cost = (
number_objs / target_storage_class_data["items_per_transition_chunk"]
) * target_storage_class_data["transition_cost"]
return transition_cost | 5,353,227 |
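# Hedged usage sketch: the module-level `data` pricing table is not shown in this snippet,
# so the structure below is only an assumption that matches how the function indexes it.
data = {
    'GLACIER': {'items_per_transition_chunk': 1000, 'transition_cost': 0.05},
}

# 10,000 objects -> 10 chunks of 1,000 -> 10 * 0.05 = 0.5
print(calculate_transition_cost(10_000, 'GLACIER'))  # 0.5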
def _exceptionwarning(ui):
"""Produce a warning message for the current active exception"""
# For compatibility checking, we discard the portion of the hg
# version after the + on the assumption that if a "normal
# user" is running a build with a + in it the packager
# probably built from fairly close to a tag and anyone with a
# 'make local' copy of hg (where the version number can be out
# of date) will be clueful enough to notice the implausible
# version number and try updating.
ct = util.versiontuple(n=2)
worst = None, ct, b'', b''
if ui.config(b'ui', b'supportcontact') is None:
for name, mod in extensions.extensions():
# 'testedwith' should be bytes, but not all extensions are ported
# to py3 and we don't want UnicodeException because of that.
testedwith = stringutil.forcebytestr(
getattr(mod, 'testedwith', b'')
)
version = extensions.moduleversion(mod)
report = getattr(mod, 'buglink', _(b'the extension author.'))
if not testedwith.strip():
# We found an untested extension. It's likely the culprit.
worst = name, b'unknown', report, version
break
# Never blame on extensions bundled with Mercurial.
if extensions.ismoduleinternal(mod):
continue
tested = [util.versiontuple(t, 2) for t in testedwith.split()]
if ct in tested:
continue
lower = [t for t in tested if t < ct]
nearest = max(lower or tested)
if worst[0] is None or nearest < worst[1]:
worst = name, nearest, report, version
if worst[0] is not None:
name, testedwith, report, version = worst
if not isinstance(testedwith, (bytes, str)):
testedwith = b'.'.join(
[stringutil.forcebytestr(c) for c in testedwith]
)
extver = version or _(b"(version N/A)")
warning = _(
b'** Unknown exception encountered with '
b'possibly-broken third-party extension "%s" %s\n'
b'** which supports versions %s of Mercurial.\n'
b'** Please disable "%s" and try your action again.\n'
b'** If that fixes the bug please report it to %s\n'
) % (name, extver, testedwith, name, stringutil.forcebytestr(report))
else:
bugtracker = ui.config(b'ui', b'supportcontact')
if bugtracker is None:
bugtracker = _(b"https://mercurial-scm.org/wiki/BugTracker")
warning = (
_(
b"** unknown exception encountered, "
b"please report by visiting\n** "
)
+ bugtracker
+ b'\n'
)
sysversion = pycompat.sysbytes(sys.version).replace(b'\n', b'')
def ext_with_ver(x):
ext = x[0]
ver = extensions.moduleversion(x[1])
if ver:
ext += b' ' + ver
return ext
warning += (
(_(b"** Python %s\n") % sysversion)
+ (_(b"** Mercurial Distributed SCM (version %s)\n") % util.version())
+ (
_(b"** Extensions loaded: %s\n")
% b", ".join(
[ext_with_ver(x) for x in sorted(extensions.extensions())]
)
)
)
return warning | 5,353,228 |
def get_covid():
"""This module sends off a covid notification. You can't get covid from this."""
covid_data = covid_handler()
covid_content = Markup("Date: " + str(covid_data["date"]) + ",<br/>Country: " + str(
covid_data["areaName"]) + ",<br/>New Cases: " + str(
covid_data["newCasesByPublishDate"]) + ",<br/>Total Cases: " + str(
covid_data["cumCasesByPublishDate"]))
# The above formats the covid data, ready to send it off as a notification
covid_notification = {"title": "Covid Cases", "content": covid_content}
return covid_notification | 5,353,229 |
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i : i + n] | 5,353,230 |
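# Tiny usage example (illustrative): the final chunk may be shorter than n.
print(list(chunks([1, 2, 3, 4, 5], 2)))  # [[1, 2], [3, 4], [5]]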
def send_songogram(your_name, artist_first_name, artist_last_name, song_name, number_to_call):
""" Function for sending a Sonogram.
:param your_name: string containing the person sending the sonogram's name.
:param artist_first_name: string containing the musician's first name.
:param artist_last_name: string containing the musician's last name.
:param song_name: string containing the song name.
:param number_to_call: string of the telephone number to send a sonogram to.
"""
try:
lyrics = scrape_lyrics(artist_first_name, artist_last_name, song_name)
make_call(number_to_call, lyrics, your_name)
send_text(song_name, artist_first_name + ' ' + artist_last_name, number_to_call, your_name)
return {'status': 201}
except:
return {'status': 400,'error': 'Bad Request', 'message': 'Unable to process request'} | 5,353,231 |
def set_proto_message_event(
pb_message_event,
span_data_message_event):
"""Sets properties on the protobuf message event.
:type pb_message_event:
:class: `~opencensus.proto.trace.Span.TimeEvent.MessageEvent`
:param pb_message_event: protobuf message event
:type span_data_message_event:
:class: `~opencensus.trace.time_event.MessageEvent`
:param span_data_message_event: opencensus message event
"""
pb_message_event.type = span_data_message_event.type
pb_message_event.id = span_data_message_event.id
pb_message_event.uncompressed_size = \
span_data_message_event.uncompressed_size_bytes
pb_message_event.compressed_size = \
span_data_message_event.compressed_size_bytes | 5,353,232 |
def segm_and_cat(sersic_2d_image):
"""fixture for segmentation and catalog"""
image_mean, image_median, image_stddev = sigma_clipped_stats(sersic_2d_image, sigma=3)
threshold = image_stddev * 3
# Define smoothing kernel
kernel_size = 3
fwhm = 3
# Min Source size (area)
npixels = 4 ** 2
return make_catalog(
sersic_2d_image,
threshold=threshold,
deblend=True,
kernel_size=kernel_size,
fwhm=fwhm,
npixels=npixels,
contrast=0.00,
plot=False,
) | 5,353,233 |
def p_function_definition(p):
"""function_definition : declaration_specifiers declarator declaration_list compound_statement
| function_definition_full compound_statement
| declarator declaration_list compound_statement
| declarator compound_statement"""
global LAST_FUNCTION_DECLARATION
symTab = get_current_symtab()
if len(p) == 3:
p[0] = p[1]
p[0]["code"] += p[2]["code"] + [["ENDFUNCTION"]]
no_return = True
ignorecheck = False
for code in p[2]["code"]:
# print(code, ignorecheck, p[1]["value"])
if len(code) > 0 and code[0] == "BEGINFUNCTION":
ignorecheck = True
elif len(code) > 0 and code[0] == "ENDFUNCTION":
ignorecheck = False
elif len(code) > 0 and code[0] == "RETURN" and not ignorecheck:
if len(code) == 1 and p[1]["value"] != "void":
err_msg = "Error at line number " + str(p.lineno(1)) + ": Return type not matching declared type"
GLOBAL_ERROR_LIST.append(err_msg)
raise SyntaxError
# raise Exception("Return type not matching declared type")
elif (
len(code) > 1
and p[1]["value"] != code[1]["type"]
and not (p[1]["value"] == "int" and code[1]["type"].startswith("enum"))
):
err_msg = "Error at line number " + str(p.lineno(1)) + ": Return type not matching declared type"
GLOBAL_ERROR_LIST.append(err_msg)
raise SyntaxError
# raise Exception("Return type not matching declared type")
no_return = False
if no_return and p[1]["value"] != "void":
err_msg = "Error at line number " + str(p.lineno(1)) + ": Return type not matching declared type"
GLOBAL_ERROR_LIST.append(err_msg)
raise SyntaxError
# raise Exception("Return type not matching declared type")
LAST_FUNCTION_DECLARATION = None
else:
# TODO
p[0] = ("function_definition",) + tuple(p[-len(p) + 1 :]) | 5,353,234 |
def break_word_by_trailing_integer(pname_fid: str) -> Tuple[str, str]:
"""
Splits a word that has a value that is an integer
Parameters
----------
pname_fid : str
the DVPRELx term (e.g., A(11), NSM(5))
Returns
-------
word : str
the part of the word before the trailing integer (e.g., 'T', 'THETA')
num : str
the trailing integer as a string (e.g., '11')
Examples
--------
>>> break_word_by_trailing_integer('T11')
('T', '11')
>>> break_word_by_trailing_integer('THETA11')
('THETA', '11')
"""
nums = []
i = 0
for i, letter in enumerate(reversed(pname_fid)):
if letter.isdigit():
nums.append(letter)
else:
break
num = ''.join(nums[::-1])
if not num:
msg = ("pname_fid=%r does not follow the form 'T1', 'T11', 'THETA42' "
"(letters and a number)" % pname_fid)
raise SyntaxError(msg)
word = pname_fid[:-i]
assert len(word)+len(num) == len(pname_fid), 'word=%r num=%r pname_fid=%r' % (word, num, pname_fid)
return word, num | 5,353,235 |
def parse_args():
"""
parse command line arguments
:return dict: dictionary of parameters
"""
argparser = argparse.ArgumentParser()
# training data
argparser.add_argument('--trainfiles', nargs='*', default=['./data/valid.tfrecords'], help='Data file(s) for training (tfrecord).')
argparser.add_argument('--testfiles', nargs='*', default=['./data/valid.tfrecords'], help='Data file(s) for validation or evaluation (tfrecord).')
# input configuration
argparser.add_argument('--map_pixel_in_meters', type=float, default=0.02, help='The width (and height) of a pixel of the map in meters. Defaults to 0.02 for House3D data.')
argparser.add_argument('--init_particles_distr', type=str, default='tracking', help='Distribution of initial particles. Possible values: tracking / one-room.')
argparser.add_argument('--init_particles_std', nargs='*', default=["0.3", "0.523599"], help='Standard deviations for generated initial particles for tracking distribution. Values: translation std (meters), rotation std (radians)')
argparser.add_argument('--trajlen', type=int, default=24, help='Length of trajectories.')
# PF configuration
argparser.add_argument('--num_particles', type=int, default=30, help='Number of particles in Particle Filter.')
argparser.add_argument('--transition_std', nargs='*', default=["0.0", "0.0"], help='Standard deviations for transition model. Values: translation std (meters), rotation std (radians)')
argparser.add_argument('--resample', type=str, default='false', help='Resample particles in Particle Filter. Possible values: true / false.')
argparser.add_argument('--alpha_resample_ratio', type=float, default=1.0, help='Trade-off parameter for soft-resampling in PF-net. Only effective if resample == true. Assumes values 0.0 < alpha <= 1.0. Alpha equal to 1.0 corresponds to hard-resampling.')
# training configuration
argparser.add_argument('--batch_size', type=int, default=24, help='Minibatch size for training.')
argparser.add_argument('--learningrate', type=float, default=0.0025, help='Initial learning rate for training.')
argparser.add_argument('--epochs', type=int, default=1, help='Number of epochs for training.')
argparser.add_argument('--load', type=str, default='', help='Load a previously trained model from a checkpoint file.')
argparser.add_argument('--seed', type=int, default='42', help='Fix the random seed of numpy and tensorflow.')
argparser.add_argument('--logpath', type=str, default='./log/', help='Specify path for logs.')
argparser.add_argument('--gpu_num', type=int, default='0', help='use gpu no. to train')
params = argparser.parse_args()
# convert multi-input fileds to numpy arrays
params.transition_std = np.array(params.transition_std, np.float32)
params.init_particles_std = np.array(params.init_particles_std, np.float32)
# build initial covariance matrix of particles, in pixels and radians
particle_std = params.init_particles_std.copy()
particle_std[0] = particle_std[0] / params.map_pixel_in_meters # convert meters to pixels
particle_std2 = np.square(particle_std) # variance
params.init_particles_cov = np.diag(particle_std2[(0, 0, 1),])
params.transition_std = np.array([params.transition_std[0] / params.map_pixel_in_meters, params.transition_std[1]], np.float32)  # in pixels & radians
# fix seed
np.random.seed(params.seed)
tf.random.set_seed(params.seed)
# use RNN as stateful/non-stateful
params.stateful = False
params.return_state = True
#HACK hardcode fix padding for map
params.global_map_size = (4000, 4000, 1)
params.window_scaler = 8.0
# filter out info and warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# convert boolean fields
if params.resample not in ['false', 'true']:
raise ValueError("resample must be 'true' or 'false'")
else:
params.resample = (params.resample == 'true')
gpus = tf.config.experimental.list_physical_devices('GPU')
assert params.gpu_num < len(gpus)
if gpus:
# restrict TF to only use the first GPU
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
tf.config.experimental.set_visible_devices(gpus[params.gpu_num], 'GPU')
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
except RuntimeError as e:
# visible devices must be set before GPUs have been initialized
print(e)
return params | 5,353,236 |
def preprocess_data(image, label, is_training):
"""CIFAR data preprocessing"""
image = tf.image.convert_image_dtype(image, tf.float32)
if is_training:
crop_padding = 4
image = tf.pad(image, [[crop_padding, crop_padding],
[crop_padding, crop_padding], [0, 0]], 'REFLECT')
image = tf.image.random_crop(image, [32, 32, 3])
image = tf.image.random_flip_left_right(image)
if FLAGS.distort_color:
image = color_distortion(image, s=1.0)
else:
image = tf.image.resize_with_crop_or_pad(image, 32, 32) # central crop
return image, label | 5,353,237 |
def delete_env(dlpx_obj, env_name):
"""
Deletes an environment
dlpx_obj: Virtualization Engine session object
env_name: Name of the environment to delete
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
env_obj = find_obj_by_name(dlpx_obj.server_session, environment,
env_name)
if env_obj:
environment.delete(dlpx_obj.server_session, env_obj.reference)
dlpx_obj.jobs[engine_name] = \
dlpx_obj.server_session.last_job
elif env_obj is None:
print('Environment was not found in the Engine: {}'.format(env_name))
sys.exit(1) | 5,353,238 |
def write_to_json_file(content,
path: str,
indent: int = 4,
sort_keys: bool = False,
silent: bool = True) -> None:
"""
Convenience function to write the given content to a json file
:param content: Content to write to the json file
:param path: Path to the json file
:param indent: Indentation to be used for writing to the json file
:param sort_keys: Whether to sort keys when writing or not
:param silent: Whether to silently pass exceptions or not
:return: None
"""
try:
with open(path, 'w') as json_file:
json.dump(content, json_file, indent=indent, sort_keys=sort_keys)
except TypeError as type_error:
if not silent:
raise type_error | 5,353,239 |
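# Illustrative usage sketch (paths and content are made up): writes a small dict, and with
# silent=True a TypeError from non-serializable content is swallowed. Assumes json is imported.
write_to_json_file({'name': 'example', 'values': [1, 2, 3]}, '/tmp/example.json')
write_to_json_file({'bad': {1, 2}}, '/tmp/bad.json', silent=True)  # sets are not JSON serializable; the error is suppressed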
def load_Counties():
"""
Use load_country() instead of this function
"""
# Get data
# Load data using Pandas
dfd = {
'positive': reread_csv(csv_data_file_Global['confirmed_US']),
'death': reread_csv(csv_data_file_Global['deaths_US']),
}
return dfd | 5,353,240 |
def extract_parmtop_residue_with_name(filename, resname):
"""
Extract the atom name/type mapping for the residue with the requested name
from an input parmtop.
Parameters
----------
filename: Path
Filename of the input parmtop.
resname: str
Name of the residue to extract.
Returns
-------
tuple
(dict with key = residue name and value = atom name to type mapping (dict),
the parmtop structure loaded from the file)
"""
res_top = pmd.load_file(str(filename))
extracted_residues = {}
for atom in res_top:
# extract only the requested residues
if atom.residue.name != resname:
continue
if atom.residue.name not in extracted_residues:
extracted_residues[atom.residue.name] = {}
extracted_residues[atom.residue.name][atom.name] = atom.type
return extracted_residues, res_top | 5,353,241 |
def validate_model_present(program_name, optional_arg_map):
"""
Determine if the model file was passed separately or requires extraction from the archive.
If the model is in the archive, extract it to the temporary model location, and set that file as the
MODEL_FILE_SWITCH argument.
The MODEL_FILE_SWITCH value may be specified as multiple comma-separated models.
:param program_name: the name of the calling program, for logging
:param optional_arg_map: the optional arguments from the command line
:raises CLAException: if the specified model is not an existing file, or the model is not found in the archive,
or the model is not found from either argument
"""
_method_name = 'validate_model_present'
global __tmp_model_dir
if CommandLineArgUtil.MODEL_FILE_SWITCH in optional_arg_map:
model_file_value = optional_arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH]
model_files = cla_utils.get_model_files(model_file_value)
for model_file in model_files:
try:
FileUtils.validateExistingFile(model_file)
except IllegalArgumentException, iae:
ex = exception_helper.create_cla_exception('WLSDPLY-20006', program_name, model_file,
iae.getLocalizedMessage(), error=iae)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
elif CommandLineArgUtil.ARCHIVE_FILE_SWITCH in optional_arg_map:
archive_file_name = optional_arg_map[CommandLineArgUtil.ARCHIVE_FILE_SWITCH]
try:
archive_file = WLSDeployArchive(archive_file_name)
__tmp_model_dir = FileUtils.createTempDirectory(program_name)
tmp_model_raw_file = archive_file.extractModel(__tmp_model_dir)
if not tmp_model_raw_file:
ex = exception_helper.create_cla_exception('WLSDPLY-20026', program_name, archive_file_name,
CommandLineArgUtil.MODEL_FILE_SWITCH)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
model_file_name = FileUtils.fixupFileSeparatorsForJython(tmp_model_raw_file.getAbsolutePath())
except (IllegalArgumentException, IllegalStateException, WLSDeployArchiveIOException), archex:
ex = exception_helper.create_cla_exception('WLSDPLY-20010', program_name, archive_file_name,
archex.getLocalizedMessage(), error=archex)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
optional_arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH] = model_file_name
else:
ex = exception_helper.create_cla_exception('WLSDPLY-20015', program_name,
CommandLineArgUtil.MODEL_FILE_SWITCH,
CommandLineArgUtil.ARCHIVE_FILE_SWITCH)
ex.setExitCode(CommandLineArgUtil.USAGE_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
return | 5,353,242 |
def issetiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a set iterator.
Parameters
----------
object: Any
The object to see if it's a set iterator.
Returns
-------
bool
Whether the given object is a set iterator.
"""
if not isiterable(object):
return False
return isinstance(object, SetIteratorType) | 5,353,243 |
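# Quick check (illustrative): assumes isiterable and SetIteratorType (i.e. type(iter(set())))
# are defined elsewhere in the module, since the function above relies on them.
print(issetiterator(iter({1, 2, 3})))  # True
print(issetiterator(iter([1, 2, 3])))  # False
print(issetiterator({1, 2, 3}))        # False (a set itself is not a set iterator)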
def temporal_discretization(Y, method='ups_downs', kwargs={}):
"""This function acts as a switcher for temporal discretizations and wraps
all the functions which carry out discretization over time of time-series.
Parameters
----------
Y : array_like, shape (N, M)
the signal of each element of the system. M time-series.
method: str
method used for performing temporal discretization.
kwargs: dict
required arguments for the method choosen.
Returns
-------
Yt : array_like, shape (N, M)
discretized activation matrix.
TODO
----
More than one descriptor.
"""
if method == 'ups_downs':
Yt = ups_downs_temporal_discretization_matrix(Y, **kwargs)
else:
raise ValueError("Unknown temporal discretization method: %s" % method)
return Yt | 5,353,244 |
def delete(client: ModelTrainingClient, train_id: str, file: str, ignore_not_found: bool):
"""
Delete a training.\n
For this command, you must provide a training ID or path to file with one training.
The file must contain only one training.
If you want to delete multiples trainings than you should use "legionctl res delete" instead.
For now, CLI supports yaml and JSON file formats.
The command will be failed if you provide both arguments.\n
Usage example:\n
* legionctl train delete --id examples-git\n
* legionctl train delete -f train.yaml
\f
:param client: Model training HTTP client
:param train_id: Model training ID
:param file: Path to the file with only one training
:param ignore_not_found: ignore if Model Training is not found
"""
if not train_id and not file:
raise ValueError('You should provide either a training ID or a file parameter.')
if train_id and file:
raise ValueError('You should provide either a training ID or a file parameter, not both.')
if file:
train = parse_resources_file_with_one_item(file).resource
if not isinstance(train, ModelTraining):
raise ValueError(f'Model training expected, but {type(train)} provided')
train_id = train.id
try:
message = client.delete(train_id)
click.echo(message)
except WrongHttpStatusCode as e:
if e.status_code != 404 or not ignore_not_found:
raise e
click.echo(f'Model training {train_id} was not found. Ignore') | 5,353,245 |
def test_pre_flop_pot(n_players: int, small_blind: int, big_blind: int):
"""Test preflop the state is set up for player 2 to start betting."""
state, pot = _new_game(
n_players=n_players, small_blind=small_blind, big_blind=big_blind,
)
n_bet_chips = sum(p.n_bet_chips for p in state.players)
target = small_blind + big_blind
assert state.player_i == 0 if n_players == 2 else 2
assert state.betting_stage == "pre_flop"
assert (
n_bet_chips == target
), f"small and big blind have not bet! {n_bet_chips} == {target}"
assert (
n_bet_chips == pot.total
), f"small and big blind have are not in pot! {n_bet_chips} == {pot.total}" | 5,353,246 |
def plotModeScatter( pc , xMode=0, yMode=1, zMode=None, pointLabels=None, nTailLabels=3, classes=None):
"""
scatter plot mode projections for up to 3 different modes.
PointLabels is a list of strings corresponding to each shape.
nTailLabels defines number of points that are labelled at the tails of the distributions,
can be 'all' to label all points. Point labels are for 2D plots only.
"""
xWeights = pc.projectedWeights[xMode]
yWeights = pc.projectedWeights[yMode]
colourMap = mpl.cm.gray
if classes==None:
c = 'r'
else:
c = classes
if zMode == None:
fig = plot.figure()
ax = fig.add_subplot(111)
plt = ax.scatter(xWeights,yWeights, c=c, marker='o', cmap=colourMap)
ax.set_title('Scatter: Mode %d vs Mode %d'%(xMode, yMode))
ax.set_xlabel('Mode %d'%(xMode))
ax.set_ylabel('Mode %d'%(yMode))
if pointLabels!=None:
if nTailLabels=='all':
for label, x, y in zip(pointLabels, xWeights, yWeights):
plot.annotate( label, xy=(x,y), xytext=(-5, 5),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
elif isinstance(nTailLabels, int):
# sort weights
xSortedArgs = scipy.argsort(xWeights)
ySortedArgs = scipy.argsort(yWeights)
# label x tails
for i in xSortedArgs[:nTailLabels]:
plot.annotate( pointLabels[i], xy=(xWeights[i],yWeights[i]), xytext=(-5, 5),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
for i in xSortedArgs[-nTailLabels:]:
plot.annotate( pointLabels[i], xy=(xWeights[i],yWeights[i]), xytext=(-5, 5),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
# label y tails
for i in ySortedArgs[:nTailLabels]:
plot.annotate( pointLabels[i], xy=(xWeights[i],yWeights[i]), xytext=(-5, 5),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
for i in ySortedArgs[-nTailLabels:]:
plot.annotate( pointLabels[i], xy=(xWeights[i],yWeights[i]), xytext=(-5, 5),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
else:
raise ValueError, "nTailLabels must be 'all' or an integer"
plot.show()
else:
from mpl_toolkits.mplot3d import Axes3D
fig = plot.figure()
zWeights = pc.projectedWeights[zMode]
ax = fig.add_subplot(111, projection='3d')
plt = ax.scatter(xWeights,yWeights, zWeights, c =c, marker='o', cmap=colourMap)
ax.set_title('3D Scatter')
ax.set_xlabel('Mode %d'%(xMode))
ax.set_ylabel('Mode %d'%(yMode))
ax.set_zlabel('Mode %d'%(zMode))
plot.show()
return fig, plt | 5,353,247 |
def log_benchmarks(benchmarks):
"""Record all the benchmarks in the log"""
log.debug('Benchmarks')
log.debug('==========')
for bench in benchmarks:
log.debug(bench)
log.debug('') | 5,353,248 |
def cron_worker(request):
"""Parse JSON/request arguments and start ingest for a single date export"""
request_json = request.get_json(silent=True)
request_args = request.args
if request_json and 'image' in request_json:
image_id = request_json['image']
elif request_args and 'image' in request_args:
image_id = request_args['image']
else:
abort(400, description='"image" parameter not set')
# TODO: Add additional image ID format checking
if not re.match('L[TEC]0[4578]_\d{6}_\d{8}', image_id.split('/')[-1], re.I):
abort(400, description=f'Image ID {image_id} could not be parsed')
elif not re.match('LANDSAT/L[TEC]0[4578]/C0[12]/T1\w+', image_id, re.I):
abort(400, description=f'Image ID {image_id} could not be parsed')
if request_json and 'overwrite' in request_json:
overwrite_flag = request_json['overwrite']
elif request_args and 'overwrite' in request_args:
overwrite_flag = request_args['overwrite']
else:
overwrite_flag = 'true'
if overwrite_flag.lower() in ['true', 't']:
overwrite_flag = True
elif overwrite_flag.lower() in ['false', 'f']:
overwrite_flag = False
else:
abort(400, description=f'overwrite="{overwrite_flag}" could not be parsed')
response = tcorr_gridded_asset_ingest(
image_id=image_id, gee_key_file=GEE_KEY_FILE,
overwrite_flag=overwrite_flag)
return Response(response, mimetype='text/plain') | 5,353,249 |
def test_debug():
"""Log an debug message"""
print("This message is a %s" % colors.debug("debug message.")) | 5,353,250 |
def test_r1t6(capsys):
"""Check that you cannot transfer between accounts before logging in
Arguments:
capsys -- object created by pytest to capture stdout and stderr
"""
helper(
capsys=capsys,
terminal_input=['transfer', 'login', 'atm', 'logout', 'No'],
intput_valid_accounts=['1234568'],
expected_tail_of_terminal_output=['Thank you for using Quinterac, have a nice day!'],
expected_output_transactions=['EOS 0000000 000 0000000 ***']
) | 5,353,251 |
def calculatetm(seq):
""" Calculate Tm of a target candidate, nearest neighbor model """
NNlist = chopseq(seq, 2, 1)
NNtable = ['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT']
NNendtable = ['A', 'C', 'G', 'T']
NNcount = np.zeros(16)
NNend = np.zeros(4)
for c, NN in enumerate(NNtable):
NNcount[c] = NNlist.count(NN)
for c, NN in enumerate(NNendtable):
NNend[c] = seq[0].count(NN)
# numbers below from Sugimoto et al. NAR (1996)
NNEnthalpy = np.array([-8.0, -9.4, -6.6, -5.6, -8.2, -10.9, -11.8, -6.6, -8.8, -10.5, -10.9, -9.4, -6.6, -8.8, -8.2, -8.0])
NNEntropy = np.array([-21.9, -25.5, -16.4, -15.2, -21.0, -28.4, -29.0, -16.4, -23.5, -26.4, -28.4, -25.5, -18.4, -23.5, -21.0, -21.9])
NNendEnthalpy = np.array([.6, .6, .6, .6])
NNendEntropy = np.array([-9.0, -9.0, -9.0, -9.0])
sumEnthalpy = np.sum(np.multiply(NNcount, NNEnthalpy)) + np.sum(np.multiply(NNend, NNendEnthalpy))
sumEntropy = np.sum(np.multiply(NNcount, NNEntropy)) + np.sum(np.multiply(NNend, NNendEntropy))
Tm = (sumEnthalpy * 1000)/(sumEntropy + (1.9872 * math.log(1e-7))) - 273.15 # oligo concentration: 1e-7 M
sumSalt = 0.075 + (3.795 * 0.01**0.5) # monovalent: 0.075 M, bivalent: 0.01 M
Tm += 16.6 * math.log10(sumSalt) # salt correction
Tm -= 0.72 * 20 # formamide correction
return Tm | 5,353,252 |
def share_article_to_group(user, base_list_id, article_id, group_id, target_list_id):
"""
@api {post} /user/list/:id/article/:id/share/group/:id/list/:id Share a article to group list.
@apiName Share a article into a group list.
@apiGroup Share
@apiUse AuthorizationTokenHeader
@apiUse UnauthorizedAccessError
@apiUse ResourceDoesNotExist
"""
app.logger.info('User {} Access {}'.format(user, request.full_path))
result = MongoUtil.share_article_to_group_list(user, base_list_id, article_id, group_id, target_list_id)
if isinstance(result, str):
app.logger.debug(result)
return ResponseUtil.error_response(result)
app.logger.info('User {} share article {} to group {}'.format(user, article_id, group_id))
return jsonify(msg='Success') | 5,353,253 |
def _patch_command_processing(command_plugin):
"""
Patches the command processing functionality to work with promises.
:param command_plugin: command plugin to modify.
:return:
"""
if hasattr(command_plugin.__class__, '_old_process_command_response'):
logger.debug('Already patched')
return
command_plugin.__class__._old_process_command_response = command_plugin.__class__._process_command_response
def new_command_response(self, iq, session):
promise_or_session = session
# Can assume that the promise_or_session is a promise.
if hasattr(promise_or_session, 'then'):
logger.debug('Handling a promise')
def promise_handler(_session):
logger.debug('Promise resolved')
self._old_process_command_response(iq, _session)
promise_or_session.then(promise_handler)
else:
logger.debug('Handling default path')
self._old_process_command_response(iq, promise_or_session)
command_plugin.__class__._process_command_response = new_command_response
logger.debug('Patching the command processing plugin') | 5,353,254 |
def get_data_shape(X_train, X_test, X_val=None):
"""
Creates, updates and returns data_dict containing metadata of the dataset
"""
# Creates data_dict
data_dict = {}
    # Updates data_dict with length of training, test, validation sets
train_len = len(X_train)
test_len = len(X_test)
data_dict.update({'train_len': train_len, 'test_len': test_len})
if X_val is not None:
val_len = len(X_val)
data_dict.update({'val_len': val_len})
# else : val_len = None
# Updates number of dimensions of data
no_of_dim = X_train.ndim
data_dict.update({'no_of_dim': no_of_dim})
# Updates number of features(, number of channels, width, height)
if no_of_dim == 2:
no_of_features = X_train.shape[1]
data_dict.update({'no_of_features': no_of_features})
elif no_of_dim == 3:
channels = X_train.shape[1]
features_per_c = X_train.shape[2]
no_of_features = channels * features_per_c
data_dict.update({'no_of_features': no_of_features,
'channels': channels,
'features_per_c': features_per_c})
elif no_of_dim == 4:
channels = X_train.shape[1]
height = X_train.shape[2]
width = X_train.shape[3]
features_per_c = height*width
no_of_features = channels*features_per_c
data_dict.update({'height':height, 'width':width, 'channels':channels,
'features_per_c':features_per_c,
'no_of_features':no_of_features})
return data_dict | 5,353,255 |
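A small illustrative call, assuming NumPy arrays in a channels-first image layout:

import numpy as np

X_train = np.zeros((100, 3, 32, 32))
X_test = np.zeros((20, 3, 32, 32))
info = get_data_shape(X_train, X_test)
# info -> {'train_len': 100, 'test_len': 20, 'no_of_dim': 4,
#          'height': 32, 'width': 32, 'channels': 3,
#          'features_per_c': 1024, 'no_of_features': 3072}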
def setupAnnotations(context):
"""
set up the annotations if they haven't been set up
already. The rest of the functions in here assume that
this has already been set up
"""
annotations = IAnnotations(context)
    if FAVBY not in annotations:
annotations[FAVBY] = PersistentList()
return annotations | 5,353,256 |
def _get_media(media_types):
"""Helper method to map the media types."""
get_mapped_media = (lambda x: maps.VIRTUAL_MEDIA_TYPES_MAP[x]
if x in maps.VIRTUAL_MEDIA_TYPES_MAP else None)
return list(map(get_mapped_media, media_types)) | 5,353,257 |
def get_int_property(device_t, property):
""" Search the given device for the specified string property
@param device_t Device to search
@param property String to search for.
@return Python string containing the value, or None if not found.
"""
key = cf.CFStringCreateWithCString(
kCFAllocatorDefault,
property.encode("mac_roman"),
kCFStringEncodingMacRoman
)
CFContainer = iokit.IORegistryEntryCreateCFProperty(
device_t,
key,
kCFAllocatorDefault,
0
    )
number = ctypes.c_uint16()
if CFContainer:
output = cf.CFNumberGetValue(CFContainer, 2, ctypes.byref(number))
return number.value | 5,353,258 |
def _get_unique_barcode_ids(pb_index, isoseq_mode=False):
"""
Get a list of sorted, unique fw/rev barcode indices from an index object.
"""
bc_sel = (pb_index.bcForward != -1) & (pb_index.bcReverse != -1)
bcFw = pb_index.bcForward[bc_sel]
bcRev = pb_index.bcReverse[bc_sel]
bc_ids = sorted(list(set(zip(bcFw, bcRev))))
if isoseq_mode:
bc_ids = sorted(list(set([tuple(sorted(bc)) for bc in bc_ids])))
return bc_ids | 5,353,259 |
def AddCommonArguments(parser):
"""Adds arguments common to parsers.
Args:
parser: ArgumentParser object, used to parse flags.
"""
parser.add_argument("--email",
type=str,
dest="email",
help="Email account to use for authentcation.")
parser.add_argument(
"--config_file",
type=str,
dest="config_file",
default=DEFAULT_CONFIG_FILE,
help="Path to the config file, default to acloud.config"
"in the current working directory")
parser.add_argument("--report_file",
type=str,
dest="report_file",
default=None,
help="Dump the report this file in json format. "
"If not specified, just log the report")
parser.add_argument("--log_file",
dest="log_file",
type=str,
default=None,
help="Path to log file.")
parser.add_argument("-v",
dest="verbose",
action="store_true",
default=False,
help="Verbose mode")
parser.add_argument("-vv",
dest="very_verbose",
action="store_true",
default=False,
help="Very verbose mode") | 5,353,260 |
def tests(session_: Session, django: str) -> None:
"""Run the test suite."""
requirements = Path("requirements.txt")
session_.run(
"poetry",
"export",
f"-o{requirements}",
"--dev",
"--without-hashes",
external=True,
)
session_.install(f"-r{requirements}")
session_.install(f"django=={django}")
session_.run("python", "-m", "pytest")
requirements.unlink()
try:
session_.run("coverage", "run", "--parallel", "-m", "pytest", *session_.posargs)
finally:
if session_.interactive:
session_.notify("coverage") | 5,353,261 |
def clean_logs(test_yaml, args):
"""Remove the test log files on each test host.
Args:
test_yaml (str): yaml file containing host names
args (argparse.Namespace): command line arguments for this program
"""
# Use the default server yaml and then the test yaml to update the default
# DAOS log file locations. This should simulate how the test defines which
# log files it will use when it is run.
log_files = get_log_files(test_yaml, get_log_files(BASE_LOG_FILE_YAML))
host_list = get_hosts_from_yaml(test_yaml, args)
command = "sudo rm -fr {}".format(" ".join(log_files.values()))
print("Cleaning logs on {}".format(host_list))
if not spawn_commands(host_list, command):
print("Error cleaning logs, aborting")
return False
return True | 5,353,262 |
def call_posterior_haplotypes(posteriors, threshold=0.01):
"""Call haplotype alleles for VCF output from a population
of genotype posterior distributions.
Parameters
----------
posteriors : list, PosteriorGenotypeDistribution
A list of individual genotype posteriors.
threshold : float
Minimum required posterior probability of occurrence
with in any individual for a haplotype to be included.
Returns
-------
haplotypes : ndarray, int, shape, (n_haplotypes, n_base)
VCF sorted haplotype arrays.
"""
# maps of bytes to arrays and bytes to sum probs
haplotype_arrays = {}
haplotype_values = {}
    # iterate through genotype posteriors
for post in posteriors:
# include haps based on probability of occurrence
(
haps,
probs,
) = post.allele_occurrence()
_, weights = post.allele_frequencies(dosage=True)
idx = probs >= threshold
# order haps based on weighted prob
haps = haps[idx]
weights = weights[idx]
for h, w in zip(haps, weights):
b = h.tobytes()
if b not in haplotype_arrays:
haplotype_arrays[b] = h
haplotype_values[b] = 0
haplotype_values[b] += w
# remove reference allele if present
refbytes = None
for b, h in haplotype_arrays.items():
if np.all(h == 0):
# ref allele
refbytes = b
if refbytes is not None:
haplotype_arrays.pop(refbytes)
haplotype_values.pop(refbytes)
# combine all called haplotypes into array
n_alleles = len(haplotype_arrays) + 1
n_base = posteriors[0].genotypes.shape[-1]
haplotypes = np.full((n_alleles, n_base), -1, np.int8)
values = np.full(n_alleles, -1, float)
for i, (b, h) in enumerate(haplotype_arrays.items()):
p = haplotype_values[b]
haplotypes[i] = h
values[i] = p
haplotypes[-1][:] = 0 # ref allele
values[-1] = values.max() + 1
order = np.flip(np.argsort(values))
return haplotypes[order] | 5,353,263 |
def to_linprog(x, y, xy_dist) -> LinProg:
"""
Parameters
----------
x : ndarray
1 - dimensional array of weights
y : ndarray
1 - dimensional array of weights
xy_dist : ndarray
2 - dimensional array containing distances between x and y density coordinates
    Returns
    -------
    LinProg

    Notes
    -----
    This was sometimes flaking out when called with single-precision matrices
    because of numerical instability in the scipy _presolve step when eliminating
    redundant constraints, so ensure sufficient precision.

    TODO: use sparse A_eq, A_ub matrices
    """
# constant used in scipy.optimize._remove_redundancy
tol = 1e-8
assert np.abs(x.sum() - y.sum()) < tol, "x and y must be close to avoid instability"
assert xy_dist.shape[0] == x.shape[0]
assert xy_dist.shape[1] == y.shape[0]
x_dim = x.shape[0]
y_dim = y.shape[0]
c = xy_dist.flatten()
A_eq = []
b_eq = []
for i in range(x_dim):
constraint = np.zeros(xy_dist.shape)
constraint[i] = 1.0
A_eq.append(constraint.flatten())
b_eq.append(x[i])
for i in range(y_dim):
constraint = np.zeros(xy_dist.shape)
constraint[:, i] = 1.0
A_eq.append(constraint.flatten())
b_eq.append(y[i])
A_ub = np.diag(-np.ones(x_dim * y_dim))
b_ub = np.zeros(x_dim * y_dim)
return LinProg(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=np.array(A_eq), b_eq=np.array(b_eq)) | 5,353,264 |
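A small usage sketch, assuming LinProg is a namedtuple-like container with the fields c, A_ub, b_ub, A_eq and b_eq constructed above:

import numpy as np
from scipy.optimize import linprog

x = np.array([0.5, 0.5])
y = np.array([0.25, 0.75])
xy_dist = np.array([[0.0, 1.0],
                    [1.0, 0.0]])
lp = to_linprog(x, y, xy_dist)
res = linprog(lp.c, A_ub=lp.A_ub, b_ub=lp.b_ub, A_eq=lp.A_eq, b_eq=lp.b_eq)
# res.x is the optimal transport plan (flattened), res.fun its total cost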
def get_discorded_labels():
"""
Get videos with citizen discorded labels
Partial labels will only be set by citizens
"""
return get_video_labels(discorded_labels) | 5,353,265 |
def set_common_tags(span: object, result: object):
"""Function used to set a series of common tags
to a span object"""
if not isinstance(result, dict):
return span
for key, val in result.items():
if key.lower() in common_tags:
span.set_tag(key, val)
return span | 5,353,266 |
def local_timezone():
"""
Returns:
(str): Name of current local timezone
"""
try:
return time.tzname[0]
except (IndexError, TypeError):
return "" | 5,353,267 |
def ask_credentials():
"""Interactive function asking the user for ASF credentials
:return: tuple of username and password
:rtype: tuple
"""
# SciHub account details (will be asked by execution)
print(
" If you do not have a ASF/NASA Earthdata user account"
" go to: https://search.asf.alaska.edu/ and register"
)
uname = input(" Your ASF/NASA Earthdata Username:")
pword = getpass.getpass(" Your ASF/NASA Earthdata Password:")
return uname, pword | 5,353,268 |
def lab2lch(lab):
"""CIE-LAB to CIE-LCH color space conversion.
LCH is the cylindrical representation of the LAB (Cartesian) colorspace
Parameters
----------
lab : array_like
The N-D image in CIE-LAB format. The last (``N+1``-th) dimension must
have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``
color channels. Subsequent elements are copied.
Returns
-------
out : ndarray
The image in LCH format, in a N-D array with same shape as input `lab`.
Raises
------
ValueError
        If `lab` does not have at least 3 color channels (i.e. l, a, b).
Notes
-----
The Hue is expressed as an angle between ``(0, 2*pi)``
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2lab, lab2lch
>>> img = data.astronaut()
>>> img_lab = rgb2lab(img)
>>> img_lch = lab2lch(img_lab)
"""
lch = _prepare_lab_array(lab)
a, b = lch[..., 1], lch[..., 2]
lch[..., 1], lch[..., 2] = _cart2polar_2pi(a, b)
return lch | 5,353,269 |
def get_valid_principal_commitments(principal_id=None, consumer_id=None):
"""
    Returns the list of valid commitments for the specified principal (org or actor).
If optional consumer_id (actor) is supplied, then filtered by consumer_id
"""
log.debug("Finding commitments for principal: %s", principal_id)
if principal_id is None:
return None
try:
gov_controller = bootstrap.container_instance.governance_controller
commitments, _ = gov_controller.rr.find_objects(principal_id, PRED.hasCommitment, RT.Commitment, id_only=False)
if not commitments:
return None
cur_time = get_ion_ts_millis()
        commitment_list = [com for com in commitments if (consumer_id is None or com.consumer == consumer_id) and \
(int(com.expiration) == 0 or (int(com.expiration) > 0 and cur_time < int(com.expiration)))]
if commitment_list:
return commitment_list
except Exception:
log.exception("Could not determine actor resource commitments")
return None | 5,353,270 |
def callattice(twotheta, energy_kev=17.794, hkl=(1, 0, 0)):
"""
Calculate cubic lattice parameter, a from reflection two-theta
:param twotheta: Bragg angle, deg
:param energy_kev: energy in keV
    :param hkl: reflection (cubic only)
    :return: float, lattice constant
"""
qmag = calqmag(twotheta, energy_kev)
dspace = q2dspace(qmag)
return dspace * np.sqrt(np.sum(np.square(hkl))) | 5,353,271 |
def reset_password(
*,
db: Session = Depends(get_db),
current_user: User = Depends(get_current_active_user),
background_tasks: BackgroundTasks,
):
"""reset current user password"""
email = current_user.email
# send confirm email
if settings.EMAILS_ENABLED and email:
confirm_token = create_access_token(
subject=email, expires_delta=timedelta(settings.EMAIL_CONFIRM_TOKEN_EXPIRE)
)
background_tasks.add_task(
send_reset_password_email, email_to=email, token=confirm_token
)
return {"msg": "Password reset email sent"} | 5,353,272 |
def rule_valid_histone_target(attr):
""" {
"applies" : ["ChIP-Seq", "experiment_target_histone"],
"description" : "'experiment_target_histone' attributes must be 'NA' only for ChIP-Seq Input"
} """
histone = attr.get('experiment_target_histone', [''])[0]
if attr.get('experiment_type', [""])[0].lower() in ['ChIP-Seq Input'.lower()]:
return histone == 'NA'
else:
return histone != 'NA' | 5,353,273 |
def extend(curve: CustomCurve, deg):
"""returns curve over the deg-th relative extension"""
E = curve.EC
q = curve.q
K = curve.field
if q % 2 != 0:
R = K["x"]
pol = R.irreducible_element(deg)
Fext = GF(q ** deg, name="z", modulus=pol)
return E.base_extend(Fext)
charac = K.characteristic()
R = GF(charac)["x"]
ext_deg = q ** deg
pol = R.irreducible_element(deg * ZZ(log(q, charac)))
Kext = GF(ext_deg, name="ex", modulus=pol)
gKext = Kext.gen()
h = gKext ** ((ext_deg - 1) // (q - 1))
assert charac ** (h.minpoly().degree()) == q
H = GF(q, name="h", modulus=h.minpoly())
inclusion = H.hom([h])
new_coefficients = [
inclusion(stupid_coerce_K_to_L(a, K, H)) for a in E.a_invariants()
]
EE = EllipticCurve(Kext, new_coefficients)
return EE | 5,353,274 |
def flatten3D(inputs: tf.Tensor) -> tf.Tensor:
"""
Flatten the given ``inputs`` tensor to 3 dimensions.
:param inputs: >=3d tensor to be flattened
:return: 3d flatten tensor
"""
shape = inputs.get_shape().as_list()
if len(shape) == 3:
return inputs
assert len(shape) > 3
return tf.reshape(inputs, [tf.shape(inputs)[0], tf.shape(inputs)[1], np.prod(inputs.get_shape().as_list()[2:])]) | 5,353,275 |
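An illustrative call in eager mode; the first two axes are kept and the remaining axes are flattened (numpy is imported as well because flatten3D uses np.prod internally):

import numpy as np
import tensorflow as tf

x = tf.zeros([2, 5, 4, 3])
y = flatten3D(x)
# y.shape -> (2, 5, 12)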
def get_classes(dataset):
"""Get class names of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_classes()')
else:
raise ValueError('Unrecognized dataset: {}'.format(dataset))
else:
        raise TypeError('dataset must be a str, but got {}'.format(type(dataset)))
return labels | 5,353,276 |
def split_exclude_string(people):
"""
Function to split a given text of persons' name who wants to exclude
with comma separated for each name e.g. ``Konrad, Titipat``
"""
people = people.replace('Mentor: ', '').replace('Lab-mates: ', '').replace('\r\n', ',').replace(';', ',')
people_list = people.split(',')
    return [p.strip() for p in people_list if p.strip() != ''] | 5,353,277 |
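For example (illustrative names only):

names = split_exclude_string("Mentor: Konrad; Titipat\r\nLab-mates: Alice, Bob")
# -> ['Konrad', 'Titipat', 'Alice', 'Bob']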
def now(tz=DEFAULT_TZ):
"""
Get the current datetime.
:param tz: The preferred time-zone, defaults to DEFAULT_TZ
:type tz: TzInfo (or similar pytz time-zone)
:return: A time-zone aware datetime set to now
:rtype: datetime
"""
return datetime.now(tz=tz) | 5,353,278 |
def egd_add_metatags(context):
"""Override `Web Page` Doctype template if CMS page."""
if is_app_for_actual_site():
# Allow CMS webpages
if context.doctype == "Web Page":
context.template = "templates/web.html"
# Override metatags
if not "metatags" in context:
context.metatags = frappe._dict({})
context.metatags["lang"] = frappe.local.lang
context.metatags["url"] = context.url
context.metatags["og:url"] = context.url
# If blog image or no default use the "summary_large_image" value
if "image" in context.metatags and context.metatags["image"]:
context.metatags["twitter:card"] = "summary_large_image"
else:
context.metatags["image"] = frappe.utils.get_url() + "/assets/egd_site/images/logo-square.png"
context.metatags["twitter:card"] = "summary"
if not "title" in context.metatags:
context.metatags["title"] = ""
if "meta_title" in context:
context.metatags["title"] = context["meta_title"]
elif context.title:
context.metatags["title"] = context.title
# Add title suffix except for home
if context["path"] != "":
from frappe import _
context.metatags["title"] += " | " + _("hero:title")
if not "description" in context.metatags and "meta_description" in context:
context.metatags["description"] = context["meta_description"]
frappe_add_metatags(context) | 5,353,279 |
def MultMat(dest, A, B):
""" Multiply two matrices together. Store result in "dest".
"""
I = len(A)
J = len(B[0])
K = len(B) # or len(A[0])
for i in range(0,I):
for j in range(0,J):
dest[i][j] = 0.0
for k in range(0,K):
dest[i][j] += A[i][k] * B[k][j] | 5,353,280 |
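A quick sanity check with plain nested lists, multiplying a 2x3 by a 3x2 matrix into a preallocated result:

A = [[1, 2, 3],
     [4, 5, 6]]
B = [[7, 8],
     [9, 10],
     [11, 12]]
dest = [[0.0, 0.0], [0.0, 0.0]]
MultMat(dest, A, B)
# dest -> [[58.0, 64.0], [139.0, 154.0]]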
def peek_with_kwargs(init, args=[], permissive=False):
"""
Make datatypes passing keyworded arguments to the constructor.
This is a factory function; returns the actual `peek` routine.
Arguments:
init (callable): type constructor.
args (iterable): arguments NOT to be keyworded; order does matter.
permissive (bool): missing positional arguments are set to None (*new in 0.8.5*).
Returns:
callable: deserializer (`peek` routine).
All the peeked attributes that are not referenced in `args` are passed to `init` as
keyworded arguments.
"""
if permissive:
def try_peek(store, attr, container, _stack=None):
try:
return store.peek(attr, container, _stack=_stack)
except KeyError:
return None
def peek(store, container, _stack=None):
return init(\
*[ try_peek(store, attr, container, _stack) for attr in args ], \
**dict([ (attr, store.peek(attr, container, _stack=_stack)) \
for attr in container if attr not in args ]))
else:
def peek(store, container, _stack=None):
return init(\
*[ store.peek(attr, container, _stack=_stack) for attr in args ], \
**dict([ (attr, store.peek(attr, container, _stack=_stack)) \
for attr in container if attr not in args ]))
return peek | 5,353,281 |
def idc_asset_manage(request,aid=None,action=None):
"""
Manage IDC
"""
if request.user.has_perms(['asset.view_asset', 'asset.edit_asset']):
page_name = ''
if aid:
idc_list = get_object_or_404(IdcAsset, pk=aid)
if action == 'edit':
                page_name = 'Edit IDC server room'
if action == 'delete':
idc_list.delete()
return redirect('idc_asset_list')
else:
idc_list = IdcAsset()
action = 'add'
            page_name = 'Add IDC server room'
if request.method == 'POST':
form = IdcAssetForm(request.POST,instance=idc_list)
if form.is_valid():
if action == 'add':
form.save()
return redirect('idc_asset_list')
if action == 'edit':
form.save()
return redirect('idc_asset_list')
else:
form = IdcAssetForm(instance=idc_list)
return render(request, 'asset_idc_manage.html', {"form":form, "page_name":page_name, "action":action})
else:
raise Http404 | 5,353,282 |
def set_justspeaklasttime(speackdata):
"""
    Records the time of the speaker's last pause
"""
data_file_path = os.getcwd() + '/just_speack_data.json'
if not os.path.exists(data_file_path):
with open(data_file_path, 'w', encoding='UTF-8') as data_file:
data_file.write(json.dumps({}))
data_file.close()
with open(data_file_path, 'r', encoding='UTF-8') as data_file:
justspeackdata = json.loads(data_file.read())
data_file.close()
justspeackdata[speackdata]['pause_last'] = time.time()
with open(data_file_path, 'w', encoding='UTF-8') as data_file:
        data_file.write(json.dumps(justspeackdata))
data_file.close()
return True | 5,353,283 |
def get_detected_column_types(df):
""" Get data type of each columns ('DATETIME', 'NUMERIC' or 'STRING')
Parameters:
df (df): pandas dataframe
Returns
df (df): dataframe that all datatypes are converted (df)
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
for c in df.columns:
# Convert column to string
col_data = df[c].map(str)
col_data = col_data.replace("NaT", None)
col_data = col_data.replace("NaN", None)
# Check NULL column
if(df[c].isnull().values.all()):
continue
# Check DATETIME
try:
# Check if it's able to convert column to datetime
# if column is datetime, then skip to convert
            if 'datetime' in str(df[c].dtype):
continue
df[c] = pd.to_datetime(col_data)
continue
except ValueError:
pass
# Check NUMERIC
try:
# Drop NaN rows
series = df[c].dropna()
            # if column is int or float, then skip to convert
            if 'int' in str(df[c].dtype) or 'float' in str(df[c].dtype):
continue
# Check if it can be converted to numeric
df[c] = pd.to_numeric(series)
except ValueError:
pass
return df | 5,353,284 |
def set_bin_path():
""" Sets the juju binary path
"""
candidates = [
'/snap/bin/juju',
'/snap/bin/conjure-up.juju',
'/usr/bin/juju',
'/usr/local/bin/juju',
]
_check_bin_candidates(candidates, 'bin_path')
# Update $PATH so that we make sure this candidate is used
# first.
app.env['PATH'] = "{}:{}".format(Path(app.juju.bin_path).parent,
app.env['PATH']) | 5,353,285 |
def distance(coords):
"""Calculates the distance of a path between multiple points
Arguments:
coords -- List of coordinates, e.g. [(0,0), (1,1)]
Returns: Total distance as a float
"""
distance = 0
for p1, p2 in zip(coords[:-1], coords[1:]):
distance += ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5
return distance | 5,353,286 |
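For example, walking the two legs of a 3-4-5 right triangle:

print(distance([(0, 0), (3, 0), (3, 4)]))  # 7.0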
def roll_dice(dicenum, dicetype, modifier=None, conditional=None, return_tuple=False):
"""
This is a standard dice roller.
Args:
dicenum (int): Number of dice to roll (the result to be added).
dicetype (int): Number of sides of the dice to be rolled.
modifier (tuple): A tuple `(operator, value)`, where operator is
one of `"+"`, `"-"`, `"/"` or `"*"`. The result of the dice
roll(s) will be modified by this value.
conditional (tuple): A tuple `(conditional, value)`, where
conditional is one of `"=="`,`"<"`,`">"`,`">="`,`"<=`" or "`!=`".
This allows the roller to directly return a result depending
on if the conditional was passed or not.
return_tuple (bool): Return a tuple with all individual roll
results or not.
Returns:
roll_result (int): The result of the roll + modifiers. This is the
default return.
condition_result (bool): A True/False value returned if `conditional`
is set but not `return_tuple`. This effectively hides the result
of the roll.
        full_result (tuple): If `return_tuple` is `True`, instead
return a tuple `(result, outcome, diff, rolls)`. Here,
`result` is the normal result of the roll + modifiers.
`outcome` and `diff` are the boolean result of the roll and
absolute difference to the `conditional` input; they will
be will be `None` if `conditional` is not set. `rolls` is
itself a tuple holding all the individual rolls in the case of
multiple die-rolls.
Raises:
TypeError if non-supported modifiers or conditionals are given.
Notes:
All input numbers are converted to integers.
Examples:
print roll_dice(2, 6) # 2d6
<<< 7
        print roll_dice(1, 100, ('+', 5)) # 1d100 + 5
        <<< 34
        print roll_dice(1, 20, conditional=('<', 10)) # let's say we roll 3
<<< True
print roll_dice(3, 10, return_tuple=True)
<<< (11, None, None, (2, 5, 4))
print roll_dice(2, 20, ('-', 2), conditional=('>=', 10), return_tuple=True)
<<< (8, False, 2, (4, 6)) # roll was 4 + 6 - 2 = 8
"""
dicenum = int(dicenum)
dicetype = int(dicetype)
# roll all dice, remembering each roll
rolls = tuple([randint(1, dicetype) for roll in range(dicenum)])
result = sum(rolls)
if modifier:
# make sure to check types well before eval
mod, modvalue = modifier
if mod not in ('+', '-', '*', '/'):
raise TypeError("Non-supported dice modifier: %s" % mod)
modvalue = int(modvalue) # for safety
result = eval("%s %s %s" % (result, mod, modvalue))
outcome, diff = None, None
if conditional:
# make sure to check types well before eval
cond, condvalue = conditional
if cond not in ('>', '<', '>=', '<=', '!=', '=='):
raise TypeError("Non-supported dice result conditional: %s" % conditional)
condvalue = int(condvalue) # for safety
outcome = eval("%s %s %s" % (result, cond, condvalue)) # True/False
diff = abs(result - condvalue)
if return_tuple:
return result, outcome, diff, rolls
else:
if conditional:
return outcome
else:
return result | 5,353,287 |
def build_central_hierarchical_histogram_computation(
lower_bound: float,
upper_bound: float,
num_bins: int,
arity: int = 2,
max_records_per_user: int = 1,
epsilon: float = 1,
delta: float = 1e-5,
secure_sum: bool = False):
"""Create the tff federated computation for central hierarchical histogram aggregation.
Args:
lower_bound: A `float` specifying the lower bound of the data range.
upper_bound: A `float` specifying the upper bound of the data range.
num_bins: The integer number of bins to compute.
arity: The branching factor of the tree. Defaults to 2.
max_records_per_user: The maximum number of records each user is allowed to
contribute. Defaults to 1.
epsilon: Differential privacy parameter. Defaults to 1.
delta: Differential privacy parameter. Defaults to 1e-5.
secure_sum: A boolean deciding whether to use secure aggregation. Defaults
to `False`.
Returns:
A tff.federated_computation function to perform central tree aggregation.
"""
if upper_bound < lower_bound:
raise ValueError(f'upper_bound: {upper_bound} is smaller than '
f'lower_bound: {lower_bound}.')
if num_bins <= 0:
raise ValueError(f'num_bins: {num_bins} smaller or equal to zero.')
if arity < 2:
    raise ValueError(f'Arity should be at least 2. ' f'arity={arity} is given.')
if max_records_per_user < 1:
raise ValueError(f'Maximum records per user should be at least 1. '
f'max_records_per_user={max_records_per_user} is given.')
if epsilon < 0 or delta < 0 or delta > 1:
raise ValueError(f'Privacy parameters in wrong range: '
f'(epsilon, delta): ({epsilon}, {delta})')
if epsilon == 0.:
stddev = 0.
else:
stddev = max_records_per_user * _find_noise_multiplier(
epsilon, delta, steps=math.ceil(math.log(num_bins, arity)))
central_tree_aggregation_factory = hierarchical_histogram_factory.create_central_hierarchical_histogram_factory(
stddev, arity, max_records_per_user, secure_sum=secure_sum)
return _build_hierarchical_histogram_computation(
lower_bound, upper_bound, num_bins, central_tree_aggregation_factory) | 5,353,288 |
def initialise_halo_params():
"""Initialise the basic parameters needed to simulate a forming Dark matter halo.
Args:
None
Returns:
G: gravitational constant.
epsilon: softening parameter.
limit: width of the simulated universe.
radius: simulated radius of each particle
(for proper handling of boundary conditions).
num_pos_particles: number of positive mass particles.
num_neg_particles: number of negative mass particles.
chunks_value: dask chunks value.
time_steps: number of time steps to simulate.
"""
G = 1.0
epsilon = 0.07
limit = 80000
radius = 4
num_pos_particles = 5000
num_neg_particles = 45000
chunks_value = (num_pos_particles+num_neg_particles)/5.0
time_steps = 1000
return G, epsilon, limit, radius, num_pos_particles, num_neg_particles, chunks_value, time_steps | 5,353,289 |
def read_data(image_paths, label_list, image_size, batch_size, max_nrof_epochs, num_threads, shuffle, random_flip,
random_brightness, random_contrast):
"""
Creates Tensorflow Queue to batch load images. Applies transformations to images as they are loaded.
:param image_paths: image paths to load
:param label_list: class labels for image paths
:param image_size: size to resize images to
:param batch_size: num of images to load in batch
:param max_nrof_epochs: total number of epochs to read through image list
:param num_threads: num threads to use
:param shuffle: Shuffle images
:param random_flip: Random Flip image
:param random_brightness: Apply random brightness transform to image
:param random_contrast: Apply random contrast transform to image
:return: images and labels of batch_size
"""
images = ops.convert_to_tensor(image_paths, dtype=tf.string)
labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
# Makes an input queue
input_queue = tf.train.slice_input_producer((images, labels),
num_epochs=max_nrof_epochs, shuffle=shuffle, )
images_labels = []
imgs = []
lbls = []
for _ in range(num_threads):
image, label = read_image_from_disk(filename_to_label_tuple=input_queue)
image = tf.random_crop(image, size=[image_size, image_size, 3])
image.set_shape((image_size, image_size, 3))
image = tf.image.per_image_standardization(image)
if random_flip:
image = tf.image.random_flip_left_right(image)
if random_brightness:
image = tf.image.random_brightness(image, max_delta=0.3)
if random_contrast:
image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
imgs.append(image)
lbls.append(label)
images_labels.append([image, label])
image_batch, label_batch = tf.train.batch_join(images_labels,
batch_size=batch_size,
capacity=4 * num_threads,
enqueue_many=False,
allow_smaller_final_batch=True)
return image_batch, label_batch | 5,353,290 |
def test_run_retries():
"""Should retry until a success condition is reached"""
responses = [httpretty.Response(body="Internal Server Error", status=500),
httpretty.Response(body="Internal Server Error", status=500),
httpretty.Response(body="<html></html>", status=200)]
httpretty.register_uri(httpretty.GET, URL, responses=responses)
with mock.patch('httsleep.main.sleep') as mock_sleep:
resp = HttSleeper(URL, {'status_code': 200}).run()
assert mock_sleep.called
assert mock_sleep.call_count == 2
assert resp.status_code == 200
assert resp.text == '<html></html>' | 5,353,291 |
def reorder_matrix (m, d) :
"""
Reorder similarity matrix : put species in same cluster together.
INPUT:
m - similarity matrix
d - medoid dictionary : {medoid : [list of species index in cluster]}
OUTPUT :
m in new order
new_order - order of species indexes in matrix
"""
new_order = []
for i, med_class in enumerate(d.values()):
new_order.append(med_class)
return m[np.concatenate(new_order), :], new_order | 5,353,292 |
def extract_sound(video_filename):
"""Given the name of a video, extract the sound to a .wav file, and return the filename of the new file."""
# Generate a filename for the temporary audio file
with NamedTemporaryFile(suffix='.wav') as tf:
wave_filename = tf.name
# Extract the sound from the video using ffmpeg
subprocess.run(['ffmpeg', '-i', video_filename, '-vn', wave_filename],
check=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
return wave_filename | 5,353,293 |
def init_plugins(config_path: Optional[str] = None) -> None:
"""
init_plugins loads the plugins from the specified config, the path
specified by TORCHX_CONFIG environment variable or the default location
at /etc/torchx/config.yaml.
"""
if not config_path:
config_path = os.getenv(TORCHX_CONFIG_ENV, DEFAULT_TORCHX_CONFIG_PATH)
print(f"config path: {config_path}")
if not os.path.exists(config_path):
return
with open(config_path, "r") as f:
config = yaml.safe_load(f)
init_plugins_from_config(config) | 5,353,294 |
def valid_utility_climate_zone_combos(utility, year):
"""Returns all utility-climate zone combinations"""
utility_cz = get_all_valid_utility_climate_zone_combinations(year, utility)
click.echo(
json.dumps(
utility_cz.groupby("utility")["climate_zone"]
.agg(lambda x: ", ".join(x))
.to_dict(),
indent=2,
)
) | 5,353,295 |
def reward_penalized_log_p(mol):
"""
Reward that consists of log p penalized by SA and # long cycles,
as described in (Kusner et al. 2017). Scores are normalized based on the
statistics of 250k_rndm_zinc_drugs_clean.smi dataset
:param mol: rdkit mol object
:return: float
"""
# normalization constants, statistics from 250k_rndm_zinc_drugs_clean.smi
logP_mean = 2.4570953396190123
logP_std = 1.434324401111988
SA_mean = -3.0525811293166134
SA_std = 0.8335207024513095
cycle_mean = -0.0485696876403053
cycle_std = 0.2860212110245455
log_p = MolLogP(mol)
SA = -calculateScore(mol)
# cycle score
cycle_list = nx.cycle_basis(nx.Graph(
Chem.rdmolops.GetAdjacencyMatrix(mol)))
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
cycle_score = -cycle_length
normalized_log_p = (log_p - logP_mean) / logP_std
normalized_SA = (SA - SA_mean) / SA_std
normalized_cycle = (cycle_score - cycle_mean) / cycle_std
return normalized_log_p + normalized_SA + normalized_cycle | 5,353,296 |
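An illustrative call, assuming RDKit is installed and the module's other imports (MolLogP, calculateScore, networkx) are available as in the original source:

from rdkit import Chem

mol = Chem.MolFromSmiles('CCO')  # ethanol, illustrative molecule
print(reward_penalized_log_p(mol))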
def loadmat(filename, variable_names=None):
"""
load mat file from h5py files
:param filename: mat filename
:param variable_names: list of variable names that should be loaded
:return: dictionary of loaded data
"""
    data = {}
    with h5py.File(filename, 'r') as matfile:
        if variable_names is None:
            for key in matfile.keys():
                data.update({key: matfile[key][()]})
        else:
            for key in variable_names:
                if key not in matfile.keys():
                    raise RuntimeError('Variable: "' + key + '" is not in file: ' + filename)
                data.update({key: matfile[key][()]})
return data | 5,353,297 |
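For example (hypothetical file and variable names), reading selected variables from a MATLAB v7.3 / HDF5 file:

data = loadmat('results.mat', variable_names=['X', 'y'])
X, y = data['X'], data['y']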
def date_start_search(line):
"""予定開始の日付を検出し,strで返す."""
# 全角スペース
zen_space = ' '
    # full-width zero
zen_zero = '0'
nichi = '日'
dollar = '$'
    # Work around missing leading digits by replacing full-width spaces with full-width zeros
line = line.replace(zen_space, zen_zero)
index = line.find(nichi)
    # Correct mis-notation based on where '日' (day) sits relative to the day-of-week marker
index_first_dollar = line.find(dollar, index + 1)
if index + 1 != index_first_dollar:
index = index_first_dollar
# ex. 1 → 01
#if line[index - 1] == zen_space:
# line[index - 1] = zen_zero
return zenhan.z2h(line[index - 2:index]) | 5,353,298 |
def train_sub1(sess, x, y, bbox_preds, x_sub, y_sub, nb_classes,
nb_epochs_s, batch_size, learning_rate, data_aug, lmbda,
aug_batch_size, rng, img_rows=48, img_cols=48,
nchannels=3):
"""
This function creates the substitute by alternatively
augmenting the training data and training the substitute.
:param sess: TF session
:param x: input TF placeholder
:param y: output TF placeholder
:param bbox_preds: output of black-box model predictions
:param x_sub: initial substitute training data
:param y_sub: initial substitute training labels
:param nb_classes: number of output classes
:param nb_epochs_s: number of epochs to train substitute model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param data_aug: number of times substitute training data is augmented
:param lmbda: lambda from arxiv.org/abs/1602.02697
:param rng: numpy.random.RandomState instance
:return:
"""
# Define TF model graph (for the black-box model)
model_sub = ModelSubstitute('model_s', nb_classes)
preds_sub = model_sub.get_logits(x)
loss_sub = CrossEntropy(model_sub, smoothing=0)
print("Defined TensorFlow model graph for the substitute.")
# Define the Jacobian symbolically using TensorFlow
grads = jacobian_graph(preds_sub, x, nb_classes)
# Train the substitute and augment dataset alternatively
for rho in xrange(data_aug):
print("Substitute training epoch #" + str(rho))
train_params = {
'nb_epochs': nb_epochs_s,
'batch_size': batch_size,
'learning_rate': learning_rate
}
#with TemporaryLogLevel(logging.WARNING, "cleverhans.utils.tf"):
train(sess, loss_sub, x, y, x_sub,
to_categorical(y_sub, nb_classes),
init_all=False, args=train_params, rng=rng)
#var_list=model_sub.get_params())
# If we are not at last substitute training iteration, augment dataset
if rho < data_aug - 1:
print("Augmenting substitute training data.")
# Perform the Jacobian augmentation
lmbda_coef = 2 * int(int(rho / 3) != 0) - 1
# print(x.shape)
# print(x_sub.shape)
# print(y_sub.shape)
#print(grads.shape)
x_sub = jacobian_augmentation(sess, x, x_sub, y_sub, grads,
lmbda_coef * lmbda, aug_batch_size)
print("Labeling substitute training data.")
# Label the newly generated synthetic points using the black-box
y_sub = np.hstack([y_sub, y_sub])
x_sub_prev = x_sub[int(len(x_sub)/2):]
eval_params = {'batch_size': batch_size}
#tmp = batch_eval(sess, [x], [bbox_preds], [x_sub_prev],args=eval_params)
tmp = batch_eval(sess, [x], [bbox_preds], [x_sub_prev],batch_size=batch_size)
print(tmp)
bbox_val = tmp[0]
# Note here that we take the argmax because the adversary
# only has access to the label (not the probabilities) output
# by the black-box model
y_sub[int(len(x_sub)/2):] = np.argmax(bbox_val, axis=1)
return model_sub, preds_sub | 5,353,299 |