content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
def get_move() -> tuple:
"""
Utility function to get the player's move.
:return: tuple of the move
"""
return get_tuple('What move to make?') | ef35ab6fcb9cdfd60ecc91ff044b39600659c80f | 3,654,983 |
def set_value(parent, type, name, value) :
"""
Sets a value in the format Mitsuba Renderer expects
"""
curr_elem = etree.SubElement(parent, type)
curr_elem.set("name", name)
curr_elem.set("id" if type in ["ref", "shapegroup"] else "value", value) # The can be an id
return curr_elem | 6ace9a4a5858f3fb80bbe585580345369d7f6e63 | 3,654,984 |
def create_bst(nodes) -> BST:
"""Creates a BST from a specified nodes."""
root = BST(nodes[0])
for i in range(1, len(nodes)):
root.insert(nodes[i])
return root | d93164dcc94f36ea9d5643467cf157d0f387c149 | 3,654,985 |
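
A minimal usage sketch for create_bst, assuming a BST class whose constructor takes the root value and which exposes the insert method used above:
nodes = [8, 3, 10, 1, 6, 14]
tree = create_bst(nodes)  # 8 becomes the root; the remaining values are inserted in order
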
from typing import Generator
import math
from torch import nn
def get_dec_arch(gen: Generator) -> nn.Sequential:
"""
Get decoder architecture associated with given generator.
Args:
gen (Generator): Generator associated with the decoder.
Returns:
nn.Sequential: Decoder architecture.
"""
# As defined in the paper.
len_z = len(gen.latent_space_mean())
h_size = math.floor(len_z / 2)
decoder = nn.Sequential(
nn.Linear(len_z, h_size),
nn.ReLU(inplace=True),
nn.Linear(h_size, len_z),
)
return decoder | dbadcb662e0c9a3089a4f152675d91b0d3e8898d | 3,654,986 |
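
A small sketch of calling get_dec_arch with a hypothetical stand-in for the project's Generator class; the only assumption is that latent_space_mean() returns a 1-D tensor:
import torch

class _FakeGen:
    def latent_space_mean(self):
        return torch.zeros(16)  # pretend the latent space has 16 dimensions

decoder = get_dec_arch(_FakeGen())
print(decoder)  # two Linear layers with a ReLU in between: 16 -> 8 -> 16
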
import xml.dom.minidom
def add_page_to_xml(alto_xml, alto_xml_page, page_number=0):
"""
Add new page to end of alto_xml or replace old page.
"""
# If book empty
    if alto_xml is None:
page_dom = xml.dom.minidom.parseString(alto_xml_page)
page_dom.getElementsByTagName("Page")[0].setAttribute("ID", 'page_1')
alto_xml_page = page_dom.toxml(encoding="utf-8")
return(alto_xml_page)
# If not
book_dom = xml.dom.minidom.parseString(alto_xml)
page_dom = xml.dom.minidom.parseString(alto_xml_page)
page = page_dom.getElementsByTagName("Page")[0]
if(page_number==0):
# Find last page
page_number = book_dom.getElementsByTagName("Page").length
# and add page to end
book_dom.getElementsByTagName("Layout")[0].appendChild(page)
page.setAttribute("ID", 'page_%d' % (page_number+1))
# If new page is not last page
else:
old_page = book_dom.getElementsByTagName("Page")[page_number-1]
book_dom.getElementsByTagName("Layout")[0].replaceChild(page, old_page)
page.setAttribute("ID", 'page_%d' % page_number)
return(book_dom.toxml(encoding="utf-8")) | d9303db2e6ce9c672d0548f81ddece36b1e59be3 | 3,654,987 |
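
A minimal sketch of the empty-book branch of add_page_to_xml, using a small hand-written fragment rather than a real ALTO document:
page_xml = '<alto><Layout><Page ID="p"/></Layout></alto>'
book_xml = add_page_to_xml(None, page_xml)  # first page: its ID is rewritten to "page_1"
print(book_xml)  # utf-8 encoded XML bytes
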
from collections import defaultdict
import numpy as np
def calculate_performance(all_data):
"""
Calculates the performance metrics as found in "benchmarks" folder of
scikit-optimize and prints them in console.
Parameters
----------
* `all_data`: dict
Traces data collected during run of algorithms. For more details, see
'evaluate_optimizer' function.
"""
sorted_traces = defaultdict(list)
for model in all_data:
for dataset in all_data[model]:
for algorithm in all_data[model][dataset]:
data = all_data[model][dataset][algorithm]
# leave only best objective values at particular iteration
best = [[v[-1] for v in d] for d in data]
supervised_learning_type = "Regression" if ("Regressor" in model) else "Classification"
                # every item in sorted_traces is a 2d array, where the first dimension corresponds to
                # a particular repeat of the experiment, and the second dimension corresponds to the
                # index of the optimization step during optimization
key = (algorithm, supervised_learning_type)
sorted_traces[key].append(best)
# calculate averages
for key in sorted_traces:
# the meta objective: average over multiple tasks
mean_obj_vals = np.mean(sorted_traces[key], axis=0)
minimums = np.min(mean_obj_vals, axis=1)
f_calls = np.argmin(mean_obj_vals, axis=1)
min_mean = np.mean(minimums)
min_stdd = np.std(minimums)
min_best = np.min(minimums)
f_mean = np.mean(f_calls)
f_stdd = np.std(f_calls)
f_best = np.min(f_calls)
def fmt(float_value):
return ("%.3f" % float_value)
output = str(key[0]) + " | " + " | ".join(
[fmt(min_mean) + " +/- " + fmt(min_stdd)] + [fmt(v) for v in [min_best, f_mean, f_stdd, f_best]])
result = table_template + output
print("")
print(key[1])
print(result) | 0b2d185c2cafdddc632ee5b94a6e37c7450f096b | 3,654,988 |
def connected_components(num_nodes, Ap, Aj, components):
"""connected_components(int const num_nodes, int const [] Ap, int const [] Aj, int [] components) -> int"""
return _amg_core.connected_components(num_nodes, Ap, Aj, components) | a3306dd4357b6db91bdbb3033b53effe8f5e376d | 3,654,989 |
def get_drm_version():
"""
Return DRM library version.
Returns:
str: DRM library version.
"""
path = _join(PROJECT_DIR, "CMakeLists.txt")
with open(path, "rt") as cmakelists:
for line in cmakelists:
if line.startswith("set(ACCELIZEDRM_VERSION "):
version = f"v{line.split(' ')[1].strip().strip(')')}"
print(f"Detected DRM library version: {version}")
return version
raise ValueError(f'ACCELIZEDRM_VERSION not found in "{path}"') | b94da9049be428fc38992c34c829e202e98cb69d | 3,654,990 |
def pmx(p1, p2):
"""Perform Partially Mapped Crossover on p1 and p2."""
return pmx_1(p1, p2), pmx_1(p2, p1) | 60ac365efe3fd66eea24859afd9cfa470c061de2 | 3,654,991 |
def get_key_information(index, harness_result: HarnessResult, testbed_parser, esapi_instance: ESAPI):
"""
    1. key_exception_dic is a dict keyed by engine name. If an error message can be extracted, the value is that
    engine's key error message; if none of the engines produced an error message, the value is the engine's full output.
    Returns [double_output_id, engine_name, key_exception_dic, api_name, filter_type]. There are three filter types:
    type 1 means the anomalous result contains an error message, type 2 means it contains no error message, and
    type 3 means none of the engines reported an error (i.e. the inconsistency comes from differing execution outputs).
    The value is one of [1, 2, 3], meaning [type 1, type 2, type 3].
"""
suspicious_output = None
for output in harness_result.outputs:
if output.id == index:
suspicious_output = output
if suspicious_output is None:
raise Exception("Harness result does not contain special index")
key_exception = list_normalized_essential_exception_message(suspicious_output.stderr + suspicious_output.stdout)
key_exception_dic = {}
double_output_id = index
engine_name = testbed_parser.parse_engine_name(suspicious_output.testbed)
no_exception_info_engine_counter = 0
es_api_node_ast_in_testcase = None
    # The inconsistent result from differential testing contains an error message: type 1
if key_exception != "":
filter_type = FilerType.TYPE1.value
[api_name, es_api_node_ast_in_testcase] = getExecptionStatementApi.get_exception_statement_api(
esapi_instance,
harness_result.testcase,
suspicious_output.stderr + suspicious_output.stdout,
es_api_node_ast_in_testcase)
if api_name is None:
api_name = "NoApi"
key_exception_dic = {engine_name: key_exception}
    # The inconsistent result from differential testing contains no error message: type 2
else:
filter_type = FilerType.TYPE2.value
        no_exception_info_engine_counter += 1  # no error message could be extracted from the differential test result
api_list = []
for output in harness_result.outputs:
if output.id != index:
exception_engine_name = testbed_parser.parse_engine_name(output.testbed)
exception_info = list_normalized_essential_exception_message(output.stderr + output.stdout)
if exception_info == "":
no_exception_info_engine_counter += 1
key_exception_dic.update({exception_engine_name: exception_info})
[api, es_api_node_ast_in_testcase] = getExecptionStatementApi.get_exception_statement_api(
esapi_instance,
harness_result.testcase,
output.stderr + output.stdout,
es_api_node_ast_in_testcase)
api = "NoApi" if api is None else api
api_list.append(api)
most_frequent_api, most_frequent_count = get_highest_frequency(api_list)
if most_frequent_count < len(api_list) * 1 / 2:
api_name = "NoApi"
else:
api_name = most_frequent_api
    # None of the engines reported an error and only the outputs differ: type 3
if no_exception_info_engine_counter == len(harness_result.outputs):
# return None
filter_type = FilerType.TYPE3.value
for output in harness_result.outputs:
exception_engine_name = testbed_parser.parse_engine_name(output.testbed)
output = output.stderr + output.stdout
key_exception_dic.update({exception_engine_name: output})
api_name = "NoApi"
return [double_output_id, engine_name, key_exception_dic, api_name, filter_type] | 734838a08760e5203701bc2bb21ff38c6e579873 | 3,654,994 |
from io import StringIO
from PIL import Image
def open_image(asset):
"""Opens the image represented by the given asset."""
try:
asset_path = asset.get_path()
except NotImplementedError:
return Image.open(StringIO(asset.get_contents()))
else:
return Image.open(asset_path) | 11e2ca552ab898801dba4dea9e8776b93532ac11 | 3,654,995 |
def gather_audio_video_eavesdropping(x) :
"""
    @param x : an Analysis instance
    @rtype : a list of strings for the concerned category, for example [ 'This application makes phone calls', "This application sends an SMS message 'Premium SMS' to the '12345' phone number" ]
"""
result = []
result.extend ( detect_MediaRecorder_Voice_record(x) )
result.extend ( detect_MediaRecorder_Video_capture(x) )
return result | 01a4cc13865e8810a26d8d08ec3cf684d4a1c8a1 | 3,654,996 |
def vdw_radius_single(element):
"""
Get the Van-der-Waals radius of an atom from the given element. [1]_
Parameters
----------
element : str
The chemical element of the atoms.
Returns
-------
The Van-der-Waals radius of the atom.
If the radius is unknown for the element, `None` is returned.
See also
--------
vdw_radius_protor
References
----------
.. [1] A Bondi,
"Van der Waals volumes and radii."
       J Phys Chem, 68, 441-451 (1964).
Examples
--------
>>> print(vdw_radius_single("C"))
1.7
"""
return _SINGLE_RADII.get(element.upper()) | 6c705ce2309b470c3b6d8445701e831df35853ec | 3,654,997 |
import typing
def evaluate_ins_to_proto(ins: typing.EvaluateIns) -> ServerMessage.EvaluateIns:
"""Serialize flower.EvaluateIns to ProtoBuf message."""
parameters_proto = parameters_to_proto(ins.parameters)
config_msg = metrics_to_proto(ins.config)
return ServerMessage.EvaluateIns(parameters=parameters_proto, config=config_msg) | e7cbbf7d78f2ac37b6248d61fe7e797b151bba31 | 3,654,998 |
def avatar_synth_df(dir, batch_size, num_threads):
"""
Get data for training and evaluating the AvatarSynthModel.
:param dir: The data directory.
:param batch_size: The minibatch size.
:param num_threads: The number of threads to read and process data.
:return: A dataflow for parameter to bitmoji data
"""
df = AvatarSynthDataFlow(dir)
df = process_avatar_synth_data(df, batch_size, num_threads)
return df | 1fcecb5769d7c38c84bcd02cff8159381e113861 | 3,654,999 |
def assign_material(obj, materialname):
"""This function assigns a material to an objects mesh.
:param obj: The object to assign the material to.
:type obj: bpy.types.Object
:param materialname: The materials name.
:type materialname: str
"""
if materialname not in bpy.data.materials:
if materialname in defs.defaultmaterials:
materials.createPhobosMaterials()
else:
# print("###ERROR: material to be assigned does not exist.")
log("Material to be assigned does not exist.", "ERROR")
return None
# obj.data.materials[0] = bpy.data.materials[materialname]
obj.data.materials.append(bpy.data.materials[materialname]) | 929d4750d5c6e9710fdc8fc735b3792a3b4a63f4 | 3,655,000 |
def _check_stack_axis(axis, dims, default='unnamed'):
""" check or get new axis name when stacking array or datasets
(just to have that in one place)
"""
if axis is None:
axis = default
if axis in dims:
i = 1
while default+"_{}".format(i) in dims:
i+=1
axis = default+"_{}".format(i)
if type(axis) is int:
raise TypeError("axis must be a str (new axis name)")
if axis in dims:
raise ValueError("please provide an axis name which does not \
already exist, or use `concatenate`")
return axis | 4dc74da450d6be4872a5f03e61ec16700b197d94 | 3,655,001 |
from typing import Optional
from typing import Dict
from typing import Union
from typing import List
from typing import Any
def eval_push_time_ratios(problem_size: int = 3000) -> Optional[TimeRatioType]:
"""
Function that calculates the execution time ratios, for the different time complexities.
Here, a process pool is created in order to speed up the process of generating
the lists of time ratios, for each time complexity.
"""
stack: Stack = Stack()
time_ratios: Dict[str, Union[str, List[Number]]] = {
func_name: [] for func_name in TIME_COMPLEXITIES
}
arguments: List[Any] = [
(stack, problem_size, function) for function in TIME_COMPLEXITIES
]
pool: ProcessPoolType = ProcessPool(get_cpu_count(), set_low_priority_to_process)
for response in pool.imap(_push_time_ratio_worker, arguments):
time_ratios.update(response)
time_ratios.update({
'data_struct_name': Stack.__name__.lower(),
'target_name': Stack.push.__name__,
})
return time_ratios | 72f1a32f227d52040e14af77d91e4419e25480c2 | 3,655,002 |
import pandas as pd
def _load_data():
"""
Internal function to get the data to plot.
"""
# Load homicides
homicides = gv_data.PoliceHomicides.get()
# Calculate concentrated disadvantage
sub_data = []
for cls in [
"PublicAssistance",
"FemaleHouseholders",
"PercentInPoverty",
"PercentUnder18",
]:
subset = []
for year in YEARS:
df = getattr(gv_data, cls).get(year=year)
df["year"] = year
subset.append(df)
sub_data.append(pd.concat(subset).set_index(["census_tract_id", "year"]))
data = sub_data[0]
for df in sub_data[1:]:
data = data.join(df.drop(labels=["geometry"], axis=1))
# Do min/max normalization on each
for col in [
"percent_public_assistance",
"percent_female_householder",
"percent_in_poverty",
"percent_under_18",
]:
data[col + "_normed"] = (data[col] - data[col].min()) / (
data[col].max() - data[col].min()
)
# Normalize sum to 0 to 1
data["index"] = data.filter(regex="_normed").sum(axis=1) / 5.0
return homicides, data | de691b9b1a6b0ce3c75075619024358fd1c09783 | 3,655,004 |
def test_auto_add_dataloader_idx(tmpdir, add_dataloader_idx):
"""test that auto_add_dataloader_idx argument works."""
class TestModel(BoringModel):
def val_dataloader(self):
dl = super().val_dataloader()
return [dl, dl]
def validation_step(self, *args, **kwargs):
output = super().validation_step(*args[:-1], **kwargs)
if add_dataloader_idx:
name = "val_loss"
else:
name = f"val_loss_custom_naming_{args[-1]}"
self.log(name, output["x"], add_dataloader_idx=add_dataloader_idx)
return output
model = TestModel()
model.validation_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=2)
trainer.fit(model)
logged = trainer.logged_metrics
# Check that the correct keys exist
if add_dataloader_idx:
assert "val_loss/dataloader_idx_0" in logged
assert "val_loss/dataloader_idx_1" in logged
else:
assert "val_loss_custom_naming_0" in logged
assert "val_loss_custom_naming_1" in logged | 51ae3f22709db4a9d2b244b49cc6ac638ee7205d | 3,655,005 |
def move(x_pos, y_pos):
"""Return the G-CODE describing motion to x_pos, y_pos."""
out = ""
out += "G1X"+str(x_pos)+"Y"+str(y_pos)+"F"+str(FEEDRATE)+";\n"
out += "M400;\n"
return out | 0bfaf1e53b4a90094adc28eab4c6f1eba5bd32e8 | 3,655,006 |
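
A quick sketch of the G-code produced by move(); FEEDRATE is a module-level constant assumed here for illustration:
FEEDRATE = 1500  # assumed constant used by move()
print(move(10.0, 25.5))
# G1X10.0Y25.5F1500;
# M400;
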
def load_scrub_optional_upload(storage_folder: str, filename: str) -> str:
"""Loads a option file that was previously saved in the storage folder.
:param storage_folder: A string representing the path of the storage
folder.
:param filename: A string representing the name of the file that is being
loaded.
:return: The file string that was saved in the folder (empty if there is
no string to load).
"""
try:
return general_functions.load_file_from_disk(
loc_folder=storage_folder, filename=filename)
except FileNotFoundError:
return "" | fdb5abf217a767c6ac9309e098cecf9f1b70608b | 3,655,007 |
import numpy as np
from typing import Dict
def compute_error_decrease(fun, VX, EToV) -> Dict[int, float]:
"""
Computes estimate of possible error decrease for each element in mesh.
:param fun: Function float -> float
:param VX: dict from point id to its position on x axis.
:param EToV: dict from element id to a tuple of its boundary points.
"""
L2_loss = dict()
for e, (idx1, idx2) in EToV.items():
x1 = VX[idx1]
x2 = VX[idx2]
y1 = fun(x1) # This line should be updated in 1.7
y2 = fun(x2) # This line should be updated in 1.7
x_half = (x1 + x2) / 2
y_half = fun(x_half)
slope0 = (y2 - y1) / (x2 - x1)
slope1 = (y_half - y1) / (x_half - x1)
slope2 = (y2 - y_half) / (x2 - x_half)
L2_loss1 = compute_L2_error(x_half - x1, slope0, slope1)
L2_loss2 = compute_L2_error(x_half - x1, slope0, slope2)
L2_loss[e] = np.sqrt(L2_loss1 + L2_loss2)
return L2_loss | 46b3570c7d0ad5f07faa54a954d63ecbce64c3b7 | 3,655,008 |
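
A usage sketch for compute_error_decrease, assuming numpy is imported as np and the compute_L2_error helper referenced above is available in the same module:
fun = lambda x: x ** 2                        # function to approximate piecewise-linearly
VX = {0: 0.0, 1: 0.5, 2: 1.0}                 # vertex positions
EToV = {0: (0, 1), 1: (1, 2)}                 # two elements covering [0, 1]
print(compute_error_decrease(fun, VX, EToV))  # {element id: estimated error decrease}
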
def file_name_to_title_name(file_name):
"""
#Arguments
    file_name (str): name of the Python file (with or without the '.py' extension) to convert into a title line
#Examples
file_name_to_title_name('activate_mode')
output = 'Activate Mode: activate_mode.md'
"""
file_name_list = file_name.split('.py')
file_name = file_name_list[0]
title = file_name.replace('_', ' ').title()
filename2 = ': ' + file_name + '.md'
return title + filename2 | 330eae5c34cd55f01aaf520ea9df467ea4042b1e | 3,655,009 |
import numpy as np
def save_binary_mask_triple(
rgb_img: np.ndarray, label_img: np.ndarray, save_fpath: str, save_to_disk: bool = False
) -> np.ndarray:
"""Currently mask img background is light-blue. Instead, could set it to white. np.array([255,255,255])
Args:
rgb_img:
label_img:
        save_fpath:
        save_to_disk:
Returns:
Array, representing 3 horizontally concatenated images: from left-to-right, they are
RGB, RGB+Semantic Masks, Semantic Masks
"""
img_h, img_w, _ = rgb_img.shape
rgb_with_mask = highlight_binary_mask(label_img, rgb_img.copy())
blank_img = np.ones((img_h, img_w, 3), dtype=np.uint8) * 255
y, x = np.where(label_img == 0)
blank_img[y, x, :] = LIME_GREEN # LIGHT_BLUE
mask_img = highlight_binary_mask(label_img, blank_img)
return form_hstacked_imgs([rgb_img, rgb_with_mask, mask_img], save_fpath, save_to_disk) | ffc9dbd6550200e48548d29e6dedacde6eced3c2 | 3,655,010 |
def get_random_fortune(fortune_file):
"""
Get a random fortune from the specified file. Barfs if the corresponding
`.dat` file isn't present.
:Parameters:
fortune_file : str
path to file containing fortune cookies
:rtype: str
:return: the random fortune
"""
fortunes = list(_read_fortunes(fortune_file))
randomRecord = _random_int(0, len(fortunes) - 1)
return fortunes[randomRecord] | d7486abbacc95a2b737471f899d002fd642f72b7 | 3,655,011 |
async def delete_item(item_id: int, db: Session = Depends(get_db)):
"""
Delete the Item with the given ID provided by User stored in database
"""
db_item = ItemRepo.fetch_by_id(db, item_id)
if db_item is None:
raise HTTPException(status_code=404, detail="Item not found with the given ID")
await ItemRepo.delete(db, item_id)
return "Item deleted successfully!" | a73ebafa7cc73c24133e4795aabbe2cb0a72172d | 3,655,013 |
def process_messages(deck, messages, encrypt_or_decrypt):
"""(list of int, list of str, str) -> list of str
Return the messages encrypted or decrypted using the specified deck.
    The parameter encrypt_or_decrypt will be ENCRYPT to encrypt the message,
and DECRYPT to decrypt the message
>>>deck = [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 3, 6, 9, 12, 15, 18, 21, 24,
27, 2, 5, 8, 11, 14, 17, 20, 23, 26]
>>>process_messages(deck, ['Patty', 'Cakes'], ENCRYPT)
['AJQAI', 'BLVLT']
>>>deck = [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 3, 6, 9, 12, 15, 18, 21, 24,
27, 2, 5, 8, 11, 14, 17, 20, 23, 26]
>>>process_messages(deck, ['AJQAI', 'BLVLT'], DECRYPT)
['PATTY', 'CAKES']
"""
returned_message = []
for message in messages:
new_message = ''
cleaned_message = clean_message(message) # Cleans the message of
        # punctuation and makes it all upper case
for letter in cleaned_message:
keystream_value = get_next_keystream_value(deck)
# Generates a keystream value for each letter
if encrypt_or_decrypt == ENCRYPT:
new_message = new_message + encrypt_letter(letter, keystream_value)
else: # Where encrypt_or_decrypt == DECRYPT
new_message = new_message + decrypt_letter(letter, keystream_value)
returned_message.append(new_message)
return returned_message | 4b9f2506edd44c916ac69a4c6d1d12a3b58ff89d | 3,655,015 |
import numpy as np
def clip(wavelength, spectra, threshold, substitute=None):
""" Removes or substitutes values above the given threshold.
Args:
wavelength <numpy.ndarray>: Vector of wavelengths.
spectra <numpy.ndarray>: NIRS data matrix.
threshold <float>: threshold value for rejection
substitute <float>: substitute value for rejected values (None removes values from the spectra)
Returns:
wavelength <numpy.ndarray>: Vector of wavelengths.
spectra <numpy.ndarray>: NIR spectra with threshold exceeding values removed.
"""
    if substitute is None:  # remove threshold violations
        mask = np.any(spectra > threshold, axis=1)
        spectra = spectra[~mask, :]
        wavelength = wavelength[~mask]
    else:  # substitute threshold violations with a value
        spectra[spectra > threshold] = substitute
    return wavelength, spectra | 79d79c6353468f77dacb995cfcca9b717e8ef8e0 | 3,655,016 |
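
A usage sketch for clip(); rows are wavelengths and columns are samples, matching the row-wise masking above. Copies are passed because the substitute mode modifies the array in place:
wl = np.linspace(900, 1700, 5)
spectra = np.array([[0.1, 0.1],
                    [0.2, 0.2],
                    [0.3, 2.5],   # exceeds the threshold in one sample
                    [0.4, 0.4],
                    [0.5, 0.5]])
wl_cut, sp_cut = clip(wl, spectra.copy(), threshold=1.0)                  # offending wavelength row removed
wl_sub, sp_sub = clip(wl, spectra.copy(), threshold=1.0, substitute=1.0)  # value capped at 1.0 instead
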
def findMachines(fqpn):
"""
Recursively yield L{MethodicalMachine}s and their FQPNs in and
    under a Python object specified by an FQPN.
The discovery heuristic considers L{MethodicalMachine} instances
that are module-level attributes or class-level attributes
accessible from module scope. Machines inside nested classes will
be discovered, but those returned from functions or methods will not be.
    @type fqpn: an FQPN
    @param fqpn: Where to start the search.
@return: a generator which yields FQPN, L{MethodicalMachine} pairs.
"""
return findMachinesViaWrapper(wrapFQPN(fqpn)) | 71689b8ffe166dd4b8b8f126d04b3bcf8123257e | 3,655,017 |
def asynchronous_prod_milp_constraint_rule(backend_model, loc_tech, timestep):
"""
BigM limit set on `carrier_prod`, forcing it to either be zero or non-zero,
depending on whether `prod` is zero or one, respectively.
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{carrier_prod}[loc::tech::carrier, timestep] \\leq
\\text{bigM} \\times \\boldsymbol{prod_con_switch}[loc::tech, timestep]
\\forall loc::tech \\in loc::techs_{asynchronous_prod_con},
\\forall timestep \\in timesteps
"""
model_dict = backend_model.__calliope_model_data
loc_tech_carrier = model_dict["data"]["lookup_loc_techs"][loc_tech]
return (
backend_model.carrier_prod[loc_tech_carrier, timestep]
<= backend_model.prod_con_switch[loc_tech, timestep] * backend_model.bigM
) | 049454e9a3aafecc8531225bc5f09b666d892fcb | 3,655,018 |
def drawCurveArc(self): #---- only for ELLIPSE -------------------------------------------------------------
"""Given a dxf ELLIPSE object return a blender_curve.
"""
center = self.loc
radius = self.radius
start = self.start_angle
end = self.end_angle
if start > end:
start = start - 360.0
startmatrix = Mathutils.RotationMatrix(start, 3, "Z")
startpoint = startmatrix * Mathutils.Vector((radius, 0, 0))
endmatrix = Mathutils.RotationMatrix(end, 3, "Z")
endpoint = endmatrix * Mathutils.Vector((radius, 0, 0))
# Note: handles must be tangent to arc and of correct length...
a = Curve.New('arc') # create new curve data
p1 = (0, -radius, 0)
p2 = (radius, 0, 0)
p3 = (0, radius, 0)
p4 = (-radius, 0, 0)
p1 = BezTriple.New(p1)
p2 = BezTriple.New(p2)
p3 = BezTriple.New(p3)
p4 = BezTriple.New(p4)
curve = a.appendNurb(p1)
curve.append(p2)
curve.append(p3)
curve.append(p4)
for point in curve:
point.handleTypes = [AUTO, AUTO]
point.radius = 1.0
curve.flagU = 1 # Set curve cyclic
a.update()
ob = Object.New('Curve', 'arc') # make curve object
return ob | d8332b171fe9ef654c0fe71b8ce0b636b23221a8 | 3,655,020 |
import pickle
def read_pickle(filename, protocol=-1, **kwargs):
"""
read grid saved in PICKLE format into a GridData object
:param filename: full path to the filename
:type filename: str
:rtype: ~uquake.core.data.grid.Grid
"""
return pickle.load(open(filename, 'rb')) | 8115b5a91698cc508ea05c3097d8d69b0bb77561 | 3,655,021 |
def linked_ims(im_list, pix_per_um, shape=(2,2),
x_range=None, y_range=None, scale_fig=1, scale_height=1.4,
brightness=1, palette='Turbo256', cmap_range='from zero',
show_fig=True, title_list=[], t_fs=24, ax_fs=16, tk_fs=12, cb_fs=14):
"""
Shows multiple frames with linked panning and zooming.
Uses format_im().
"""
# list of figures
p = []
# creates images
for i, im in enumerate(im_list):
if len(title_list) == len(im_list):
title = title_list[i]
p_new = format_im(im, pix_per_um, x_range=x_range, y_range=y_range,
scale_fig=scale_fig, scale_height=scale_height, title=title,
brightness=brightness, palette=palette, cmap_range=cmap_range,
show_fig=False, t_fs=t_fs, ax_fs=ax_fs, tk_fs=tk_fs, cb_fs=cb_fs)
p += [p_new]
# makes grid plot
p_grid = make_gridplot(p, shape)
# shows figure
if show_fig:
show(p_grid)
return p_grid | 5a6e0cb821a9d9d49243de297d74f5b656825f13 | 3,655,022 |
def read_qmcpack_hamiltonian(filename):
"""Read Hamiltonian from QMCPACK format.
Parameters
----------
filename : string
QMPACK Hamiltonian file.
Returns
-------
hamil : dict
Data read from file.
"""
try:
hc, chol, enuc, nmo, nelec, nmok, qkk2 = (
read_qmcpack_cholesky_kpoint(filename)
)
hamil = {
'hcore': hc,
'chol': chol,
'enuc': enuc,
'nelec': nelec,
'nmo': nmo,
'nmo_pk': nmok,
'qk_k2': qkk2
}
except KeyError:
try:
hc, chol, enuc, nmo, nelec = read_qmcpack_cholesky(filename)
hamil = {
'hcore': hc,
'chol': chol,
'enuc': enuc,
'nmo': nmo,
'nelec': nelec
}
except KeyError:
print("Error reading Hamiltonian file. Hamiltonian not found.")
hamil = None
return hamil | 0fb0a6d0e80ab3180da3cb4d0c6d31ba54749f1d | 3,655,023 |
def run_rnn(file):
# define model params
"""
    Run the process to train/test a recurrent neural network using LSTM on a given dataset file.
:param string file: Location of CSV-formatted dataset file
:return: Model with expected (test) targets and associated scores
:rtype: object, dataframe, object
"""
num_epochs = 2
sequence_length = 20
# grab train and test data from CSV
X_train, y_train, X_test, y_test = split_test_training(file, sequence_length)
print(X_train)
# build model
model = build_model()
model.fit(X_train, y_train, epochs=num_epochs, batch_size=64, validation_split=0.2)
# predict
predict = model.predict(X_test)
predict = np.reshape(predict, predict.size)
# evaluate
score = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: ", score[1]*100, "%")
# save model to h5 file (same folder as data)
model_location_folder = get_latest_dataset_folder()
model.save(model_location_folder + '/RNN_' + current_dt + '.h5')
return model, y_test, predict | e159594350d35a207db904b76ea1fbea2509b235 | 3,655,024 |
def load(file, file_format=None, **kwargs):
"""Load data from json, yaml, or pickle files.
This method provides a unified api for loading data from serialized files.
Args:
file (str or file-like object): Filename or a file-like object.
file_format (str, optional): If not specified, the file format will be
inferred from the file extension, otherwise use the specified one.
Currently supported formats include "json", "yaml/yml" and
"pickle/pkl".
Returns:
The content from the file.
"""
if file_format is None and isinstance(file, str):
file_format = file.split('.')[-1]
if file_format not in file_handlers:
raise TypeError('Unsupported format: {}'.format(file_format))
handler = file_handlers[file_format]
if isinstance(file, str):
obj = handler.load_from_path(file, **kwargs)
elif hasattr(file, 'read'):
obj = handler.load_from_fileobj(file, **kwargs)
else:
raise TypeError('"file" must be a filepath str or a file-object')
return obj | 14970d1f9e477f94f358f6bbb220d4ae1e388ecd | 3,655,026 |
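
A usage sketch for load(), assuming the module-level file_handlers registry has handlers registered for these formats and that the files exist:
cfg = load('experiment.json')               # format inferred from the extension
meta = load('run_01.pkl')                   # pickle handler selected via 'pkl'
with open('settings.yaml') as f:
    settings = load(f, file_format='yaml')  # file objects need an explicit format
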
def StepToGeom_MakeAxis2Placement_Convert(*args):
"""
:param SA:
:type SA: Handle_StepGeom_Axis2Placement3d &
:param CA:
:type CA: Handle_Geom_Axis2Placement &
:rtype: bool
"""
return _StepToGeom.StepToGeom_MakeAxis2Placement_Convert(*args) | 003e8c9986214de4f33eceb35ee9975726a0efe9 | 3,655,028 |
import pwd
def results(year: hug.types.text, firstName: hug.types.text, lastName: hug.types.text):
"""Returns the results for a given candidate for a given year"""
engine = create_engine(
'postgresql://%s:%s@%s/%s' %(user,pwd,ip,user),
client_encoding='utf8',echo=False)
conn = engine.connect()
Base = declarative_base()
query = "SELECT * FROM names WHERE election_year = '%s' AND candidate_first_name = '%s' AND candidate_last_name = '%s'" %(str(year),firstName.upper(),lastName.upper())
df = pd.read_sql(query, conn)
candidateId = df['candidate_id'].tolist()[0]
resultQuery = "SELECT * FROM votes WHERE candidate_id = '%s';" %(str(candidateId))
result = pd.read_sql(resultQuery,conn)
officeId, districtId = result['office_code'].tolist()[0], result['district_code'].tolist()[0]
totalQuery = "Select office_code, district_code, county_code, city_code, ward_number, precinct_number, SUM(precinct_votes) AS total_votes FROM votes WHERE office_code = '%s' AND district_code = '%s' AND election_year = '%s' GROUP BY 1,2,3,4,5,6" %(str(officeId),str(districtId),str(year))
totalTable = pd.read_sql(totalQuery,conn)
output = pd.merge(result,totalTable, on = ['office_code', 'district_code', 'county_code', 'city_code', 'ward_number', 'precinct_number'], how="inner")
output['candidate_percentage'] = 100*output['precinct_votes']/output['total_votes']
conn.close()
engine.dispose()
return output.reset_index().to_json(orient="records") | a9c010d8f2633ef1c77f0903b6cf07d315486f5c | 3,655,029 |
async def get_user_from_event(event):
""" Get the user from argument or replied message. """
args = event.pattern_match.group(1).split(':', 1)
extra = None
if event.reply_to_msg_id and not len(args) == 2:
previous_message = await event.get_reply_message()
user_obj = await event.client.get_entity(previous_message.from_id)
extra = event.pattern_match.group(1)
elif len(args[0]) > 0:
user = args[0]
if len(args) == 2:
extra = args[1]
if user.isnumeric():
user = int(user)
if not user:
await event.edit(f"`{JAVES_NNAME}`: ** Pass the user's username, id or reply!**")
return
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await event.client.get_entity(user_id)
return user_obj
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return user_obj, extra | b503868848a78f8bfacd4fc5e211642001104e7d | 3,655,030 |
import torch
from typing import Optional
from typing import Tuple
def stat_scores_multiple_classes(
pred: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
argmax_dim: int = 1,
reduction: str = 'none',
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
.. deprecated::
Use :func:`torchmetrics.functional.stat_scores`. Will be removed in v1.4.0.
"""
rank_zero_deprecation(
"This `stat_scores_multiple_classes` was deprecated in v1.2.0 in favor of"
" `from pytorch_lightning.metrics.functional import stat_scores`."
" It will be removed in v1.4.0"
)
if pred.ndim == target.ndim + 1:
pred = to_categorical(pred, argmax_dim=argmax_dim)
num_classes = get_num_classes(pred=pred, target=target, num_classes=num_classes)
if pred.dtype != torch.bool:
pred = pred.clamp_max(max=num_classes)
if target.dtype != torch.bool:
target = target.clamp_max(max=num_classes)
possible_reductions = ('none', 'sum', 'elementwise_mean')
if reduction not in possible_reductions:
raise ValueError("reduction type %s not supported" % reduction)
if reduction == 'none':
pred = pred.view((-1, )).long()
target = target.view((-1, )).long()
tps = torch.zeros((num_classes + 1, ), device=pred.device)
fps = torch.zeros((num_classes + 1, ), device=pred.device)
fns = torch.zeros((num_classes + 1, ), device=pred.device)
sups = torch.zeros((num_classes + 1, ), device=pred.device)
match_true = (pred == target).float()
match_false = 1 - match_true
tps.scatter_add_(0, pred, match_true)
fps.scatter_add_(0, pred, match_false)
fns.scatter_add_(0, target, match_false)
tns = pred.size(0) - (tps + fps + fns)
sups.scatter_add_(0, target, torch.ones_like(match_true))
tps = tps[:num_classes]
fps = fps[:num_classes]
tns = tns[:num_classes]
fns = fns[:num_classes]
sups = sups[:num_classes]
elif reduction == 'sum' or reduction == 'elementwise_mean':
count_match_true = (pred == target).sum().float()
oob_tp, oob_fp, oob_tn, oob_fn, oob_sup = stat_scores(pred, target, num_classes, argmax_dim)
tps = count_match_true - oob_tp
fps = pred.nelement() - count_match_true - oob_fp
fns = pred.nelement() - count_match_true - oob_fn
tns = pred.nelement() * (num_classes + 1) - (tps + fps + fns + oob_tn)
sups = pred.nelement() - oob_sup.float()
if reduction == 'elementwise_mean':
tps /= num_classes
fps /= num_classes
fns /= num_classes
tns /= num_classes
sups /= num_classes
return tps.float(), fps.float(), tns.float(), fns.float(), sups.float() | e4b64a881de64b93f6ca8018484e9de52d6ab786 | 3,655,031 |
import requests
def correct_doi(file_name: str):
"""Attempt extract a DOI from a filename which contains a DOI."""
if file_name.startswith("acs.jced") or file_name.startswith("je"):
doi = f"10.1021/{file_name}"
elif file_name.startswith("j.jct"):
doi = f"10.1016/{file_name}"
elif file_name.startswith("j.fluid"):
doi = f"10.1016/{file_name}"
elif file_name.startswith("j.tca"):
doi = f"10.1016/{file_name}"
elif file_name.startswith("s"):
doi = f"10.1007/{file_name}"
else:
raise NotImplementedError()
doi = doi.replace(".xml", "")
doi_request = requests.get(
f"https://doi.org/{doi}", headers={"Accept": "application/x-bibtex"}
)
doi_request.raise_for_status()
return doi | e01ddf648660e0fd126720042cc16b16ffe078d3 | 3,655,033 |
from typing import Sequence
from typing import Dict
from typing import Tuple
from typing import Any
def create_multi_dataset_generic_benchmark(
train_datasets: Sequence[SupportedDataset],
test_datasets: Sequence[SupportedDataset],
*,
other_streams_datasets: Dict[str, Sequence[SupportedDataset]] = None,
complete_test_set_only: bool = False,
train_transform=None, train_target_transform=None,
eval_transform=None, eval_target_transform=None,
other_streams_transforms: Dict[str, Tuple[Any, Any]] = None,
dataset_type: AvalancheDatasetType = None) -> GenericCLScenario:
"""
Creates a benchmark instance given a list of datasets. Each dataset will be
considered as a separate experience.
Contents of the datasets must already be set, including task labels.
Transformations will be applied if defined.
This function allows for the creation of custom streams as well.
While "train" and "test" datasets must always be set, the experience list
for other streams can be defined by using the `other_streams_datasets`
parameter.
If transformations are defined, they will be applied to the datasets
of the related stream.
:param train_datasets: A list of training datasets.
:param test_datasets: A list of test datasets.
:param other_streams_datasets: A dictionary describing the content of custom
streams. Keys must be valid stream names (letters and numbers,
        not starting with a number) while the value must be a list of datasets.
If this dictionary contains the definition for "train" or "test"
streams then those definition will override the `train_datasets` and
`test_datasets` parameters.
:param complete_test_set_only: If True, only the complete test set will
be returned by the benchmark. This means that the ``test_dataset_list``
        parameter must be a list with a single element (the complete test set).
Defaults to False.
:param train_transform: The transformation to apply to the training data,
e.g. a random crop, a normalization or a concatenation of different
transformations (see torchvision.transform documentation for a
comprehensive list of possible transformations). Defaults to None.
:param train_target_transform: The transformation to apply to training
patterns targets. Defaults to None.
:param eval_transform: The transformation to apply to the test data,
e.g. a random crop, a normalization or a concatenation of different
transformations (see torchvision.transform documentation for a
comprehensive list of possible transformations). Defaults to None.
:param eval_target_transform: The transformation to apply to test
patterns targets. Defaults to None.
:param other_streams_transforms: Transformations to apply to custom
streams. If no transformations are defined for a custom stream,
then "train" transformations will be used. This parameter must be a
dictionary mapping stream names to transformations. The transformations
must be a two elements tuple where the first element defines the
X transformation while the second element is the Y transformation.
Those elements can be None. If this dictionary contains the
transformations for "train" or "test" streams then those transformations
will override the `train_transform`, `train_target_transform`,
`eval_transform` and `eval_target_transform` parameters.
:param dataset_type: The type of the dataset. Defaults to None, which
means that the type will be obtained from the input datasets. If input
datasets are not instances of :class:`AvalancheDataset`, the type
UNDEFINED will be used.
:returns: A :class:`GenericCLScenario` instance.
"""
transform_groups = dict(
train=(train_transform, train_target_transform),
eval=(eval_transform, eval_target_transform))
if other_streams_transforms is not None:
for stream_name, stream_transforms in other_streams_transforms.items():
if isinstance(stream_transforms, Sequence):
if len(stream_transforms) == 1:
# Suppose we got only the transformation for X values
stream_transforms = (stream_transforms[0], None)
else:
# Suppose it's the transformation for X values
stream_transforms = (stream_transforms, None)
transform_groups[stream_name] = stream_transforms
input_streams = dict(
train=train_datasets,
test=test_datasets)
if other_streams_datasets is not None:
input_streams = {**input_streams, **other_streams_datasets}
if complete_test_set_only:
if len(input_streams['test']) != 1:
            raise ValueError('Test stream must contain one experience when '
                             'complete_test_set_only is True')
stream_definitions = dict()
for stream_name, dataset_list in input_streams.items():
initial_transform_group = 'train'
if stream_name in transform_groups:
initial_transform_group = stream_name
stream_datasets = []
for dataset_idx in range(len(dataset_list)):
dataset = dataset_list[dataset_idx]
stream_datasets.append(AvalancheDataset(
dataset,
transform_groups=transform_groups,
initial_transform_group=initial_transform_group,
dataset_type=dataset_type))
stream_definitions[stream_name] = (stream_datasets,)
return GenericCLScenario(
stream_definitions=stream_definitions,
complete_test_set_only=complete_test_set_only) | 74160f178bcd173d53c3e954fe43e2dbeff8c680 | 3,655,036 |
def convert_to_xml_string(string):
"""
For input strings with escaped tags and special characters
issue a set of conversion functions to prepare it prior
to adding it to an article object
"""
string = entity_to_unicode(string)
string = decode_brackets(string)
string = eautils.replace_tags(string, "i", "italic")
string = eautils.replace_tags(string, "u", "underline")
string = eautils.replace_tags(string, "b", "bold")
string = eautils.replace_tags(string, "em", "italic")
string = etoolsutils.escape_unmatched_angle_brackets(string, allowed_tags())
return string | 7211ade270167e3bb681aa2c450da79f276ea169 | 3,655,037 |
def read_shear_catalog_type(stage):
"""
Determine the type of shear catalog a stage is using as input.
Returns a string, e.g. metacal, lensfit.
Also sets shear_catalog_type in the stage's configuration
so that it is available later and is saved in output.
"""
with stage.open_input('shear_catalog', wrapper=True) as f:
shear_catalog_type = f.catalog_type
stage.config['shear_catalog_type'] = shear_catalog_type
return shear_catalog_type | 26dd03f3a2ef66acab47741df044ac8f2a92bbfb | 3,655,038 |
from typing import Sequence
from typing import List
import math
import cmath
def inverse_fft_iterative(
poly: Sequence, has_imaginary: bool = False, imag_threshold: float = 1e-14
) -> List:
"""Perform inverse iterative discrete fast Fourier transform (DFT) of a polynomial with a degree that is `2^t-1`, t being a positive integer (ie `len(poly)` should be an exact power of 2).
Input is point-value form, output is coefficient form.
"""
# For algo detail, cf. CLRS Ch30.3.
# Time complexity: Theta(N log N), but the const in Theta is smaller than that in
# fft_recursive()
n = len(poly)
if n == 1:
return poly
bit_reversed_poly = _bit_reversal_permutation(poly)
for s in range(1, int(math.log2(n) + 1)):
# s is the level of recursion counting from bottom, lowest being 1, 2nd-highest
# (ie the level just below the root = the orig list) being log2(n).
# Length of the target sublists in level s+1 (eg for s=1, target is len of lv2)
target_len = 2 ** s
# Compute omega_{target_len}
principal_root_of_unity = cmath.exp(-(2 * cmath.pi / target_len) * 1j)
for k in range(0, n, target_len):
omega = 1
for j in range(target_len // 2):
body = bit_reversed_poly[k + j]
twiddle = omega * bit_reversed_poly[k + j + target_len // 2]
# Butterfly operation in-place
bit_reversed_poly[k + j] = 1/2 * (body + twiddle)
bit_reversed_poly[k + j + target_len // 2] = 1/2 * (body - twiddle)
omega *= principal_root_of_unity
if not has_imaginary:
# This will return a cleaner inverse by discarding imag parts whose
# absolute value is less than imag_threshold
bit_reversed_poly = [item.real if abs(item.imag) < imag_threshold else item for item in bit_reversed_poly]
return bit_reversed_poly | 8f12b9d4ab8e4c54aca025a0f8117192c5d49e6b | 3,655,039 |
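
A round-trip sketch: evaluate a coefficient-form polynomial at the n-th roots of unity with the CLRS e^(2*pi*i/n) convention (the code above follows CLRS Ch30.3), then recover the coefficients. Assumes the _bit_reversal_permutation helper referenced above is available:
coeffs = [1, 2, 3, 4]
n = len(coeffs)
points = [sum(c * cmath.exp(2j * cmath.pi * k * j / n) for j, c in enumerate(coeffs))
          for k in range(n)]
print(inverse_fft_iterative(points))  # approximately [1, 2, 3, 4]
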
def admin_uri():
"""
    Helper function to get the admin URL quickly
:returns: admin url, redirect or print friendly
:rtype: string
"""
return '/' + app.global_content['options']['admin-url'].value | 64a95effb177afbb8a68b5462967130cc6d72f2b | 3,655,040 |
import numpy as np
def rlencode(x, check=True, dropna=False):
"""
Run length encoding.
Based on http://stackoverflow.com/a/32681075, which is based on the rle
function from R.
See https://gist.github.com/nvictus/66627b580c13068589957d6ab0919e66
Parameters
----------
x : 1D array_like
Input array to encode
dropna: bool, optional
Drop all runs of NaNs.
Returns
-------
start positions, run lengths, run values
"""
where = np.flatnonzero
x = np.asarray(x)
n = len(x)
if n == 0:
return (np.array([], dtype=int),
np.array([], dtype=int),
np.array([], dtype=x.dtype))
if check:
starts = np.r_[0, where(~np.isclose(x[1:], x[:-1], equal_nan=True)) + 1]
else:
starts = np.r_[0, where(x[1:] != x[:-1]) + 1]
lengths = np.diff(np.r_[starts, n])
values = x[starts]
if dropna:
mask = ~np.isnan(values)
starts, lengths, values = starts[mask], lengths[mask], values[mask]
return starts, lengths, values | f1d38b11413da3b9d00950b829f193ca2a11f37f | 3,655,041 |
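
A quick example of rlencode() on a small array:
x = np.array([1, 1, 1, 2, 2, 3, 3, 3, 3])
starts, lengths, values = rlencode(x)
# starts -> [0 3 5], lengths -> [3 2 4], values -> [1 2 3]
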
def config_load(config_path):
"""Load a json config from a file."""
return files.json_load(config_path) | 510fb5db9bfa7e73fa8a88a26b540d4d46f8e199 | 3,655,042 |
from pytz import timezone
def tz2utc(date, tz):
"""Offset between local time and UTC.
Parameters
----------
date : various
The local time, in any format acceptable to `date2time`.
tz : string
date will be processed via `pytz`.
Returns
-------
offset : datetime.timedelta
The UTC offset.
"""
return timezone(tz).utcoffset(date2time(date).datetime) | f70d35c13865a4dde75ddac0359e74d420fe55fd | 3,655,043 |
import time
def replace_from_execution_report(replace_id, execution_report):
"""Create OrderCancelReplaceRequest from given execution report
For more info about OrderCancelReplaceRequest look at https://support.xena.exchange/support/solutions/articles/44000222082-ws-trading-api#order_cancel_replace_request
"""
cmd = order_pb2.OrderCancelReplaceRequest()
cmd.MsgType = constants.MsgType_OrderCancelReplaceRequestMsgType
cmd.ClOrdId = replace_id
cmd.OrigClOrdId = execution_report.ClOrdId
cmd.Symbol = execution_report.Symbol
cmd.Side = execution_report.Side
cmd.TransactTime = int(time.time() * 1000000000)
cmd.Account = execution_report.Account
cmd.Price = execution_report.Price
cmd.StopPx = execution_report.StopPx
cmd.CapPrice = execution_report.CapPrice
cmd.OrderQty = execution_report.OrderQty
cmd.PegPriceType = execution_report.PegPriceType
cmd.PegOffsetType = execution_report.PegOffsetType
cmd.PegOffsetValue = execution_report.PegOffsetValue
for element in execution_report.SLTP:
sltp = cmd.SLTP.add()
sltp.OrdType = element.OrdType
sltp.Price = element.Price
sltp.StopPx = element.StopPx
sltp.CapPrice = element.CapPrice
sltp.PegPriceType = element.PegPriceType
sltp.PegOffsetType = element.PegOffsetType
sltp.PegOffsetValue = element.PegOffsetValue
return cmd | 0b4524f4cfee1d795b7eb6b5e88799374adeb950 | 3,655,044 |
def convert_unit(value, factor, offset):
"""Return converted value depending on the provided factor and offset."""
return num2decimal(value) * num2decimal(factor) + num2decimal(offset) | eec519f1977a3881feb18ea33a0f81295bfbf080 | 3,655,045 |
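
A sketch of convert_unit(), assuming num2decimal (not shown) wraps values in decimal.Decimal; converting 25 degrees Celsius to Fahrenheit with factor 9/5 and offset 32:
print(convert_unit(25, 1.8, 32))  # roughly 77; the exact representation depends on num2decimal
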
import plotly.express as px
def single_chromosome_graph_scatter(
df,
chromosome,
chosen_template,
marker_width,
colors,
font_size,
xaxis_gridlines,
yaxis_gridlines,
font_family,
samples,
):
""" Filter out current chromosome and set x- and y-max"""
curr_chrom_data = df[df["Chromosome"] == chromosome]
y_max = float(curr_chrom_data["Value"].max())
fig = px.scatter(
curr_chrom_data,
x='Window',
y='Value',
category_orders={"Sample": samples},
color='Sample',
color_discrete_sequence=colors,
height=500,
)
fig.update_layout(
font=dict(
size=font_size,
family=font_family,
),
legend=dict(
itemsizing='trace',
orientation="h",
xanchor="left",
x=0,
y=1.02,
yanchor="bottom",
),
showlegend=True,
template=chosen_template,
title_x=0.5,
)
fig.update_xaxes(
title="Position",
rangemode='tozero',
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
title="Value",
range=[0, y_max],
fixedrange=True,
showgrid=yaxis_gridlines,
)
fig.update_traces(
marker=dict(size=float(marker_width)),
)
return fig | 9d49d8dc74e140dedc931de22b00c84e695966b5 | 3,655,046 |
def _findall_rmaps_using_reference(filename, observatory="hst"):
"""Return the basename of all reference mappings which mention `filename`."""
return uses_files([filename], observatory, "rmap") | 7dc7a67fb890681d4c94996f4fede550a1010ff5 | 3,655,047 |
import numpy as np
def batchGD_bp(X, y, d=3, nH=10, c=3, lr=0.8, T=100, eps=0.0):
"""
    Backpropagation (BP) algorithm; every epoch uses all training samples (batch gradient descent).
    :param X: feature matrix of the training samples
    :param y: label matrix (one-hot) of the training samples
    :param d: feature dimension of the training samples
    :param nH: number of hidden-layer nodes
    :param c: number of classes
    :param lr: learning rate
    :param T: stopping condition 1 (maximum number of training epochs)
    :param eps: stopping condition 2 (maximum allowed difference between the losses of two consecutive epochs);
                setting it to 0.0 disables this condition
    :return:
"""
    W_H = np.random.normal(size=(nH, d))  # np.random.random(size=(nH, d))  # uniform distribution over [0.0, 1.0)
b_H = np.array([0.0] * nH).reshape(nH, 1)
W_c = np.random.normal(size=(c, nH))
b_c = np.array([0.0] * c).reshape(c, 1)
Loss = []; loss = 0; false_num = []
for t in range(T):
loss_last = loss
y_ = []
for idx, x in enumerate(X):
            ## Forward pass
x = x.reshape(d, 1)
net_H = np.dot(W_H, x) + b_H
z_H = np.tanh(net_H)
net = np.dot(W_c, z_H) + b_c
z = sigmoid(net)
y_.append(z.argmax())
y_x = y[idx].reshape(d, 1)
loss = 0.5 * np.sum(np.square(y_x - z))
            ## Error backpropagation
            # Output layer
delta_c = z * (1 - z) * (z - y_x) # element-wise
grad_Wc = np.dot(delta_c, np.transpose(z_H))
grad_bc = delta_c
W_c -= lr * grad_Wc
b_c -= lr * grad_bc
            # Hidden layer
delta_H = (1 - np.square(z_H)) * (np.dot(np.transpose(W_c), delta_c))
grad_WH = np.dot(delta_H, np.transpose(x))
grad_bH = delta_H
W_H -= lr * grad_WH
b_H -= lr * grad_bH
Loss.append(loss)
        ## Count the misclassified samples after this epoch
y_ = np.array(y_).reshape((30,))
tOf = (np.argmax(y, axis=1) == y_)
false_num.append(np.where(tOf == False)[0].shape[0])
        if false_num[-1] == 0: # or abs(loss_last - loss) <= eps: # stopping condition
return t, Loss, false_num
return T, Loss, false_num | ef20400eb0f1012832780eddf73e7fec34579cd9 | 3,655,048 |
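
A training sketch on synthetic data; the implementation above hard-codes 30 samples (the reshape((30,)) call), assumes a sigmoid helper in the same module, and expects one-hot labels:
np.random.seed(0)
X = np.vstack([np.random.randn(10, 3) + mu
               for mu in ([0, 0, 0], [4, 4, 4], [-4, 4, 0])])  # 30 samples, 3 features
y = np.zeros((30, 3))
y[:10, 0] = y[10:20, 1] = y[20:, 2] = 1                        # one-hot labels
epochs, losses, errors = batchGD_bp(X, y, d=3, nH=10, c=3, lr=0.5, T=200)
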
def __slicer(my_str, sub):
"""
Remove everything in a string before a specified substring is found.
    If the substring is not found, the original string is returned unchanged.
https://stackoverflow.com/questions/33141595/how-can-i-remove-everything-in-a-string-until-a-characters-are-seen-in-python
Args:
my_str (string): the string to slice.
sub (string): the substring to stop slicing at.
Returns:
str: substring of my_str, without everything before sub.
"""
index = my_str.find(sub)
if index != -1:
return my_str[index:]
else:
# raise Exception('Sub string not found!')
return my_str | 50f9ef952ee2f9319c39948505852a209e434690 | 3,655,050 |
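
A quick example of __slicer():
print(__slicer("path/to/archive.tar.gz", ".tar"))  # ".tar.gz"
print(__slicer("no match here", ".tar"))           # input returned unchanged
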
def load_nii(src_path, as_array=False, as_numpy=False):
"""
Load a brain from a nifti file
    :param str src_path: The path to the nifti file on the filesystem
    :param bool as_array: Whether to convert the brain to a numpy array or
        keep it as a nifti object
:param bool as_numpy: Whether to convert the image to a numpy array in
memory (rather than a memmap)
:return: The loaded brain (format depends on the above flag)
"""
src_path = str(src_path)
nii_img = nib.load(src_path)
if as_array:
image = nii_img.get_data()
if as_numpy:
image = np.array(image)
return image
else:
return nii_img | a6f34c9164476245a5e9754730bfe748fbe80f5e | 3,655,051 |
import pandapower as pp
def case34_3ph():
"""
    Create the IEEE 34-bus system from the IEEE PES Test Feeders:
    "https://site.ieee.org/pes-testfeeders/resources/".
OUTPUT:
**net** - The pandapower format network.
"""
net = pp.create_empty_network()
# Linedata
# CF-300
line_data = {'c_nf_per_km': 3.8250977, 'r_ohm_per_km': 0.69599766,
'x_ohm_per_km': 0.5177677,
'c0_nf_per_km': 1.86976748, 'r0_ohm_per_km': 1.08727498,
'x0_ohm_per_km': 1.47374703,
'max_i_ka': 0.23, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-300', element='line')
# CF-301
line_data = {'c_nf_per_km': 3.66884364, 'r_ohm_per_km': 1.05015841,
'x_ohm_per_km': 0.52265586,
'c0_nf_per_km': 1.82231544, 'r0_ohm_per_km': 1.48350255,
'x0_ohm_per_km': 1.60203942,
'max_i_ka': 0.18, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-301', element='line')
# CF-302
line_data = {'c_nf_per_km': 0.8751182, 'r_ohm_per_km': 0.5798427,
'x_ohm_per_km': 0.30768221,
'c0_nf_per_km': 0.8751182, 'r0_ohm_per_km': 0.5798427,
'x0_ohm_per_km': 0.30768221,
'max_i_ka': 0.14, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-302', element='line')
# CF-303
line_data = {'c_nf_per_km': 0.8751182, 'r_ohm_per_km': 0.5798427,
'x_ohm_per_km': 0.30768221,
'c0_nf_per_km': 0.8751182, 'r0_ohm_per_km': 0.5798427,
'x0_ohm_per_km': 0.30768221,
'max_i_ka': 0.14, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-303', element='line')
# CF-304
line_data = {'c_nf_per_km': 0.90382554, 'r_ohm_per_km': 0.39802955,
'x_ohm_per_km': 0.29436416,
'c0_nf_per_km': 0.90382554, 'r0_ohm_per_km': 0.39802955,
'x0_ohm_per_km': 0.29436416,
'max_i_ka': 0.18, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-304', element='line')
# Busses
bus0 = pp.create_bus(net, name='Bus 0', vn_kv=24.9, type='n', zone='34_BUS')
bus_800 = pp.create_bus(net, name='Bus 800', vn_kv=24.9, type='n', zone='34_BUS')
bus_802 = pp.create_bus(net, name='Bus 802', vn_kv=24.9, type='n', zone='34_BUS')
bus_806 = pp.create_bus(net, name='Bus 806', vn_kv=24.9, type='n', zone='34_BUS')
bus_808 = pp.create_bus(net, name='Bus 808', vn_kv=24.9, type='n', zone='34_BUS')
bus_810 = pp.create_bus(net, name='Bus 810', vn_kv=24.9, type='n', zone='34_BUS')
bus_812 = pp.create_bus(net, name='Bus 812', vn_kv=24.9, type='n', zone='34_BUS')
bus_814 = pp.create_bus(net, name='Bus 814', vn_kv=24.9, type='n', zone='34_BUS')
bus_850 = pp.create_bus(net, name='Bus 850', vn_kv=24.9, type='n', zone='34_BUS')
bus_816 = pp.create_bus(net, name='Bus 816', vn_kv=24.9, type='n', zone='34_BUS')
bus_818 = pp.create_bus(net, name='Bus 818', vn_kv=24.9, type='n', zone='34_BUS')
bus_820 = pp.create_bus(net, name='Bus 820', vn_kv=24.9, type='n', zone='34_BUS')
bus_822 = pp.create_bus(net, name='Bus 822', vn_kv=24.9, type='n', zone='34_BUS')
bus_824 = pp.create_bus(net, name='Bus 824', vn_kv=24.9, type='n', zone='34_BUS')
bus_826 = pp.create_bus(net, name='Bus 826', vn_kv=24.9, type='n', zone='34_BUS')
bus_828 = pp.create_bus(net, name='Bus 828', vn_kv=24.9, type='n', zone='34_BUS')
bus_830 = pp.create_bus(net, name='Bus 830', vn_kv=24.9, type='n', zone='34_BUS')
bus_854 = pp.create_bus(net, name='Bus 854', vn_kv=24.9, type='n', zone='34_BUS')
bus_852 = pp.create_bus(net, name='Bus 852', vn_kv=24.9, type='n', zone='34_BUS')
bus_832 = pp.create_bus(net, name='Bus 832', vn_kv=24.9, type='n', zone='34_BUS')
bus_858 = pp.create_bus(net, name='Bus 858', vn_kv=24.9, type='n', zone='34_BUS')
bus_834 = pp.create_bus(net, name='Bus 834', vn_kv=24.9, type='n', zone='34_BUS')
bus_842 = pp.create_bus(net, name='Bus 842', vn_kv=24.9, type='n', zone='34_BUS')
bus_844 = pp.create_bus(net, name='Bus 844', vn_kv=24.9, type='n', zone='34_BUS')
bus_846 = pp.create_bus(net, name='Bus 846', vn_kv=24.9, type='n', zone='34_BUS')
bus_848 = pp.create_bus(net, name='Bus 848', vn_kv=24.9, type='n', zone='34_BUS')
bus_860 = pp.create_bus(net, name='Bus 860', vn_kv=24.9, type='n', zone='34_BUS')
bus_836 = pp.create_bus(net, name='Bus 836', vn_kv=24.9, type='n', zone='34_BUS')
bus_840 = pp.create_bus(net, name='Bus 840', vn_kv=24.9, type='n', zone='34_BUS')
bus_862 = pp.create_bus(net, name='Bus 862', vn_kv=24.9, type='n', zone='34_BUS')
bus_838 = pp.create_bus(net, name='Bus 838', vn_kv=24.9, type='n', zone='34_BUS')
bus_864 = pp.create_bus(net, name='Bus 864', vn_kv=24.9, type='n', zone='34_BUS')
bus_888 = pp.create_bus(net, name='Bus 888', vn_kv=4.16, type='n', zone='34_BUS')
bus_890 = pp.create_bus(net, name='Bus 890', vn_kv=4.16, type='n', zone='34_BUS')
bus_856 = pp.create_bus(net, name='Bus 856', vn_kv=24.9, type='n', zone='34_BUS')
# Lines
pp.create_line(net, bus_800, bus_802, length_km=0.786384, std_type='CF-300',
name='Line 0')
pp.create_line(net, bus_802, bus_806, length_km=0.527304, std_type='CF-300',
name='Line 1')
pp.create_line(net, bus_806, bus_808, length_km=9.823704, std_type='CF-300',
name='Line 2')
pp.create_line(net, bus_808, bus_810, length_km=1.769059, std_type='CF-303',
name='Line 3')
pp.create_line(net, bus_808, bus_812, length_km=11.43000, std_type='CF-300',
name='Line 4')
pp.create_line(net, bus_812, bus_814, length_km=9.061704, std_type='CF-300',
name='Line 5')
# pp.create_line(net, bus_814, bus_850, length_km=0.003048, std_type='CF-301',
# name='Line 6')
pp.create_line(net, bus_816, bus_818, length_km=0.521208, std_type='CF-302',
name='Line 7')
pp.create_line(net, bus_816, bus_824, length_km=3.112008, std_type='CF-301',
name='Line 8')
pp.create_line(net, bus_818, bus_820, length_km=14.67612, std_type='CF-302',
name='Line 9')
pp.create_line(net, bus_820, bus_822, length_km=4.187952, std_type='CF-302',
name='Line 10')
pp.create_line(net, bus_824, bus_826, length_km=0.923544, std_type='CF-303',
name='Line 11')
pp.create_line(net, bus_824, bus_828, length_km=0.256032, std_type='CF-301',
name='Line 12')
pp.create_line(net, bus_828, bus_830, length_km=6.230112, std_type='CF-301',
name='Line 13')
pp.create_line(net, bus_830, bus_854, length_km=0.158496, std_type='CF-301',
name='Line 14')
pp.create_line(net, bus_832, bus_858, length_km=1.493520, std_type='CF-301',
name='Line 15')
pp.create_line(net, bus_834, bus_860, length_km=0.615696, std_type='CF-301',
name='Line 16')
pp.create_line(net, bus_834, bus_842, length_km=0.085344, std_type='CF-301',
name='Line 17')
pp.create_line(net, bus_836, bus_840, length_km=0.262128, std_type='CF-301',
name='Line 18')
pp.create_line(net, bus_836, bus_862, length_km=0.085344, std_type='CF-301',
name='Line 19')
pp.create_line(net, bus_842, bus_844, length_km=0.411480, std_type='CF-301',
name='Line 20')
pp.create_line(net, bus_844, bus_846, length_km=1.109472, std_type='CF-301',
name='Line 21')
pp.create_line(net, bus_846, bus_848, length_km=0.161544, std_type='CF-301',
name='Line 22')
pp.create_line(net, bus_850, bus_816, length_km=0.094488, std_type='CF-301',
name='Line 23')
# pp.create_line(net, bus_852, bus_832, length_km=0.003048, std_type='CF-301',
# name='Line 24')
pp.create_line(net, bus_854, bus_856, length_km=7.110984, std_type='CF-303',
name='Line 25')
pp.create_line(net, bus_854, bus_852, length_km=11.22578, std_type='CF-301',
name='Line 26')
pp.create_line(net, bus_858, bus_864, length_km=0.493776, std_type='CF-302',
name='Line 27')
pp.create_line(net, bus_858, bus_834, length_km=1.776984, std_type='CF-301',
name='Line 28')
pp.create_line(net, bus_860, bus_836, length_km=0.816864, std_type='CF-301',
name='Line 29')
pp.create_line(net, bus_860, bus_838, length_km=1.481328, std_type='CF-304',
name='Line 30')
pp.create_line(net, bus_888, bus_890, length_km=3.218688, std_type='CF-300',
name='Line 31')
# Substation
pp.create_transformer_from_parameters(net, bus0, bus_800, sn_mva=2.5, vn_hv_kv=24.9,
vn_lv_kv=24.9, vkr_percent=1.0, vk_percent=8.062257,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
vkr0_percent=1.0, vk0_percent=8.062257, vector_group='YNyn',
mag0_percent=100.0, mag0_rx=0, si0_hv_partial=0.9,
tap_side='lv', tap_neutral=0, tap_max=2, tap_min=-2,
tap_step_percent=2.5, tap_pos=-2,
name='Substation')
# Regulator 1
pp.create_transformer_from_parameters(net, bus_814, bus_850, sn_mva=1, vn_hv_kv=24.9,
vn_lv_kv=24.9, vkr_percent=0.320088, vk_percent=0.357539,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
vkr0_percent=0.452171, vk0_percent=0.665505, vector_group='YNyn',
mag0_percent=100.0, mag0_rx=0, si0_hv_partial=0.9,
tap_side='lv', tap_neutral=0, tap_max=16, tap_min=-16,
tap_step_percent=0.625, tap_pos=0, tap_phase_shifter=False,
name='Regulator 1')
# Regulator 2
pp.create_transformer_from_parameters(net, bus_852, bus_832, sn_mva=1, vn_hv_kv=24.9,
vn_lv_kv=24.9, vkr_percent=0.320088, vk_percent=0.357539,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
vkr0_percent=0.452171, vk0_percent=0.665505, vector_group='YNyn',
mag0_percent=100.0, mag0_rx=0, si0_hv_partial=0.9,
tap_side='lv', tap_neutral=0, tap_max=16, tap_min=-16,
tap_step_percent=0.625, tap_pos=0, tap_phase_shifter=False,
name='Regulator 2')
    # Transformer
pp.create_transformer_from_parameters(net, bus_832, bus_888, sn_mva=0.5, vn_hv_kv=24.9,
vn_lv_kv=4.16, vkr_percent=1.9, vk_percent=4.5,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
vkr0_percent=1.9, vk0_percent=4.5, vector_group='YNyn',
mag0_percent=100.0, mag0_rx=0, si0_hv_partial=0.9,
                                          name='Transformer')
# Loads
pp.create_asymmetric_load(net, bus_806, p_a_mw=0, p_b_mw=0.03, p_c_mw=0.025,
q_a_mvar=0, q_b_mvar=0.015, q_c_mvar=0.014, name='Load 806', type='wye')
pp.create_asymmetric_load(net, bus_810, p_a_mw=0, p_b_mw=0.016, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.008, q_c_mvar=0, name='Load 810', type='wye')
pp.create_asymmetric_load(net, bus_820, p_a_mw=0.034, p_b_mw=0, p_c_mw=0,
q_a_mvar=0.017, q_b_mvar=0, q_c_mvar=0, name='Load 820', type='wye')
pp.create_asymmetric_load(net, bus_822, p_a_mw=0.135, p_b_mw=0, p_c_mw=0,
q_a_mvar=0.07, q_b_mvar=0, q_c_mvar=0, name='Load 822', type='wye')
pp.create_asymmetric_load(net, bus_824, p_a_mw=0, p_b_mw=0.005, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.002, q_c_mvar=0, name='Load 824', type='delta')
pp.create_asymmetric_load(net, bus_826, p_a_mw=0, p_b_mw=0.04, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.02, q_c_mvar=0, name='Load 826', type='wye')
pp.create_asymmetric_load(net, bus_828, p_a_mw=0, p_b_mw=0, p_c_mw=0.004,
q_a_mvar=0, q_b_mvar=0, q_c_mvar=0.002, name='Load 828', type='wye')
pp.create_asymmetric_load(net, bus_830, p_a_mw=0.007, p_b_mw=0, p_c_mw=0,
q_a_mvar=0.003, q_b_mvar=0, q_c_mvar=0, name='Load 830', type='wye')
pp.create_asymmetric_load(net, bus_856, p_a_mw=0, p_b_mw=0.004, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.002, q_c_mvar=0, name='Load 856', type='wye')
pp.create_asymmetric_load(net, bus_858, p_a_mw=0.007, p_b_mw=0.002, p_c_mw=0.006,
q_a_mvar=0.003, q_b_mvar=0.001, q_c_mvar=0.003, name='Load 858', type='delta')
pp.create_asymmetric_load(net, bus_864, p_a_mw=0.002, p_b_mw=0, p_c_mw=0,
q_a_mvar=0.001, q_b_mvar=0, q_c_mvar=0, name='Load 864', type='wye')
pp.create_asymmetric_load(net, bus_834, p_a_mw=0.004, p_b_mw=0.015, p_c_mw=0.013,
q_a_mvar=0.002, q_b_mvar=0.008, q_c_mvar=0.007, name='Load 834', type='delta')
pp.create_asymmetric_load(net, bus_860, p_a_mw=0.016, p_b_mw=0.02, p_c_mw=0.11,
q_a_mvar=0.008, q_b_mvar=0.01, q_c_mvar=0.055, name='Load 860', type='delta')
pp.create_asymmetric_load(net, bus_836, p_a_mw=0.03, p_b_mw=0.01, p_c_mw=0.042,
q_a_mvar=0.015, q_b_mvar=0.006, q_c_mvar=0.022, name='Load 836', type='delta')
pp.create_asymmetric_load(net, bus_840, p_a_mw=0.018, p_b_mw=0.022, p_c_mw=0,
q_a_mvar=0.009, q_b_mvar=0.011, q_c_mvar=0, name='Load 840', type='delta')
pp.create_asymmetric_load(net, bus_838, p_a_mw=0, p_b_mw=0.028, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.014, q_c_mvar=0, name='Load 838', type='wye')
pp.create_asymmetric_load(net, bus_844, p_a_mw=0.009, p_b_mw=0, p_c_mw=0,
q_a_mvar=0.005, q_b_mvar=0, q_c_mvar=0, name='Load 844', type='wye')
pp.create_asymmetric_load(net, bus_846, p_a_mw=0, p_b_mw=0.025, p_c_mw=0.012,
q_a_mvar=0, q_b_mvar=0.02, q_c_mvar=0.011, name='Load 846', type='wye')
pp.create_asymmetric_load(net, bus_848, p_a_mw=0, p_b_mw=0.023, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.011, q_c_mvar=0, name='Load 848', type='wye')
pp.create_asymmetric_load(net, bus_860, p_a_mw=0.02, p_b_mw=0.02, p_c_mw=0.02,
q_a_mvar=0.016, q_b_mvar=0.016, q_c_mvar=0.016, name='Load 860 spot', type='wye')
pp.create_asymmetric_load(net, bus_840, p_a_mw=0.009, p_b_mw=0.009, p_c_mw=0.009,
q_a_mvar=0.007, q_b_mvar=0.007, q_c_mvar=0.007, name='Load 840 spot', type='wye')
pp.create_asymmetric_load(net, bus_844, p_a_mw=0.135, p_b_mw=0.135, p_c_mw=0.135,
q_a_mvar=0.105, q_b_mvar=0.105, q_c_mvar=0.105, name='Load 844 spot', type='wye')
pp.create_asymmetric_load(net, bus_848, p_a_mw=0.02, p_b_mw=0.02, p_c_mw=0.02,
q_a_mvar=0.016, q_b_mvar=0.016, q_c_mvar=0.016, name='Load 848 spot', type='delta')
pp.create_asymmetric_load(net, bus_890, p_a_mw=0.15, p_b_mw=0.15, p_c_mw=0.15,
q_a_mvar=0.075, q_b_mvar=0.075, q_c_mvar=0.075, name='Load 890 spot', type='delta')
pp.create_asymmetric_load(net, bus_830, p_a_mw=0.01, p_b_mw=0.01, p_c_mw=0.025,
q_a_mvar=0.005, q_b_mvar=0.005, q_c_mvar=0.01, name='Load 830 spot', type='delta')
# External grid
pp.create_ext_grid(net, bus0, vm_pu=1.0, va_degree=0.0, s_sc_max_mva=10.0,
s_sc_min_mva=10.0, rx_max=1, rx_min=1, r0x0_max=1, x0x_max=1)
# Distributed generators
pp.create_sgen(net, bus_848, p_mw=0.66, q_mvar=0.500, name='DG 1', max_p_mw=0.66, min_p_mw=0, max_q_mvar=0.5, min_q_mvar=0)
pp.create_sgen(net, bus_890, p_mw=0.50, q_mvar=0.375, name='DG 2', max_p_mw=0.50, min_p_mw=0, max_q_mvar=0.375, min_q_mvar=0)
pp.create_sgen(net, bus_822, p_mw=0.2, type='PV', name='PV 1', max_p_mw=0.2, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_856, p_mw=0.2, type='PV', name='PV 2', max_p_mw=0.2, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_838, p_mw=0.2, type='PV', name='PV 3', max_p_mw=0.2, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_822, p_mw=0.3, type='WP', name='WP 1', max_p_mw=0.3, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_826, p_mw=0.3, type='WP', name='WP 2', max_p_mw=0.3, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_838, p_mw=0.3, type='WP', name='WP 3', max_p_mw=0.3, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
    # Shunt capacitor bank
pp.create_shunt(net, bus_840, q_mvar=-0.12, name='SCB 1', step=4, max_step=4)
pp.create_shunt(net, bus_864, q_mvar=-0.12, name='SCB 2', step=4, max_step=4)
# storage
pp.create_storage(net, bus_810, p_mw=0.2, max_e_mwh=1.0, sn_mva=1.0, soc_percent=50, min_e_mwh=0.2, name='Storage')
pp.add_zero_impedance_parameters(net)
return net | f65a01b8d52fc829b368de1414c3dd7df29bff76 | 3,655,052 |
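# Usage sketch for the feeder built above. The builder's public name is not shown in
# this excerpt, so `create_ieee34_net()` is a hypothetical stand-in; the unbalanced
# power flow entry point `pp.runpp_3ph` and the `res_bus_3ph` result table are
# assumed from recent pandapower releases.
net = create_ieee34_net()           # hypothetical name for the builder above
pp.runpp_3ph(net)                   # three-phase (unbalanced) power flow
print(net.res_bus_3ph[['vm_a_pu', 'vm_b_pu', 'vm_c_pu']].round(4))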
import numpy as np
from random import uniform
from datetime import datetime
from dateutil import tz
def generate_daily_stats():
    """
    Generates dummy daily stats for one year
    """
    times = [1577836800 + (i * 86400) for i in range(0, 366)]
    stats_arr = [[]]
    for time in times:
        vals = [uniform(0, 100) for i in range(843)]
        stats_arr[0].append({
            'min': np.min(vals),
            'max': np.max(vals),
            'mean': np.mean(vals),
            'cnt': 843,
            'std': np.std(vals),
            'time': int(time),
            # dateutil's tz module is not callable; tz.gettz('UTC') returns the UTC tzinfo
            'iso_time': datetime.utcfromtimestamp(int(time)).replace(tzinfo=tz.gettz('UTC')).strftime('%Y-%m-%dT%H:%M:%S%z')
        })
    clim_stats = {datetime.utcfromtimestamp(result['time']).month: result for result in stats_arr[0]}
return stats_arr, clim_stats | fc1e172ed0eb3bc9711a1b9e38668d46e3939f9b | 3,655,053 |
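# Usage sketch: build the dummy year of stats and inspect the first record and the
# per-month dictionary returned above.
stats_arr, clim_stats = generate_daily_stats()
print(len(stats_arr[0]))                  # 366 daily records
print(stats_arr[0][0]['iso_time'], round(stats_arr[0][0]['mean'], 2))
print(sorted(clim_stats.keys()))          # months 1..12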
from PIL import Image, ImageDraw
def create_mask(imsize: tuple, bbox: tuple) -> Image.Image:
"""
Args:
imsize: (w, h)
bboxes: (x0, y0, x1, y1)
"""
mask = Image.new("L", imsize)
draw = ImageDraw.Draw(mask)
draw.rectangle(bbox, fill=255)
return mask | 5064ab08e27725211796967ca26e10760b2ec45f | 3,655,054 |
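# Usage sketch: a 640x480 single-channel mask with one filled rectangle.
mask = create_mask((640, 480), (50, 50, 150, 150))
mask.save('mask.png')                     # white box on a black background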
def build_resolved_spec(api, spec_lookup, cache, force_build, spec, version,
ecosystem_hash):
"""Builds a resolved spec at a specific version, then uploads it.
Args:
* api - The ThirdPartyPackagesNGApi's `self.m` module collection.
* spec_lookup ((package_name, platform) -> ResolvedSpec) - A function to
lookup (possibly cached) ResolvedSpec's for things like dependencies and
tools.
* cache (dict) - A map of (package_name, version, platform) -> CIPDSpec.
The `build_resolved_spec` function fully manages the content of this
dictionary.
* force_build (bool) - If True, don't consult CIPD server to see if the
package is already built. This also disables uploading the source and
built results, to avoid attempting to upload a duplicately-tagged package.
* spec (ResolvedSpec) - The resolved spec to build.
* version (str) - The symver (or 'latest') version of the package to build.
* ecosystem_hash(str) - If specified, tells 3pp hash used for this build.
Returns the CIPDSpec of the built package; If the package already existed on
the remote server, it will return the CIPDSpec immediately (without attempting
to build anything).
"""
keys = [(spec.cipd_pkg_name, version, spec.platform)]
if keys[0] in cache:
return cache[keys[0]]
def set_cache(spec):
for k in keys:
cache[k] = spec
return spec
with api.step.nest('building %s' % (spec.cipd_pkg_name,)):
env = {
'_3PP_PLATFORM': spec.platform,
'_3PP_TOOL_PLATFORM': spec.tool_platform,
'_3PP_CIPD_PACKAGE_NAME': spec.cipd_pkg_name,
# CIPD uses 'mac' instead of 'darwin' for historical reasons.
'GOOS': spec.platform.split('-')[0].replace('mac', 'darwin'),
# CIPD encodes the GOARCH/GOARM pair of ('arm', '6') as 'armv6l'.
# Since GOARCH=6 is the default, we don't need to specify it.
'GOARCH': spec.platform.split('-')[1].replace('armv6l', 'arm'),
}
if spec.platform.startswith('mac-'):
if spec.platform == 'mac-arm64':
# ARM64 support is added in macOS 11.
env['MACOSX_DEPLOYMENT_TARGET'] = '11.0'
# Mac builds don't use Docker/Dockcross, so we handle cross-build
# setup here. Setting CCC_OVERRIDE_OPTIONS passes the target to
# Clang globally, so we don't need to plumb it through each individual
# install script. We use '^' to indicate this option is inserted at
# the beginning of the compiler options list -- this gives the ability
# to override it later if needed.
if resolved_spec.platform_for_host(api) != spec.platform:
env['CROSS_TRIPLE'] = 'aarch64-apple-darwin'
env['CCC_OVERRIDE_OPTIONS'] = '^--target=arm64-apple-macos'
else:
# Make sure to clear these options if not cross-compiling, since
# we may be switching back and forth between building host tools
# and target-platform binaries.
env.pop('CROSS_TRIPLE', None)
env.pop('CCC_OVERRIDE_OPTIONS', None)
else:
env['MACOSX_DEPLOYMENT_TARGET'] = '10.10'
if spec.create_pb.source.patch_version:
env['_3PP_PATCH_VERSION'] = spec.create_pb.source.patch_version
with api.context(env=env):
# Resolve 'latest' versions. Done inside the env because 'script' based
# sources need the $_3PP* envvars.
is_latest = version == 'latest'
git_hash = ''
if is_latest:
version, git_hash = source.resolve_latest(api, spec)
keys.append((spec.cipd_pkg_name, version, spec.platform))
if keys[-1] in cache:
return set_cache(cache[keys[-1]])
cipd_spec = spec.cipd_spec(version)
# See if the specific version is uploaded
if force_build or not cipd_spec.exists_in_cipd():
# Otherwise, build it
_build_impl(
api, cipd_spec, is_latest, spec_lookup, force_build,
(lambda spec, version: build_resolved_spec(
api, spec_lookup, cache, force_build, spec, version,
ecosystem_hash)),
spec, version, git_hash, ecosystem_hash)
return set_cache(cipd_spec) | 789cb1f0492b73af763778d35ed095f7d0a4799c | 3,655,055 |
def set_diff(seq0, seq1):
"""Return the set difference between 2 sequences as a list."""
return list(set(seq0) - set(seq1)) | ff10464acc65b60e9355e8971c45fbca8025fda6 | 3,655,056 |
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode):
"""Loads a data file into a list of input features."""
'''
output_mode: classification or regression
'''
if (label_list != None):
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert(len(input_ids) == max_seq_length)
assert(len(input_mask) == max_seq_length)
assert(len(segment_ids) == max_seq_length)
if (label_list != None):
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
else:
label_id = None
features.append(
InputFeatures(tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features | 0d043a98e1a2e2159c0b0653266d65788284fa39 | 3,655,057 |
from .parameter import ModelParameters
import multiprocessing as mp
from .neural import test_dataset
def test_set_wrapper():
"""
Wrapper function to compute test datasets of fixed widths using multiprocessing.
Widths are defined in the parameter file.
Ouputs are in Neural/ folder
"""
a = ModelParameters()
widths = a.test_sigma_widths
    p = mp.Pool()  # Do all 5 in parallel
    # imap_unordered is lazy and would never run unless its iterator is consumed;
    # map blocks until every width has been processed.
    p.map(test_dataset, widths)
    p.close()
    p.join()
return None | fe6bf41681f1a1344cffcc15d41e77b3f46c3c46 | 3,655,058 |
from typing import List
def get_unapproved_csr_names(kubeconfig_path: str) -> List[str]:
"""
Returns a list of names of all CertificateSigningRequest resources which
are unapproved.
May raise SubprocessError
"""
return [
csr["metadata"]["name"]
for csr in oc_list(kubeconfig_path, "csr")
if not _has_condition(resource=csr, type="Approved", status="True")
] | 287674bb774982b0fc7ab17edeff9c12ea92b7cd | 3,655,059 |
import numpy as np
def function(n, m, f):
    """Assumes that n = m = 1. The argument f is a Python function that takes as input an n-bit string alpha and
    returns as output an m-bit string f(alpha). See deutschTest for examples of f. This function returns the (n +
    m)-qbit gate F that corresponds to f. The helpers `string`, `integer`, `addition` and the state `one` are
    assumed to be defined elsewhere in the module. """
    F = np.zeros((2**(n+m), 2**(n+m)), dtype=one.dtype)
    for a in range(0, 2**n):
        for b in range(0, 2**m):
            alpha = string(n, a)
            beta = string(m, b)
            beta_new = addition(beta, f(alpha))
            row_bits = alpha + beta_new
            col_bits = alpha + beta
            F[integer(row_bits)][integer(col_bits)] = 1 + 0j
return F | f77fe869e278ede37f477b2479012861b0bb5638 | 3,655,060 |
def get_life_stages(verbose: bool = False) -> pd.DataFrame:
"""Get table of life stages.
Parameters
----------
verbose : bool
If True, prints the SQL statement used to query the database.
Returns
-------
pandas DataFrame
"""
return __basic_query(LifeStage, verbose=verbose) | 316bbfedd4d550c5bac0bdaf9d1e04bbea6a378e | 3,655,062 |
def bytes_to(value_in_bytes: float, rnd: int | None = ...) -> str:
"""
:param value_in_bytes: the value in bytes to convert
:param rnd: number of digits to round to
:return: formatted string
"""
sizes = ["bytes", "KB", "MB", "GB", "TB"]
    now = 0  # index into the sizes list
while value_in_bytes > 1024:
value_in_bytes /= 1024
now += 1
if rnd is not ...:
value_in_bytes = round(value_in_bytes, rnd)
return f"{value_in_bytes} {sizes[now]}" | fc2bf917fe7520780b84c6ad3ce572e0ed0341ae | 3,655,063 |
import json
def lambda_handler(event, context):
"""Main Function"""
page_iterator = PAGINATOR.paginate(**OPERATION_PARAMETERS)
for page in page_iterator:
functions = page['Functions']
for function in functions:
funct = {
"Name": function['FunctionName'],
"Version": function['Version'],
"CodeSize": function['CodeSize']
}
funct = json.dumps(funct)
ALL_FUNCTIONS.add(funct)
total = 0
for i in sorted(ALL_FUNCTIONS):
i = json.loads(i)
print("{function:48}:{version:8} {size:,.2f}".format(
function=i['Name'], version=i['Version'], size=i['CodeSize']))
total += i['CodeSize']
# Convert bytes to MB
total = total / 1024 / 1024
data = "Lambda code storage: {}".format(str(total))
print(data)
return {
'statusCode': 200,
'body': json.dumps(data)
} | 3ff4796b5adc4de1c91ab6a72ea8f5426fcbf7c9 | 3,655,064 |
def _find_first_print(body):
""" This function finds the first print of something """
for (i, inst) in enumerate(body):
if isinstance(inst, ir.Print):
return i
return -1 | 863490b6fdca04fd093c72c8fc098b90dde6c946 | 3,655,065 |
from random import seed, sample
def list_mix(set_key, encoding, in_set = ""):
""" Returns: Seeded Random Shuffle of Input Set by Input Key. """
if in_set == "": char_set = list(encoding["set"])
else: char_set = in_set
seed(set_key)
return sample(char_set, len(char_set)) | cd672d19e252bb8f6bf717cddfc9cc1b89e3ba38 | 3,655,066 |
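# Usage sketch: a reproducible, key-seeded shuffle. The shape of the `encoding`
# mapping (a dict with a 'set' key) is inferred from the function body above.
print(list_mix("my-secret-key", {"set": "abcdef"}))   # same ordering for the same key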
import cupy
def leslie(f, s):
"""Create a Leslie matrix.
Given the length n array of fecundity coefficients ``f`` and the length n-1
array of survival coefficients ``s``, return the associated Leslie matrix.
Args:
f (cupy.ndarray): The "fecundity" coefficients.
s (cupy.ndarray): The "survival" coefficients, has to be 1-D. The
length of ``s`` must be one less than the length of ``f``, and it
must be at least 1.
Returns:
cupy.ndarray: The array is zero except for the first row, which is
``f``, and the first sub-diagonal, which is ``s``. The data-type of
the array will be the data-type of ``f[0]+s[0]``.
.. seealso:: :func:`scipy.linalg.leslie`
"""
if f.ndim != 1:
raise ValueError('Incorrect shape for f. f must be 1D')
if s.ndim != 1:
raise ValueError('Incorrect shape for s. s must be 1D')
n = f.size
if n != s.size + 1:
raise ValueError('Length of s must be one less than length of f')
if s.size == 0:
raise ValueError('The length of s must be at least 1.')
a = cupy.zeros((n, n), dtype=cupy.result_type(f, s))
a[0] = f
cupy.fill_diagonal(a[1:], s)
return a | 65b37856e5e4db4d89574a08b91424f75ad424d1 | 3,655,067 |
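# Usage sketch (mirrors the scipy.linalg.leslie docstring example, with CuPy arrays):
f = cupy.array([0.1, 2.0, 1.0, 0.1])
s = cupy.array([0.2, 0.8, 0.7])
print(leslie(f, s))
# [[0.1 2.  1.  0.1]
#  [0.2 0.  0.  0. ]
#  [0.  0.8 0.  0. ]
#  [0.  0.  0.7 0. ]]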
from datetime import datetime
def get_clustermgtd_heartbeat(clustermgtd_heartbeat_file_path):
"""Get clustermgtd's last heartbeat."""
# Use subprocess based method to read shared file to prevent hanging when NFS is down
# Do not copy to local. Different users need to access the file, but file should be writable by root only
# Only use last line of output to avoid taking unexpected output in stdout
heartbeat = (
check_command_output(
f"cat {clustermgtd_heartbeat_file_path}",
timeout=DEFAULT_COMMAND_TIMEOUT,
shell=True, # nosec
)
.splitlines()[-1]
.strip()
)
# Note: heartbeat must be written with datetime.strftime to convert localized datetime into str
# datetime.strptime will not work with str(datetime)
# Example timestamp written to heartbeat file: 2020-07-30 19:34:02.613338+00:00
return datetime.strptime(heartbeat, TIMESTAMP_FORMAT) | 307baba7c399e8c1277622c5cd2bd2f613ba1974 | 3,655,070 |
def logout():
"""
Logs out user by deleting token cookie and redirecting to login page
"""
APP.logger.info('Logging out.')
resp = make_response(redirect(url_for('login_page',
_external=True,
_scheme=APP.config['SCHEME'])))
resp.delete_cookie(APP.config['TOKEN_NAME'])
return resp | ab10628ec8e9b7a70edfbdc2f511214df7fcdfc9 | 3,655,071 |
import numpy as np
import pandas as pd
def ATR(df, n, high_column='High', low_column='Low', close_column='Close',
join=None, dropna=False, dtype=None):
"""
Average True Range
"""
high_series = df[high_column]
low_series = df[low_column]
close_prev_series = df[close_column].shift(1)
tr = np.max((
(high_series.values - low_series.values),
np.abs(high_series.values - close_prev_series.values),
np.abs(low_series.values - close_prev_series.values),
), 0)
tr = pd.Series(tr, name=type(join) is list and join[0] or join)
if len(tr) > n:
tr[n] = tr[1:n+1].mean()
nm1 = n - 1
for i in range(n+1, len(tr)):
tr[i] = (tr[i-1] * nm1 + tr[i]) / n
tr[:n] = np.nan
return out(df, tr, bool(join), dropna, dtype) | f3835e289f23c0095d9fd0563a26d4ee4e5423cf | 3,655,073 |
def get_full_history(sender, dialog_id):
"""Download the full history for the selected dialog"""
page = 0
limit = 100
history = []
print('Downloading messages...')
while True:
sleep(REQUEST_DELAY)
offset = page * limit
try:
history[0:0] = sender.history(dialog_id, limit, offset)
print('.', end=' ', flush=True)
except IllegalResponseException:
print('\n{} messages found in selected dialog'.format(len(history)))
break
page += 1
print('')
return history | 1316ebfd592404243392fdbb9e538cf818e3c6ac | 3,655,075 |
def predict_transposition_cost(shape, perm, coefs=None):
"""
Given a shape and a permutation, predicts the cost of the
transposition.
:param shape: shape
:param perm: permutation
:param coefs: trained coefficients or None to get
the default ones
:return: dictionary of features
"""
if coefs is None:
coefs = _ml_transpose_coefs
feat = compute_transposition_features(shape, perm)
res = 0
for k, v in feat.items():
res += v * coefs[k]
return max(0., res / 1000) | 502e14cfcc38357e7f915985a8fd15fb5c798bd4 | 3,655,076 |
import numpy as np
import matplotlib.pyplot as plt
def checkBuildAMR(parfile,cellfile,**kwargs):
"""
Purpose
-------
Check that BuildAMRfromParticles.f90 builds the cells around the particles
created by mkClouds.f90 in the right places.
Only cloud cells are plotted. If you want to include the field cells, in
BuildAMRfromParticles.f90's subroutine CountCells() remove the part
" .and. CurrentCell%phase.eq.1" from the if statement (and recompile).
Keywords
--------
Robs: Plot only within Robs kpc of center
Usage
-----
>>> checkBuildAMR('clouds_par.dat','clouds_cell.dat',Robs=.1)
"""
def _iltR(x,y,z,R):
return np.where((x<=R) & (y<=R) & (z<=R))
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111, projection='3d')
x,y,z = np.loadtxt(parfile,unpack=True)
print('n_par =', len(x))
if len(x) > 100000: print('WARNING: This is going to be slow!')
# ax.set_alpha(.01)
if 'Robs' in kwargs:
Robs = kwargs['Robs']
ax.set_xlim([-Robs,Robs])
ax.set_ylim([-Robs,Robs])
ax.set_zlim([-Robs,Robs])
ind = _iltR(x,y,z,Robs)
ax.scatter(x[ind],y[ind],z[ind],s=1,label='Particles')
else:
ax.scatter(x,y,z,s=1,label='Particles')
x,y,z = np.loadtxt(cellfile,unpack=True)
print('n_cell =', len(x))
if 'Robs' in kwargs:
ind = _iltR(x,y,z,Robs)
ax.scatter(x[ind],y[ind],z[ind],s=1,label='Cells (excluding ICM cells)')
else:
ax.scatter(x,y,z,s=1,label='Cells (excluding ICM cells)')
ax.legend() | 7497181ab92b723f68cb4448c0d80bfd0f21c0a2 | 3,655,077 |
import numpy as np
def dist(df):
"""
Calculate Euclidean distance on a dataframe.
Input columns are arranged as x0, x1, y0, y1.
"""
return np.sqrt((df.iloc[:,0] - df.iloc[:,2])**2 + (df.iloc[:,1] - df.iloc[:,3])**2) | 87cc27c655bce16bc42b0f715be6462af7535d19 | 3,655,078 |
def B_Calc(T, n=2):
"""
Calculate B (Constant in the mass transfer term).
:param T: cell operation temperature [K]
:type T : float
:param n: number of moles of electrons transferred in the balanced equation occurring in the fuel cell
:type n: int
:return: B as float
"""
try:
return (R * T) / (n * F)
except (TypeError, ZeroDivisionError):
return None | 1f1e8fb60797787c01bfd916b2989c1a640d0a08 | 3,655,079 |
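# Worked example, assuming the module-level constants R ≈ 8.314 J/(mol K) and
# F ≈ 96485 C/mol that the function references:
# B = R*T / (n*F) = (8.314 * 343.15) / (2 * 96485) ≈ 0.0148 V
print(B_Calc(343.15, n=2))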
import re
def get_page_likes(response):
"""Scan a page and create a dictionary of the image filenames
and displayed like count for each image. Return the
dictionary."""
# find all flowtow divs
flowtows = response.html.find_all('div', class_='flowtow')
result = dict()
for div in flowtows:
# get the filename from the form hidden input
input = div.find("input", attrs={'name': "filename"})
filename = input['value']
# find the likes element
likesel = div.find(class_='likes')
# grab the integer from this element
        m = re.search(r'\d+', likesel.text)
if m:
likes = int(m.group())
else:
likes = 0
result[filename] = likes
return result | e956e54d18d6540d1a8fd07250a5c758b696bcc5 | 3,655,080 |
import tensorflow as tf
def firm(K, eta, alpha, delta):
"""Calculate return, wage and aggregate production.
r = eta * K^(alpha-1) * L^(1-alpha) + (1-delta)
w = eta * K^(alpha) * L^(-alpha)
Y = eta * K^(alpha) * L^(1-alpha) + (1-delta) * K
Args:
K: aggregate capital,
eta: TFP value,
alpha: output elasticity,
delta: depreciation value.
Returns:
return: return (marginal product of capital),
wage: wage (marginal product of labor),
Y: aggregate production.
"""
L = tf.ones_like(K)
r = alpha * eta * K**(alpha - 1) * L**(1 - alpha) + (1 - delta)
w = (1 - alpha) * eta * K**alpha * L**(-alpha)
Y = eta * K**alpha * L**(1 - alpha) + (1 - delta) * K
return r, w, Y | 29be01360e23555d30cccca33cd556f0bd406088 | 3,655,081 |
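# Usage sketch with illustrative (not calibrated) parameters; relies on the
# tensorflow import added above.
K = tf.constant([[1.0], [2.0]])
r, w, Y = firm(K, eta=1.0, alpha=0.36, delta=0.1)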
def get_col(arr, col_name):
""" returns the column from a multi-dimensional array """
    return [row[col_name] for row in arr] | faf36e88c73a1f03efce94fd74a1d6b378f74bdb | 3,655,082
import urllib
import json
def get_articles(id):
"""function that process the articles and a list of articles objects
"""
get_articles_url = articles_url.format(id, api_key)
with urllib.request.urlopen(get_articles_url) as url:
news_article_results = json.loads(url.read())
news_article_object = None
if news_article_results['articles']:
news_article_object = process_news_source(news_article_results['articles'])
return news_article_object | 503b8260b6aeaaa526837f091c7e94687bfdd0de | 3,655,083 |
def check_slot_exist(start_time,end_time):
"""
Description:
        check_slot_exist is responsible for checking that a slot exists
before a volunteer can create it.
Parameters:
Takes two parameters of type datetime:
start_time:datetime
end_time:datetime
return:
returns Boolean type:
True or False:Boolean
"""
slot_data = read_from_local_data_file.read_from_file()
slots = slot_data['items']
for slot in slots:
end_time_slot = slot["end"]["dateTime"]
start_time_slot = slot["start"]["dateTime"]
if start_time >= start_time_slot.split("+",1)[0] and start_time <= end_time_slot.split("+",1)[0]:
if end_time >= start_time_slot.split("+",1)[0] and end_time <= end_time_slot.split("+",1)[0]:
return True
return False | 1492cfb6b118d491166fc1abe202ca84a7fd26ed | 3,655,084 |
from datetime import datetime
def format_relative_date(date):
    """Takes a datetime object and returns the date formatted as a string e.g. "3 minutes ago", like the real site.
    This is based roughly on George Edison's code from StackApps:
    http://stackapps.com/questions/1009/how-to-format-time-since-xxx-e-g-4-minutes-ago-similar-to-stack-exchange-site/1018#1018"""
    now = datetime.now()
    diff = int((now - date).total_seconds())
    # Anti-repetition! These simplify the code somewhat.
    plural = lambda d: 's' if d != 1 else ''
    frmt = lambda d: (diff // d, plural(diff // d))
    if diff < 60:
        return '%d second%s ago' % frmt(1)
    elif diff < 3600:
        return '%d minute%s ago' % frmt(60)
    elif diff < 86400:
        return '%d hour%s ago' % frmt(3600)
    elif diff < 172800:
        return 'yesterday'
    else:
        return date.strftime('%b %d / %y - %H:%M') | 5dc8614fb3007ee90032cb0e2baa0b2fc910f275 | 3,655,085
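# Usage sketch for the formatter above:
from datetime import timedelta
print(format_relative_date(datetime.now() - timedelta(minutes=5)))   # '5 minutes ago'
print(format_relative_date(datetime.now() - timedelta(hours=30)))    # 'yesterday'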
def Jacobian_rkhs_gaussian(x, vf_dict, vectorize=False):
"""analytical Jacobian for RKHS vector field functions with Gaussian kernel.
Arguments
---------
x: :class:`~numpy.ndarray`
Coordinates where the Jacobian is evaluated.
vf_dict: dict
A dictionary containing RKHS vector field control points, Gaussian bandwidth,
and RKHS coefficients.
Essential keys: 'X_ctrl', 'beta', 'C'
Returns
-------
J: :class:`~numpy.ndarray`
Jacobian matrices stored as d-by-d-by-n numpy arrays evaluated at x.
d is the number of dimensions and n the number of coordinates in x.
"""
if x.ndim == 1:
K, D = con_K(x[None, :], vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
J = (vf_dict['C'].T * K) @ D[0].T
elif not vectorize:
n, d = x.shape
J = np.zeros((d, d, n))
for i, xi in enumerate(x):
K, D = con_K(xi[None, :], vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
J[:, :, i] = (vf_dict['C'].T * K) @ D[0].T
else:
K, D = con_K(x, vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
if K.ndim == 1: K = K[None, :]
J = np.einsum('nm, mi, njm -> ijn', K, vf_dict['C'], D)
return -2 * vf_dict['beta'] * J | 0f45147587c02dbcbf879a3f8ed26d4d9eeaea2e | 3,655,086 |
def load_pickle(file):
"""Gets the file from the cPickle file."""
    f = open(file, 'rb')  # pickle files should be opened in binary mode
d = cPickle.load(f)
f.close()
logger = get_logger()
logger.info("file %s loaded" % file)
return d | 68caaa36fde8adaad3da60b567488c8e8df1bd69 | 3,655,088 |
def test_register_op_with_extending_steps_works():
"""
Calling the custom pipeline operation with an argument should yield the same
arguments passed back as a result
:return:
"""
test_pipe = Pipeline(STEPS, **PIPELINE_DEF_KWARGS)
def custom_op(doc, context=None, settings=None, **kwargs):
return settings
custom_argument = {'argument': 1}
test_pipe.register_operation('CUSTOM_STEP', custom_op)
test_pipe.steps.append(('CUSTOM_STEP', custom_argument))
results = test_pipe(TEXT)
assert results['CUSTOM_STEP'] == custom_argument | c4de3ab07fb3a6659e413f61d77bd48057a025d0 | 3,655,089 |
from datetime import date, timedelta
from typing import Tuple
def get_dates_for_last_30_days(
end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
"""Returns dates for running RCA on the last 30 days.
The first tuple contains t-61, t-31.
The second tuple contains t-30, t.
"""
rca_start_date = end_date - timedelta(days=30)
base_end_date = rca_start_date - timedelta(days=1)
base_start_date = base_end_date - timedelta(days=30)
return (base_start_date, base_end_date), (rca_start_date, end_date) | c174f457ec46fabaf724665a322d697b541e815f | 3,655,090 |
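# Worked example: for end_date = date(2021, 3, 31) the function returns
# ((date(2021, 1, 29), date(2021, 2, 28)), (date(2021, 3, 1), date(2021, 3, 31))).
base_window, rca_window = get_dates_for_last_30_days(date(2021, 3, 31))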
from datetime import datetime, timedelta
def get_3rd_friday():
    """Get the third Friday of the current month."""
    first_day_in_month = datetime.now().replace(day=1)  # first day of the current month
    # Collect the days of the current month that fall on a Friday
    fridays = [i for i in range(1, 28) if (first_day_in_month + timedelta(days=i - 1)).isoweekday() == 5]
    if len(fridays) < 3:
        raise Exception(f'Unexpected set of Fridays for the current month: {fridays}')
    # Day of the month of the third Friday
    third_friday = fridays[2]
return datetime.now().replace(day=third_friday) | ae83cbb3648fc24940ef874d492f56e9ece56481 | 3,655,091 |
import requests
from urllib.parse import quote
def retrieve_article_pdf_from_ads(bibcode, eprint_or_pub="PUB"):
"""
Get the PDF file for a given bibcode
"""
endpoint = f"{eprint_or_pub.upper()}_PDF"
safe_bibcode = quote(bibcode)
pdf_filename = f"{safe_bibcode}_{eprint_or_pub.lower()}.pdf"
url = f"{LINK_GATEWAY_BASE_URL}/{safe_bibcode}/{endpoint}"
r = requests.get(
url,
allow_redirects=True,
)
with open(pdf_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=128):
f.write(chunk)
return pdf_filename | fb747a25e3415531e74a980afb476d8d27cb66a6 | 3,655,092 |
import requests
def get_account_info():
"""account information"""
method = 'GET'
path = '/open/api/v2/account/info'
url = '{}{}'.format(ROOT_URL, path)
params = _sign(method, path)
response = requests.request(method, url, params=params)
return response.json() | 5e4835933935db48d5cdbde3a786cbcd6fc83c31 | 3,655,093 |
def str_product(string):
""" Calculate the product of all digits in a string """
product = 1
for i in string:
product *= int(i)
return product | c0c7442ac53aaf49760feffa7d08408d7520d9b4 | 3,655,094 |
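# Worked example: 1 * 2 * 3 * 4 == 24
print(str_product("1234"))   # 24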
def convolution_filter_grad_backward(inputs, base_axis=1, pad=None, stride=None,
dilation=None, group=1, channel_last=False):
"""
Args:
inputs (list of nn.Variable): Incomming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
gdw = inputs[0]
dy = inputs[1]
x0 = inputs[2]
ctx = nn.get_current_context()
dfx = ConvolutionDataGrad(
ctx, base_axis, pad, stride, dilation, group, channel_last)
dfx.xshape = x0.shape
gdy = F.convolution(x0, gdw, None, base_axis, pad,
stride, dilation, group, channel_last)
gx0 = dfx(dy, gdw)
return gdy, gx0 | 5d33b4b4c96be6bc95c119e422fcda40ac698309 | 3,655,095 |
def _get_dload_scale(dload,
xyz_scale: float,
velocity_scale: float,
accel_scale: float,
                     force_scale: float) -> float:
"""
LOAD asssumes force
"""
if dload.Type == 'LOAD':
scale = force_scale
elif dload.Type == 'DISP':
scale = xyz_scale
elif dload.Type == 'VELO':
scale = velocity_scale
elif dload.Type == 'ACCE':
scale = accel_scale
else:
raise RuntimeError(dload)
return scale | f3a0a7d26d915ebc8231d9e4cab223f27afba2a2 | 3,655,097 |
def job_complete(job):
"""
Should be called whenever a job is completed.
This will update the Git server status and make
any additional jobs ready.
"""
job_complete_pr_status(job)
create_issue_on_fail(job)
start_canceled_on_fail(job)
ParseOutput.set_job_info(job)
ProcessCommands.process_commands(job)
job.update_badge()
all_done = job.event.set_complete_if_done()
if all_done:
event_complete(job.event)
unrunnable = job.event.get_unrunnable_jobs()
for norun in unrunnable:
logger.info("Job %s: %s will not run due to failed dependencies" % (norun.pk, norun))
job_wont_run(norun)
return all_done | 5692ea76a5ab1ac6be0a30f74a8daec4ce5bd6a0 | 3,655,098 |
def direct(sp_script_str, run_dir, nsamp, njobs,
tgt_geo, bath_geo, thy_info, charge, mult,
smin=3.779, smax=11.339, spin_method=1, ranseeds=None):
""" Write input and run output.
:param sp_script_str: submission script for single-point calculation
:type sp_script_str: str
:param run_dir: directory where all OneDMin jobs are run
:type run_dir: str
:param nsamp: number of samples to run PER OneDMin job
:type nsamp: int
:param njobs: number of OneDMin instances to run in parallel
:type njobs: int
:param tgt_geo: geometry of the target molecule
:type tgt_geo: automol geometry data structure
:param bath_geo: geometry of the bath molecule
:type bath_geo: automol geometry data structure
:param thy_info: theory info object (prog, method, basis, orb_lbl)
:type thy_info: tuple(str, str, str, str)
:param charge: charge of the target-molecule complex
:type charge: int
:param mult: multiplicity of the target-molecule complex
:type mult: int
:param smin: minimum allowed intermolecular separation
:type smin: float
:param smax: maximum allowed intermolecular separation
:type smax: float
:param spin_method: parameter for the spin method
:type spin_method: int
    :param ranseeds: seed integers for the orientational sampling (one per job)
    :type ranseeds: tuple(int)
:rtype: (float, float)
"""
# Write the main input files for all runs (breaks if ranseeds not given)
input_str_lst = ()
for ranseed in ranseeds:
input_str_lst += (
onedmin_io.writer.input_file(
nsamp, smin, smax,
ranseed=ranseed, spin_method=spin_method),
)
# Write the aux inputs; same for all runs
tgt_str = automol.geom.string(tgt_geo)
bath_str = automol.geom.string(bath_geo)
elstruct_inp_str, onedmin_exe_name = _set_pot_info(thy_info, charge, mult)
aux_dct = {
'target.xyz': tgt_str,
'bath.xyz': bath_str,
'qc.mol': elstruct_inp_str,
'qc.x': sp_script_str
}
# Write the script string for submission (for all runs)
script_str = onedmin_io.writer.submission_script(
njobs, run_dir, onedmin_exe_name)
# Run the code
output_str_lst = from_parallel_input_strings(
script_str, run_dir, input_str_lst,
aux_dct=aux_dct,
input_name=INPUT_NAME,
output_names=OUTPUT_NAMES)
return input_str_lst, elstruct_inp_str, output_str_lst | d8f977e46f71a61c23a28dc1bd3a58dfec43ba9f | 3,655,099 |
def run(ceph_cluster, **kwargs) -> int:
"""
Method that executes the external test suite.
Args:
ceph_cluster The storage cluster participating in the test.
kwargs The supported keys are
config contains the test configuration
Returns:
0 - Success
1 - Failure
"""
LOG.info("Running RBD Sanity tests.")
config = kwargs["config"]
script_dir = config["script_path"]
script = config["script"]
branch = config.get("branch", "pacific")
nodes = config.get("nodes", [])
if nodes:
nodes = get_nodes_by_ids(ceph_cluster, nodes)
else:
# By default, tests would be executed on a single client node
nodes = [ceph_cluster.get_nodes(role="client")[0]]
for node in nodes:
one_time_setup(node, branch=branch)
cmd = f"cd ceph/{script_dir}; sudo bash {script}"
if script == "*":
cmd = f"cd ceph/{script_dir}; for test in $(ls); do sudo bash $test; done"
node.exec_command(cmd=cmd, check_ec=True, timeout=1200)
return 0 | a8107c35049d2edcb5d8f4f844e287ea7fcd1c81 | 3,655,101 |
import requests
import urllib.parse
from bs4 import BeautifulSoup
def search_item(search_term, next=False, page=0, board=0):
"""function to search and return comments"""
if next == False:
page = requests.get("https://www.nairaland.com/search?q=" + urllib.parse.quote_plus(str(search_term)) + "&board="+str(board))
else:
page = requests.get("https://www.nairaland.com/search/"
+ str(search_term) + "/0/"+str(board)+"/0/1" + str(page))
soup = BeautifulSoup(page.content, 'html.parser')
comments = soup.findAll("div", {"class": "narrow"})
return comments | 7e2a72c9df82f204ac852b1c3028c6de8906594b | 3,655,102 |