content | sha1 | id
---|---|---
def list_all_connections(pg_id='root', descendants=True):
"""
Lists all connections for a given Process Group ID
Args:
pg_id (str): ID of the Process Group to retrieve Connections from
descendants (bool): True to recurse child PGs, False to not
Returns:
(list): List of ConnectionEntity objects
"""
return list_all_by_kind('connections', pg_id, descendants) | 6df326ff521f175b3ccfe4b1d2488328fe6e6213 | 3,649,976 |
def _GetTombstoneData(device, tombstone_file):
"""Retrieve the tombstone data from the device
Args:
device: An instance of DeviceUtils.
tombstone_file: the tombstone to retrieve
Returns:
A list of lines
"""
return device.old_interface.GetProtectedFileContents(
'/data/tombstones/' + tombstone_file) | 99322ea3d67e150f4433c713159eb7bc8069271f | 3,649,978 |
import time
def _strTogYear(v):
"""Test gYear value
@param v: the literal string
@return v
@raise ValueError: invalid value
"""
try:
time.strptime(v+"-01-01", "%Y-%m-%d")
return v
    except ValueError:
raise ValueError("Invalid gYear %s" % v) | a65e04c2d3790d3d55bbc8788d6802e1aae1b78c | 3,649,979 |
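A brief usage sketch for the validator above (hypothetical values, assuming _strTogYear is in scope): valid four-digit years pass through unchanged, anything else raises ValueError.
print(_strTogYear("1999"))       # -> 1999
try:
    _strTogYear("19x9")          # not a valid gYear
except ValueError as err:
    print(err)                   # -> Invalid gYear 19x9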
import numpy as np
def aca_full_pivoting(A, epsilon):
"""ACA with full pivoting as in the lecture
Takes in a matrix, and returns the CUR decomposition
"""
# R0 = A
Rk = A.copy()
I_list = []
J_list = []
while frobenius_norm(Rk) > epsilon*frobenius_norm(A):
i, j = np.unravel_index(np.argmax(np.abs(Rk), axis=None), Rk.shape)
I_list.append(i)
J_list.append(j)
delta = Rk[i, j]
u = Rk[:, j]
v = Rk[i, :].T / delta
Rk = Rk - np.outer(u, v)
R = A[I_list, :]
U = np.linalg.inv(A[I_list, :][:, J_list])
C = A[:, J_list]
return C, U, R | 96bcfd4b8cb560904efc4ab6cfac6473d8dafe47 | 3,649,980 |
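A minimal sketch of how the routine above might be exercised; frobenius_norm is not defined in the snippet, so it is assumed here to be the plain Frobenius norm.
import numpy as np

def frobenius_norm(M):
    # assumption: the helper used above is the ordinary Frobenius norm
    return np.linalg.norm(M, ord='fro')

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 3)) @ rng.standard_normal((3, 40))   # exactly rank 3
C, U, R = aca_full_pivoting(A, epsilon=1e-6)
print(np.allclose(C @ U @ R, A))   # -> True (cross approximation is exact for low-rank A)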
def catch_gpu_memory_error( f ):
"""
Decorator that calls the function `f` and catches any GPU memory
error, during the execution of f.
If a memory error occurs, this decorator prints a corresponding message
and aborts the simulation (using MPI abort if needed)
"""
# Redefine the original function by calling it within a try/except
def g(*args, **kwargs):
try:
return f(*args, **kwargs)
except OutOfMemoryError as e:
handle_cuda_memory_error( e, f.__name__ )
# Decorator: return the new function
return(g) | 1201236b7d2217fcfc3fcb95905f8f4e2f89af06 | 3,649,984 |
def horizontal_tail_planform_raymer(horizontal_stabilizer, wing, l_ht,c_ht):
"""Adjusts reference area before calling generic wing planform function to compute wing planform values.
Assumptions:
None
Source:
Raymer
Inputs:
horizontal_stabilizer [SUAVE data structure]
wing [SUAVE data structure] (should be the main wing)
l_ht [m] length from wing mean aerodynamic chord (MAC) to horizontal stabilizer MAC
c_ht [-] horizontal tail coefficient (Raymer specific) .5 = Sailplane, .5 = homebuilt,
.7 = GA single engine, .8 = GA twin engine .5 = agricultural, .9 = twin turboprop,
.7 = flying boat, .7 = jet trainer, .4 = jet fighter, 1. = military cargo/bomber,
1. = jet transport
Outputs:
    horizontal_stabilizer.areas.reference     [m^2]
Other changes to horizontal_stabilizer (see wing_planform)
Properties Used:
N/A
"""
horizontal_stabilizer.areas.reference = wing.chords.mean_aerodynamic*c_ht*wing.areas.reference/l_ht
wing_planform(horizontal_stabilizer)
return 0 | 860a020e3e2b06943df2689bd54707a051fb30b2 | 3,649,985 |
from improved_permissions.roles import ALLOW_MODE
def inherit_check(role_s, permission):
"""
Check if the role class has the following
permission in inherit mode.
"""
role = get_roleclass(role_s)
if role.inherit is True:
if role.get_inherit_mode() == ALLOW_MODE:
return True if permission in role.inherit_allow else False
return False if permission in role.inherit_deny else True
return False | 5dbaa7afee9802ea1eda4cec869dd44395faf0e5 | 3,649,986 |
import random
def giveHint(indexValue, myBoard):
"""Return a random matching card given the index of a card
and a game board"""
validMatches = []
card = myBoard[indexValue]
for c in myBoard:
if (card[0] == c[0]) and (myBoard.index(c) != indexValue):
validMatches.append(myBoard.index(c))
return random.choice(validMatches) | e578f40e7d7e2e17ddac53f9cfdc219e47c861cd | 3,649,987 |
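A usage sketch with a hypothetical four-card board, assuming giveHint is defined as above; cards match when their first element is equal, so the hint for index 0 is index 2.
board = [('A', 'img1'), ('B', 'img2'), ('A', 'img3'), ('B', 'img4')]
print(giveHint(0, board))   # -> 2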
async def make_getmatch_embed(data):
"""Generate the embed description and other components for a getmatch() command.
As with its parent, remember that this currently does not support non team-vs.
`data` is expected to be the output of `get_individual_match_data()`.
The following `dict` is returned:
```
{
"embed_description": str,
"footer": str,
"embed_color": int (as color hex),
}
```
"""
scores = data["individual_scores"]
team_1_score_strings = []
team_2_score_strings = []
for individual_score in scores:
#at first i thought doing this would make the actual score_string more readable
#now i'm not very sure
player_name = individual_score["user_name"]
score_val = individual_score["score"]
maxcombo = individual_score["combo"]
accuracy = individual_score["accuracy"]
count_300 = individual_score["hits"]["300_count"]
count_100 = individual_score["hits"]["100_count"]
count_50 = individual_score["hits"]["50_count"]
count_miss = individual_score["hits"]["miss_count"]
accuracy = '{:.2%}'.format(accuracy)
score_val = "{:,}".format(score_val)
maxcombo = "{:,}".format(maxcombo)
score_string = (f'**{player_name}** - {score_val} ({maxcombo}x) ({accuracy} - {count_300}/{count_100}/{count_50}/{count_miss})')
team_1_score_strings.append(score_string) if individual_score["team"] == "1" else team_2_score_strings.append(score_string)
team_1_score_string = "\n".join(team_1_score_strings)
team_2_score_string = "\n".join(team_2_score_strings)
winner_string = {
"Blue": f"Blue team wins by {'{:,}'.format(data['score_difference'])}!",
"Red": f"Red team wins by {'{:,}'.format(data['score_difference'])}!",
"Tie": "Tie!"}
winner_color = {
"Blue": 0x0000FF,
"Red": 0xFF0000,
"Tie": 0x808080}
embed_desc = (
f'**{winner_string[data["winner"]]}**\n\n'
f'__Blue Team__ ({"{:,}".format(data["team_1_score"])} points, {"{:,}".format(data["team_1_score_avg"])} average)\n'
f'{team_1_score_string}\n\n'
f'__Red Team__ ({"{:,}".format(data["team_2_score"])} points, {"{:,}".format(data["team_2_score_avg"])} average)\n'
f'{team_2_score_string}')
#footer stuff
scoring_types = {
'0': 'Score',
'1': 'Accuracy',
'2': 'Combo',
'3': 'Score v2'}
team_types = {
'0': 'Head-to-head',
'1': 'Tag Co-op',
'2': 'Team VS',
'3': 'Tag Team VS'}
play_modes = {
'0': 'osu!',
'1': 'Taiko',
'2': 'CTB',
'3': 'osu!mania'}
embed_footer = (f'Played at {data["start_time"]} UTC | '
f'Win condition: {scoring_types[data["scoring_type"]]} | '
f'{team_types[data["team_type"]]} | '
f'{play_modes[data["play_mode"]]}')
final = {
"embed_description": embed_desc,
"footer": embed_footer,
"embed_color": winner_color[data["winner"]],
}
return final | c37e0d6ee948259e4ad898d3cafb8e13b6452d80 | 3,649,988 |
def allreduceCommunicate_op(node, comm):
"""Make a new instance of AllReduceCommunicateOp and call the instance.
Parameters:
----
node : Node
The Node to do allreduce
Returns:
----
A new Node instance created by Op.
"""
return AllReduceCommunicateOp(node, comm) | 5096a9014ae349e39c2d59de77845221ffdddb10 | 3,649,989 |
def reduce_fn(state, values):
"""tf.data.Dataset-friendly implementation of mean and variance."""
k, n, ex, ex2 = state
# If this is the first iteration, we pick the first value to be 'k',
# which helps with precision - we assume that k is close to an average
# value and calculate mean and variance with respect to that.
k = tf.cond(tf.equal(n, 0), lambda: values[0], lambda: k)
sum_v = tf.reduce_sum(values, axis=0)
sum_v2 = tf.reduce_sum(tf.square(values), axis=0)
ones = tf.ones_like(values, dtype=tf.int32)
batch_size = tf.reduce_sum(ones, axis=0)
batch_size_f = tf.cast(batch_size, tf.float32)
ex = 0 + sum_v - tf.multiply(batch_size_f, k)
ex2 = 0 + sum_v2 + tf.multiply(
batch_size_f, (tf.square(k) -
tf.multiply(tf.multiply(2.0, k), sum_v)))
return (k, n + batch_size, ex, ex2) | 473bb8cae3e898f3a166250fbdb805ad55aaaea9 | 3,649,990 |
def winged_edge(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `winged edge` configuration."""
coedge_to_prev = np.zeros_like(coedge_to_next)
for (from_ix, to_ix) in enumerate(coedge_to_next):
coedge_to_prev[to_ix] = from_ix
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
_ne(coedge_to_next, coedge_to_node, coedge_to_edge, edges)
_pe(coedge_to_prev, coedge_to_node, coedge_to_edge, edges)
_mne(coedge_to_next, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
_mpe(coedge_to_prev, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
_p(coedge_to_prev, coedge_to_node, edges)
_mn(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mp(coedge_to_prev, coedge_to_node, coedge_to_mate, edges)
return _create_graph(face_features, edge_features, coedge_features, edges) | 8f023d4e6133b044c435737e49ae768c83f089ca | 3,649,991 |
from decimal import Decimal
def dollar_format(dollars):
"""
Args:
dollars (any): A dollar value (Any value that can be turned into a float can be used - int, Decimal, str, etc.)
Returns:
str: The formatted string
"""
decimal_dollars = Decimal(dollars)
if decimal_dollars < 0:
return "-${:,.2f}".format(-decimal_dollars)
else:
return "${:,.2f}".format(decimal_dollars) | d9f8a9195a92af39df9754e14bae723060c335b1 | 3,649,992 |
from typing import Callable
from typing import Any
def check_aea_project(
f: Callable, check_aea_version: bool = True, check_finger_prints: bool = False
) -> Callable:
"""
Check the consistency of the project as a decorator.
- try to load agent configuration file
- iterate over all the agent packages and check for consistency.
"""
def wrapper(*args: Any, **kwargs: Any) -> Callable:
_check_aea_project(
args,
check_aea_version=check_aea_version,
check_finger_prints=check_finger_prints,
)
return f(*args, **kwargs)
return update_wrapper(wrapper, f) | 31d909116613be819b61be16160bd72227462853 | 3,649,993 |
def find_closest_cross(wire1_path, wire2_path):
"""
Compare the coordinates of two wire paths to find the crossing point
closest (Manhattan Distance) to the origin (0,0).
Returns a list of crossing points, the closest crossing point and its distance to the start point
"""
best_result = -1
crossing_list = []
for i in range(len(wire1_path)):
if wire1_path[i] in wire2_path and wire1_path[i] != [0,0]:
test_result = abs(wire1_path[i][0]) + abs(wire1_path[i][1])
crossing_list.append(wire1_path[i])
if best_result == -1:
best_cross = wire1_path[i][:]
best_result = test_result
elif test_result < best_result:
best_cross = wire1_path[i][:]
best_result = test_result
return crossing_list, best_cross, best_result | c91c5db3bb09cdfc74c4c71c92bf46274eb8d88c | 3,649,994 |
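A usage sketch with two hypothetical hand-made wire paths (lists of [x, y] points), assuming find_closest_cross is in scope; the crossing at [2, 0] has the smaller Manhattan distance.
wire1 = [[0, 0], [1, 0], [2, 0], [3, 0], [3, 1]]
wire2 = [[0, 0], [0, 1], [1, 1], [2, 0], [3, 1]]
crossings, closest, distance = find_closest_cross(wire1, wire2)
print(crossings)   # -> [[2, 0], [3, 1]]
print(closest)     # -> [2, 0]
print(distance)    # -> 2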
import re
def add_signature_source(service, **_):
"""
Add a signature source for a given service
Variables:
service => Service to which we want to add the source to
Arguments:
None
Data Block:
{
"uri": "http://somesite/file_to_get", # URI to fetch for parsing the rules
"name": "signature_file.yar", # Name of the file we will parse the rules as
"username": null, # Username used to get to the URI
"password": null, # Password used to get to the URI
"header": { # Header sent during the request to the URI
"X_TOKEN": "SOME RANDOM TOKEN" # Exemple of header
},
"private_key": null, # Private key used to get to the URI
"pattern": "^*.yar$" # Regex pattern use to get appropriate files from the URI
}
Result example:
{"success": True/False} # if the operation succeeded of not
"""
try:
data = request.json
except (ValueError, KeyError):
return make_api_response({"success": False},
err="Invalid source object data",
status_code=400)
# Ensure data source doesn't have spaces in name
data['name'] = re.sub('[^0-9a-zA-Z_]+', '', data['name'].replace(" ", "_"))
# Ensure private_key (if any) ends with a \n
if data.get('private_key', None) and not data['private_key'].endswith("\n"):
data['private_key'] += "\n"
service_data = STORAGE.get_service_with_delta(service, as_obj=False)
if not service_data.get('update_config', {}).get('generates_signatures', False):
return make_api_response({"success": False},
err="This service does not generate alerts therefor "
"you cannot add a source to get the alerts from.",
status_code=400)
current_sources = service_data.get('update_config', {}).get('sources', [])
for source in current_sources:
if source['name'] == data['name']:
return make_api_response({"success": False},
err=f"Update source name already exist: {data['name']}",
status_code=400)
current_sources.append(data)
service_delta = STORAGE.service_delta.get(service, as_obj=False)
if service_delta.get('update_config') is None:
service_delta['update_config'] = {"sources": current_sources}
else:
service_delta['update_config']['sources'] = current_sources
_reset_service_updates(service)
# Save the signature
success = STORAGE.service_delta.save(service, service_delta)
if success:
service_event_sender.send(data['name'], {
'operation': Operation.Modified,
'name': data['name']
})
return make_api_response({"success": success}) | 65526852dee90f077e0c8b52fc53e725043ffc1e | 3,649,995 |
def edit_screen_item(self, request, form):
""" Edit a screen. """
layout = ManageScreensLayout(self, request)
if form.submitted(request):
form.update_model(self)
request.message(_('Screen modified.'), 'success')
request.app.pages_cache.flush()
return redirect(layout.manage_model_link)
if not form.errors:
form.apply_model(self)
return {
'layout': layout,
'form': form,
'title': _(
"Screen ${number}",
mapping={'number': self.number}
),
'subtitle': _('Edit screen'),
'cancel': layout.manage_model_link
} | 456837172860c808c2347d556cb8aaa4fcf59fbb | 3,649,996 |
from sympy import symbols, cos, sin
def get_xyz_t():
"""
    Compute the intermediate terms of the inverse CIELAB-to-XYZ transform
    for each of X, Y and Z.
"""
c, l, h = symbols('c, l, h', real=True)
xt = (l + 16) / 116 + (c * cos(h)) / 500
yt = (l + 16) / 116
zt = (l + 16) / 116 - (c * sin(h)) / 200
xyz_t = [xt, yt, zt]
return xyz_t, c, l, h | e823744ada693fb525d57f5a616c89677c8ed0a5 | 3,649,997 |
async def home():
"""
Home page, welcome
Returns:
Rendered template of homepage
"""
return await render_template('home.html') | a981c121c64a99359adac620dfa0f58d31a63956 | 3,649,998 |
import torch
def compute_inverse_interpolation_img(weights, indices, img, b, h_i, w_i):
"""
weights: [b, h*w]
indices: [b, h*w]
img: [b, h*w, a, b, c, ...]
"""
w0, w1, w2, w3 = weights
ff_idx, cf_idx, fc_idx, cc_idx = indices
k = len(img.size()) - len(w0.size())
img_0 = w0[(...,) + (None,) * k] * img
img_1 = w1[(...,) + (None,) * k] * img
img_2 = w2[(...,) + (None,) * k] * img
img_3 = w3[(...,) + (None,) * k] * img
img_out = torch.zeros(b, h_i * w_i, *img.shape[2:]).type_as(img)
ff_idx = torch.clamp(ff_idx, min=0, max=h_i * w_i - 1)
cf_idx = torch.clamp(cf_idx, min=0, max=h_i * w_i - 1)
fc_idx = torch.clamp(fc_idx, min=0, max=h_i * w_i - 1)
cc_idx = torch.clamp(cc_idx, min=0, max=h_i * w_i - 1)
img_out.scatter_add_(1, ff_idx[(...,) + (None,) * k].expand_as(img_0), img_0)
img_out.scatter_add_(1, cf_idx[(...,) + (None,) * k].expand_as(img_1), img_1)
img_out.scatter_add_(1, fc_idx[(...,) + (None,) * k].expand_as(img_2), img_2)
img_out.scatter_add_(1, cc_idx[(...,) + (None,) * k].expand_as(img_3), img_3)
return img_out | 6b69aa5ca372a9c8f976512191d4626919d71311 | 3,649,999 |
def layer_prepostprocess(previous_value,
x,
sequence,
dropout_rate,
norm_type,
depth,
epsilon,
default_name,
name=None,
dropout_broadcast_dims=None,
layer_collection=None):
"""Apply a sequence of functions to the input or output of a layer.
The sequence is specified as a string which may contain the following
characters:
a: add previous_value
n: apply normalization
d: apply dropout
z: zero add
For example, if sequence=="dna", then the output is
previous_value + normalize(dropout(x))
Args:
previous_value: A Tensor, to be added as a residual connection ('a')
x: A Tensor to be transformed.
sequence: a string.
dropout_rate: a float
norm_type: a string (see apply_norm())
depth: an integer (size of last dimension of x).
epsilon: a float (parameter for normalization)
default_name: a string
name: a string
dropout_broadcast_dims: an optional list of integers less than 3
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
KFAC optimizer. Default is None.
Returns:
a Tensor
"""
with tf.variable_scope(name, default_name=default_name):
if sequence == "none":
return x
for c in sequence:
if c == "a":
x += previous_value
elif c == "z":
x = zero_add(previous_value, x)
elif c == "n":
x = apply_norm(
x, norm_type, depth, epsilon, layer_collection=layer_collection)
else:
assert c == "d", ("Unknown sequence step %s" % c)
x = dropout_with_broadcast_dims(
x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
return x | f983888739fa04d0c086e276997cec3919cf3e24 | 3,650,000 |
def build_norm_layer(cfg, num_channels, postfix=''):
""" Build normalization layer
Args:
Returns:
layer (fluid.dygrah.Layer): created norm layer
"""
assert isinstance(cfg, dict) and 'type' in cfg
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if layer_type not in norm_cfg:
raise KeyError('Unrecognized norm type {}'.format(layer_type))
else:
abbr, norm_layer = norm_cfg[layer_type]
if norm_layer is None:
raise NotImplementedError
assert isinstance(postfix, (int, str))
name = abbr + str(postfix)
stop_gradient = cfg_.pop('stop_gradient', False)
cfg_.setdefault('epsilon', 1e-5)
layer = norm_layer(num_channels=num_channels, **cfg_)
# for param in layer.parameters():
# param.stop_gradient = stop_gradient
return name, layer | d29437854587f7aeaac3b97c2e98d70b56369402 | 3,650,001 |
import torchvision
def get_split_cifar100_tasks(num_tasks, batch_size):
"""
Returns data loaders for all tasks of split CIFAR-100
:param num_tasks:
:param batch_size:
:return:
"""
datasets = {}
# convention: tasks starts from 1 not 0 !
# task_id = 1 (i.e., first task) => start_class = 0, end_class = 4
cifar_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),])
cifar_train = torchvision.datasets.CIFAR100('./data/', train=True, download=True, transform=cifar_transforms)
cifar_test = torchvision.datasets.CIFAR100('./data/', train=False, download=True, transform=cifar_transforms)
for task_id in range(1, num_tasks+1):
train_loader, test_loader = get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets | 85c06c07682c74554aa11826431e5fdbd7eb84c8 | 3,650,002 |
def set_simulation_data(
state_energies, T_array, state1_index, state2_index
):
"""
Create and set SimulationData objects for a pair of specified states
"""
# Set default UnitData object
default_UnitData = UnitData(
kb=kB.value_in_unit(unit.kilojoule_per_mole/unit.kelvin),
energy_conversion=1,
length_conversion=1,
volume_conversion=1,
temperature_conversion=1,
pressure_conversion=1,
time_conversion=1,
energy_str='KJ/mol',
length_str='nm',
volume_str='nm^3',
temperature_str='K',
pressure_str='bar',
time_str='ps'
)
# State 1
sim_data1 = SimulationData()
sim_data1.observables = ObservableData(
potential_energy=state_energies[state1_index,:],
)
sim_data1.ensemble = EnsembleData(
ensemble='NVT',
energy=state_energies[state1_index,:],
temperature=T_array[state1_index]
)
sim_data1.units = default_UnitData
# State 2
sim_data2 = SimulationData()
sim_data2.observables = ObservableData(
potential_energy=state_energies[state2_index,:],
)
sim_data2.ensemble = EnsembleData(
ensemble='NVT',
energy=state_energies[state2_index,:],
temperature=T_array[state2_index]
)
sim_data2.units = default_UnitData
return sim_data1, sim_data2 | edff2bd66a359da10f64c175aa125f8749a2064d | 3,650,006 |
import numpy as np
def park2_euc(x):
    """ Computes the park2 function """
max_val = 5.925698
x1 = x[0]
x2 = x[1]
x3 = x[2]
x4 = x[3]
ret = (2.0/3.0) * np.exp(x1 + x2) - x4*np.sin(x3) + x3
return min(ret, max_val) | 96448c502867d360010238526791144fdc1e7581 | 3,650,007 |
def num_compositions_jit(m, n):
"""
Numba jit version of `num_compositions`. Return `0` if the outcome
exceeds the maximum value of `np.intp`.
"""
return comb_jit(n+m-1, m-1) | 40562a1ee1564e7b2015f5b8e5d2298a18644493 | 3,650,008 |
def fake_get_vim_object(arg):
"""Stubs out the VMwareAPISession's get_vim_object method."""
return fake_vmware_api.FakeVim() | ee7c7b0331f344b1428e48da38d185dc01bf11d9 | 3,650,009 |
import json
def get_old_ids(title):
"""
Returns all the old ids of a particular site given the title of the
Wikipedia page
"""
raw_data = json.loads( readInDataFromURL("https://en.wikipedia.org/w/api.php?action=query&prop=revisions&format=json&rvlimit=100000&titles=" + title) )
old_ids = dict() # initialize
    for page_id, revisions in raw_data['query']['pages'].items():
print(revisions)
# for revision in revisions:
# old_ids[revision.]
# try:
# for extlink in page['extlinks']:
# # print to the output file
# print('%s\t%s\t%s'%(page_id, name, extlink['*']), file=outputfile)
# except:
# if options.verbose:
# print('Page %s does not have any external links...'%name)
# print(data)
return old_ids | 9e9bc37ac51d7b3a8491fa41db5867943a170e1e | 3,650,011 |
def max_expectation_under_constraint(f: np.ndarray, q: np.ndarray, c: float, eps: float = 1e-2,
display: bool = False) -> np.ndarray:
"""
Solve the following constrained optimisation problem:
max_p E_p[f] s.t. KL(q || p) <= c
:param f: an array of values f(x), np.array of size n
:param q: a discrete distribution q(x), np.array of size n
:param c: a threshold for the KL divergence between p and q.
:param eps: desired accuracy on the constraint
:param display: plot the function
:return: the argmax p*
"""
np.seterr(all="warn")
if np.all(q == 0):
q = np.ones(q.size) / q.size
x_plus = np.where(q > 0)
x_zero = np.where(q == 0)
p_star = np.zeros(q.shape)
lambda_, z = None, 0
q_p = q[x_plus]
f_p = f[x_plus]
f_star = np.amax(f)
theta = partial(theta_func, q_p=q_p, f_p=f_p, c=c)
d_theta_dl = partial(d_theta_dl_func, q_p=q_p, f_p=f_p)
if f_star > np.amax(f_p):
theta_star = theta_func(f_star, q_p=q_p, f_p=f_p, c=c)
if theta_star < 0:
lambda_ = f_star
z = 1 - np.exp(theta_star)
p_star[x_zero] = 1.0 * (f[x_zero] == np.amax(f[x_zero]))
p_star[x_zero] *= z / p_star[x_zero].sum()
if lambda_ is None:
if np.isclose(f_p, f_p[0]).all():
return q
else:
# Binary search seems slightly (10%) faster than newton
# lambda_ = binary_search(theta, eps, a=f_star, display=display)
lambda_ = newton_iteration(theta, d_theta_dl, eps, x0=f_star + 1, a=f_star, display=display)
# numba jit binary search is twice as fast as python version
# lambda_ = binary_search_theta(q_p=q_p, f_p=f_p, c=c, eps=eps, a=f_star)
beta = (1 - z) / (q_p @ (1 / (lambda_ - f_p)))
if beta == 0:
x_uni = np.where((q > 0) & (f == f_star))
if np.size(x_uni) > 0:
p_star[x_uni] = (1 - z) / np.size(x_uni)
else:
p_star[x_plus] = beta * q_p / (lambda_ - f_p)
return p_star | 88a67ae4eece82c08bc683dc015904f2d307c54f | 3,650,012 |
def payload_to_plain(payload=None):
"""
Converts the myADS results into the plain text message payload
:param payload: list of dicts
:return: plain text formatted payload
"""
formatted = u''
for p in payload:
formatted += u"{0} ({1}) \n".format(p['name'], p['query_url'].format(p['qtype'], p['id']))
for r in p['results']:
first_author = _get_first_author_formatted(r)
if type(r.get('title', '')) == list:
title = r.get('title')[0]
else:
title = r.get('title', '')
formatted += u"\"{0},\" {1} ({2})\n".format(title, first_author, r['bibcode'])
formatted += u"\n"
return formatted | 53050791335dd8d259bf6b55bd36d3e8bc3f5fb0 | 3,650,013 |
import json
def get_credentials_from_request(cloud, request):
"""
Extracts and returns the credentials from the current request for a given
cloud. Returns an empty dict if not available.
"""
if request.META.get('HTTP_CL_CREDENTIALS_ID'):
return get_credentials_by_id(
cloud, request, request.META.get('HTTP_CL_CREDENTIALS_ID'))
# In case a base class instance is sent in, attempt to retrieve the actual
# subclass.
if type(cloud) is models.Cloud:
cloud = models.Cloud.objects.get_subclass(slug=cloud.slug)
if isinstance(cloud, models.OpenStack):
os_username = request.META.get('HTTP_CL_OS_USERNAME')
os_password = request.META.get('HTTP_CL_OS_PASSWORD')
if os_username or os_password:
os_project_name = request.META.get('HTTP_CL_OS_PROJECT_NAME')
os_project_domain_name = request.META.get(
'HTTP_CL_OS_PROJECT_DOMAIN_NAME')
os_user_domain_name = request.META.get(
'HTTP_CL_OS_USER_DOMAIN_NAME')
d = {'os_username': os_username, 'os_password': os_password}
if os_project_name:
d['os_project_name'] = os_project_name
if os_project_domain_name:
d['os_project_domain_name'] = os_project_domain_name
if os_user_domain_name:
d['os_user_domain_name'] = os_user_domain_name
return d
else:
return {}
elif isinstance(cloud, models.AWS):
aws_access_key = request.META.get('HTTP_CL_AWS_ACCESS_KEY')
aws_secret_key = request.META.get('HTTP_CL_AWS_SECRET_KEY')
if aws_access_key or aws_secret_key:
return {'aws_access_key': aws_access_key,
'aws_secret_key': aws_secret_key,
}
else:
return {}
elif isinstance(cloud, models.Azure):
azure_subscription_id = request.META.get(
'HTTP_CL_AZURE_SUBSCRIPTION_ID')
azure_client_id = request.META.get('HTTP_CL_AZURE_CLIENT_ID')
azure_secret = request.META.get('HTTP_CL_AZURE_SECRET')
azure_tenant = request.META.get('HTTP_CL_AZURE_TENANT')
azure_resource_group = request.META.get('HTTP_CL_AZURE_RESOURCE_GROUP')
azure_storage_account = request.META.get(
'HTTP_CL_AZURE_STORAGE_ACCOUNT')
azure_vm_default_username = request.META.get(
'HTTP_CL_AZURE_VM_DEFAULT_USERNAME')
if (azure_subscription_id and azure_client_id and azure_secret and
azure_tenant):
return {'azure_subscription_id': azure_subscription_id,
'azure_client_id': azure_client_id,
'azure_secret': azure_secret,
'azure_tenant': azure_tenant,
'azure_resource_group': azure_resource_group,
'azure_storage_account': azure_storage_account,
'azure_vm_default_username': azure_vm_default_username
}
else:
return {}
elif isinstance(cloud, models.GCP):
gcp_credentials_json = request.META.get('HTTP_CL_GCP_CREDENTIALS_JSON')
if gcp_credentials_json:
return json.loads(gcp_credentials_json)
else:
return {}
else:
raise Exception("Unrecognised cloud provider: %s" % cloud) | 29fa45d17a0473643b2db448dfbe2de7837c4dd7 | 3,650,014 |
from functools import reduce
def nCr(n, r):
"""n-choose-r.
Thanks for the "compact" solution go to:
http://stackoverflow.com/questions/2096573/counting-combinations-and-permutations-efficiently
"""
return reduce(
lambda x, y: x * y[0] / y[1],
        zip(range(n - r + 1, n + 1),
            range(1, r + 1)),
1) | 06ab7a4e12a35cf49f7ddf3e75780576d3b8972c | 3,650,015 |
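A usage sketch for the Python 3 zip/range variant above; note the reduce-based formula returns a float.
print(nCr(5, 2))    # -> 10.0
print(nCr(52, 5))   # -> 2598960.0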
from pythia.pyre.inventory import facility
from pylith.bc.DirichletTimeDependent import DirichletTimeDependent
def bcFactory(name):
"""Factory for boundary condition items.
"""
return facility(name, family="boundary_condition", factory=DirichletTimeDependent) | 65bb203b901c1648ee504bfd5bfd0956e9f849d4 | 3,650,016 |
def decode(value):
"""Decode utf-8 value to string.
Args:
value: String to decode
Returns:
result: decoded value
"""
# Initialize key variables
result = value
# Start decode
if value is not None:
if isinstance(value, bytes) is True:
result = value.decode('utf-8')
# Return
return result | 9704678f6ff96de3b711758922c28f5ecbd11bc7 | 3,650,017 |
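A usage sketch, assuming decode from the entry above: bytes are decoded as UTF-8, everything else passes through unchanged.
print(decode(b'caf\xc3\xa9'))    # -> café
print(decode('already str'))     # -> already str
print(decode(None))              # -> None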
def sequence_rec_sqrt(x_init, iter, dtype=int):
"""
Mathematical sequence: x_n = x_{n-1} * sqrt(n)
:param x_init: initial values of the sequence
:param iter: iteration until the sequence should be evaluated
:param dtype: data type to cast to (either int of float)
:return: element at the given iteration and array of the whole sequence
"""
# exponential growth
def iter_function(x_seq, i, x_init):
return x_seq[i - 1, :] * np.sqrt(i + 1) # i+1 because sqrt(1) = 1
return sequence(x_init, iter, iter_function, dtype) | a7e695ee605caad5cef7881a2eeafbee8a25bf15 | 3,650,018 |
def convert_string_to_type(string_value, schema_type):
"""
Attempts to convert a string value into a schema type.
This method may evaluate code in order to do the conversion
and is therefore not safe!
"""
# assume that the value is a string unless otherwise stated.
if schema_type == "float":
evaluated_value = float(string_value)
elif schema_type == "int":
evaluated_value = int(string_value)
elif schema_type == "bool":
if string_value == "False":
evaluated_value = False
elif string_value == "True":
evaluated_value = True
else:
raise TankError("Invalid boolean value %s! Valid values are True and False" % string_value)
elif schema_type == "list":
evaluated_value = eval(string_value)
elif schema_type == "dict":
evaluated_value = eval(string_value)
else:
# assume string-like
evaluated_value = string_value
return evaluated_value | 4d99470f7094a36567851bb23c1edd49686149cf | 3,650,019 |
def get_local_coordinate_system(time_dep_orientation: bool, time_dep_coordinates: bool):
"""
Get a local coordinate system.
Parameters
----------
time_dep_orientation :
If True, the coordinate system has a time dependent orientation.
time_dep_coordinates :
If True, the coordinate system has a time dependent coordinates.
Returns
-------
weldx.transformations.LocalCoordinateSystem:
A local coordinate system
"""
if not time_dep_coordinates:
coords = Q_(np.asarray([2.0, 5.0, 1.0]), "mm")
else:
coords = Q_(
np.asarray(
[[2.0, 5.0, 1.0], [1.0, -4.0, 1.2], [0.3, 4.4, 4.2], [1.1, 2.3, 0.2]]
),
"mm",
)
if not time_dep_orientation:
orientation = tf.rotation_matrix_z(np.pi / 3)
else:
orientation = tf.rotation_matrix_z(np.pi / 2 * np.array([1, 2, 3, 4]))
if not time_dep_orientation and not time_dep_coordinates:
return tf.LocalCoordinateSystem(orientation=orientation, coordinates=coords)
time = pd.DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03", "2000-01-04"])
return tf.LocalCoordinateSystem(
orientation=orientation, coordinates=coords, time=time
) | daa8259e92a31884d798915522d4e538f316fc91 | 3,650,021 |
def _get_tooltip(tooltip_col, gpd):
"""Show everything or columns in the list."""
if tooltip_col is not None:
tooltip = folium.GeoJsonTooltip(fields=tooltip_col)
else:
tooltip = tooltip_col
return tooltip | 8a2dc564ef65aa0eaf8a9e85457876ad0e6989ec | 3,650,022 |
def encryption(message: bytes, key: bytes) -> bytes:
    """Return the ciphertext produced by XOR-ing the message with a repeating key"""
return b"".join(
[bytes([message[i] ^ key[i % len(key)]]) for i in range(len(message))]
) | 674e4a27491a9f6c918f2129276349ba426cd676 | 3,650,023 |
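A usage sketch for the XOR helper above (message and key are hypothetical); applying the same key twice restores the plaintext.
ciphertext = encryption(b"attack at dawn", b"ICE")
print(encryption(ciphertext, b"ICE"))   # -> b'attack at dawn'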
def data_fun(times):
"""Generate time-staggered sinusoids at harmonics of 10Hz"""
global n
n_samp = len(times)
window = np.zeros(n_samp)
start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
for ii in (2 * n, 2 * n + 1)]
window[start:stop] = 1.
n += 1
data = 1e-7 * np.sin(2. * np.pi * 10. * times)
data *= window
return data | edbdf5e059b8f4c3559386497961a1c65133a80b | 3,650,024 |
def var(x, axis=None, ddof=0, keepdims=False):
"""
Computes the variance along the specified axis.
The variance is the average of the squared deviations from the mean, i.e.,
:math:`var = mean(abs(x - x.mean())**2)`.
Returns the variance, which is computed for the flattened array by default,
otherwise over the specified axis.
Note:
Numpy arguments `dtype`, `out` and `where` are not supported.
Args:
x (Tensor): A Tensor to be calculated.
axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
The default is to compute the variance of the flattened array. Default: `None`.
ddof (int): Means Delta Degrees of Freedom. Default: 0.
The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast correctly against the input array.
If the default value is passed, then keepdims will not be passed through to the var method of
sub-classes of tensor, however any non-default value will be. If the sub-class’ method does not
implement keepdims any exceptions will be raised. Default: `False`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Returns:
Standard deviation tensor.
Examples:
>>> import mindspore.numpy as np
>>> input_x = np.array([1., 2., 3., 4.])
>>> output = np.var(input_x)
>>> print(output)
1.25
"""
x = _to_tensor(x)
return x.var(axis, ddof, keepdims) | b39bf29caf4f47882fb3be900c2924a90b25a880 | 3,650,025 |
def check_inputs(supplied_inputs):
"""Check that the inputs are of some correct type and returned as AttributeDict."""
inputs = None
if supplied_inputs is None:
inputs = AttributeDict()
else:
if isinstance(supplied_inputs, DataFactory('dict')):
inputs = AttributeDict(supplied_inputs.get_dict())
elif isinstance(supplied_inputs, dict):
inputs = AttributeDict(supplied_inputs)
elif isinstance(supplied_inputs, AttributeDict):
inputs = supplied_inputs
else:
raise ValueError(f'The supplied type {type(inputs)} of inputs is not supported. Supply a dict, Dict or an AttributeDict.')
return inputs | a5369767c23a96b44da2bff2c0ac49456e3452f1 | 3,650,026 |
def _parse_none(arg, fn=None):
"""Parse arguments with support for conversion to None.
Args:
arg (str): Argument to potentially convert.
fn (func): Function to apply to the argument if not converted to None.
Returns:
Any: Arguments that are "none" or "0" are converted to None;
otherwise, returns the original value.
"""
if arg.lower() in ("none", "0"):
return None
return arg if fn is None else fn(arg) | 4ebd283eb9e2218e523ba185c4500c9879d5719d | 3,650,027 |
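A usage sketch, assuming _parse_none is in scope:
print(_parse_none("none"))     # -> None
print(_parse_none("0"))        # -> None
print(_parse_none("5", int))   # -> 5
print(_parse_none("keep"))     # -> keep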
def generate_constraint(category_id, user):
"""
generate the proper basic data structure to express a constraint
based on the category string
"""
return {'year': category_id} | f55151a5b4b17bbf6eb697e1b1489ee4897f5db0 | 3,650,028 |
def get_RIB_IN_capacity(cvg_api,
multipath,
start_value,
step_value,
route_type,
port_speed,):
"""
Args:
cvg_api (pytest fixture): snappi API
temp_tg_port (pytest fixture): Ports mapping info of T0 testbed
multipath: ecmp value for BGP config
start_value: Start value of the number of BGP routes
step_value: Step value of the number of BGP routes to be incremented
route_type: IPv4 or IPv6 routes
port_speed: speed of the port used in test
"""
def tgen_capacity(routes):
conv_config = cvg_api.convergence_config()
config = conv_config.config
for i in range(1, 3):
config.ports.port(name='Test_Port_%d' % i, location=temp_tg_port[i-1]['location'])
c_lag = config.lags.lag(name="lag%d" % i)[-1]
lp = c_lag.ports.port(port_name='Test_Port_%d' % i)[-1]
lp.ethernet.name = 'lag_eth_%d' % i
if len(str(hex(i).split('0x')[1])) == 1:
m = '0'+hex(i).split('0x')[1]
else:
m = hex(i).split('0x')[1]
lp.protocol.lacp.actor_system_id = "00:10:00:00:00:%s" % m
lp.ethernet.name = "lag_Ethernet %s" % i
lp.ethernet.mac = "00:10:01:00:00:%s" % m
config.devices.device(name='Topology %d' % i)
config.options.port_options.location_preemption = True
layer1 = config.layer1.layer1()[-1]
layer1.name = 'port settings'
layer1.port_names = [port.name for port in config.ports]
layer1.ieee_media_defaults = False
layer1.auto_negotiation.rs_fec = True
layer1.auto_negotiation.link_training = False
layer1.speed = port_speed
layer1.auto_negotiate = False
def create_v4_topo():
eth = config.devices[0].ethernets.add()
eth.port_name = config.lags[0].name
eth.name = 'Ethernet 1'
eth.mac = "00:00:00:00:00:01"
ipv4 = eth.ipv4_addresses.add()
ipv4.name = 'IPv4 1'
ipv4.address = temp_tg_port[0]['ip']
ipv4.gateway = temp_tg_port[0]['peer_ip']
ipv4.prefix = int(temp_tg_port[0]['prefix'])
rx_flow_name = []
for i in range(2, 3):
if len(str(hex(i).split('0x')[1])) == 1:
m = '0'+hex(i).split('0x')[1]
else:
m = hex(i).split('0x')[1]
ethernet_stack = config.devices[i-1].ethernets.add()
ethernet_stack.port_name = config.lags[i-1].name
ethernet_stack.name = 'Ethernet %d' % i
ethernet_stack.mac = "00:00:00:00:00:%s" % m
ipv4_stack = ethernet_stack.ipv4_addresses.add()
ipv4_stack.name = 'IPv4 %d' % i
ipv4_stack.address = temp_tg_port[i-1]['ip']
ipv4_stack.gateway = temp_tg_port[i-1]['peer_ip']
ipv4_stack.prefix = int(temp_tg_port[i-1]['prefix'])
bgpv4 = config.devices[i-1].bgp
bgpv4.router_id = temp_tg_port[i-1]['peer_ip']
bgpv4_int = bgpv4.ipv4_interfaces.add()
bgpv4_int.ipv4_name = ipv4_stack.name
bgpv4_peer = bgpv4_int.peers.add()
bgpv4_peer.name = 'BGP %d' % i
bgpv4_peer.as_type = BGP_TYPE
bgpv4_peer.peer_address = temp_tg_port[i-1]['peer_ip']
bgpv4_peer.as_number = int(TGEN_AS_NUM)
route_range = bgpv4_peer.v4_routes.add(name="Network_Group%d" % i) #snappi object named Network Group 2 not found in internal db
route_range.addresses.add(address='200.1.0.1', prefix=32, count=number_of_routes)
as_path = route_range.as_path
as_path_segment = as_path.segments.add()
as_path_segment.type = as_path_segment.AS_SEQ
as_path_segment.as_numbers = aspaths
rx_flow_name.append(route_range.name)
return rx_flow_name
def create_v6_topo():
eth = config.devices[0].ethernets.add()
eth.port_name = config.lags[0].name
eth.name = 'Ethernet 1'
eth.mac = "00:00:00:00:00:01"
ipv6 = eth.ipv6_addresses.add()
ipv6.name = 'IPv6 1'
ipv6.address = temp_tg_port[0]['ipv6']
ipv6.gateway = temp_tg_port[0]['peer_ipv6']
ipv6.prefix = int(temp_tg_port[0]['ipv6_prefix'])
rx_flow_name = []
for i in range(2, 3):
if len(str(hex(i).split('0x')[1])) == 1:
m = '0'+hex(i).split('0x')[1]
else:
m = hex(i).split('0x')[1]
ethernet_stack = config.devices[i-1].ethernets.add()
ethernet_stack.port_name = config.lags[i-1].name
ethernet_stack.name = 'Ethernet %d' % i
ethernet_stack.mac = "00:00:00:00:00:%s" % m
ipv6_stack = ethernet_stack.ipv6_addresses.add()
ipv6_stack.name = 'IPv6 %d' % i
ipv6_stack.address = temp_tg_port[i-1]['ipv6']
ipv6_stack.gateway = temp_tg_port[i-1]['peer_ipv6']
ipv6_stack.prefix = int(temp_tg_port[i-1]['ipv6_prefix'])
bgpv6 = config.devices[i-1].bgp
bgpv6.router_id = temp_tg_port[i-1]['peer_ip']
bgpv6_int = bgpv6.ipv6_interfaces.add()
bgpv6_int.ipv6_name = ipv6_stack.name
bgpv6_peer = bgpv6_int.peers.add()
bgpv6_peer.name = 'BGP+_%d' % i
bgpv6_peer.as_type = BGP_TYPE
bgpv6_peer.peer_address = temp_tg_port[i-1]['peer_ipv6']
bgpv6_peer.as_number = int(TGEN_AS_NUM)
route_range = bgpv6_peer.v6_routes.add(name="Network Group %d" % i)
route_range.addresses.add(address='3000::1', prefix=64, count=number_of_routes)
as_path = route_range.as_path
as_path_segment = as_path.segments.add()
as_path_segment.type = as_path_segment.AS_SEQ
as_path_segment.as_numbers = aspaths
rx_flow_name.append(route_range.name)
return rx_flow_name
conv_config.rx_rate_threshold = 90/(multipath)
if route_type == 'IPv4':
rx_flows = create_v4_topo()
flow = config.flows.flow(name='IPv4_Traffic_%d' % routes)[-1]
elif route_type == 'IPv6':
rx_flows = create_v6_topo()
flow = config.flows.flow(name='IPv6_Traffic_%d' % routes)[-1]
else:
raise Exception('Invalid route type given')
flow.tx_rx.device.tx_names = [config.devices[0].name]
flow.tx_rx.device.rx_names = rx_flows
flow.size.fixed = 1024
flow.rate.percentage = 100
flow.metrics.enable = True
flow.metrics.loss = True
return conv_config
def run_traffic(routes):
logger.info('|-------------------- RIB-IN Capacity test, No.of Routes : {} ----|'.format(routes))
conv_config = tgen_capacity(routes)
cvg_api.set_config(conv_config)
""" Starting Protocols """
logger.info("Starting all protocols ...")
cs = cvg_api.convergence_state()
cs.protocol.state = cs.protocol.START
cvg_api.set_state(cs)
wait(TIMEOUT, "For Protocols To start")
""" Starting Traffic """
logger.info('Starting Traffic')
cs = cvg_api.convergence_state()
cs.transmit.state = cs.transmit.START
cvg_api.set_state(cs)
wait(TIMEOUT, "For Traffic To start")
try:
for j in range(start_value, 100000000000, step_value):
tx_frate, rx_frate = [], []
run_traffic(j)
flow_stats = get_flow_stats(cvg_api)
logger.info('Loss% : {}'.format(flow_stats[0].loss))
for flow in flow_stats:
tx_frate.append(flow.frames_tx_rate)
rx_frate.append(flow.frames_rx_rate)
logger.info("Tx Frame Rate : {}".format(tx_frate))
logger.info("Rx Frame Rate : {}".format(rx_frate))
if float(flow_stats[0].loss) > 0.001:
if j == start_value:
raise Exception('Traffic Loss Encountered in first iteration, reduce the start value and run the test')
logger.info('Loss greater than 0.001 occured')
logger.info('Reducing the routes and running test')
b = j-step_value
logger.info('Stopping Traffic')
cs = cvg_api.convergence_state()
cs.transmit.state = cs.transmit.STOP
cvg_api.set_state(cs)
wait(TIMEOUT-20, "For Traffic To stop")
break
logger.info('Stopping Traffic')
cs = cvg_api.convergence_state()
cs.transmit.state = cs.transmit.STOP
cvg_api.set_state(cs)
wait(TIMEOUT-20, "For Traffic To stop")
l = []
l.append(b+int(step_value/8))
l.append(b+int(step_value/4))
l.append(b+int(step_value/2))
l.append(b+step_value-int(step_value/4))
l.append(b+step_value-int(step_value/8))
for i in range(0,len(l)):
run_traffic(l[i])
flow_stats = get_flow_stats(cvg_api)
logger.info('Loss% : {}'.format(flow_stats[0].loss))
if float(flow_stats[0].loss) <= 0.001:
max_routes = start_value
pass
else:
max_routes = l[i]-int(step_value/8)
break
logger.info('Stopping Traffic')
cs = cvg_api.convergence_state()
cs.transmit.state = cs.transmit.STOP
cvg_api.set_state(cs)
wait(TIMEOUT-20, "For Traffic To stop")
""" Stopping Protocols """
logger.info("Stopping all protocols ...")
cs = cvg_api.convergence_state()
cs.protocol.state = cs.protocol.STOP
cvg_api.set_state(cs)
wait(TIMEOUT-20, "For Protocols To STOP")
except Exception as e:
logger.info(e)
finally:
columns = ['Test Name', 'Maximum no. of Routes']
logger.info("\n%s" % tabulate([['RIB-IN Capacity Test',max_routes]], headers=columns, tablefmt="psql")) | e13c85d9e6ebdbfba84e20a81324da8156e7c934 | 3,650,029 |
from typing import List
from typing import Set
def ladder_length(beginWord: str, endWord: str, wordList: List[str]) -> int:
"""
    Bidirectional BFS: the two frontiers alternately close in on each other; the level with the most nodes is kept as the target level.
:param beginWord:
:param endWord:
:param wordList:
:return:
>>> ladder_length('hit', 'cog', ["hot","dot","dog","lot","log","cog"])
5
>>> ladder_length('hit', 'cog', ["hot","dot","dog","lot","log"])
0
>>> ladder_length("hit","cog",["hot","dot","dog","lot","log"])
"""
if not beginWord or not endWord or endWord not in wordList:
return 0
all_chars: List[str] = [chr(i) for i in range(ord('a'), ord('z') + 1)]
curr_word_set: Set[str] = {beginWord} # 当前层的节点
end_word_set: Set[str] = {endWord} # 目标层的节点
word_set: Set[str] = set(wordList) # 加速单词是否在字典中的判断
level: int = 1
while curr_word_set:
        # advance the level counter first so nodes within the same level are never treated as adjacent
level += 1
for cw in curr_word_set:
            # beginWord must not appear again in wordList (word_set)
if cw != beginWord:
word_set.remove(cw)
tmp_set: Set[str] = set()
for curr_word in curr_word_set:
for i, w in enumerate(curr_word):
for letter in all_chars:
if w == letter:
continue
changed: str = curr_word[:i] + letter + curr_word[i + 1:]
if changed in end_word_set:
return level
if changed in word_set:
tmp_set.add(changed)
        # keep the level with the most nodes as the target level
if len(tmp_set) <= len(end_word_set):
curr_word_set = tmp_set
else:
            # reverse the search direction
curr_word_set = end_word_set
end_word_set = tmp_set
return 0 | 020f3ffd2e009b682a47ff9aad8d1d6025c29f37 | 3,650,031 |
def setup_option(request):
"""Создаем объект для удобство работы с переменными в тестовых методах
"""
setup_parameters = {}
if request.config.getoption('--site_url'):
setup_parameters['site_url'] = request.config.getoption('--site_url')
return setup_parameters | 49908ee8e1422cc4fd05c6d93a96c00d734cf6d1 | 3,650,032 |
import time
import torch
def train_one_epoch(img_input,model,optimizer,writer,epoch,args):
"""
Finish
1.train for one epoch
2.print process, total loss, data time in terminal
3.save loss, lr, output img in tensorboard
Note
1.you can change the save frequency
"""
loss_train = 0
model.train()
length = len(img_input)
print("iteration:",length)
train_time = time.time()
begin = time.time()
'''loss control'''
loss_for_control = torch.zeros([6,args.paf_num+args.heatmap_num])
weight_con = torch.ones([1,args.paf_num+args.heatmap_num])
weight_con = weight_con.cuda()
'''start training'''
for each_batch, (img, target_heatmap, target_paf) in enumerate(img_input):
data_time = time.time() - begin
img = img.cuda()
target_heatmap = target_heatmap.cuda()
target_paf = target_paf.cuda()
# heat_mask = heat_mask.cuda()
# paf_mask = paf_mask.cuda()
_, saved_for_loss = model(img)
#loss = CMUnet_loss.get_loss(saved_for_loss,target_heatmap,target_paf,args,weight_con)
loss = resnet_loss.get_loss(saved_for_loss,target_heatmap,target_paf,args,weight_con)
# for i in range(args.paf_stage):
# for j in range(args.paf_num):
# loss_for_control[i][j] += loss['stage_{0}_{1}'.format(i,j)]
# for i in range(len(saved_for_loss)-args.paf_stage):
# for j in range(args.heatmap_num):
# loss_for_control[i][j] += loss['stage_{0}_{1}'.format(i,j)]
optimizer.zero_grad()
loss["final"].backward()
optimizer.step()
loss_train += loss["final"]
if each_batch % args.print_fre == 0:
print_to_terminal(epoch,each_batch,length,loss,loss_train,data_time)
#print_to_terminal(epoch,each_batch,length,loss,loss_train,data_time)
#writer.add_scalar("train_loss_iterations", loss_train, each_batch + epoch * length)
begin = time.time()
'''for short test'''
# if each_batch == 5:
# break
#weight_con = Online_weight_control(loss_for_control)
loss_train /= length
train_time = time.time() - train_time
print('total training time:',train_time)
return loss_train | b26e2933dd3575e45c33ba6bf801f5a92fc72ab7 | 3,650,033 |
def get_unique_tokens(texts):
"""
Returns a set of unique tokens.
    >>> sorted(get_unique_tokens([['oeffentl', 'ist', 'oeffentl']]))
    ['ist', 'oeffentl']
"""
unique_tokens = set()
for text in texts:
for token in text:
unique_tokens.add(token)
return unique_tokens | f9c174b264082b65a328fd9edf9421e7ff7808a2 | 3,650,034 |
def _symmetric_difference(provided: dict, chosen: dict) -> dict:
"""
Returns the fields that are not in common between provided and chosen JSON schema.
:param provided: the JSON schema to removed the chosen schema from.
:param chosen: the JSON schema to remove from the provided schema.
:return: a JSON schema with the chosen JSON schema removed.
"""
remove_keys = []
for k, vp in provided.items():
vc = chosen.get(k)
if vc is not None:
if isinstance(vp, dict):
vc = chosen.get(k, {})
assert isinstance(vc, dict), type_not_matching_str
provided[k] = _symmetric_difference(vp, vc)
elif isinstance(vp, list):
vc = chosen.get(k, [])
assert isinstance(vc, list), type_not_matching_str
provided[k] = [i for i in vp if i not in vc] # quadratic performance, optimize
else:
remove_keys.append(k)
for k in remove_keys:
provided.pop(k)
return provided | 5900c6de35c0665ab2c0ec10c4df4dc87b75483a | 3,650,035 |
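A usage sketch with two small hypothetical schemas, assuming _symmetric_difference is in scope; keys whose values also appear in `chosen` are stripped from `provided` (which is modified in place).
provided = {"a": 1, "b": {"c": 2, "d": 3}, "e": [1, 2, 3]}
chosen = {"a": 1, "b": {"c": 2}, "e": [2]}
print(_symmetric_difference(provided, chosen))
# -> {'b': {'d': 3}, 'e': [1, 3]}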
def moved_in(nn_orig, nn_proj, i, k):
"""Determine points that are neighbours in the projection space,
but were not neighbours in the original space.
nn_orig
neighbourhood matrix for original data
nn_proj
neighbourhood matrix for projection data
i
index of the point considered
k
size of the neighbourhood considered
Return a list of indices for points which are 'moved in' to point i
"""
pp = list(nn_proj[i, 1:k + 1])
oo = list(nn_orig[i, 1:k + 1])
for j in oo:
if (j in oo) and (j in pp):
pp.remove(j)
return pp | b63a9b0f53554032fc920aeaf6d3d76b93dd8ab3 | 3,650,037 |
import re
def _get_lines_changed(line_summary):
"""
Parse the line diff summary into a list of numbers representing line numbers added or changed
:param line_summary: the summary from a git diff of lines that have changed (ex: @@ -1,40 +1,23 @@)
:return: a list of integers indicating which lines changed for that summary
"""
lines = re.search(r"\@\@.*?\+(.+?) \@\@", line_summary).group(1)
if "," in lines:
start, count = [int(x) for x in lines.split(",")]
return list(range(start, start + count))
return [int(lines)] | 01d1b51ef480a0d7dcdc916fe68aac08ce81d23f | 3,650,038 |
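A usage sketch on typical git hunk headers, assuming _get_lines_changed is in scope:
print(_get_lines_changed("@@ -1,40 +1,23 @@"))   # -> [1, 2, ..., 23]
print(_get_lines_changed("@@ -5 +7 @@"))         # -> [7]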
def tj_agri_sup():
"""
Real Name: b'Tj Agri Sup'
Original Eqn: b'MIN(Tj Agri Dem *Agri Tajan Dam Coef, (Tj Outflow-Tj Dom Sup-Tj Env Sup-Tj Ind Sup))'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return np.minimum(tj_agri_dem() * agri_tajan_dam_coef(),
(tj_outflow() - tj_dom_sup() - tj_env_sup() - tj_ind_sup())) | 07c6029dc062f20756b3f72289640a29526c41bf | 3,650,039 |
def correlation_coefficient(y_true, y_pred):
"""The CC, is the Pearson’s correlation coefficient and treats the saliency
and ground truth density maps, as random variables measuring the linear
relationship between them.Values are first divided by their sum for each
image to yield a distribution that adds to 1.
Args:
y_true (tensor, float32): A 4d tensor that holds the ground truth
saliency maps with values between 0 and 255.
y_pred (tensor, float32): A 4d tensor that holds the predicted saliency
maps with values between 0 and 1.
Returns:
tensor, float32: A 0D tensor that holds the averaged error.
"""
sum_y_true = tf.reduce_sum(y_true, axis=[1, 2, 3], keep_dims=True)
sum_y_pred = tf.reduce_sum(y_pred, axis=[1, 2, 3], keep_dims=True)
y_true /= (sum_y_true + 1e-7)
y_pred /= (sum_y_pred + 1e-7)
N = shape_r_out * shape_c_out
sum_prod = tf.reduce_sum(y_true * y_pred, axis=[1, 2, 3])
sum_x = tf.reduce_sum(y_true, axis=[1, 2, 3])
    sum_y = tf.reduce_sum(y_pred, axis=[1, 2, 3])
sum_x_square = tf.reduce_sum(tf.square(y_true), axis=[1, 2, 3])
sum_y_square = tf.reduce_sum(tf.square(y_pred), axis=[1, 2, 3])
num = sum_prod - ((sum_x * sum_y) / N)
den = tf.sqrt((sum_x_square - tf.square(sum_x) / N) * (sum_y_square - tf.square(sum_y) / N))
return -tf.reduce_mean(num / den) | 9d0f7825219a5957edfbf464ca9b62182b81bb3c | 3,650,040 |
def init_args():
"""Init command line args used for configuration."""
parser = init_main_args()
return parser.parse_args() | c2939b8d6fbefa7a6b792d13c98a805a3e53785f | 3,650,041 |
import warnings
def _fit_binary(estimator, X, y, classes=None, **kwargs):
"""Fit a single binary estimator with kwargs."""
unique_y = np.unique(y)
if len(unique_y) == 1:
if classes is not None:
if y[0] == -1:
c = 0
else:
c = y[0]
warnings.warn("Label %s is present in all training examples." % str(classes[c]))
estimator = _ConstantPredictor().fit(X, unique_y)
else:
estimator = clone(estimator)
estimator.fit(X, y, **kwargs)
return estimator | 24e37aa50cada6cce4ab52c1be85cace3ad4c417 | 3,650,042 |
import csv
def data_index(person, dim):
"""
Output sequence of eye gaze (x, y) positions from the dataset for a person and a dimension of that person (task, session, etc)
Index starts at 0.
The vectors are [x, y, flag], flag being if it's null
"""
session = "S1" if dim % 2 == 0 else "S2"
# S1_Balura_Game S1_Fixations S1_Horizontal_Saccades S1_Random_Saccades S1_Reading S1_Video_1 S1_Video_2
for exc in exceptions:
person += (exc-1 <= person)
num = str(person+1).rjust(3, "0")
#global info, tasks, tasks_code
dir = "data/Round_1/id_1" + num + "/" + session + "/" + session + tasks[dim//2] + \
"/S_1" + num + "_" + session + "_" + tasks_code[dim//2] + \
".csv"
pos = []
mask = []
with open(dir) as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
vecs = []
pads = []
for i, row in enumerate(spamreader):
if i < 1:
continue
row = ''.join(row).split(",")
if (i-1) % config['Hz'] == 0 and (i-1) != 0:
vecs = np.stack(vecs)
pads = np.stack(pads)
pos.append(vecs)
mask.append(pads)
vecs = []
pads = []
if (i-1) % (config['Hz'] // config['second_split']) == 0:
flag = (row[1] == 'NaN' or row[2] == 'NaN')
arr = np.array([0, 0, flag]) if flag else np.array([float(row[1]), float(row[2]), flag])
vecs.append(arr)
arr2 = np.array([0]*(info.feature_size-1)+[info.feature_size]) if flag else np.ones(info.feature_size)
# the info.feature_size instead of 1 is to rescale and give it equal "weight"
pads.append(arr2)
pos=np.stack(pos)
mask=np.stack(mask)
return pos, mask, [tasks[dim//2]] | e8b37aaeb2c228f0749aece26609fb04e0d4a226 | 3,650,043 |
def getStatic():
"""
These are "static" params for a smoother application flow and fine tuning of some params
Not all functions are implemented yet
Returns the necessary Params to run this application
"""
VISU_PAR = {
# =============================================================================
# More general Params
# =============================================================================
# does not consider samples which are longer than this value in [s]
"delteSampleAbove[s]": 5,
        # flag for extracting/considering long Samples
        "extractLongs" : False,
        # does not consider samples which are shorter than this value in [s]
        "toShort[s]": 0.003,
        # flag for extracting/considering too short Samples
        "extractShort" : False,
# this might indicate a loop !!
"bpmConfidence" : 1,
        # flag for extracting/considering potential Loops
"extractLoops" : False,
#compress all features to a range from (0,..,1) ->getFeatureStack()
"compress": True,
# invert all negative feature values with a total negative correlation ->getPandasCorrelation()
"invNegative" : True,
# =============================================================================
# Application Modes
# =============================================================================
# scriptMode := ("clustering", "get_N_Closest", "analyseWithGT", "optimizer")
# "clustering" := group samples into 'n Cluster' not regarding their GT
# "get_N_Closest" := select N most similar samples to a reference sample not regarding their GT
# requires path of a JSON file which contains the features of one sample (compareFilePath)
# requires a number (N) (n_mostSimilar)
# "analyseWithGT" := analyse a set of features and evaluate with GT-Labels
# it is still possible to cluster within this option and save a landmap and restructure files
# "optimizer" := trys a new subset of features and save the new subset, Needs GTs
#
        # the hierarchy of the application mode is: analyseWithGT (when true, most params below are useful)
# clustering (There will be no option to select features compared to GT)
        # get_N_Closest (There will be no option to select features compared to GT)
# -> The best Features calculated and saved will be used ->(getBestFile,getBestFeatureSelektion)
"scriptMode" : "get_N_Closest",
#for get_N_Closest -> This should only contain one file and only the features for one Sample,
"compareFilePath" : "../json_data/singleFile/Dirt-SamplesSingle2020-10-06.17:26:55.json",
"n_mostSimilar": 25,
# path to json files
"dirName" : "../json_data/",
# saved Features of a sample-library
"fileName": "Dirt-Samples2020-09-14.20:53:18.json",
# =============================================================================
# Feature selection and Feature subset creation modes
# =============================================================================
        # A fixed set of Features to select by (the names may vary from old JSON-Files to new ones)
"predefinedFeatures" : False,
        # You can select Features by yourself if you want. It refers to the predefined features.
        # The default set can be generated from the Dirt-Samples with suboptimalSearch's default values.
"defineYoureOwnFeatureSet" : ['Har-Log_-FACM_10', 'MFCC-4', 'MFCC-7', 'Har-RecChr_-FACM_12','TriChr_Centroid', 'ZeroCrossingRate', 'MFCC-8'],
# "defineYoureOwnFeatureSet" : ["Har-TriChr_-FACM_12", "MFCC-10"],
# Select all features with correlation > suboptimalSearch.second to GT-Labels
# And discard all features with cross correlation > suboptimalSearch.third
"suboptimalSearch" : (True,0.3, 0.8),
        # Only take the nBest Features from suboptimalSearch (-1 := all)
"nBest" : 7,
# Consider all Features or take an approach of above.
"calcAllFeatures": False,
#("HillClimber", "Random") optimize features with a) hillclimber b) totaly random
# maxxHill is the maximum iterationof the hillclimber/ max repeat for Random
# probHill is the probability for each individual feature to get selected
# modeHill := ("small", "big", "medium") affects HillClimber
# small -> small steps (1-2 changes at a time)
# big -> every permutation has equal probability
        # bigChoice -> bigger steps than "small" but not everything possible like "big"
"optimizer" : "HillClimber",
"maxHill" : 500,
"probHill": 0.0000001,
"modeHill" : "medium",
        # number of clusters to consider with Hierarch
"nCluster" : 40,
        # (Hierarch/OPTICS/AffinityPropagation/SpectralClustering) 1st is hierarchical clustering, 2nd is Density based->getClusteringLabels()
"clusterAlgo" : "Hierarch",
        # The mode for hierarchical clustering. ward = minimum variance, average = minimum of average, complete = maximum of each cluster, single = minimum of each cluster
"hierarchMode" : "average",
# =============================================================================
# Output Params (save files to folder | draw landmap)
# =============================================================================
# save folder for copying all audio files
"saveFolder" : '../estimateSongs/',
# restructure all files into their newly assigned cluster group;
# if mode is n_mostSimilar, it is a folder which contains the n_mostSimilar samples
"copyFilesToFolder" : True,
# draw a distance landmap with graphviz.
"graphviz": False,
# graphvizMode := ("clusterBased", "oneFilePerCluster", "minimalSpan") :
# "minimalSpan" = draw one big landmap without clusters as minimal span tree (not recommended for all Files)
# "clusterBased" = draw seperate clusters in one big landmap |
# "oneFilePerCluster" = generate one landmap file per cluster)
"graphvizMode" : "minimalSpan"
}
# Additional params for Spectral Clustering. This approach will not be taken further.
SpectralClusterParam = {"assign_labels":"kmeans", #{‘kmeans’, ‘discretize’} default kmeans,
"eigen_solver": "amg",
}
VISU_PAR = {**VISU_PAR, **SpectralClusterParam}
return VISU_PAR | f82ed9c4156b8199be924fc1ed62398fcbad9e0c | 3,650,044 |
def current_device():
"""Return the index of the current active device.
Returns
-------
int
The index of device.
"""
return dragon.cuda.GetDevice() | 453b81673e198ddd3a5870843d16b9cc395802d4 | 3,650,045 |
import time
async def access_logger(app, handler):
"""Simple logging middleware to report info about each request/response.
"""
async def logging_handler(request):
start_time = time.time()
request_name = hex(int(start_time * 10000))[-6:]
client_ip, _ = request.transport.get_extra_info(
'peername', ('UNKNOWN', None))
# log request
LOGGER.info(
'Request %s: "%s %s" from %s',
request_name,
request.method,
request.rel_url,
client_ip)
def log_response(response):
# pylint: disable=protected-access
content_length = response._headers.get('Content-Length',
'UNKNOWN')
if content_length == 'UNKNOWN':
LOGGER.info(
'Response %s: %s status, %s size, in %.3fs',
request_name,
response._status,
content_length,
time.time() - start_time)
else:
LOGGER.info(
'Response %s: %s status, %sB size, in %.3fs',
request_name,
response._status,
content_length,
time.time() - start_time)
try:
response = await handler(request)
log_response(response)
return response
except web.HTTPError as e:
log_response(e)
raise e
return logging_handler | 55d4ac318a65d6f4256467f7909b5a6ee2115a6d | 3,650,046 |
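# Hedged usage sketch for the middleware factory above: old-style aiohttp
# middleware factories (signature `factory(app, handler)`) can be passed to
# web.Application(middlewares=[...]). The route and handler below are
# illustrative; LOGGER is assumed to be configured elsewhere in the module.
from aiohttp import web

async def _hello(request):
    return web.Response(text="ok")

def make_logged_app():
    app = web.Application(middlewares=[access_logger])
    app.router.add_get("/", _hello)
    return app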
from typing import Tuple
def main(source: str) -> Tuple[astroid.Module, TypeInferer]:
"""Parse a string representing source text, and perform a typecheck.
Return the astroid Module node (with the type_constraints attribute set
on all nodes in the tree) and TypeInferer object.
"""
module = astroid.parse(source)
type_inferer = TypeInferer()
type_inferer.environment_transformer().visit(module)
type_inferer.type_inference_transformer().visit(module)
return module, type_inferer | f8e9b9a0ac9ff4334cce9ca7c888d3ff11570661 | 3,650,047 |
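# Minimal smoke-test sketch for the driver above; the source string is
# illustrative and the printed output only shows that parsing and the two
# inference passes ran without error.
if __name__ == "__main__":
    demo_src = "x = 1 + 2\ny = 'a' * 3\n"
    demo_module, demo_inferer = main(demo_src)
    for stmt in demo_module.body:
        print(stmt.as_string())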
import ruamel.yaml.scalarstring
def to_literal_scalar(a_str):
"""Helper function to enforce literal scalar block (ruamel.yaml)."""
return ruamel.yaml.scalarstring.LiteralScalarString(a_str) | 7cdb3d37bad184b7c6e68b374d1b6fd7e4c744c4 | 3,650,048 |
from typing import Optional
def get_first_free_address(subnet_id: Optional[int] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirstFreeAddressResult:
"""
Use this data source to access information about an existing resource.
"""
__args__ = dict()
__args__['subnetId'] = subnet_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('phpipam:index/getFirstFreeAddress:getFirstFreeAddress', __args__, opts=opts, typ=GetFirstFreeAddressResult).value
return AwaitableGetFirstFreeAddressResult(
id=__ret__.id,
ip_address=__ret__.ip_address,
subnet_id=__ret__.subnet_id) | ea4a599a7f3ac65e296cce4c8fc3a764202bba26 | 3,650,049 |
def pagenav(object_list, base_url, order_by, reverse, cur_month, is_paginated, paginator):
"""Display page navigation for given list of objects"""
return {'object_list': object_list,
'base_url': base_url,
'order_by': order_by,
'reverse': reverse,
'cur_month': cur_month,
'is_paginated': is_paginated,
'paginator': paginator} | eb61fb76dd32b8d0b3e264e77ce912766d3e38da | 3,650,050 |
def read_input(path: str):
"""
Read game board file from path.
Return list of str.
>>> read_input("skyscrapers1.txt")
['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***']
"""
with open(path, 'r') as f:
game_lst = f.readlines()
for idx, line in enumerate(game_lst):
game_lst[idx] = line.strip('\n')
return game_lst | a4bf08525ca3fe4b0b1efab1901830b4d7c45f05 | 3,650,051 |
def run_tweeter():
""" Captures image and sends tweet """
capture_image_and_tweet()
return schedule.CancelJob | 2cf3895270e5f5f64ecb2e943548f0a290c35b02 | 3,650,052 |
import time
from functools import reduce
from operator import add
def get_retro_results(
outdir,
recos_basedir,
events_basedir,
recompute_estimate=False,
overwrite=False,
):
"""Extract all rectro reco results from a reco directory tree, merging with original
event information from correspoding source events directory tree. Results are
populated to a Pandas DataFrame, saved to disk, and this is returned to the user.
Parameters
----------
outdir : string
recos_basedir : string
events_basedir : string
recompute_estimate : bool, optional
overwrite : bool, optional
"""
t0 = time.time()
outdir = abspath(expand(outdir))
if not isdir(outdir):
mkdir(outdir)
outfile_path = join(outdir, 'reconstructed_events.feather')
if not overwrite and isfile(outfile_path):
raise IOError('Output file path already exists at "{}"'.format(outfile_path))
cluster = LocalCluster(threads_per_worker=1, diagnostics_port=None)
client = Client(cluster)
try:
# Walk directory hierarchy
futures = []
for reco_dirpath, _, files in walk(recos_basedir, followlinks=True):
is_leafdir = False
for f in files:
if f[-3:] == 'pkl' and f[:3] in ('slc', 'evt'):
is_leafdir = True
break
if not is_leafdir:
continue
rel_dirpath = relpath(path=reco_dirpath, start=recos_basedir)
if events_basedir is not None:
event_dirpath = join(events_basedir, rel_dirpath)
if not isdir(event_dirpath):
raise IOError('Event directory does not exist: "{}"'
.format(event_dirpath))
abs_reco_dirpath = abspath(reco_dirpath)
filenum = basename(abs_reco_dirpath)
flavdir = basename(dirname(abs_reco_dirpath))
futures.append(
client.submit(
extract_from_leaf_dir,
recodir=reco_dirpath,
eventdir=event_dirpath,
flavdir=flavdir,
filenum=filenum,
recompute_estimate=recompute_estimate,
)
)
results = [f.result() for f in as_completed(futures)]
finally:
cluster.close()
client.close()
del client
del cluster
# Convert to a single list containing all events
all_events = reduce(add, results, [])
# Convert to pandas DataFrame
all_events = pd.DataFrame(all_events)
# Save to disk
all_events.to_feather(outfile_path)
print('\nAll_events saved to "{}"\n'.format(outfile_path))
nevents = len(all_events)
dt = time.time() - t0
print('\nTook {:.3f} s to extract {} events'.format(dt, nevents))
return all_events | e3753b86ed4efa60057f1e3a0c70c34193447718 | 3,650,053 |
import copy
def split_surface_v(obj, t, **kwargs):
""" Splits the surface at the input parametric coordinate on the v-direction.
This method splits the surface into two pieces at the given parametric coordinate on the v-direction,
generates two different surface objects and returns them. It does not modify the input surface.
:param obj: surface
:type obj: BSpline.Surface or NURBS.Surface
:param t: parametric coordinate on the v-direction
:type t: float
:return: a list of surface as the split pieces of the initial surface
:rtype: Multi.MultiSurface
"""
# Validate input
if not isinstance(obj, Abstract.Surface):
raise TypeError("Input shape must be an instance of any Surface class")
if t == 0.0 or t == 1.0:
raise ValueError("Cannot split on the corner points")
utilities.check_uv(t)
# Keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear)
# Find multiplicity of the knot
ks = span_func(obj.degree_v, obj.knotvector_v, obj.ctrlpts_size_v, t) - obj.degree_v + 1
s = helpers.find_multiplicity(t, obj.knotvector_v)
r = obj.degree_v - s
# Create backups of the original surface
temp_obj = copy.deepcopy(obj)
# Split the original surface
temp_obj.insert_knot(v=t, rv=r, check_r=False)
# Knot vectors
knot_span = span_func(temp_obj.degree_v, temp_obj.knotvector_v, temp_obj.ctrlpts_size_v, t) + 1
surf1_kv = list(temp_obj.knotvector_v[0:knot_span])
surf1_kv.append(t)
surf2_kv = list(temp_obj.knotvector_v[knot_span:])
for _ in range(0, temp_obj.degree_v + 1):
surf2_kv.insert(0, t)
# Control points
surf1_ctrlpts = []
for v_row in temp_obj.ctrlpts2d:
temp = v_row[0:ks + r]
surf1_ctrlpts.append(temp)
surf2_ctrlpts = []
for v_row in temp_obj.ctrlpts2d:
temp = v_row[ks + r - 1:]
surf2_ctrlpts.append(temp)
# Create a new surface for the first half
surf1 = temp_obj.__class__()
surf1.degree_u = temp_obj.degree_u
surf1.degree_v = temp_obj.degree_v
surf1.ctrlpts2d = surf1_ctrlpts
surf1.knotvector_v = surf1_kv
surf1.knotvector_u = temp_obj.knotvector_u
# Create another surface for the second half
surf2 = temp_obj.__class__()
surf2.degree_u = temp_obj.degree_u
surf2.degree_v = temp_obj.degree_v
surf2.ctrlpts2d = surf2_ctrlpts
surf2.knotvector_v = surf2_kv
surf2.knotvector_u = temp_obj.knotvector_u
# Create a MultiSurface
ret_val = Multi.MultiSurface()
ret_val.add(surf1)
ret_val.add(surf2)
# Return the new surfaces
return ret_val | 6603fb5e4c45fa60817168d776ac005475bd37a5 | 3,650,054 |
from typing import OrderedDict
def oidc_userprofile_test(request):
"""
OIDC-style userinfo
"""
user = request.user
profile, g_o_c = UserProfile.objects.get_or_create(user=user)
data = OrderedDict()
data['sub'] = user.username
data['name'] = "%s %s" % (user.first_name, user.last_name)
data['nickname'] = profile.nickname
data['given_name'] = user.first_name
data['family_name'] = user.last_name
data['email'] = user.email
data['email_verified'] = profile.email_verified
data['phone_number'] = profile.mobile_phone_number
data['phone_verified'] = profile.phone_verified
data['picture'] = profile.picture_url
data['gender'] = profile.gender
data['birthdate'] = str(profile.birth_date)
data['patient'] = get_fhir_id(user)
data['iat'] = user.date_joined
data['call_member'] = settings.CALL_MEMBER
data['call_member_plural'] = settings.CALL_MEMBER_PLURAL
data['call_organization'] = settings.CALL_ORGANIZATION
data['call_organization_plural'] = settings.CALL_ORGANIZATION_PLURAL
data['ial'] = profile.identity_assurance_level
return JsonResponse(data) | aeae1962615ac9894b1b555814851c33efa85b45 | 3,650,055 |
def split_idx( idx,a,b):
"""
Shuffle a list of indices (with an unseeded RandomState) and split it into
training, test and validation subsets: the first a/10 of the shuffled indices
become the training set, the next b/10 the test set, and the remainder the
validation set. Returned in the order (train, val, test).
idx: list of indices to split
"""
rs = np.random.RandomState()
rs.shuffle(idx)
start = int(a / 10. * len(idx))
end = int((b+a) / 10. * len(idx))
train_idx = idx[0:start]
test_idx = idx[start:end]
val_idx = idx[end:]
return train_idx, val_idx, test_idx
# return train_idx, test_idx | e5c9850a0bbcdb187d12dff4cd9df6c9faddfacc | 3,650,056 |
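# Quick behavioural sketch of the splitter above: with a=7 and b=2 the first
# 70% of the shuffled indices form the training set, the next 20% the test
# set, and the remaining 10% the validation set (returned as train, val, test).
import numpy as np

demo_idx = list(range(100))
train_idx, val_idx, test_idx = split_idx(demo_idx, 7, 2)
print(len(train_idx), len(val_idx), len(test_idx))  # 70 10 20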
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
val: float or int
src: tuple
dst: tuple
example: print(scale(99, (0.0, 99.0), (-1.0, +1.0)))
"""
return (float(val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0] | 26cfaccaeea861ccecb36697838710c0ab706520 | 3,650,057 |
def add(c1, c2):
"""Add two encrypted counters"""
a1, b1 = c1
a2, b2 = c2
return (a1 + a2, b1 + b2) | d3e519524fac558622f692a46ffb8fed9899176f | 3,650,058 |
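# Tiny illustration of the component-wise addition above. Treating each
# encrypted counter as an (a, b) pair of ciphertext components (an additively
# homomorphic scheme is an assumption here), adding the pairs combines the counts.
c_first = (3, 5)
c_second = (4, 1)
print(add(c_first, c_second))  # (7, 6)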
async def wait_all_tasks_blocked(cushion=0.0):
"""Block until there are no runnable tasks.
This is useful in testing code when you want to give other tasks a
chance to "settle down". The calling task is blocked, and doesn't wake
up until all other tasks are also blocked for at least ``cushion``
seconds. (Setting a non-zero ``cushion`` is intended to handle cases
like two tasks talking to each other over a local socket, where we
want to ignore the potential brief moment between a send and receive
when all tasks are blocked.)
Note that ``cushion`` is measured in *real* time, not the Trio clock
time.
If there are multiple tasks blocked in :func:`wait_all_tasks_blocked`,
then the one with the shortest ``cushion`` is the one woken (and
this task becoming unblocked resets the timers for the remaining
tasks). If there are multiple tasks that have exactly the same
``cushion``, then all are woken.
You should also consider :class:`trio.testing.Sequencer`, which
provides a more explicit way to control execution ordering within a
test, and will often produce more readable tests.
Example:
Here's an example of one way to test that Trio's locks are fair: we
take the lock in the parent, start a child, wait for the child to be
blocked waiting for the lock (!), and then check that we can't
release and immediately re-acquire the lock::
async def lock_taker(lock):
await lock.acquire()
lock.release()
async def test_lock_fairness():
lock = trio.Lock()
await lock.acquire()
async with trio.open_nursery() as nursery:
nursery.start_soon(lock_taker, lock)
# child hasn't run yet, we have the lock
assert lock.locked()
assert lock._owner is trio.lowlevel.current_task()
await trio.testing.wait_all_tasks_blocked()
# now the child has run and is blocked on lock.acquire(), we
# still have the lock
assert lock.locked()
assert lock._owner is trio.lowlevel.current_task()
lock.release()
try:
# The child has a prior claim, so we can't have it
lock.acquire_nowait()
except trio.WouldBlock:
assert lock._owner is not trio.lowlevel.current_task()
print("PASS")
else:
print("FAIL")
"""
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
try:
return await GLOBAL_RUN_CONTEXT.runner.wait_all_tasks_blocked(cushion)
except AttributeError:
raise RuntimeError("must be called from async context") | 35b144f4a214cb1f02bb1448f78a54ed93ac66aa | 3,650,059 |
def get_chisq_grid(data, type, forecast=False, errors=None):
"""
Generates 2d meshgrid for chisq values of a given type (i.e. BBN, CMB etc)
"""
masses = np.unique(data['mass'])
omegabs = np.unique(data['OmegaB'])
MASS, OMEGAB = np.meshgrid(masses, omegabs)
OMEGABDAT = data['OmegaB'].reshape(len(masses), -1).T
YP = data['Yp'].reshape(len(masses), -1).T
DH = data['D/H'].reshape(len(masses), -1).T
NEFF = data['Neff'].reshape(len(masses), -1).T
return chisq(YP, DH, OMEGABDAT, NEFF, type, forecast, errors) | 3ea6fcf16d6c506733f5164e8808c5d5dce6c969 | 3,650,060 |
import ctypes
def spectrl2(units, location, datetime, weather, orientation,
atmospheric_conditions, albedo):
"""
Calculate solar spectrum by calling functions exported by
:data:`SPECTRL2DLL`.
:param units: set ``units`` = 1 for W/m\ :sup:`2`/micron
:type units: int
:param location: latitude, longitude and UTC-timezone
:type location: float
:param datetime: year, month, day, hour, minute and second
:type datetime: int
:param weather: ambient-pressure [mB] and ambient-temperature [C]
:type weather: float
:param orientation: tilt and aspect [degrees]
:type orientation: float
:param atmospheric_conditions: alpha, assym, ozone, tau500 and watvap
:type atmospheric_conditions: float
:param albedo: 6 wavelengths and 6 reflectivities
:type albedo: float
:returns: spectral decomposition, x-coordinate
:rtype: float
:raises: :exc:`~solar_utils.exceptions.SPECTRL2_Error`,
:exc:`~solar_utils.exceptions.SOLPOS_Error`
Returns the diffuse, direct, extraterrestrial and global spectral components
on the tilted surface as a function of the x-coordinate specified by units.
===== ===============================================================
units output units
===== ===============================================================
1 irradiance (W/sq m/micron) per wavelength (microns)
2 photon flux (10.0E+16 /sq cm/s/micron) per wavelength (microns)
3 photon flux density (10.0E+16 /sq cm/s/eV) per energy (eV)
===== ===============================================================
See
`NREL SPECTRL2 Documentation <http://rredc.nrel.gov/solar/models/spectral/spectrl2/documentation.html>`_
for more detail.
.. seealso::
:func:`solposAM`
**Examples:**
>>> units = 1
>>> location = [33.65, -84.43, -5.0]
>>> datetime = [1999, 7, 22, 9, 45, 37]
>>> weather = [1006.0, 27.0]
>>> orientation = [33.65, 135.0]
>>> atmospheric_conditions = [1.14, 0.65, -1.0, 0.2, 1.36]
>>> albedo = [0.3, 0.7, 0.8, 1.3, 2.5, 4.0] + ([0.2] * 6)
>>> (specdif, specdir, specetr, specglo,
specx) = spectrl2(units, location, datetime, weather, orientation,
atmospheric_conditions, albedo)
"""
# load the DLL
ctypes.cdll.LoadLibrary(SOLPOSAMDLL) # requires 'solpos.dll'
spectrl2_dll = ctypes.cdll.LoadLibrary(SPECTRL2DLL)
_spectrl2 = spectrl2_dll.spectrl2
# cast Python types as ctypes
_location = (ctypes.c_float * 3)(*location)
_datetime = (ctypes.c_int * 6)(*datetime)
_weather = (ctypes.c_float * 2)(*weather)
_orientation = (ctypes.c_float * 2)(*orientation)
_atmospheric_conditions = (ctypes.c_float * 5)(*atmospheric_conditions)
_albedo = (ctypes.c_float * 12)(*albedo)
# allocate space for results
specdif = (ctypes.c_float * 122)()
specdir = (ctypes.c_float * 122)()
specetr = (ctypes.c_float * 122)()
specglo = (ctypes.c_float * 122)()
specx = (ctypes.c_float * 122)()
angles = (ctypes.c_float * 2)()
airmass = (ctypes.c_float * 2)()
settings = (ctypes.c_int * 2)()
shadowband = (ctypes.c_float * 3)()
# call DLL
err_code = _spectrl2(
units, _location, _datetime, _weather, _orientation,
_atmospheric_conditions, _albedo, specdif, specdir, specetr, specglo,
specx, angles, airmass, settings, shadowband
)
# return results if successful, otherwise raise exception
if err_code == 0:
return specdif, specdir, specetr, specglo, specx
elif err_code < 0:
data = {'units': units,
'tau500': atmospheric_conditions[3],
'watvap': atmospheric_conditions[4],
'assym': atmospheric_conditions[1]}
raise SPECTRL2_Error(err_code, data)
else:
# convert err_code to bits
_code = _int2bits(err_code)
data = {'location': location,
'datetime': datetime,
'weather': weather,
'angles': angles,
'airmass': airmass,
'settings': settings,
'orientation': orientation,
'shadowband': shadowband}
raise SOLPOS_Error(_code, data) | aa8bd3878bc3f230d89e1d9545621a43e2d2fa6c | 3,650,062 |
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent state
transitions for an unbiased random walk.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain, tea leaf, or solute molecule).
The states and transitions are as follows:
Pair state Transition to Process Rate (cells/s)
========== ============= ======= ==============
0 (0-0) (none) - -
1 (0-1) 2 (1-0) left/down motion 10.0
2 (1-0) 1 (0-1) right/up motion 10.0
3 (1-1) (none) - -
"""
# Create an empty transition list
xn_list = []
# Append two transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left/bottom cell, right/top cell, orientation)
# - Tuple representing new pair state
# (left/bottom cell, right/top cell, orientation)
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
xn_list.append(Transition((0, 1, 0), (1, 0, 0), 10.0, "left/down motion"))
xn_list.append(Transition((1, 0, 0), (0, 1, 0), 10.0, "right/up motion"))
return xn_list | 702c7a7083546797578e5463841c7b59548dcca2 | 3,650,063 |
def error_message(error, text):
"""
Gives default or custom text for the error.
--------------------
Inputs <datatype>:
- error <Error Object>: The error code
- text <string>: Custom error text if error has no message
Returns <datatype>:
- error description <string>: The custom error description or default
"""
try:
return error.description['message']
except TypeError:
return text | 466fec2d2abefc9f05a3f0adf569fba1c63ea4c1 | 3,650,064 |
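# Hedged usage sketch: the dummy error classes below stand in for whatever
# error objects the application raises; the dict layout with a 'message' key
# mirrors the attribute access inside error_message.
class _DescribedError:
    description = {'message': 'invalid token'}

class _BareError:
    description = None

print(error_message(_DescribedError(), "unknown error"))  # invalid token
print(error_message(_BareError(), "unknown error"))       # unknown error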
def maskguard(maskarray, niter=1, xyonly=False, vonly=False):
"""
Pad a mask by specified number of pixels in all three dimensions.
Parameters
----------
maskarray : `~numpy.ndarray`
The 3-D mask array with 1s for valid pixels and 0s otherwise.
niter : int, optional
Number of iterations for expanding mask by binary dilation.
Default: 1
xyonly : boolean, optional
Whether to expand only in the two sky coordinates
Default: False
vonly : boolean, optional
Whether to expand only in the spectral coordinate
Default: False (ignored if xyonly==True)
Returns
-------
maskarray : `~numpy.ndarray`
A copy of the input maskarray after padding.
"""
s = ndimage.generate_binary_structure(3, 1)
if xyonly:
s[0,:] = False
s[2,:] = False
elif vonly:
s[1]=s[0]
maskarray = ndimage.binary_dilation(maskarray, structure=s, iterations=niter)
return maskarray | 098964878e313b08c73f1a3c1a66a2b7f1664090 | 3,650,065 |
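# Quick demonstration of the padding behaviour, assuming scipy.ndimage and
# numpy are available as in maskguard's module: a single valid voxel grows by
# its six face neighbours after one dilation pass.
import numpy as np

demo_mask = np.zeros((5, 5, 5), dtype=bool)
demo_mask[2, 2, 2] = True
padded_mask = maskguard(demo_mask, niter=1)
print(int(demo_mask.sum()), int(padded_mask.sum()))  # 1 7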
def validdest(repo, old, new):
"""Is the new bookmark destination a valid update from the old one"""
repo = repo.unfiltered()
if old == new:
# Old == new -> nothing to update.
return False
elif not old:
# old is nullrev, anything is valid.
# (new != nullrev has been excluded by the previous check)
return True
elif repo.obsstore:
return new.node() in obsolete.foreground(repo, [old.node()])
else:
# still an independent clause as it is lazier (and therefore faster)
return old.descendant(new) | 8206b1ec130582864979ea9fb617c60b6175deff | 3,650,066 |
def no_rbac_suffix_in_test_filename(filename):
"""Check that RBAC filenames end with "_rbac" suffix.
P101
"""
if "patrole_tempest_plugin/tests/api" in filename:
if filename.endswith('rbac_base.py'):
return
if not filename.endswith('_rbac.py'):
return 0, "RBAC test filenames must end in _rbac suffix" | 6ebfcede8b6e30f24f5ecc1f9d3f0985bd4c44fa | 3,650,067 |
def import_results(results_file, valid_codes=None, session=None):
"""Take a iterable which yields result lines and add them to the database.
If session is None, the global db.session is used.
If valid_codes is non-None, it is a set containing the party codes which are
allowed in this database. If None, this set is queried from the database.
.. note::
This can take a relatively long time when adding several hundred
results. Should this become a bottleneck, there are some optimisation
opportunities.
"""
session = session if session is not None else db.session
valid_codes = (
valid_codes if valid_codes is not None else
_query_valid_party_codes(session)
)
diagnostics = []
# This is a relatively straightforward but sub-optimal way to implement a
# bulk insert. The main issue is that the DB is queried once per result to
# see if the constituency exists. It would be preferable to do a single
# query over all of the given constituency names to determine which ones are
# present. This would make the flow of this function less obvious. For the
# moment, leave the sub-optimal implementation but should we need to
# re-visit this function as we deal with greater numbers of results the
# strategy above should be tried.
for line_idx, line in enumerate(results_file):
try:
add_constituency_result_line(
line, valid_codes=valid_codes, session=session)
except ValueError as e:
diagnostics.append(Diagnostic(
line, e.args[0] % e.args[1:], line_idx + 1
))
# Log the fact that this import happened
log('\n'.join([
'Imported {} result line(s), {} diagnostic(s)'.format(
line_idx+1, len(diagnostics)),
] + [str(d) for d in diagnostics]))
return diagnostics | e7faea1b78418b6fdb599612fdc72fe20fe45bc6 | 3,650,068 |
def fit_lens_data_with_tracer(lens_data, tracer, padded_tracer=None):
"""Fit lens data with a model tracer, automatically determining the type of fit based on the \
properties of the galaxies in the tracer.
Parameters
-----------
lens_data : lens_data.LensData or lens_data.LensDataHyper
The lens-images that is fitted.
tracer : ray_tracing.AbstractTracerNonStack
The tracer, which describes the ray-tracing and strong lens configuration.
padded_tracer : ray_tracing.Tracer or None
A tracer with an identical strong lens configuration to the tracer above, but using the lens data's \
padded grid_stack such that unmasked model-images can be computed.
"""
if tracer.has_light_profile and not tracer.has_pixelization:
return LensProfileFit(lens_data=lens_data, tracer=tracer, padded_tracer=padded_tracer)
elif not tracer.has_light_profile and tracer.has_pixelization:
return LensInversionFit(lens_data=lens_data, tracer=tracer)
elif tracer.has_light_profile and tracer.has_pixelization:
return LensProfileInversionFit(lens_data=lens_data, tracer=tracer,
padded_tracer=padded_tracer)
else:
raise exc.FittingException('The fit routine did not call a Fit class - check the '
'properties of the tracer') | c94454462e4e9fd770eebf39a9574daa0e6a9025 | 3,650,069 |
def sround(a, *ndigits):
"""Termwise round(a) for an iterable.
An optional second argument is supported, and passed through to the
built-in ``round`` function.
As with the built-in, rounding is correct taking into account the float
representation, which is base-2.
https://docs.python.org/3/library/functions.html#round
"""
op = _make_termwise_stream_unop(round, ndigits[0]) if ndigits else _round
return op(a) | ee75d82fa3bdfb50afb279cce87d6d6ec6120adf | 3,650,070 |
def part_b(lines):
""" For each valid line consider the stack of opening characters that didn't get closed.
Compute a score for each line per the question, then return the median value of these scores.
"""
scores = []
for line in lines:
is_line_valid, stack = assess_line(line)
if is_line_valid:
scores.append(score_completion(stack))
scores.sort()
return scores[len(scores) // 2] | e745a3be40f5a83f0e8ce3de4c647bd5984e7511 | 3,650,071 |
def _get_activation(
spec):
"""Get a rematlib Layer corresponding to a given activation function."""
if spec == mobile_search_space_v3.RELU:
result = layers.ReLU()
elif spec == mobile_search_space_v3.RELU6:
result = layers.ReLU6()
elif spec == mobile_search_space_v3.SWISH6:
result = layers.Swish6()
elif spec == mobile_search_space_v3.SIGMOID:
result = layers.Sigmoid()
else:
raise ValueError('Unrecognized activation function: {}'.format(spec))
return result | d2e67564eb366128b6dfe9f0c1c919ceb0e949ac | 3,650,072 |
def RT2tq(poses, square=False):
"""
!!NOT TESETED!!
:param poses: N x 3 x 4, (R|T)
:return: (N, 7)
"""
N,_,_ = poses.shape
R = poses[:,:,:3]
T = poses[:,:,3:] # Nx3x1
q = quaternion.as_float_array(quaternion.from_rotation_matrix(R)) #Nx4
t= T.squeeze(-1)
tq = np.concatenate([t,q], axis=-1)
return tq | 5241aa7110df8074fe203b1cbe33cb7bf509c2f3 | 3,650,074 |
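# Sketch of the conversion above on a single identity pose; requires numpy and
# the numpy-quaternion package that RT2tq uses.
import numpy as np

demo_poses = np.zeros((1, 3, 4))
demo_poses[0, :, :3] = np.eye(3)        # rotation: identity
demo_poses[0, :, 3] = [1.0, 2.0, 3.0]   # translation
print(RT2tq(demo_poses))                # [[1. 2. 3. 1. 0. 0. 0.]]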
import json
def make_callback(subscription_path, project_id):
"""Return a callback closure"""
def callback(message):
"""Handle Pub/Sub resurrection message.
Ignore (and ACK) messages that are not well-formed.
Try handle any other message, ACKing it eventually (always).
"""
logger.info('Handling message from subscription "%s"', subscription_path)
# parse the message, ACK on failure to avoid duplicate deliveries
try:
instance_desc = json.loads(message.data)
except:
logger.exception('Failed parsing JSON message - ignoring it\n%s', message)
else:
resurrect_instance(project_id, instance_desc)
finally:
logger.info('ACKing message\n%s', message)
message.ack()
return callback | ac16d67ee9e7b89d69b79702e4121b1983df2bb8 | 3,650,075 |
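# Hedged wiring sketch for the closure factory above, using the
# google-cloud-pubsub subscriber client; the project and subscription values
# are placeholders, and resurrect_instance/LOGGER come from the surrounding
# module.
from google.cloud import pubsub_v1

def listen_for_resurrections(project_id, subscription_path):
    subscriber = pubsub_v1.SubscriberClient()
    streaming_pull = subscriber.subscribe(
        subscription_path,
        callback=make_callback(subscription_path, project_id))
    streaming_pull.result()  # block and handle messages as they arrive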
def data_to_bytes(data, encoding):
"""\
Converts the provided data into bytes. If the data is already a byte
sequence, it will be left unchanged.
This function tries to use the provided `encoding` (if not ``None``)
or the default encoding (ISO/IEC 8859-1). It uses UTF-8 as fallback.
Returns the (byte) data, the data length and the encoding of the data.
:param data: The data to encode
:type data: str or bytes
:param encoding: str or ``None``
:rtype: tuple: data, data length, encoding
"""
if isinstance(data, bytes):
return data, len(data), encoding or consts.DEFAULT_BYTE_ENCODING
data = str(data)
if encoding is not None:
# Use the provided encoding; could raise an exception by intention
data = data.encode(encoding)
else:
try:
# Try to use the default byte encoding
encoding = consts.DEFAULT_BYTE_ENCODING
data = data.encode(encoding)
except UnicodeError:
try:
# Try Kanji / Shift_JIS
encoding = consts.KANJI_ENCODING
data = data.encode(encoding)
except UnicodeError:
# Use UTF-8
encoding = 'utf-8'
data = data.encode(encoding)
return data, len(data), encoding | 78d0813075c24d2a85412648fa45d720227ae853 | 3,650,077 |
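# Hedged usage sketch: Latin-1 text keeps the default byte encoding, while a
# character outside both the default and the Kanji encodings falls back to
# UTF-8. The exact names of the default/Kanji encodings come from the consts
# module assumed by data_to_bytes.
encoded, length, enc = data_to_bytes("café", None)
print(enc, length)                     # default encoding, 4 bytes
encoded, length, enc = data_to_bytes("price: 🙂", None)
print(enc, length)                     # 'utf-8', 11 bytes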
def get_session_store(state: State = Depends(get_app_state)) -> SessionStore:
"""Get a singleton SessionStore to keep track of created sessions."""
session_store = getattr(state, _SESSION_STORE_KEY, None)
if session_store is None:
session_store = SessionStore()
setattr(state, _SESSION_STORE_KEY, session_store)
return session_store | 4204371079babbfdc15327bb62b3c1c306e27f39 | 3,650,078 |
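# Hedged sketch of wiring the dependency above into a FastAPI route; the route
# itself is illustrative and only demonstrates that the same SessionStore
# instance is reused across requests because it is cached on the app state.
from fastapi import Depends, FastAPI

demo_app = FastAPI()

@demo_app.get("/session-store-id")
def session_store_id(store: SessionStore = Depends(get_session_store)) -> str:
    return str(id(store))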
def extractCurrentlyTLingBuniMi(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].startswith('[BNM]'):
return buildReleaseMessageWithType(item, 'Bu ni Mi wo Sasagete Hyaku to Yonen. Elf de Yarinaosu Musha Shugyou', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[DD]'):
return buildReleaseMessageWithType(item, 'Doll Dungeon', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[HCLS]'):
return buildReleaseMessageWithType(item, 'High Comprehension Low Strength', vol, chp, frag=frag, postfix=postfix)
tagmap = [
('Abyss Domination', 'Abyss Domination', 'translated'),
('Nine Yang Sword Saint', 'Nine Yang Sword Saint', 'translated'),
('Mysterious World Beast God', 'Mysterious World Beast God', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 9b91a6a2329cb4e2572f181b16ddc6b2f0fb3553 | 3,650,080 |
from dif import dif_stats as _dif_stats
def dif_stats(filename, # [<'my/file.txt',...> => name of scored data file]
student_id = 'Student_ID', # [<'Student_ID', ...> => student id column label]
group = ['Sex', {'focal':0, 'ref':1}], # [<e.g.'Sex', {'focal':'female', 'ref':'male'}]> => column label with assignment to focal and reference]
raw_score = 'RawScore', # [<'RawScore',...> => raw score column label]
items = 'All', # [<'All', ['item1', 'item3',...]> => items for which to get stats]
stats = 'All', # [<'All', [see list in docs]> => desired statistics]
strata = ('all_scores', 4), # [(<'all_scores', int>, int) => number of raw score strata, with backup if insufficient]
getrows = None, # [<None, {'Get':_,'Labels':_,'Rows':_}> => select rows using extract() syntax]
getcols = None, # [<None, {'Get':_,'Labels':_,'Cols':_}> => select cols using extract() syntax]
delimiter = '\t', # [<',', '\t'> => column delimiter]
):
"""Calculate DIF stats for each in a range of items.
Returns
-------
dif() returns an item by statistic Damon object with
a column containing number of score categories. Display
results using:
>>> print tabulate(dif(...).whole, 'firstrow')
Comments
--------
"dif" (DIF) stands for "differential item functioning" and reflects
the degree to which items have different difficulties for two
groups of persons, a "focal" and a "reference" group, after
adjusting for the ability of each person. It is used to flag
items that "play favorites" with student groups, e.g., that are
easy for girls and hard for boys even though the two groups
otherwise have similar ability.
There are a profusion of DIF statistics, organized mainly by whether
they are intended for dichotomous or polytomous items. The Rasch
model has its own way of estimating DIF (not included in this
function) which yields similar results. dif() supports three
categories of DIF statistics plus related variances, z-scores,
chi-squares and so on. Any number of combinations of these statistics
have been proposed for flagging DIF items.
'MH' => Mantel-Haenszel, for dichotomous data
'M' => Mantel, for dichotomous and polytomous data
'SMD' => standardized mean difference, usually for polytomous
Formulas are pulled from Zwick & Thayer (1996) and Wood (2011).
A commonly used statistic is the 'Flag' statistic, which gives a code
for whether an item should be flagged. ETS's a, b, c DIF flags
are reported numerically as 0, 1, 2. See discussion below.
The dif_stats() function applies only to unidimensional data.
Multidimensional DIF can be evaluated in Damon to a limited
degree using the "stability" statistic in conjunction with
coord()'s seed parameters.
dif() requires a student-by-item data file or array with a group
membership column and a column of student raw scores. Thus, column
headers should contain a student id column, a group column, a raw score
column, and a series of item columns. Any other columns in your
dataset should be filtered out using the getcols parameter.
References
----------
Zwick, R., Thayer, D. (Autumn, 1996). "Evaluating the Magnitude of Differential
Item Functioning in Polytomous Items". Journal of Educational and
Behavioral Statistics, Vol. 21, No. 3, pp 187-201.
http://www.jstor.org/stable/1165267
Wood, S. W. (2011). "Differential item functioning procedures for polytomous
items when examinee sample sizes are small." doctoral PhD diss, University
of Iowa, 2011.
http://ir.uiowa.edu/etd/1110.
Parameters
----------
"filename" is the string name of a person x item file containing
integer scores of how each student did on each item, a column
containing test-level raw scores for each student, and a column
assigning each student to a group. All non-numerical cells are
treated as missing. All numerical scores are treated as valid.
Numerical scores must be integers whose minimum value is zero.
Data must be tabular and field-delimited.
filename = '/path/to/my_file.txt'
=> file is 'my_file.txt'
-----------
"student_id' is the header label of the column containing unique
student identifiers.
student_id = 'Student_ID' => Student identifiers are in the
column labels 'Student_ID'.
-----------
"group" contains the header label of the group column and
assigns one group to be "focal" and the other to be the "reference".
group = ['Sex', {'focal':'female', 'ref':'male'}]
=> Student gender identifiers are
in the column labeled 'Sex'.
Students labeled "female" will
be the focal group. Students
labeled "male" will be the
reference group.
Note: As is typical with DIF statistics, while there can be
more than two groups, only two are compared at a time.
-----------
"raw_score" is the header label of the raw score column.
raw_score = 'RawScore' => Test-level student raw scores
are in the column labeled
'RawScore'
-----------
"items" is the list of items for which DIF statistics should be
calculated.
items = 'All' => Calculate DIF for all items
in the dataset.
items = ['item1', 'item5'] => Calculate DIF for only items
1 and 5.
-----------
"stats" is the list of DIF stats to be calculated for each
item. If a given statistic cannot be calculated for a given
item, the cell is left blank.
stats = 'All' => Calculate all possible DIF
statistics for all items (see
list below).
stats = ['MH_d-dif', 'MH_z', 'M_z', 'SMD_z']
=> Calculate just the Mantel-Haenszel
delta-DIF (defined by ETS), the
Mantel-Haenszel z statistic (both
for dichotomous items), the Mantel
z-statistic (for dichotomous and
polytomous items), and the
standardized mean difference
z-statistic.
List of available DIF-related statistics ("MH" means Mantel-
Haenszel, "M" means Mantel, "SMD" means standardized mean difference.
Mantel-Haenszel (dichotomous data)
'MH_alpha' => odds ratio, dich, 0 -> +inf
'MH_dif' => log-odds ratio, dich, -inf -> +inf
'MH_d-dif' => delta-DIF = -2.35*log-odds, dich, -inf -> +inf,
negative implies bias toward reference group.
(d-dif > 1.5 implies DIF)
'MH_var' => variance of MH_dif (SE = sqrt(var))
'MH_d-var' => variance of MH_d-dif
'MH_z' => absolute z-statistic (dif/sqrt(var)), z > 2.0 => p < 0.05
'MH_pval' => p-value associated with z, pval < 0.05 => significance
'MH_chisq' => chi-square = z^2. chisq > 3.84 => p < 0.05
'MH_chisq_pval' => p-value associated with chisq, pval < 0.05 => significance
Mantel (dichotomous and polytomous data)
'M_dif' => observed - expected frequencies
'M_var' => variance of M_diff (SE = sqrt(var))
'M_z' => signed z-statistic, dif/sqrt(var), z > 2.0 => p < 0.05
'M_pval' => p-value associated with z, pval < 0.05 => significance
'M_chisq' => chi-square = z^2. chisq > 3.84 => p < 0.05
'M_chisq_pval' => p-value associated with chisq, pval < 0.05 => significance
Standardized mean difference (mainly for polytomous data)
'SMD_dif' => difference between reference and focal groups
'SMD_var' => variance of SMD_dif (SE = sqrt(var))
'SMD_z' => signed z-statistic, dif/sqrt(var), z > 2.0 => p < 0.05
'SMD_pval' => p-value associated with z, pval < 0.05 => significance
'SMD_chisq' => chi-square = z^2. chisq > 3.84 => p < 0.05
'SMD_chisq_pval'=> p-value associated with chisq, pval < 0.05 => significance
Other stats
'SD' => standard deviation of person scores for that item
'SMD/SD' => absolute SMD/SD > 0.25 implies DIF if SMD_chisq_pval < 0.05
'Flag' => flag a DIF item based on the rules described below.
'Counts' => Count valid scores for each item, overall and by group.
As mentioned, all statistics that are dependent on sample size (e.g., z,
chi-square) will show larger values as sample size increases and their
standard errors go to zero. Therefore, DIF decisions should be based
on other considerations.
One useful rule suggested by Zwick, Thayer, and Mazzeo and used by
ETS is as follows. Flag DIF:
for dichotomous items:
Flag = 2 if:
'MH_d-dif' is greater than 1.5 and significantly greater than 1.0.
Flag = 0 if:
'MH_d-dif' is less than 1.0 or the p-value is greater than 0.05.
Flag = 1, otherwise.
These correspond to ETS a, b, c DIF flags:
'a'=>0, 'b'=>1, 'c'=>2
for polytomous items:
Flag = 2 if:
'SMD/SD' is greater than 0.25 and 'M_chisq_pval' is less than 0.05.
Flag = 0, otherwise.
There is no flag = 1 here.
(Note: Zwick refers to this as a Mantel-Haenszel chi-square p-value
but the formula resembles the polytomous Mantel chi-square p-value,
which is what is used here.)
-----------
"strata" is the number of ability strata or levels into which
to divide student test raw scores for purposes of matching
students of similar abilities. If the number of strata do
not divide evenly into the number of potential raw scores,
the remainder are stuck in the lowest stratum. "strata" requires
a backup strata specification in case the primary specification
leads to a count of one or less for a given item:
strata = (primary, backup)
Examples:
strata = ('all_scores', 4) => Let each possible raw
score be its own stratum.
This is desirable so long as
the sample of persons is large
enough that all cells in
the resulting stratum x score
table have fairly large counts.
If 'all_scores' yields insufficient
data for a given item, use a
stratum of 4 for that item.
strata = (20, 10) => Divide the raw scores into
20 strata and match students
who belong to the same stratum.
If this leads to insufficient data,
use 10 for that item.
Some DIF programs allow no more than five or so stratification
levels in order to avoid insufficient counts. This degrades the
DIF statistics a little, but not generally enough to be a problem.
-----------
"getrows" controls the rows that are loaded from the datafile,
making it possible to filter out unneeded rows, e.g., to get a
student subsample. The syntax is drawn from Damon's extract()
method and can be a bit fancy. To get a full description of
what you can do with getrows, see:
>>> help(core.Damon.extract)
Simple examples:
getrows = None => Retain all rows as they are.
Non-intuitively, this really means
"get all rows".
getrows = {'Get':'AllExcept','Labels':'key','Rows':['row_x', 'row_y']}
=> Extract all rows except those
labeled 'row_x' and 'row_y'.
getrows = {'Get':'NoneExcept','Labels':'index','Rows':[range(1, 20, 2)]}
=> Extract only row 1 up to, but not
including, row 20. 2 is a step parameter, and
means get every other row within the range.
Counting starts from 0. The 'index' parameter
means 'Rows' refers to positions, not 'keys'.
-----------
"getcols" controls the columns that are loaded from the datafile,
making it possible to filter out unneeded columns, e.g., data
columns that are not items or the student raw score. The syntax
is drawn from Damon's extract() method and can be a bit fancy.
To get a full description of what you can do with getcols, see:
>>> help(core.Damon.extract)
Simple examples:
getcols = None => Retain all columns as they are.
Non-intuitively, this really means
"get all columns".
getcols = {'Get':'AllExcept','Labels':'key','Cols':['col_x', 'col_y']}
=> Extract all columns except those
labeled 'col_x' and 'col_y'.
getcols = {'Get':'NoneExcept','Labels':'index','Cols':[range(2, 41)]}
=> Extract only columns 2 up to, but not
including, 41. Counting starts from 0.
Note the 'index' parameter.
-----------
"delimiter" is the character used to delimit columns in
the dataset.
delimiter = ',' => File is comma-delimited.
delimiter = '\t' => File is tab-delimited.
Examples
--------
[under construction]
Paste Function
--------------
dif_stats(filename, # [<'my/file.txt',...> => name of scored data file]
student_id = 'Student_ID', # [<'Student_ID', ...> => student id column label]
group = ['Sex', {'focal':0, 'ref':1}], # [<e.g.'Sex', {'focal':'female', 'ref':'male'}]> => column label with assignment to focal and reference]
raw_score = 'RawScore', # [<'RawScore',...> => raw score column label]
items = 'All', # [<'All', ['item1', 'item3',...]> => items for which to get stats]
stats = 'All', # [<'All', [see list in docs]> => desired statistics]
strata = ('all_scores', 4), # [(<'all_scores', int>, int) => number of raw score strata, with backup if insufficient]
getrows = None, # [<None, {'Get':_,'Labels':_,'Rows':_}> => select rows using extract() syntax]
getcols = None, # [<None, {'Get':_,'Labels':_,'Cols':_}> => select cols using extract() syntax]
delimiter = '\t', # [<',', '\t'> => column delimiter]
)
"""
args = locals()
return _dif_stats(**args) | 0ed6b94e63d5eacc40aeaf4f2181012ef8aacc22 | 3,650,081
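# Hedged usage sketch (the docstring's Examples section above is still marked
# "under construction"): the file name, column labels and group codes below
# are illustrative rather than taken from a real dataset.
dif_table = dif_stats('scored_responses.txt',
                      student_id='Student_ID',
                      group=['Sex', {'focal': 'female', 'ref': 'male'}],
                      raw_score='RawScore',
                      items='All',
                      stats=['MH_d-dif', 'M_z', 'SMD_z', 'Flag'],
                      strata=('all_scores', 4),
                      delimiter='\t')
# The docstring suggests displaying the result with:
# print tabulate(dif_table.whole, 'firstrow')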
def delete_all_devices_for_user():
"""
delete all active devices for the given user
"""
try:
username = get_jwt_identity()
with session_scope() as session:
user = user_service.get_user(username, session)
device_count = user.devices.count()
if device_count == 0:
resp = {
"status": "error",
"msg": "no devices found for '%s'" % username
}
return make_response(jsonify(resp), status.HTTP_404_NOT_FOUND)
LOGGER.info("Deleting all devices for '%s'" % username)
for device in user.devices:
device_service.delete_device(user.username, device.device_id, session)
LOGGER.info("Deleted " + device.device_name + ", with device id = " + device.device_id + "!")
LOGGER.info("Deleted all devices for '%s'" % username)
resp = {
"status": "success",
"msg": "deleted %d devices for '%s'" % (device_count, username)
}
return make_response(jsonify(resp), status.HTTP_200_OK)
except Exception as e:
resp = {
"status": "error",
"msg": "%s" % str(e)
}
return make_response(jsonify(resp), status.HTTP_500_INTERNAL_SERVER_ERROR) | 2ac4c0f40e72dc54ca78a109ead9d09f15481b92 | 3,650,082 |
def _GetNormalizationTuple(url):
"""Parse a URL into a components tuple.
Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Args:
url:A URL string.
Returns:
A 6-tuple: (scheme, netloc, path, params, query, fragment).
"""
url = encoding_util.EncodeToAscii(url)
up = urlparse(url, 'http')
authority = up[1]
path = up[2]
if not authority:
end_index = path.find('/')
if end_index == -1:
end_index = len(path)
authority = path[:end_index]
path = path[end_index:]
path = path.rstrip('/') # Ignore trailing slashes on the path.
return (up[0], authority, path, up[3], up[4], up[5]) | cbc8dad95202a9a17f75dac754b6ec00e3efcdfd | 3,650,083 |
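# Quick sketch of the two parsing paths above (assumes the surrounding
# module's encoding_util helper is available): a fully qualified URL versus a
# bare authority-plus-path string that urlparse would otherwise fold into the
# path component.
print(_GetNormalizationTuple('http://example.com/a/b/?q=1'))
# ('http', 'example.com', '/a/b', '', 'q=1', '')
print(_GetNormalizationTuple('example.com/a/b/'))
# ('http', 'example.com', '/a/b', '', '', '')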
def gCallback(dataset, geneid, colors):
"""Callback to set initial value of green slider from dict.
Positional arguments:
dataset -- Currently selected dataset.
geneid -- Not needed, only to register input.
colors -- Dictionary containing the color values.
"""
colorsDict = colors
try:
colorVal = colorsDict[dataset][4:-1].split(',')[1]
return int(colorVal)
except KeyError:
return 0 | 5a97fd16ea362b3b53f33f52a449c4dccc617e44 | 3,650,084 |
def intForcesMoments(sliceZnes,method, direction):
"""
Loops over the sliceZnes and performs an integration of Forces and moments
for each slice (scalar integrals; the integrated variables depend on the method).
Returns an 8 x Nslices array: [direction, normalized direction, fx, fy, fz, mx, my, mz].
"""
#direction, norm_direction, fx,fy,fz,mx,my,mz
forcesMoments=np.zeros((8,len(sliceZnes)))
ds = sliceZnes[0].dataset
fr = ds.frame
#Retrieves Forces and Moments variables
xAxisNr=ds.variable(direction).index
if method == "Pressure":
fxNr=ds.variable('px').index+1
fyNr=ds.variable('py').index+1
fzNr=ds.variable('pz').index+1
else:
fxNr=ds.variable('taux').index+1
fyNr=ds.variable('tauy').index+1
fzNr=ds.variable('tauz').index+1
mxNr=ds.variable('mx').index+1
myNr=ds.variable('my').index+1
mzNr=ds.variable('mz').index+1
#Populates the returned array with the direction and integrated values
for i,slc in enumerate(sliceZnes):
forcesMoments[(0,i)]= slc.values(xAxisNr)[0]
for j,v in enumerate([fxNr,fyNr,fzNr,mxNr,myNr,mzNr]):
intCmde=("Integrate ["+"{}".format(slc.index + 1)+"] VariableOption='Scalar'"\
+ " XOrigin=0 YOrigin=0 ZOrigin=0"\
+" ScalarVar=" + "{}".format(v)\
+ " Absolute='F' ExcludeBlanked='F' XVariable=1 YVariable=2 ZVariable=3 "\
+ "IntegrateOver='Cells' IntegrateBy='Zones'"\
+ "IRange={MIN =1 MAX = 0 SKIP = 1}"\
+ " JRange={MIN =1 MAX = 0 SKIP = 1}"\
+ " KRange={MIN =1 MAX = 0 SKIP = 1}"\
+ " PlotResults='F' PlotAs='Result' TimeMin=0 TimeMax=0")
tp.macro.execute_extended_command(command_processor_id='CFDAnalyzer4',
command=intCmde)
forcesMoments[(j+2,i)]=fr.aux_data['CFDA.INTEGRATION_TOTAL']
#Normalized direction:
forcesMoments[1]=(forcesMoments[0]-forcesMoments[0].min())/(forcesMoments[0].max()-forcesMoments[0].min())
return (forcesMoments) | 16e0a3adc3a3b171fd02b07f241ed8623b16c7e3 | 3,650,085 |
from typing import List
def _other_members(other_members: List[parser.MemberInfo], title: str):
"""Returns "other_members" rendered to markdown.
`other_members` is used for anything that is not a class, function, module,
or method.
Args:
other_members: A list of `MemberInfo` objects.
title: Title of the table.
Returns:
A markdown string
"""
items = []
for other_member in other_members:
description = [other_member.doc.brief]
for doc_part in other_member.doc.docstring_parts:
if isinstance(doc_part, parser.TitleBlock):
# Use list_view here because description will be part of a table.
description.append(str(doc_part))
else:
description.append(doc_part)
items.append(
parser.ITEMS_TEMPLATE.format(
name=other_member.short_name,
anchor=f'<a id="{other_member.short_name}"></a>',
description='\n'.join(description),
))
return '\n' + parser.TABLE_TEMPLATE.format(
title=title, text='', items=''.join(items)) + '\n' | 77c02e8532dd01bab0b9ea0f9d14634dc3523cd2 | 3,650,086 |
def full_url(parser, token):
"""Spits out the full URL"""
url_node = url(parser, token)
f = url_node.render
url_node.render = lambda context: _get_host_from_context(context) + f(context)
return url_node | d54e9cf5acee1b6283f3166e9479e8c9e8bb5047 | 3,650,087 |
def Chi2CoupleDiffFunc(nzbins, nzcorrs, ntheta, mask,
data1, xi_obs_1, xi_theo_1,
data2, xi_obs_2, xi_theo_2,
inDir_cov12, file_name_cov12):
"""
Estimate chi^2 for difference between two data vectors
Note: this assumes the two data vectors have two separate covariance matrices,
      that the cross-correlation between the two data vectors is also desired,
      and that the masks for the two data vectors are identical.
"""
# load the full covariance matrix:
covmat_block_1 = io_cs.LoadCovarianceFunc(data1, nzbins, nzcorrs, xi_theo_1)
covmat_block_2 = io_cs.LoadCovarianceFunc(data2, nzbins, nzcorrs, xi_theo_2)
covmat_block_12 = io_cs.LoadCrossCovarianceFunc(inDir_cov12, file_name_cov12, ntheta, nzbins, nzcorrs, xi_theo_1, xi_theo_2)
# build a combined cov-mat
covmat = covmat_block_1 + covmat_block_2 - covmat_block_12 - covmat_block_12.transpose()
# trim covariance matrix to chosen scales:
mask_indices = np.where(mask == 1)[0]
covmat = covmat[np.ix_(mask_indices, mask_indices)]
# precompute Cholesky transform for chi^2 calculation:
# don't invert that matrix...
# use the Cholesky decomposition instead:
cholesky_transform = cholesky(covmat, lower=True)
vec = (xi_theo_1[mask_indices] - xi_obs_1[mask_indices]) - (xi_theo_2[mask_indices] - xi_obs_2[mask_indices])
yt = solve_triangular(cholesky_transform, vec, lower=True)
chi2 = yt.dot(yt)
return chi2, len(vec) | c0cd8a683447b0572a93914e633fb8f770c3a6fd | 3,650,088 |
def minimax(just_mapping, mapping):
"""
Scale the mapping to minimize the maximum error from just intonation.
"""
least_error = float("inf")
best_mapping = mapping
for i in range(len(just_mapping)):
for j in range(i+1, len(just_mapping)):
candidate = mapping / (mapping[i] + mapping[j]) * (just_mapping[i] + just_mapping[j])
error = abs(just_mapping - candidate).max()
if error < least_error:
least_error = error
best_mapping = candidate
return best_mapping | b2226de7a916e3075327cd30c64e7412e186027d | 3,650,089 |
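# Small numeric sketch of the scaling above: rescaling a 12-tone equal
# temperament fragment (in cents) against a just-intonation target; the tuning
# values are illustrative.
import numpy as np

just = np.array([0.0, 386.3137, 701.955])   # unison, just major third, just fifth
equal = np.array([0.0, 400.0, 700.0])       # 12-TET approximations
best = minimax(just, equal)
print(np.abs(just - equal).max(), np.abs(just - best).max())
# the rescaled mapping has a smaller worst-case deviation from just intonation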
from datetime import datetime
def app_used_today():
"""Check the session and the backend database for a record of app use from the last 24 hours."""
now = UTC.localize(datetime.datetime.utcnow())
last_app_use = get_last_app_use_date()
day_length_in_seconds = 60 * 60 * 24
if last_app_use and (last_app_use.timestamp() + day_length_in_seconds) > now.timestamp():
return True
return False | 290bb4b87e74f5134effeb37da36cedcca05c4aa | 3,650,090 |