def parseParams(opt):
"""Parse a set of name=value parameters in the input value.
Return list of (name,value) pairs.
Raise ValueError if a parameter is badly formatted.
"""
params = []
for nameval in opt:
try:
name, val = nameval.split("=")
except ValueError:
raise ValueError("Bad name=value format for '%s'" % nameval)
params.append((name, val))
    return params
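# A minimal usage sketch (not part of the original snippet); it assumes the
# parseParams function above is in scope.
print(parseParams(["host=localhost", "port=8080"]))  # [('host', 'localhost'), ('port', '8080')]
try:
    parseParams(["malformed"])
except ValueError as err:
    print(err)  # Bad name=value format for 'malformed'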
def test_list_short_enumeration_2_nistxml_sv_iv_list_short_enumeration_3_5(mode, save_output, output_format):
"""
Type list/short is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/list/short/Schema+Instance/NISTSchema-SV-IV-list-short-enumeration-3.xsd",
instance="nistData/list/short/Schema+Instance/NISTXML-SV-IV-list-short-enumeration-3-5.xml",
class_name="NistschemaSvIvListShortEnumeration3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
    )
def alert_remove(alert_id):
"""Remove the specified alert"""
key = get_api_key()
# Get the list
api = shodan.Shodan(key)
try:
api.delete_alert(alert_id)
except shodan.APIError as e:
raise quo.Outliers(e.value)
    quo.echo("Alert deleted")
def asnumpy(a, dtype=None, order=None):
"""Returns a dense numpy array from an arbitrary source array.
Args:
        a: Arbitrary object that can be converted to :class:`numpy.ndarray`.
        dtype: Optional data type specifier, forwarded to :func:`numpy.array`.
order ({'C', 'F', 'A'}): The desired memory layout of the output
array. When ``order`` is 'A', it uses 'F' if ``a`` is
fortran-contiguous and 'C' otherwise.
Returns:
numpy.ndarray: Converted array on the host memory.
"""
from ._sparse_array import SparseArray
if isinstance(a, SparseArray):
a = a.todense()
    return np.array(a, dtype=dtype, copy=False, order=order)
def render_array_items(
item_renderer: ITEM_RENDERER_TYPE,
summary_renderer: ITEM_RENDERER_TYPE,
index: INDEX_TYPE,
array: np.ndarray,
edge_items: int,
) -> ITEM_GENERATOR_TYPE:
"""Render array, dispatching to `render_array_summarised` if required.
:param item_renderer: item renderer
:param summary_renderer: summary item renderer
:param index: index
:param array: array to render
:param edge_items: number of edge items when summarising
    :return: generator of rendered items
"""
if edge_items and len(array) > 2 * edge_items:
yield from render_array_items_summarized(
item_renderer, summary_renderer, index, array, edge_items
)
else:
for i, item in enumerate(array):
            yield from item_renderer(extend_index(index, i), item, edge_items)
def get_next_cpi_date():
"""
Get next CPI release date
"""
df = pd.read_html(r"https://www.bls.gov/schedule/news_release/cpi.htm")[0][:-1]
df["Release Date"] = pd.to_datetime(df["Release Date"], errors='coerce')
df = df[df["Release Date"] >= current_date].iloc[0]
df['Release Date'] = df['Release Date'].strftime('%Y-%m-%d')
    return df
async def on_ready():
"""
    Confirm that the bot is online.
"""
print("Bot is ready.") | 5,356,806 |
def date_arithmetic() -> Tuple[str, str, int]:
    """Calculate the dates three days after Feb 27, 2020 and Feb 27, 2019,
    and the number of days between Feb 1, 2019 and Sep 30, 2019."""
date1: str = "Feb 27, 2020"
date_2020: datetime = datetime.strptime(
date1, "%b %d, %Y") + timedelta(3)
date2: str = "Feb 27, 2019"
date_2019: datetime = datetime.strptime(
date2, "%b %d, %Y") + timedelta(3)
date3: str = "Feb 1, 2019"
date4: str = "Sep 30, 2019"
    days_passed: timedelta = datetime.strptime(
        date3, "%b %d, %Y") - datetime.strptime(date4, "%b %d, %Y")
    three_days_after_02272020: str = date_2020.strftime("%b %d, %Y")
    three_days_after_02272019: str = date_2019.strftime("%b %d, %Y")
    days_passed_02012019_09302019: int = abs(days_passed.days)
    return three_days_after_02272020, three_days_after_02272019, days_passed_02012019_09302019
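# Worked example (assumes `datetime`, `timedelta` and `Tuple` are imported at module
# level, as the code above implies): 2020 is a leap year, so Feb 27 + 3 days is Mar 1;
# in 2019 it is Mar 2, and Feb 1 .. Sep 30, 2019 spans 241 days.
assert date_arithmetic() == ("Mar 01, 2020", "Mar 02, 2019", 241)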
def update_investment_projects_for_gva_multiplier_task(gva_multiplier_id):
"""
Updates the normalised gross_value_added for all investment projects
with the associated GVA Multiplier.
"""
try:
instance = GVAMultiplier.objects.get(id=gva_multiplier_id)
except GVAMultiplier.DoesNotExist:
logger.warning(
f'Unable to find GVA Multiplier [{gva_multiplier_id}]'
' - Unable to update associated investment projects',
)
else:
        _update_investment_projects_for_gva_multiplier(instance)
def get_words_from_line_list(text):
"""
Applies Translations and returns the list of words from the text document
"""
    text = text.translate(translation_table)
    english_stopwords = set(stopwords.words('english'))  # build the stop-word set once, not per token
    word_list = [x for x in text.split() if x not in english_stopwords]
    return word_list
def _get_name(dist):
"""Attempts to get a distribution's short name, excluding the name scope."""
    return getattr(dist, 'parameters', {}).get('name', dist.name)
def seta_count(ent):
"""Enrich the match."""
data = {'body_part': 'seta'}
location = []
for token in ent:
label = token._.cached_label
if label == 'seta':
data['seta'] = REPLACE.get(token.lower_, token.lower_)
elif label == 'number_word':
data['count'] = int(REPLACE.get(token.lower_, -1))
elif token.lower_ in MISSING:
data['count'] = 0
elif label == 'group':
data['group'] = token.lower_
elif match := IS_INT.match(token.text):
data['count'] = to_positive_int(match.group(0))
if data.get('count', data.get('low')) is None:
data['present'] = True
if location:
data['type'] = ' '.join(location)
ent._.new_label = 'seta_count'
    ent._.data = data
def compute_mean_std(all_X):
"""Return an approximate mean and std for every feature"""
concatenated = np.concatenate(all_X, axis=0).astype(np.float64)
mean = np.mean(concatenated, axis=0)
std = np.std(concatenated, axis=0)
std[std == 0] = 1
    return mean, std
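# Usage sketch (assumes numpy imported as np, as in the function above): the returned
# statistics are typically used to z-score every feature across all sequences.
all_X = [np.random.rand(5, 3), np.random.rand(7, 3)]
mean, std = compute_mean_std(all_X)
all_X_normalized = [(X - mean) / std for X in all_X]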
def frontend_handler(socketio_request_json):
"""Handler of SocketIO request for frontend."""
    SocketIOApi.execute(socketio_request_json)
def GenerateConfig(context):
"""Generates configuration."""
key_ring = {
'name': 'keyRing',
'type': 'gcp-types/cloudkms-v1:projects.locations.keyRings',
'properties': {
'parent': 'projects/' + context.env['project'] + '/locations/' + context.properties['region'],
'keyRingId': context.env['deployment'] + '-key-ring'
}
}
crypto_key = {
'name': 'cryptoKey',
'type': 'gcp-types/cloudkms-v1:projects.locations.keyRings.cryptoKeys',
'properties': {
'parent': '$(ref.keyRing.name)',
'cryptoKeyId': context.env['deployment'] + '-crypto-key',
'purpose': 'ENCRYPT_DECRYPT'
}
}
resources = [key_ring, crypto_key]
outputs = [{
'name': 'primaryVersion',
'value': '$(ref.cryptoKey.primary)'
}]
    return { 'resources': resources, 'outputs': outputs }
def plot_embedding(X, y, d, title=None, imgName=None):
"""
Plot an embedding X with the class label y colored by the domain d.
:param X: embedding
:param y: label
:param d: domain
:param title: title on the figure
:param imgName: the name of saving image
:return:
"""
if params.fig_mode is None:
return
# normalization
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
# Plot colors numbers
plt.figure(figsize=(10,10))
ax = plt.subplot(111)
for i in range(X.shape[0]):
# plot colored number
plt.text(X[i, 0], X[i, 1], str(y[i]),
color=plt.cm.bwr(d[i]/1.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([]), plt.yticks([])
# If title is not given, we assign training_mode to the title.
if title is not None:
plt.title(title)
else:
plt.title(params.training_mode)
#plt.subplot(212)
#for i in range(X.shape[0]):
# plot colored number
# plt.plot(X[i, 0], X[i, 1],
# color=plt.cm.bwr(y[i])
# )
#plt.xticks([]), plt.yticks([])
if params.fig_mode == 'display':
# Directly display if no folder provided.
plt.show()
if params.fig_mode == 'save':
# Check if folder exist, otherwise need to create it.
folder = os.path.abspath(params.save_dir)
if not os.path.exists(folder):
os.makedirs(folder)
if imgName is None:
imgName = 'plot_embedding' + str(int(time.time()))
        # Check extension and always save inside the output folder.
        if not (imgName.endswith('.jpg') or imgName.endswith('.png') or imgName.endswith('.jpeg')):
            imgName = imgName + '.jpg'
        imgName = os.path.join(folder, imgName)
print('Saving ' + imgName + ' ...')
plt.savefig(imgName)
plt.close()
def save_checkpoint(checkpoint_path, model, optimizer):
state = {'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict()}
torch.save(state, checkpoint_path)
print('model saved to %s' % checkpoint_path)
def load_checkpoint(checkpoint_path, model, optimizer):
state = torch.load(checkpoint_path)
model.load_state_dict(state['state_dict'])
optimizer.load_state_dict(state['optimizer'])
    print('model loaded from %s' % checkpoint_path)
def get_valid_collapsed_products(products, limit):
"""wraps around collapse_products and respecting a limit
    to ensure that incomplete products are not collapsed
"""
next_min_scanid = get_next_min_scanid(products, limit)
collapsed_products = []
for scanid, scan in groupby(products, itemgetter('ScanID')):
if scanid == next_min_scanid:
continue
collapsed_products.extend(collapse_products(list(scan)))
    return collapsed_products, next_min_scanid
def get_imd():
"""Fetches data about LA IMD status.
The "local concentration" measure is used -
this gives higher weight to particularly deprived areas
Source: http://www.gov.uk/government/statistics/english-indices-of-deprivation-2019
"""
imd = pd.read_csv(
PROJECT_DIR / "inputs/data/societal-wellbeing_imd2019_indicesbyla.csv",
usecols=[1, 2],
skiprows=7,
)
    return imd
def get_instance_snapshots(pageToken=None):
"""
Returns all instance snapshots for the user's account.
See also: AWS API Documentation
:example: response = client.get_instance_snapshots(
pageToken='string'
)
:type pageToken: string
:param pageToken: A token used for advancing to the next page of results from your get instance snapshots request.
:rtype: dict
:return: {
'instanceSnapshots': [
{
'name': 'string',
'arn': 'string',
'supportCode': 'string',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'state': 'pending'|'error'|'available',
'progress': 'string',
'fromInstanceName': 'string',
'fromInstanceArn': 'string',
'fromBlueprintId': 'string',
'fromBundleId': 'string',
'sizeInGb': 123
},
],
'nextPageToken': 'string'
}
"""
    pass
def read_bgr(file):
"""指定ファイルからBGRイメージとして読み込む.
# Args:
file: イメージファイル名.
# Returns:
成功したらイメージ、失敗したら None.
"""
return cv2.imread(file, cv2.IMREAD_COLOR) | 5,356,819 |
def _remove_none_from_post_data_additional_rules_list(json):
"""
    Removes the hidden "None" placeholder value from the "additional_rules" list,
    which exists only to ensure the field is present for editing purposes.
    :param json: the data that is going to be posted
"""
data = json
additional_rules = json.get("additional_rules", None)
if additional_rules and "None" in additional_rules:
new_additional_rules = []
for rule in additional_rules:
if rule != "None":
new_additional_rules.append(rule)
data["additional_rules"] = new_additional_rules
    return data
def add_email(request, pk):
"""
This Endpoint will add the email id into
the person contact details.
It expects personId in URL param.
"""
try:
request_data = request.data
email = request_data.get("email")
person = Person.objects.filter(id=pk).last()
if email:
Email.objects.create(
email=email,
person_id=person.id
)
serializer = PersonDetailSerializer(person)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
    except Exception:  # broad catch kept, but avoid swallowing SystemExit/KeyboardInterrupt
print(traceback.format_exc())
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def coords_extracter():
"""Exctract coords to send command to robot.
To be executed inside of xarm_hand_control module."""
SKIPPED_COMMANDS = 5
COEFF = 22
current = [0]
def coords_to_command(data: Any):
current[0] += 1
if current[0] < SKIPPED_COMMANDS:
return
current[0] = 0
if np.linalg.norm(data[0:2], 2) < 0.05:
return
x = data[0] * COEFF / 1000
z = data[1] * COEFF / 1000
# speed = np.linalg.norm(data, ord=2) * COEFF * 50
# speed = int(speed)
# # speed = np.log(speed) * COEFF
# mvacc = speed * 10
speed = 500
mvacc = speed * 10
command = Command(
x=x,
y=0.0,
z=z,
speed=speed,
acc=mvacc,
is_radian=True,
is_cartesian=True,
is_relative=True,
)
# print(command)
send_command(command)
    return coords_to_command
def NO_MERGE(writer, segments):
"""This policy does not merge any existing segments.
"""
    return segments
def detect_video( yolo, all_classes):
"""Use yolo v3 to detect video.
# Argument:
video: video file.
yolo: YOLO, yolo model.
all_classes: all classes name.
"""
# ipv4 address
ipv4_url = 'http://192.168.171.44:8080'
# read video
cam = f'{ipv4_url}/video'
camera= cv2.VideoCapture(cam)
while True:
        res, frame = camera.read()
        if not res:
            break
        frame = cv2.resize(frame, (640, 480))
image = detect_image(frame, yolo, all_classes)
cv2.imshow("Video Stream", image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
    cv2.destroyAllWindows()
def save_features():
"""
Writes extracted feature vectors into a binary or text file, per args.
:return: none
"""
extractor = args.extractor
features = []
if extractor == 'multi':
features = extract_multi()
elif extractor == 'single':
features = extract_single()
# print("Output shape: ", features.shape) # comment out if you don't care to know output shape
extension = str(args.ext)
compress = args.compressed
out_path = str(args.out_path)
# TODO: get rid of boilerplate code
outfile = "" + out_path
out_full = outfile + "." + extension
if extension == "hdf5":
# (Recommended, default) save to .hdf5
        # Use a context manager so the file is flushed and closed before compression.
        with h5py.File(out_full, "w") as f:
            f.create_dataset(name=str(args.out_path), data=features)
        if compress:
            outfile_gz = out_full + ".gz"
            with open(out_full, 'rb') as f_in, gzip.open(outfile_gz, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
elif extension == "npy": # god please don't actually do this
# Save to .npy binary (numpy) - incompressible (as of now)
np.save(file=outfile, allow_pickle=True, arr=features)
if compress:
            with open(out_full, 'rb') as f_in:  # binary mode; gzip copy needs bytes
outfile_gz = out_full + ".gz"
with gzip.open(outfile_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
elif extension == "csv":
# Save to .csv (or, .csv.gz if args.compressed==True)
# This option is natively compressible.
if compress:
extension += ".gz"
outfile = "" + out_path + "." + extension
# TODO: This needs to return a string, no explicit save
np.savetxt(fname=outfile, X=features, fmt='%1.5f')
return features
    # TODO: (distant future) npz for the optional list of concat. 1d arrays
def build_ntwk(p, s_params):
"""
Construct a network object from the model and
simulation params.
"""
np.random.seed(s_params['RNG_SEED'])
# set membrane properties
n = p['N_PC'] + p['N_INH']
t_m = cc(
[np.repeat(p['T_M_PC'], p['N_PC']), np.repeat(p['T_M_INH'], p['N_INH'])])
e_l = cc(
[np.repeat(p['E_L_PC'], p['N_PC']), np.repeat(p['E_L_INH'], p['N_INH'])])
v_th = cc(
[np.repeat(p['V_TH_PC'], p['N_PC']), np.repeat(p['V_TH_INH'], p['N_INH'])])
v_r = cc(
[np.repeat(p['V_R_PC'], p['N_PC']), np.repeat(p['V_R_INH'], p['N_INH'])])
t_rp = cc(
[np.repeat(p['T_R_PC'], p['N_PC']), np.repeat(p['T_R_INH'], p['N_INH'])])
# set latent nrn positions
lb = [-s_params['BOX_W']/2, -s_params['BOX_H']/2]
ub = [s_params['BOX_W']/2, s_params['BOX_H']/2]
# sample evenly spaced place fields
## E cells
pfxs_e, pfys_e = cxn.apx_lattice(lb, ub, p['N_PC'], randomize=True)
## I cells
pfxs_i, pfys_i = cxn.apx_lattice(lb, ub, p['N_INH'], randomize=True)
## join E & I place fields
pfxs = cc([pfxs_e, pfxs_i])
pfys = cc([pfys_e, pfys_i])
# make upstream ws
if p['W_PC_PL'] > 0:
w_pc_pl_flat = np.random.lognormal(
*lognormal_mu_sig(p['W_PC_PL'], p['S_PC_PL']), p['N_PC'])
else:
w_pc_pl_flat = np.zeros(p['N_PC'])
if p['W_PC_G'] > 0:
w_pc_g_flat = np.random.lognormal(
*lognormal_mu_sig(p['W_PC_G'], p['S_PC_G']), p['N_PC'])
else:
w_pc_g_flat = np.zeros(p['N_PC'])
ws_up_temp = {
'E': {
('PC', 'PL'): np.diag(w_pc_pl_flat),
('PC', 'G'): np.diag(w_pc_g_flat),
},
}
targs_up = cc([np.repeat('PC', p['N_PC']), np.repeat('INH', p['N_INH'])])
srcs_up = cc([np.repeat('PL', p['N_PC']), np.repeat('G', p['N_PC'])])
ws_up = join_w(targs_up, srcs_up, ws_up_temp)
# make rcr ws
w_pc_pc = cxn.make_w_pc_pc(pfxs[:p['N_PC']], pfys[:p['N_PC']], p)
w_inh_pc = cxn.make_w_inh_pc(
pfxs_inh=pfxs[-p['N_INH']:],
pfys_inh=pfys[-p['N_INH']:],
pfxs_pc=pfxs[:p['N_PC']],
pfys_pc=pfys[:p['N_PC']],
p=p)
w_pc_inh = cxn.make_w_pc_inh(
pfxs_pc=pfxs[:p['N_PC']],
pfys_pc=pfys[:p['N_PC']],
pfxs_inh=pfxs[-p['N_INH']:],
pfys_inh=pfys[-p['N_INH']:],
p=p)
ws_rcr_temp = {
'E': {
('PC', 'PC'): w_pc_pc,
('INH', 'PC'): w_inh_pc,
},
'I': {
('PC', 'INH'): w_pc_inh,
},
}
targs_rcr = cc([np.repeat('PC', p['N_PC']), np.repeat('INH', p['N_INH'])])
ws_rcr = join_w(targs_rcr, targs_rcr, ws_rcr_temp)
# make ntwk
ntwk = LIFNtwk(
t_m=t_m,
e_l=e_l,
v_th=v_th,
v_r=v_r,
t_r=t_rp,
es_syn={'E': p['E_E'], 'I': p['E_I']},
ts_syn={'E': p['T_E'], 'I': p['T_I']},
ws_up=ws_up,
ws_rcr=ws_rcr)
ntwk.pfxs = pfxs
ntwk.pfys = pfys
ntwk.types_up = srcs_up
ntwk.types_rcr = targs_rcr
ntwk.n_pc = p['N_PC']
ntwk.n_inh = p['N_INH']
ntwk.n_g = p['N_PC']
ntwk.n_inp = p['N_PC']
ntwk.n_rcr = p['N_PC'] + p['N_INH']
ntwk.n_up = 2 * p['N_PC']
ntwk.types_up_slc = {
'PL': slice(0, p['N_PC']),
'G': slice(p['N_PC'], 2*p['N_PC'])
}
ntwk.types_rcr_slc = {
'PC': slice(0, p['N_PC']),
'INH': slice(p['N_PC'], p['N_PC'] + p['N_INH'])
}
    return ntwk
def divideData(data):
"""Given test and validation sets, divide
the data for three different sets"""
testListFile = []
    fin = open('data/multi-woz/testListFile.json')
for line in fin:
testListFile.append(line[:-1])
fin.close()
valListFile = []
    fin = open('data/multi-woz/valListFile.json')
for line in fin:
valListFile.append(line[:-1])
fin.close()
trainListFile = open('data/trainListFile', 'w')
test_dials = []
val_dials = []
train_dials = []
# dictionaries
word_freqs_usr = OrderedDict()
word_freqs_sys = OrderedDict()
count_train, count_val, count_test = 0, 0, 0
for dialogue_name in data:
# print dialogue_name
dial_item = data[dialogue_name]
domains = []
for dom_k, dom_v in dial_item['goal'].items():
if dom_v and dom_k not in IGNORE_KEYS_IN_GOAL: # check whether contains some goal entities
domains.append(dom_k)
dial = get_dial(data[dialogue_name])
if dial:
dialogue = {}
dialogue['dialogue_idx'] = dialogue_name
dialogue['domains'] = list(set(domains)) #list(set([d['domain'] for d in dial]))
last_bs = []
dialogue['dialogue'] = []
for turn_i, turn in enumerate(dial):
# usr, usr_o, sys, sys_o, sys_a, domain
turn_dialog = {}
turn_dialog['system_transcript'] = dial[turn_i-1]['sys'] if turn_i > 0 else ""
turn_dialog['turn_idx'] = turn_i
turn_dialog['belief_state'] = [{"slots": [s], "act": "inform"} for s in turn['bvs']]
turn_dialog['turn_label'] = [bs["slots"][0] for bs in turn_dialog['belief_state'] if bs not in last_bs]
turn_dialog['transcript'] = turn['usr']
turn_dialog['system_acts'] = dial[turn_i-1]['sys_a'] if turn_i > 0 else []
turn_dialog['domain'] = turn['domain']
last_bs = turn_dialog['belief_state']
dialogue['dialogue'].append(turn_dialog)
if dialogue_name in testListFile:
test_dials.append(dialogue)
count_test += 1
elif dialogue_name in valListFile:
val_dials.append(dialogue)
count_val += 1
else:
trainListFile.write(dialogue_name + '\n')
train_dials.append(dialogue)
count_train += 1
print("# of dialogues: Train {}, Val {}, Test {}".format(count_train, count_val, count_test))
# save all dialogues
    with open('data/dev_dials.json', 'w') as f:
        json.dump(val_dials, f, indent=4)
    with open('data/test_dials.json', 'w') as f:
        json.dump(test_dials, f, indent=4)
    with open('data/train_dials.json', 'w') as f:
        json.dump(train_dials, f, indent=4)
    # return word_freqs_usr, word_freqs_sys
def createAaronWorld():
"""
Create an empty world as an example to build future projects from.
"""
# Set up a barebones project
project = makeBasicProject()
# Create sprite sheet for the player sprite
player_sprite_sheet = addSpriteSheet(project, "actor_animated.png", "actor_animated", "actor_animated")
project.settings["playerSpriteSheetId"] = player_sprite_sheet["id"]
# add a sprite we can use for the rocks
a_rock_sprite = addSpriteSheet(project, "rock.png", "rock", "static")
a_dog_sprite = addSpriteSheet(project, "dog.png", "dog", "static")
# Add a background image
default_bkg = makeBackground("placeholder.png", "placeholder")
project.backgrounds.append(default_bkg)
a_scene = makeScene(f"Scene", default_bkg)
project.scenes.append(a_scene)
actor = makeActor(a_rock_sprite, 9, 8)
a_scene['actors'].append(actor)
dog_actor = makeActor(a_dog_sprite, 5, 5)
dog_script = []
element = makeElement()
element["command"] = "EVENT_ACTOR_EMOTE"
element["args"] = {
"actorId": "player",
"emoteId": "1"
}
dog_script.append(element)
element = makeElement()
element["command"] = "EVENT_END"
dog_script.append(element)
dog_actor["script"] = dog_script
a_scene['actors'].append(dog_actor)
# Add some music
project.music.append(makeMusic("template", "template.mod"))
# Set the starting scene
project.settings["startSceneId"] = project.scenes[0]["id"]
    return project
def builtin_ljustify(s, w, p):
"""Left-justify a string to a given width with a given padding character."""
sv = s.convert(BStr()).value
pv = p.convert(BStr()).value
    return BStr(sv.ljust(w.value, pv))
def package_files(directory):
"""package_files
    recursive helper that lets you set the
package_data parameter in the setup call.
"""
paths = []
for (path, _, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
    return paths
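# Usage sketch of the pattern the docstring describes; the 'mypackage' name and
# data directory are placeholders, not from the original project.
from setuptools import setup, find_packages

extra_files = package_files('mypackage/data')
setup(
    name='mypackage',
    packages=find_packages(),
    package_data={'': extra_files},
)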
def get_effective_router(appname):
"""Returns a private copy of the effective router for the specified application"""
if not routers or appname not in routers:
return None
    return Storage(routers[appname])
def build(ctx, cclean=False, sys='ninja'):
"""
Build C++ code and install the artifacts.
"""
if not check_option(sys, ['makefile', 'ninja']):
return
sys_build = {
'makefile' : {
'Generate' : '-G"Unix Makefiles"',
'Install' : 'make install',
},
'ninja' : {
'Generate' : '-G"Ninja" -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON',
'Install' : 'ninja install',
},
}
project_name, project_pwd = get_project_name_and_folder()
if cclean:
clean(ctx)
build_folder = project_pwd + '/build/'
build_folder_exists = os.path.isdir(build_folder) and os.path.exists(build_folder)
sys_build_folder = project_pwd + '/build/_' + sys
sys_build_folder_exists = os.path.isdir(sys_build_folder) and os.path.exists(sys_build_folder)
build_commands = [
'cd ' + project_pwd,
'mkdir build',
'cd build',
'mkdir _' + sys,
'cd _' + sys,
'cmake ' + sys_build[sys]['Generate'] + ' ../..',
'cmake --build .',
]
if build_folder_exists:
build_commands.remove('mkdir build')
if sys_build_folder_exists:
build_commands.remove('mkdir _' + sys)
print_color(colors.BLUE, ">>> Building! <<<" )
ctx.run(' && '.join(build_commands))
install_commands = [
'cd ' + project_pwd,
'cd build',
'cd _' + sys,
sys_build[sys]['Install']
]
print_color(colors.BLUE, ">>> Installing! <<<" )
ctx.run(' && '.join(install_commands))
print_color(colors.BLUE, ">>> Builded and installed. <<<") | 5,356,832 |
def is_activated(user_id):
"""Checks if a user has activated their account. Returns True or false"""
cur = getDb().cursor()
cur.execute('SELECT inactive FROM users where user_id=%s', (user_id,))
inactive = cur.fetchone()[0]
cur.close()
    return inactive != 1  # use equality, not identity, when comparing with a literal
def _encode(dictionary):
"""Encodes any arbitrary dictionary into a pagination token.
Args:
        dictionary: (dict) Dictionary to base64-encode
Returns:
(string) encoded page token representing a page of items
"""
# Strip ugly base64 padding.
    return base64.urlsafe_b64encode(json.dumps(dictionary).encode()).decode().rstrip('=')
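# A hypothetical decode counterpart (not from the original module) that mirrors
# _encode above: it restores the stripped '=' padding before base64-decoding.
# Assumes `base64` and `json` are imported at module level.
def _decode(token):
    """Decodes a pagination token produced by _encode back into a dictionary."""
    padded = token + '=' * (-len(token) % 4)  # urlsafe_b64decode requires correct padding
    return json.loads(base64.urlsafe_b64decode(padded))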
def remove_duplicates_sharded(
files: List[Path],
outputs: List[Path],
hashes_dir: FilesOrDir,
field: str,
group_hashes: int = 1,
tmp_dir: Path = None,
min_len: int = 0,
):
"""Remove duplicates in several passes, when all hashes don't fit in RAM.
Note: The current implementation is not doing a 'perfect' deduplication.
If a hash appear exactly once in each shard of hashes it won't be detected
as a duplicate. This can be fixed if hashes are fully dedup beforehand.
"""
assert len(files) == len(outputs)
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"
if len(hashes_files) <= group_hashes:
log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
rm_dups = DuplicatesRemover(field, hashes_files)
rm_dups._prepare()
run_par(
(jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
for f, o in zip(files, outputs)
)
return
log(f"Starting deduplicate_sharded on {files}.")
tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)
def tmp_files(i):
return [
Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
for f in files
]
last = tmp_files(0)
run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))
for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
hashes = FlatHashSet()
for h in group:
hashes.load(h)
log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")
intermediates = tmp_files(i + 1)
# Remove hashes in parallel. Since modern OS have "copy-on-write" and
# `hashes` is read-only, we will only have one version of it in RAM.
run_par(
(_remove_duplicate_hashes, (hashes, f, tmp), {})
for f, tmp in zip(last, intermediates)
)
# Force hashes to be freed, before we start allocating a new one.
del hashes
gc.collect()
for tmp in last:
os.remove(tmp)
last = intermediates
def finalize(source, dedup_hashes, min_len):
n_chars, n_chars_kept = 0, 0
with open(dedup_hashes, "rb") as hashes:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content or len(content) < min_len:
continue
sentences = content.split("\n")
doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
chars, kept_chars = finalize_doc(doc, field, doc_hashes)
n_chars += chars
n_chars_kept += kept_chars
yield doc
selectivity = n_chars_kept / n_chars if n_chars else 0
log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
dedup_hashes = last
run_par(
[
(
jsonql.run_pipe,
(finalize,),
dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
)
for h, f, o in zip(dedup_hashes, files, outputs)
]
)
    tmp_directory.cleanup()
def parse_checkfile(filename):
"""Load a module containing extra Check subclasses. This is a nuisance;
programmatic module loading is different in Py2 and Py3, and it's not
pleasant in either.
"""
global checkfile_counter
modname = '_cc_%d' % (checkfile_counter,)
checkfile_counter += 1
fl = open(filename)
try:
if sys.version_info.major == 2:
import imp
mod = imp.load_module(modname, fl, filename, ('.py', 'U', imp.PY_SOURCE))
# For checking the contents...
classtype = types.ClassType
else: # Python3
import importlib.util
spec = importlib.util.spec_from_file_location(modname, filename)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
# For checking the contents...
classtype = type
for key in dir(mod):
val = getattr(mod, key)
if type(val) is classtype and issubclass(val, Check):
if val is Check:
continue
if val in checkclasses:
continue
checkclasses.insert(0, val)
finally:
        fl.close()
def test_downsample_in_time_totally_flagged(hera_uvh5, future_shapes):
"""Test the downsample_in_time method with totally flagged integrations"""
uv_object = hera_uvh5
if future_shapes:
uv_object.use_future_array_shapes()
uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
# reorder to make sure we get the right value later
uv_object.reorder_blts(order="baseline", minor_order="time")
uv_object2 = uv_object.copy()
# save some values for later
init_wf = uv_object.get_data(0, 1)
original_int_time = np.amax(uv_object.integration_time)
# check that there are no flags
assert np.nonzero(uv_object.flag_array)[0].size == 0
init_ns = uv_object.get_nsamples(0, 1)
# change the target integration time
min_integration_time = original_int_time * 2.0
# add more flags and try again. When all the input points are flagged,
# data and nsample should have the same results as no flags but the output
# should be flagged
inds01 = uv_object.antpair2ind(0, 1)
if future_shapes:
uv_object.flag_array[inds01[:2], 0, 0] = True
else:
uv_object.flag_array[inds01[:2], 0, 0, 0] = True
uv_object2 = uv_object.copy()
uv_object.downsample_in_time(
min_int_time=min_integration_time, blt_order="baseline", minor_order="time"
)
out_wf = uv_object.get_data(0, 1)
assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
# make sure nsamples is correct
out_ns = uv_object.get_nsamples(0, 1)
assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
# check that the new sample is flagged
out_flag = uv_object.get_flags(0, 1)
assert out_flag[0, 0, 0]
# Compare doing it with n_times_to_avg
uv_object2.downsample_in_time(
n_times_to_avg=2, blt_order="baseline", minor_order="time"
)
assert uv_object.history != uv_object2.history
uv_object2.history = uv_object.history
    assert uv_object == uv_object2
def stop_stream_encryption(StreamName=None, EncryptionType=None, KeyId=None):
"""
Disables server-side encryption for a specified stream.
Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING . After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE . Stopping encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING . Once the status of the stream is ACTIVE , records written to the stream are no longer encrypted by Kinesis Data Streams.
API Limits: You can successfully disable server-side encryption 25 times in a rolling 24-hour period.
Note: It can take up to 5 seconds after the stream is in an ACTIVE status before all records written to the stream are no longer subject to encryption. After you disabled encryption, you can verify that encryption is not applied by inspecting the API response from PutRecord or PutRecords .
See also: AWS API Documentation
Exceptions
:example: response = client.stop_stream_encryption(
StreamName='string',
EncryptionType='NONE'|'KMS',
KeyId='string'
)
:type StreamName: string
:param StreamName: [REQUIRED]\nThe name of the stream on which to stop encrypting records.\n
:type EncryptionType: string
:param EncryptionType: [REQUIRED]\nThe encryption type. The only valid value is KMS .\n
:type KeyId: string
:param KeyId: [REQUIRED]\nThe GUID for the customer-managed AWS KMS key to use for encryption. This value can be a globally unique identifier, a fully specified Amazon Resource Name (ARN) to either an alias or a key, or an alias name prefixed by 'alias/'.You can also use a master key owned by Kinesis Data Streams by specifying the alias aws/kinesis .\n\nKey ARN example: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012\nAlias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName\nGlobally unique key ID example: 12345678-1234-1234-1234-123456789012\nAlias name example: alias/MyAliasName\nMaster key owned by Kinesis Data Streams: alias/aws/kinesis\n\n
:returns:
Kinesis.Client.exceptions.InvalidArgumentException
Kinesis.Client.exceptions.LimitExceededException
Kinesis.Client.exceptions.ResourceInUseException
Kinesis.Client.exceptions.ResourceNotFoundException
"""
    pass
def selectPlate(plates, jdRange, normalise=False, scope='all'):
"""From a list of simulated plates, returns the optimal one."""
# Gets the JD range for the following night
nextNightJDrange = _getNextNightRange(jdRange)
# First we exclude plates without new exposures
plates = [plate for plate in plates if plate._after['nNewExposures'] > 0]
# Sorts plates by inverse plate completion.
plates = sorted(plates, reverse=True, key=lambda plate: plate.getPlateCompletion()
if plate.getPlateCompletion() <= 1 else 1. / plate.getPlateCompletion())
if len(plates) == 0:
return None
# If we are scheduling only plugged plates, we rather plug a new plate
# unless we can observe a plugged plate at least for a whole set.
availableTime = (jdRange[1] - jdRange[0]) * 24.
completionIncrease = np.array(
[plate._after['completion'] - plate._before['completion'] for plate in plates])
# minSchedulingTime ensures that if the remaining time < length of a set,
# we still use the plugged plates, if any.
if scope == 'plugged':
if (availableTime > minSchedulingTime and np.all(completionIncrease == 0)):
return None
else:
# If no plate has been observed for a whole set, we try to use first
# plates that are already plugged.
if np.all(completionIncrease == 0):
pluggedPlates = [plate for plate in plates if plate.isPlugged]
if len(pluggedPlates) > 0:
plates = pluggedPlates
# If plugger, tries to select only plates at APO
if scope == 'plugged':
platesAtAPO = [plate for plate in plates if plate.getLocation() == 'APO']
if len(platesAtAPO) > 0:
plates = platesAtAPO
# Now tries to select only plates that have been marked.
markedPlates = [
plate for plate in plates if 'Accepted' in [status.label for status in plate.statuses]
]
if len(markedPlates) > 0:
plates = markedPlates
# We check if any of the plate is complete after the simulation.
# If so, we return the one with fewer new exposures.
completePlates = [plate for plate in plates
if plate._after['completion'] > plate.completion_factor]
nNewExposures = [plate._after['nNewExposures'] for plate in completePlates]
if len(completePlates) > 0:
return completePlates[np.argmin(nNewExposures)]
# We record the real completion before and after. We will normalise the
# other completions based on our scheduling logic.
for plate in plates:
plate._before['realCompletion'] = plate._before['completion']
plate._before['realCompletion+'] = plate._before['completion+']
plate._after['realCompletion'] = plate._after['completion']
plate._after['realCompletion+'] = plate._after['completion+']
# If normalise=True, we divide the several completion values by the
# length of the observing window for the plate, normalised by the length
# of the minimum plate window. The effect of this is that plates with short
# observing windows get comparatively larger completions and, thus, have
# higher chance of being selected. This is good for plugged plates, as it
# tries to schedule first plates with short windows even if other plates
# could be completed at the time.
# We also increase the completion of plates for which we have patched sets,
# while we penalise those with incomplete sets. With this logic, we hope
# that plates are observed when their incomplete sets can be patched.
if normalise:
_normaliseWindowLength(plates, jdRange, factor=1.0, apply=True)
# We also normalise using the following night, if possible.
if nextNightJDrange is not None:
_normaliseWindowLength(plates, nextNightJDrange, factor=nextNightFactor, apply=True)
# Now we normalise plate completion using a metric that gives higher
# priority to plates for which we have patched incomplete sets.
patchedSetFactor = []
for plate in plates:
nSetsFactor = 0
for ss in plate.sets:
if not ss.isMock:
nNewExps = 0
for exp in ss.totoroExposures:
if hasattr(exp, '_tmp') and exp._tmp:
nNewExps += 1
setComplete = ss.getStatus()[0] in ['Good', 'Excellent']
if setComplete and nNewExps == 0:
pass
else:
if nNewExps > 0:
nSetsFactor += 2 * nNewExps
if setComplete:
nSetsFactor *= 2
else:
nSetsFactor -= 1
patchedSetFactor.append(1. + patchSetFactor * nSetsFactor)
_completionFactor(plates, patchedSetFactor)
# We add the priority into the mix
platePriorities = np.array([plate.priority for plate in plates]) - 5.
_completionFactor(plates, 1 + platePriorityFactor * platePriorities)
ancillaryPriorities = []
for plate in plates:
if hasattr(plate, 'ancillary_weight'):
ancillaryPriorities.append(plate.ancillary_weight)
else:
ancillaryPriorities.append(1)
_completionFactor(plates, np.array(ancillaryPriorities))
# Selects the plates that have the largest increase in completion
completionIncrease = [plate._after['completion'] - plate._before['completion']
for plate in plates if plate.completion_factor <= 1.]
if len(completionIncrease) == 0:
for plate in plates:
if plate.completion_factor > 1:
completionIncrease.append(
plate._after['completion'] - plate._before['completion'])
completionIncrease = np.array(completionIncrease)
plates = np.array(plates)
maxCompletionIncrease = np.max(completionIncrease)
plates = plates[np.where(completionIncrease == maxCompletionIncrease)]
if len(plates) == 1:
return plates[0]
# If maxCompletionIncrease is 0, it means that no plate has been
# observed for at least a set. In this case, if possible, we want to use
# a plate that already has signal.
if maxCompletionIncrease == 0:
platesWithSignal = [plate for plate in plates if plate._before['completion+'] > 0]
if len(platesWithSignal) > 0:
plates = platesWithSignal
# If several plates have maximum completion increase, use the incomplete
# sets to break the tie.
completionIncreasePlus = np.array(
[plate._after['completion+'] - plate._before['completion+'] for plate in plates])
    return plates[np.argmax(completionIncreasePlus)]
def scenario_3_2():
"""
Same day, same recipient list, different shift start (for example 6am and 6pm on same day)
Should send email in all cases
"""
shift_date_morning_str = datetime.datetime.strftime(date_utils.key_to_date('2022010200') , date_utils.HOUR_KEY_FMT)
# recepients same as third record (not to send)
assert correspondence_manager.was_notification_sent(agency, category_shift, report_type, recip3_summary, shift_date_1_str, recepients3) == True
# same recipients, same date, but different shift start (ok to send)
assert correspondence_manager.was_notification_sent(agency, category_shift, report_type, recip2_summary, shift_date_morning_str, recepients2) == False
    print('Scenario 3_2 Pass')
def test_link_image_layers_all_attributes(key, value):
"""Test linking common attributes across layers of similar types."""
l1 = layers.Image(np.random.rand(10, 10), contrast_limits=(0, 0.8))
l2 = layers.Image(np.random.rand(10, 10), contrast_limits=(0.1, 0.9))
link_layers([l1, l2])
# linking does (currently) apply to things that were unequal before linking
assert l1.contrast_limits != l2.contrast_limits
# once we set either... they will both be changed
assert getattr(l1, key) != value
setattr(l2, key, value)
    assert getattr(l1, key) == getattr(l2, key) == value
def get_events(
raw: mne.io.BaseRaw,
event_picks: Union[str, list[str], list[tuple[str, str]]],
) -> tuple[np.ndarray, dict]:
"""Get events from given Raw instance and event id."""
if isinstance(event_picks, str):
event_picks = [event_picks]
events = None
for event_pick in event_picks:
if isinstance(event_pick, str):
event_id = {event_pick: 1}
else:
event_id = {event_pick[0]: 1, event_pick[1]: -1}
try:
events, _ = mne.events_from_annotations(
raw=raw,
event_id=event_id,
verbose=True,
)
return events, event_id
except ValueError as error:
print(error)
_, event_id_found = mne.events_from_annotations(
raw=raw,
verbose=False,
)
raise ValueError(
f"None of the given `event_picks´ found: {event_picks}."
f"Possible events: {*event_id_found.keys(),}"
    )
def transplant(root, u, v):
"""
    Note: the (possibly new) root must be returned here; otherwise a replacement of the root node would be lost.
"""
    if u.parent is None:
root = v
elif u.parent.left == u:
u.parent.left = v
else:
u.parent.right = v
if v:
v.parent = u.parent
    return root
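# A self-contained sketch (the Node class is hypothetical, not from the original tree
# module) showing why the new root must be returned: when u is the root node, only the
# caller's reference can be updated.
class Node:
    def __init__(self, key):
        self.key = key
        self.parent = None
        self.left = None
        self.right = None

root = Node(10)
child = Node(5)
child.parent = root
root.left = child

root = transplant(root, root, child)  # replace the root with its left child
assert root is child and child.parent is None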
def is_head_moderator():
"""
Returns true if invoking author is a Head Moderator (role).
"""
async def predicate(ctx: Context):
if not any(config.HEAD_MOD_ROLE in role.id for role in ctx.author.roles):
raise NotStaff("The command `{}` can only be used by a Head Moderator.".format(ctx.invoked_with))
return True
    return commands.check(predicate)
def compute_adj_matrices(type, normalize=True):
"""
Computes adjacency matrices 'n', 'd' or 's' used in GCRAM.
"""
# Get channel names
raw = mne.io.read_raw_edf('dataset/physionet.org/files/eegmmidb/1.0.0/S001/S001R01.edf', preload=True, verbose=False).to_data_frame()
ch_names = raw.columns[2:]
n_channels = 64
# Compute channel position distances using electrode positions. Required for computing 'd' and 's' adjacency matrices
ch_pos_1010 = get_sensor_pos(ch_names)
ch_pos_1010_names = []
ch_pos_1010_dist = []
for name, value in ch_pos_1010.items():
ch_pos_1010_names.append(name)
ch_pos_1010_dist.append(value)
ch_pos_1010_dist = np.array(ch_pos_1010_dist)
# Compute adjacency matrices
if type=='n':
A = n_graph()
elif type=='d':
A = d_graph(n_channels, ch_pos_1010_dist)
elif type=='s':
A = s_graph(n_channels, ch_pos_1010_dist)
# Normalize adjacency matrices
if normalize:
A = normalize_adj(A)
A = np.array(A, dtype=np.float32)
    return A
def build_census_chart(
*, alt, census_floor_df: pd.DataFrame, max_y_axis: Optional[int] = None, use_log_scale: bool = False
) -> Chart:
"""
This builds the "Admitted Patients" census chart, projecting total number of patients in the hospital over time.
Args:
alt: Reference to Altair package.
census_floor_df: Pandas data frame containing three columns: "census_hospitalized", "census_icu", and
"census_ventilated".
max_y_axis: Optional maximum value for the Y axis of the chart.
use_log_scale: Set to true to use a logarithmic scale on the Y axis. Default is linear scale.
Returns: The newly created chart.
"""
adjusted_census_floor_df = __adjust_data_for_log_scale(census_floor_df) if use_log_scale else census_floor_df
y_scale = __build_y_scale(alt, max_y_axis, use_log_scale)
x = dict(shorthand="date:T", title=i18n.t("charts-date"), axis=alt.Axis(format=(DATE_FORMAT)))
y = dict(shorthand="value:Q", title=i18n.t("charts-census"), scale=y_scale)
color = "key:N"
tooltip = ["date:T", alt.Tooltip("value:Q", format=".0f", title="Census"), "key:N"]
# TODO fix the fold to allow any number of dispositions
points = (
alt.Chart()
.transform_fold(fold=[i18n.t("census_hospitalized"), i18n.t("census_icu"), i18n.t("census_ventilated")])
.encode(x=alt.X(**x), y=alt.Y(**y), color=color, tooltip=tooltip)
.mark_line(point=True)
.encode(
x=alt.X(**x),
y=alt.Y(**y),
color=color,
tooltip=tooltip,
)
)
bar = (
alt.Chart()
.encode(x=alt.X(**x))
.transform_filter(alt.datum.day == 0)
.mark_rule(color="black", opacity=0.35, size=2)
)
census_floor_df_renamed = adjusted_census_floor_df.rename({
"census_hospitalized": i18n.t("census_hospitalized"),
"census_icu": i18n.t("census_icu"),
"census_ventilated": i18n.t("census_ventilated")
}, axis=1)
return (
alt.layer(points, bar, data=census_floor_df_renamed)
.configure_legend(orient="bottom")
.interactive()
    )
def hasNonAsciiCharacters(sText):
"""
Returns True is specified string has non-ASCII characters, False if ASCII only.
"""
sTmp = unicode(sText, errors='ignore') if isinstance(sText, str) else sText;
    return not all(ord(ch) < 128 for ch in sTmp);
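# The helper above relies on Python 2's `unicode` builtin. A hedged Python 3
# equivalent (an assumption, not part of the original codebase) could look like this:
def has_non_ascii_characters_py3(text):
    """Return True if the given str/bytes contains any non-ASCII character."""
    if isinstance(text, bytes):
        return any(byte >= 128 for byte in text)
    return not all(ord(ch) < 128 for ch in text)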
def draw_matches(image_1, image_1_keypoints, image_2, image_2_keypoints, matches):
""" Draws the matches between the image_1 and image_2.
(Credit: GT CP2017 course provided source)
Params:
image_1: The first image (can be color or grayscale).
image_1_keypoints: The image_1 keypoints.
image_2: The image to search in (can be color or grayscale)
        image_2_keypoints: The image_2 keypoints.
        matches: List of cv2.DMatch objects pairing keypoints between the two images.
Returns:
output: Image with a line drawn between matched keypoints.
"""
# Compute number of channels.
num_channels = 1
if len(image_1.shape) == 3:
num_channels = image_1.shape[2]
# Separation between images.
margin = 10
# Create an array that will fit both images (with a margin of 10 to
# separate the two images)
joined_image = np.zeros((max(image_1.shape[0], image_2.shape[0]),
image_1.shape[1] + image_2.shape[1] + margin,
3))
if num_channels == 1:
for channel_idx in range(3):
joined_image[:image_1.shape[0],
:image_1.shape[1],
channel_idx] = image_1
joined_image[:image_2.shape[0],
image_1.shape[1] + margin:,
channel_idx] = image_2
else:
joined_image[:image_1.shape[0], :image_1.shape[1]] = image_1
joined_image[:image_2.shape[0], image_1.shape[1] + margin:] = image_2
for match in matches:
image_1_point = (int(image_1_keypoints[match.queryIdx].pt[0]),
int(image_1_keypoints[match.queryIdx].pt[1]))
image_2_point = (int(image_2_keypoints[match.trainIdx].pt[0] +
image_1.shape[1] + margin),
int(image_2_keypoints[match.trainIdx].pt[1]))
        rgb = (np.random.rand(3) * 255).astype(int)  # np.int was removed in modern NumPy
cv2.circle(joined_image, image_1_point, 5, rgb, thickness=-1)
cv2.circle(joined_image, image_2_point, 5, rgb, thickness=-1)
cv2.line(joined_image, image_1_point, image_2_point, rgb, thickness=3)
    return joined_image
def copy_ttl_in():
"""
COPY_TTL_IN Action
"""
return _action("COPY_TTL_IN") | 5,356,849 |
def neuron_weight(
layer: str,
weight: torch.Tensor,
x: Optional[int] = None,
y: Optional[int] = None,
batch: Optional[int] = None,
) -> Objective:
"""Linearly weighted channel activation at one location as objective
:param layer: Name of the layer
:type layer: str
:param weight: A torch.Tensor of same length as the number of channels
:type weight: torch.Tensor
:param x: x-position, defaults to None
:type x: Optional[int], optional
:param y: y-position, defaults to None
:type y: Optional[int], optional
:param batch: which position at the batch dimension of the image tensor this objective is applied to, defaults to None
:type batch: Optional[int], optional
:return: Objective to optimize input for a linearly weighted channel activation at one location
:rtype: Objective
"""
@handle_batch(batch)
def inner(model):
layer_t = model(layer)
layer_t = _extract_act_pos(layer_t, x, y)
if weight is None:
return -layer_t.mean()
else:
return -(layer_t.squeeze() * weight).mean()
    return inner
def off():
"""Set all LEDs to 0/off"""
all(0)
    show()
def genargs() -> ArgumentParser:
"""
Generate an input string parser
:return: parser
"""
parser = ArgumentParser()
parser.add_argument("indir", help="Location of input shexj files")
parser.add_argument("outdir", help="Location of output shexc files")
parser.add_argument("-s", "--save", help="Save edited shexj image before conversion", action="store_true")
    return parser
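# Usage sketch (directory names are placeholders): convert ShExJ sources in ./shexj
# into ShExC output in ./shexc, saving edited ShExJ images first.
opts = genargs().parse_args(["shexj", "shexc", "--save"])
print(opts.indir, opts.outdir, opts.save)  # shexj shexc True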
def backup_plugin():
"""
Backup scraping-prod mongodb
"""
    backup('plugin')
def _find_and_remove_value_info_by_name(graph, name):
"""Find and remove value info in a graph by name.
"""
for value_info in graph.value_info:
if value_info.name == name:
graph.value_info.remove(value_info)
            break
def get_model_name(factory_class):
"""Get model fixture name by factory."""
return (
inflection.underscore(factory_class._meta.model.__name__)
        if not isinstance(factory_class._meta.model, str) else factory_class._meta.model)
def validate_and_filter_args(cls_list, ns):
"""
For each CONDITIONAL item found,
if conditional holds true, then argument must be set
else, then argument shouldn't be changed (value should be None or default)
"""
d = ns.__dict__.copy()
n_errs = [0]
def set_error(s):
print colorize(s,'red')
n_errs[0] += 1
for cls in cls_list:
group_cond_holds = eval(cls._group_condition, d) #pylint: disable=W0212
for (attr_name,conditional) in cls.items():
val = conditional.val
cond_holds = eval(conditional.cond, d) and group_cond_holds
if cond_holds:
if d[attr_name] is None: # required param with no default value was not specified
set_error("missing conditionally required argument %s. (condition: \"%s\", group condition: \"%s\".)"%(attr_name, conditional.cond, cls._group_condition)) #pylint: disable=W0212
else:
# Specified value for param with no default value, or optional param with a default value
if isinstance(val,type) and d[attr_name] is not None or (not isinstance(val,type)) and d[attr_name] != val:
set_error("Specified value for conditional argument %s, but conditional doesn't hold (condition: \"%s\", group condition: \"%s\")"%(attr_name, conditional.cond, cls._group_condition)) #pylint: disable=W0212
del ns.__dict__[attr_name]
# print "removed",attr_name
if n_errs[0] > 0:
raise RuntimeError("%i argument errors"%n_errs[0]) | 5,356,856 |
def keywords(kwarg1=None, kwarg2=None):
""" Test function for live kwargs | str --> None
Copy paste following to test:
    kwarg1 = foo, kwarg2 = bar
"""
    print('Keywords: ' + kwarg2 + ' ' + kwarg1)
def GetSpatialFeatures(img, size=(32, 32), isFeatureVector=True):
""" Extracts spatial features of the image.
param: img: Source image
param: size: Target image size
param: isFeatureVector: Indication if the result needs to be unrolled into a feature vector
returns: Spatial features
"""
resizedImg = cv2.resize(img, size)
if isFeatureVector:
return resizedImg.ravel()
else:
        return resizedImg
def html_to_text(content):
"""Filter out HTML from the text."""
text = content['text']
try:
text = html.document_fromstring(text).text_content()
except etree.Error as e:
logging.error(
'Syntax error while processing {}: {}\n\n'
'Falling back to regexes'.format(text, e))
text = re.sub(r'<[^>]*>', '', text)
text = _to_unicode(text)
content['text'] = text
    return content
def backup_local(config):
""" Creates a local backup of the local folder
"""
for folder in config.folders:
folder_path = getrealhome(folder['path'])
backup_path = folder_path + ".backup"
gui.debug("Backing up from " + folder_path + " to " + backup_path)
if not os.path.exists(backup_path):
gui.debug(backup_path + " does not exist, creating")
os.makedirs(backup_path)
cmd = 'rsync -av ' + folder_path + '/ ' + backup_path
        cmd_run = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
        cmd_run.communicate()  # wait for rsync to finish (and drain stdout) before the next folder
async def get_robot_positions() -> control.RobotPositionsResponse:
"""
Positions determined experimentally by issuing move commands. Change
pipette position offsets the mount to the left or right such that a user
can easily access the pipette mount screws with a screwdriver. Attach tip
position places either pipette roughly in the front-center of the deck area
"""
robot_positions = control.RobotPositions(
change_pipette=control.ChangePipette(
target=control.MotionTarget.mount, left=[300, 40, 30], right=[95, 40, 30]
),
attach_tip=control.AttachTip(
target=control.MotionTarget.pipette, point=[200, 90, 150]
),
)
    return control.RobotPositionsResponse(positions=robot_positions)
def compute_basis(normal):
""" Compute an orthonormal basis for a vector. """
u = [0.0, 0.0, 0.0]
v = [0.0, 0.0, 0.0]
u[0] = -normal[1]
u[1] = normal[0]
u[2] = 0.0
if ((u[0] == 0.0) and (u[1] == 0.0)):
u[0] = 1.0
mag = vector_mag(u)
if (mag == 0.0):
return
for i in range(0, 3):
u[i] = u[i] / mag
v = cross_product(normal, u)
mag = vector_mag(v)
if (mag != 0.0):
for i in range(0, 3):
v[i] = v[i] / mag
    return u, v
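# Quick sanity check (assumes the vector_mag/cross_product helpers used above are
# available; the example normal is arbitrary): u, v and the normal should be
# mutually orthogonal unit vectors.
normal = [0.0, 0.0, 1.0]
u, v = compute_basis(normal)
assert abs(sum(a * b for a, b in zip(u, v))) < 1e-9
assert abs(sum(a * b for a, b in zip(u, normal))) < 1e-9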
def do_LEE_correction(max_local_sig, u1, u2, exp_phi_1, exp_phi_2):
"""
Return the global p-value for an observed local significance
after correcting for the look-elsewhere effect
given expected Euler characteristic exp_phi_1 above level u1
and exp_phi_2 above level u2
"""
n1, n2 = get_coefficients(u1,u2,exp_phi_1, exp_phi_2)
this_global_p = global_pvalue(max_local_sig**2, n1, n2)
    print(' n1, n2 =', n1, n2)
    print(' local p_value = %f, local significance = %f' % (norm.cdf(-max_local_sig), max_local_sig))
    print('global p_value = %f, global significance = %f' % (this_global_p, -norm.ppf(this_global_p)))
    return this_global_p
def _check_password(request, mail_pass, uid):
"""
    [Method summary]
    Password check
"""
error_msg = {}
if len(mail_pass) <= 0:
error_msg['mailPw'] = get_message('MOSJA10004', request.user.get_lang_mode())
logger.user_log('LOSI10012', request=request)
logger.logic_log('LOSM17015', request=request)
else:
password_hash = OaseCommon.oase_hash(mail_pass)
user = User.objects.get(user_id=uid)
if not user:
error_msg['mailPw'] = get_message('MOSJA32010', request.user.get_lang_mode())
logger.user_log('LOSI10013', request=request)
logger.logic_log('LOSM17001', request=request)
if user and user.password != password_hash:
error_msg['mailPw'] = get_message('MOSJA32038', request.user.get_lang_mode())
logger.user_log('LOSI10013', request=request)
logger.logic_log('LOSM17016', request=request)
    return error_msg
def localize():
"""Return to using page-specific workers for copyvio checks.
This disables changes made by :func:`globalize`, including stoping the
global worker threads.
This function is not thread-safe and should only be called when no checks
are being done.
"""
global _is_globalized, _global_queues, _global_workers
if not _is_globalized:
return
for i in range(len(_global_workers)):
_global_queues.unassigned.put((StopIteration, None))
_global_queues = None
_global_workers = []
    _is_globalized = False
def _read_wb_indicator(indicator: str, start: int, end: int) -> pd.DataFrame:
"""Read an indicator from WB"""
    return pd.read_feather(config.paths.data + rf"/{indicator}_{start}_{end}.feather")
def pr_branches() -> list[str]:
"""List of branches that start with 'pr-'"""
out = subprocess.run(
[
"git",
"for-each-ref",
"--shell",
'--format="%(refname:strip=3)"',
"refs/remotes/origin/pr-*",
],
capture_output=True,
)
branches = out.stdout.decode().splitlines()
    return [branch.replace('"', "").replace("'", "") for branch in branches]
def scatter_raster_plot(spike_amps, spike_depths, spike_times, n_amp_bins=10, cmap='BuPu',
subsample_factor=100, display=False):
"""
Prepare data for 2D raster plot of spikes with colour and size indicative of spike amplitude
:param spike_amps:
:param spike_depths:
:param spike_times:
:param n_amp_bins: no. of colour and size bins into which to split amplitude data
:param cmap:
:param subsample_factor: factor by which to subsample data when too many points for efficient
display
:param display: generate figure
:return: ScatterPlot object, if display=True also returns matplotlib fig and ax objects
"""
amp_range = np.quantile(spike_amps, [0, 0.9])
amp_bins = np.linspace(amp_range[0], amp_range[1], n_amp_bins)
color_bin = np.linspace(0.0, 1.0, n_amp_bins + 1)
colors = (cm.get_cmap(cmap)(color_bin)[np.newaxis, :, :3][0])
spike_amps = spike_amps[0:-1:subsample_factor]
spike_colors = np.zeros((spike_amps.size, 3))
spike_size = np.zeros(spike_amps.size)
for iA in range(amp_bins.size):
if iA == (amp_bins.size - 1):
idx = np.where(spike_amps > amp_bins[iA])[0]
# Make saturated spikes the darkest colour
spike_colors[idx] = colors[-1]
else:
idx = np.where((spike_amps > amp_bins[iA]) & (spike_amps <= amp_bins[iA + 1]))[0]
spike_colors[idx] = [*colors[iA]]
spike_size[idx] = iA / (n_amp_bins / 8)
data = ScatterPlot(x=spike_times[0:-1:subsample_factor], y=spike_depths[0:-1:subsample_factor],
c=spike_amps * 1e6, cmap='BuPu')
data.set_ylim((0, 3840))
data.set_color(color=spike_colors)
data.set_clim(clim=amp_range * 1e6)
data.set_marker_size(marker_size=spike_size)
data.set_labels(title='Spike times vs Spike depths', xlabel='Time (s)',
ylabel='Distance from probe tip (um)', clabel='Spike amplitude (uV)')
if display:
fig, ax = plot_scatter(data.convert2dict())
return data.convert2dict(), fig, ax
    return data
def is_pack_real(*args):
"""
is_pack_real(F) -> bool
    Check whether the 'FF_PACKREAL' flag (packed real number) is set.
@param F (C++: flags_t)
"""
    return _ida_bytes.is_pack_real(*args)
def _get_sim205(node: ast.UnaryOp) -> List[Tuple[int, int, str]]:
"""Get a list of all calls of the type "not (a <= b)"."""
errors: List[Tuple[int, int, str]] = []
if (
not isinstance(node.op, ast.Not)
or not isinstance(node.operand, ast.Compare)
or len(node.operand.ops) != 1
or not isinstance(node.operand.ops[0], ast.LtE)
):
return errors
comparison = node.operand
left = to_source(comparison.left)
right = to_source(comparison.comparators[0])
errors.append(
(node.lineno, node.col_offset, SIM205.format(a=left, b=right))
)
    return errors
def db_table_update_column(db_name,table_name,key,values,key_ref,values_ref):
"""Update key values in table where key_ref = values_ref."""
db_commands = []
for i in range(len(values)):
value = values[i]
value_ref = values_ref[i]
db_commands.append(('UPDATE "{}" SET {}={} WHERE {}={}'.format(table_name,key,value,key_ref,value_ref)))
    db_query(db_name, db_commands)
def parse_boolean(val: str) -> Union[str, bool]:
"""Try to parse a string into boolean.
The string is returned as-is if it does not look like a boolean value.
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return True
if val in ('n', 'no', 'f', 'false', 'off', '0'):
return False
return val | 5,356,872 |
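# Quick illustration of parse_boolean() above (added example, not part of the original source).
def example_parse_boolean():
    assert parse_boolean("Yes") is True
    assert parse_boolean("off") is False
    assert parse_boolean("Maybe") == "maybe"   # unrecognised strings come back lower-cased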
def load_users(usertable):
"""
`usertable` is the path to a CSV with the following fields:
user.*
account.organisation
SELECT user.*, account.organisation FROM user LEFT JOIN account ON user.account_id = account.id;
"""
users = []
with open(usertable) as f:
reader = csv.reader(f)
next(reader) # skip headers
for row in reader:
fields = iter(row)
_ = next(fields) # id
email = next(fields)
hash = next(fields)
password_invalid = bool(int(next(fields)))
salt = next(fields)
first_name = next(fields)
last_name = next(fields)
is_verified = bool(int(next(fields)))
is_admin = bool(int(next(fields)))
_ = next(fields) # verification_uuid
_ = next(fields) # account_id
created = datetime.datetime.strptime(next(fields), DATE_FORMAT)
updated = datetime.datetime.strptime(next(fields), DATE_FORMAT)
# from user.account:
organisation = next(fields)
# not importing these:
if password_invalid or not is_verified or (is_admin and email == '[email protected]'):
print('Warning: not importing user %s' % email)
continue
encoded = reencode_password(salt, hash)
users.append(UserDef(email, encoded, first_name, last_name, organisation,
created, updated))
return users | 5,356,873 |
def get_progress_status_view(request):
"""Get progress status of a given task
Each submitted task is identified by an ID defined when the task is created
"""
if 'progress_id' not in request.params:
raise HTTPBadRequest("Missing argument")
return get_progress_status(request.params['progress_id']) | 5,356,874 |
def test_discrete_time_zeeman_switchoff_only():
"""
Check that switching off a field works even if no dt_update is
given (i.e. the field is just a pulse that is switched off after a
while).
"""
field_expr = df.Expression(("1", "2", "3"), degree=1)
H_ext = DiscreteTimeZeeman(field_expr, dt_update=None, t_off=2)
H_ext.setup(m, Field(df.FunctionSpace(m.mesh(), 'DG', 0), Ms))
assert diff(H_ext, np.array([1, 2, 3])) < TOL
assert(H_ext.switched_off == False)
H_ext.update(1)
assert diff(H_ext, np.array([1, 2, 3])) < TOL # not yet updating
assert(H_ext.switched_off == False)
H_ext.update(2.1)
assert diff(H_ext, np.array([0, 0, 0])) < TOL
assert(H_ext.switched_off == True) | 5,356,875 |
async def test_unsupported_condition_icon_data(hass):
"""Test with unsupported condition icon data."""
await init_integration(hass, forecast=True, unsupported_icon=True)
state = hass.states.get("weather.home")
assert state.attributes.get(ATTR_FORECAST_CONDITION) is None | 5,356,876 |
def pretty_param_string(param_ids: "collection") -> str:
"""Creates a nice string showing the parameters in the given collection"""
return ' '.join(sorted(param_ids, key=utilize_params_util.order_param_id)) | 5,356,877 |
def _center_crop(image, size):
"""Crops to center of image with specified `size`."""
# Reference: https://github.com/mlperf/inference/blob/master/v0.5/classification_and_detection/python/dataset.py#L144 # pylint: disable=line-too-long
height = tf.shape(image)[0]
width = tf.shape(image)[1]
out_height = size
out_width = size
# Reference code:
# left = (width - out_width) / 2
# right = (width + out_width) / 2
# top = (height - out_height) / 2
# bottom = (height + out_height) / 2
# img = img.crop((left, top, right, bottom))
offset_height = tf.to_int32((height - out_height) / 2)
offset_width = tf.to_int32((width - out_width) / 2)
image = tf.image.crop_to_bounding_box(
image,
offset_height,
offset_width,
target_height=out_height,
target_width=out_width,
)
return image | 5,356,878 |
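# Usage sketch for _center_crop() above (added illustration). The helper relies on the
# TF1-era tf.to_int32, so it is assumed to run under TensorFlow 1.x (or tf.compat.v1).
def example_center_crop():
    """Center-crop a dummy 300x400 image down to 224x224."""
    image = tf.zeros([300, 400, 3], dtype=tf.float32)
    cropped = _center_crop(image, 224)
    return cropped  # shape (224, 224, 3)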
def resample(ts, values, num_samples):
"""Convert a list of times and a list of values to evenly spaced samples with linear interpolation"""
assert np.all(np.diff(ts) > 0)
ts = normalize(ts)
return np.interp(np.linspace(0.0, 1.0, num_samples), ts, values) | 5,356,879 |
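# Usage sketch for resample() above (added illustration). It assumes the module-level
# normalize() rescales the strictly increasing timestamps linearly onto [0, 1].
def example_resample():
    """Resample three irregular samples onto five evenly spaced points."""
    ts = [0.0, 1.0, 4.0]
    values = [0.0, 10.0, 40.0]
    return resample(ts, values, num_samples=5)  # expected: array([ 0., 10., 20., 30., 40.])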
def main():
""" Main Prog """
args = get_args()
# for i in range(args.num, 0, -1):
# print(verse(i))
print('\n\n'.join(map(verse, range(args.num, 0, -1))))
# # Alternative way
# for n in range(args.num, 0, -1):
# print(verse(n), end='\n' * (2 if n > 1 else 1)) | 5,356,880 |
def test_get_encoder_for(solver_z, D_hat, algorithm, loss,
uv_constraint, feasible_evaluation):
"""Test for valid values."""
with get_z_encoder_for(solver=solver_z,
X=X,
D_hat=D_hat,
n_atoms=N_ATOMS,
atom_support=N_TIMES_ATOM,
algorithm=algorithm,
loss=loss,
uv_constraint=uv_constraint,
feasible_evaluation=feasible_evaluation,
n_jobs=2) as z_encoder:
assert z_encoder is not None | 5,356,881 |
def merge_align_moa(data_dir, cp_moa_link):
"""
    This function aligns L1000 MOAs with the Cell Painting MOAs
    and fills null MOAs in either annotation (Cell Painting or L1000)
    with the value from the other, provided both rows share the same broad sample ID.
    The function outputs an aligned L1000 MOA metadata dataframe
    that is used for further analysis.
params:
data_dir: directory that contains L1000 files
cp_moa_link: github link to cell painting MOA metadata information .csv file
Returns:
df_pertinfo: dataframe with aligned L1000 MOA metadata pertubation information.
"""
df_pertinfo_5 = pd.read_csv(os.path.join(data_dir, 'REP.A_A549_pert_info.txt'), delimiter = "\t")
df_moa_cp = pd.read_csv(cp_moa_link, sep="\t")
df_pertinfo_5 = df_pertinfo_5[['pert_id', 'pert_iname', 'moa']].copy()
df_moa_cp = df_moa_cp[['broad_id', 'pert_iname', 'moa']].copy()
df_pertinfo_5.rename(columns={"pert_id": "broad_id", "pert_iname": "pert_iname_L1000", "moa": "moa_L1000"}, inplace = True)
df_moa_cp.rename(columns={"pert_iname": "pert_iname_cell_painting", "moa": "moa_cell_painting"}, inplace = True)
df_pertinfo = pd.merge(df_pertinfo_5, df_moa_cp, on=['broad_id'], how = 'left')
##fill NaNs in columns - moa_L1000, pert_iname_L1000, with corresponding values in cell_painting and VICE VERSA
df_pertinfo['moa_L1000'].fillna(value=df_pertinfo['moa_cell_painting'], inplace=True)
df_pertinfo['moa_cell_painting'].fillna(value=df_pertinfo['moa_L1000'], inplace=True)
df_pertinfo['pert_iname_cell_painting'].fillna(value=df_pertinfo['pert_iname_L1000'], inplace=True)
for col in ['pert_iname_L1000', 'moa_L1000', 'pert_iname_cell_painting', 'moa_cell_painting']:
df_pertinfo[col] = df_pertinfo[col].apply(lambda x: x.lower())
df_pertinfo.rename(columns={"broad_id": "pert_id", "pert_iname_L1000": "pert_iname",
"moa_L1000": "moa"}, inplace = True)
df_pertinfo.drop(['pert_iname_cell_painting', 'moa_cell_painting'], axis = 1, inplace = True)
return df_pertinfo | 5,356,882 |
def supports_color(stream) -> bool: # type: ignore
"""Determine whether an output stream (e.g. stdout/stderr) supports displaying colored text.
A stream that is redirected to a file does not support color.
"""
    # Check for the attribute before calling it so non-file-like streams don't raise AttributeError.
    return hasattr(stream, "isatty") and stream.isatty() | 5,356,883 |
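# Quick illustration of supports_color() above (added example): colour is reported for an
# interactive terminal but not for a stream redirected to a file or an in-memory buffer.
def example_supports_color():
    import io
    import sys
    print(supports_color(sys.stdout))     # True in an interactive terminal, False when piped
    print(supports_color(io.StringIO()))  # False: StringIO.isatty() returns False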
def flush():
"""Flush changes made to clusto objects to the database."""
SESSION.flush() | 5,356,884 |
def parse_repo_links(
html: Union[str, bytes],
base_url: Optional[str] = None,
from_encoding: Optional[str] = None,
) -> Tuple[Dict[str, str], List[Link]]:
"""
.. versionadded:: 0.7.0
Parse an HTML page from a simple repository and return a ``(metadata,
links)`` pair.
The ``metadata`` element is a ``Dict[str, str]``. Currently, the only key
that may appear in it is ``"repository_version"``, which maps to the
repository version reported by the HTML page in accordance with :pep:`629`.
If the HTML page does not contain a repository version, this key is absent
from the `dict`.
The ``links`` element is a list of `Link` objects giving the hyperlinks
found in the HTML page.
:param html: the HTML to parse
:type html: str or bytes
:param Optional[str] base_url: an optional URL to join to the front of the
links' URLs (usually the URL of the page being parsed)
:param Optional[str] from_encoding: an optional hint to Beautiful Soup as
to the encoding of ``html`` when it is `bytes` (usually the ``charset``
parameter of the response's :mailheader:`Content-Type` header)
:rtype: Tuple[Dict[str, str], List[Link]]
:raises UnsupportedRepoVersionError: if the repository version has a
greater major component than the supported repository version
"""
soup = BeautifulSoup(html, "html.parser", from_encoding=from_encoding)
base_tag = soup.find("base", href=True)
if base_tag is not None:
if base_url is None:
base_url = base_tag["href"]
else:
base_url = urljoin(base_url, base_tag["href"])
if base_url is None:
def basejoin(url: str) -> str:
return url
else:
def basejoin(url: str) -> str:
assert isinstance(base_url, str)
return urljoin(base_url, url)
metadata = {}
pep629_meta = soup.find(
"meta",
attrs={"name": "pypi:repository-version", "content": True},
)
if pep629_meta is not None:
metadata["repository_version"] = pep629_meta["content"]
check_repo_version(metadata["repository_version"])
links = []
for link in soup.find_all("a", href=True):
links.append(
Link(
text="".join(link.strings).strip(),
url=basejoin(link["href"]),
attrs=link.attrs,
)
)
return (metadata, links) | 5,356,885 |
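# Usage sketch for parse_repo_links() above (added illustration with a hand-written page;
# the host name is a placeholder).
def example_parse_repo_links():
    """Parse a minimal PEP 503 style project page."""
    html = (
        '<html><head><meta name="pypi:repository-version" content="1.0"></head><body>'
        '<a href="packages/demo-1.0.tar.gz#sha256=abc123">demo-1.0.tar.gz</a>'
        '</body></html>'
    )
    metadata, links = parse_repo_links(html, base_url="https://example.invalid/simple/demo/")
    # metadata -> {'repository_version': '1.0'}
    # links[0].url -> 'https://example.invalid/simple/demo/packages/demo-1.0.tar.gz#sha256=abc123'
    return metadata, links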
def process_page_metadata(generator, metadata):
"""
Process page metadata and assign css
"""
global bnews_default_settings, bnews_settings
# Inject article listing
article_listing = bnews_settings['articles']
bnews_settings = copy.deepcopy(bnews_default_settings)
bnews_settings['articles'] = article_listing
if u'styles' not in metadata:
metadata[u'styles'] = []
if u'bnews' in metadata and (metadata['bnews'] == 'True' or metadata['bnews'] == 'true'):
bnews_settings['show'] = True
bnews_settings['template-variable'] = True
else:
bnews_settings['show'] = False
bnews_settings['template-variable'] = False
if u'bnews_mode' in metadata:
bnews_settings['mode'] = metadata['bnews_mode']
if u'bnews_panel_color' in metadata:
bnews_settings['panel-color'] = metadata['bnews_panel_color']
if u'bnews_header' in metadata:
bnews_settings['header'] = metadata['bnews_header']
if u'bnews_header_link' in metadata:
bnews_settings['header-link'] = metadata['bnews_header_link']
if u'bnews_count' in metadata:
bnews_settings['count'] = int(metadata['bnews_count'])
if u'bnews_category' in metadata:
bnews_settings['category'] = metadata['bnews_category']
if u'bnews_show_categories' in metadata:
bnews_settings['show-categories'] = metadata['bnews_show_categories']
if u'bnews_show_summary' in metadata:
bnews_settings['show-summary'] = metadata['bnews_show_summary'] | 5,356,886 |
def calc_kappa4Franci(T_K, a_H, a_H2CO3s):
"""
Calculates kappa4 in the PWP equation using approach from Franci's code.
Parameters
----------
T_K : float
temperature Kelvin
a_H : float
activity of hydrogen (mol/L)
a_H2CO3s : float
activity of carbonic acid (mol/L)
Returns
-------
kappa4 : float
constant kappa4 in the PWP equation (cm^4/mmol/s)
Notes
-----
See more info under documentation for pwpRateFranci().
"""
K_2 = calc_K_2(T_K)
K_c = calc_K_c(T_K)
kappa1 = calc_kappa1(T_K)
kappa2 = calc_kappa2(T_K)
kappa3 = calc_kappa3(T_K)
kappa4 = (K_2/K_c)*(kappa1 + 1/a_H*(kappa2*a_H2CO3s + kappa3) )
return kappa4 | 5,356,887 |
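# Usage sketch for calc_kappa4Franci() above (added illustration; the activities are made-up
# but plausible karst-water values, and the calc_* helpers are assumed to live in this module).
def example_calc_kappa4():
    """kappa4 for 15 C water at pH 7 with a small carbonic acid activity."""
    T_K = 273.15 + 15.0
    a_H = 1.0e-7        # mol/L, i.e. pH 7
    a_H2CO3s = 1.0e-5   # mol/L
    return calc_kappa4Franci(T_K, a_H, a_H2CO3s)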
def get_candidates_from_single_line(single_line_address, out_spatial_reference, max_locations):
""" parses the single line address and passes it to the AGRC geocoding service
and then returns the results as an array of candidates
"""
try:
parsed_address = Address(single_line_address)
except Exception:
return []
return make_request(
parsed_address.normalized, parsed_address.zip_code or parsed_address.city, out_spatial_reference, max_locations
) | 5,356,888 |
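# Usage sketch for get_candidates_from_single_line() above (added illustration; the address
# is fictional and 26912 is the NAD83 / UTM zone 12N spatial reference commonly used with
# the AGRC service).
def example_geocode_single_line():
    return get_candidates_from_single_line(
        "123 S Main St, Salt Lake City 84101", out_spatial_reference=26912, max_locations=5
    )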
def create_store():
"""Gathers all the necessary info to create a new store"""
print("What is the name of the store?")
store_name = raw_input('> ')
return receipt.Store(store_name) | 5,356,889 |
def set_retention_policy(bucket_name, retention_period):
"""Defines a retention policy on a given bucket"""
# [START storage_set_retention_policy]
# bucket_name = "my-bucket"
# retention_period = 10
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
bucket.retention_period = retention_period
bucket.patch()
print('Bucket {} retention period set for {} seconds'.format(
bucket.name,
bucket.retention_period))
# [END storage_set_retention_policy] | 5,356,890 |
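# Quick illustration of set_retention_policy() above (added example; the bucket name is a
# placeholder and the call requires Google Cloud credentials with permission to edit the bucket).
def example_set_retention_policy():
    """Retain objects in a bucket for one day (86400 seconds)."""
    set_retention_policy("my-example-bucket", 86400)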
def vgg11_bn_vib(cutting_layer, logger, num_client = 1, num_class = 10, initialize_different = False, adds_bottleneck = False, bottleneck_option = "C8S1"):
"""VGG 11-layer model (configuration "A") with batch normalization"""
return VGG_vib(make_layers(cutting_layer,cfg['A'], batch_norm=True, adds_bottleneck = adds_bottleneck, bottleneck_option = bottleneck_option), logger, num_client = num_client, num_class = num_class, initialize_different = initialize_different) | 5,356,891 |
def test_command_line_tool_activate_server(mock_main_runner, mock_requests):
"""Test activating a server"""
runner = mock_main_runner
context = mock_main_runner.get_context()
assert context.get_active_server().name == "testserver"
result = runner.invoke(
entrypoint.cli, "server activate testserver2", catch_exceptions=False
)
assert "Set active server to" in result.output
assert context.get_active_server().name == "testserver2"
    # activating a non-existent server name should just give a nice message, not crash
result = runner.invoke(entrypoint.cli, "server activate yomomma")
assert "Invalid value" in result.output | 5,356,892 |
def pytest_sessionstart(session):
"""
pytest hook to configure plugin.
"""
config = session.config
# Get registered options
platform = config.getoption('--topology-platform')
plot_format = config.getoption('--topology-plot-format')
plot_dir = config.getoption('--topology-plot-dir')
nml_dir = config.getoption('--topology-nml-dir')
injection_file = config.getoption('--topology-inject')
log_dir = config.getoption('--topology-log-dir')
szn_dir = config.getoption('--topology-szn-dir')
platform_options = config.getoption('--topology-platform-options')
build_retries = config.getoption('--topology-build-retries')
if build_retries < 0:
raise Exception('--topology-build-retries can\'t be less than 0')
def create_dir(path):
if path:
if not isabs(path):
path = join(abspath(getcwd()), path)
if not exists(path):
makedirs(path)
# Determine plot, NML and log directory paths and create them if required
create_dir(plot_dir)
create_dir(nml_dir)
create_dir(log_dir)
# Parse attributes injection file
from pyszn.injection import parse_attribute_injection
injected_attr = None
if injection_file is not None:
log.info('Processing attribute injection...')
start_time = time()
# Get a list of all testing directories
search_paths = [
realpath(arg) for arg in config.args if isdir(arg)
]
injected_attr = parse_attribute_injection(
injection_file,
search_paths=search_paths,
ignored_paths=config.getini('norecursedirs'),
szn_dir=szn_dir
)
log.info(
'Attribute injection completed after {}s'
.format(time() - start_time)
)
# Create and register plugin
config._topology_plugin = TopologyPlugin(
platform,
plot_dir,
plot_format.lstrip('.'),
nml_dir,
injected_attr,
log_dir,
szn_dir,
parse_options(platform_options),
build_retries
)
config.pluginmanager.register(config._topology_plugin)
# Add test_id marker
config.addinivalue_line(
'markers',
'test_id(id): assign a test identifier to the test'
)
    # Add platform_incompatible marker
config.addinivalue_line(
'markers',
'platform_incompatible(platforms, reason=None): '
'mark a test as incompatible with a list of platform engines. '
'Optionally specify a reason for better reporting'
) | 5,356,893 |
def test_synchrotron_lum(particle_dists):
"""
test sync calculation
"""
from ..models import Synchrotron
ECPL,PL,BPL = particle_dists
lum_ref = [2.523130675e-04,
1.689956354e-02,
3.118110763e-04]
We_ref = [8.782070535e+09,
1.443896523e+10,
1.056827286e+09]
Wes = []
lsys = []
for pdist in particle_dists:
sy = Synchrotron(pdist)
Wes.append(sy.We.to('erg').value)
lsy = trapz_loglog(sy.spectrum(energy) * energy, energy).to('erg/s')
assert(lsy.unit == u.erg / u.s)
lsys.append(lsy.value)
assert_allclose(lsys, lum_ref)
assert_allclose(Wes, We_ref)
sy = Synchrotron(ECPL,B=1*u.G)
lsy = trapz_loglog(sy.spectrum(energy) * energy, energy).to('erg/s')
assert(lsy.unit == u.erg / u.s)
assert_allclose(lsy.value, 31668900.60668014) | 5,356,894 |
def perform_init_checks(wrapper, input_list, input_mapper, in_output_list, output_list,
param_list, mapper_list, short_name, level_names):
"""Perform checks on objects created by running or slicing an indicator."""
if input_mapper is not None:
checks.assert_equal(input_mapper.shape[0], wrapper.shape_2d[1])
for ts in input_list:
checks.assert_equal(ts.shape[0], wrapper.shape_2d[0])
for ts in in_output_list + output_list:
checks.assert_equal(ts.shape, wrapper.shape_2d)
for params in param_list:
checks.assert_len_equal(param_list[0], params)
for mapper in mapper_list:
checks.assert_equal(len(mapper), wrapper.shape_2d[1])
checks.assert_type(short_name, str)
checks.assert_len_equal(level_names, param_list) | 5,356,895 |
def get_conf_paths(project_metadata):
"""
Get conf paths using the default kedro patterns, and the CONF_ROOT
directory set in the projects settings.py
"""
configure_project(project_metadata.package_name)
session = KedroSession.create(project_metadata.package_name)
_activate_session(session, force=True)
context = session.load_context()
pats = ("catalog*", "catalog*/**", "**/catalog*")
conf_paths = context.config_loader._lookup_config_filepaths(Path(context.config_loader.conf_paths[0]), pats, set())
return conf_paths | 5,356,896 |
def _mat_ptrs(a):
"""Creates an array of pointers to matrices
Args:
a: A batch of matrices on GPU
Returns:
GPU array of pointers to matrices
"""
return cuda.to_gpu(numpy.arange(
a.ptr, a.ptr + a.shape[0] * a.strides[0], a.strides[0],
dtype=ctypes.c_void_p)) | 5,356,897 |
def file_upload_quota_broken(request):
"""
You can't change handlers after reading FILES; this view shouldn't work.
"""
response = file_upload_echo(request)
request.upload_handlers.insert(0, QuotaUploadHandler())
return response | 5,356,898 |
def print_error(e, print_traceback=False):
"""Logs error to stdout, so it's not only shown to the user through streamlit."""
print()
print("=" * 80)
print(f"ERROR for user {username}:", e)
if print_traceback:
print()
traceback.print_exc()
print("=" * 80)
print() | 5,356,899 |