content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
import chimera
def load_structure(query, reduce=True, strip='solvent&~@/pseudoBonds'):
    """
    Load a structure in Chimera. The query can be anything accepted by Chimera's `open` command.
    Parameters
    ----------
query : str
Path to molecular file, or special query for Chimera's open (e.g. pdb:3pk2).
reduce : bool
Add hydrogens to structure. Defaults to True.
strip : str
Chimera selection spec that will be removed. Defaults to solvent&~@/pseudoBonds
(solvent that is not attached to a metal ion).
"""
print('Opening', query)
chimera.runCommand('open ' + query)
m = chimera.openModels.list()[0]
m.setAllPDBHeaders({})
if strip:
print(' Removing {}...'.format(strip))
chimera.runCommand('del ' + strip)
if reduce:
print(' Adding hydrogens...')
chimera.runCommand('addh')
return m | d91ceeba36eb04e33c238ab2ecb88ba2cc1928c7 | 3,657,342 |
from sqlparse import tokens as T
def is_into_keyword(token):
"""
    Check whether the token is the INTO keyword.
"""
return token.match(T.Keyword, "INTO") | 337fb0062dc4288aad8ac715efcca564ddfad113 | 3,657,343 |
from typing import Union
def exp(
value: Union[Tensor, MPCTensor, int, float], iterations: int = 8
) -> Union[MPCTensor, float, Tensor]:
"""Approximates the exponential function using a limit approximation.
exp(x) = lim_{n -> infty} (1 + x / n) ^ n
Here we compute exp by choosing n = 2 ** d for some large d equal to
iterations. We then compute (1 + x / n) once and square `d` times.
Args:
value: tensor whose exp is to be calculated
iterations (int): number of iterations for limit approximation
Ref: https://github.com/LaRiffle/approximate-models
Returns:
MPCTensor: the calculated exponential of the given tensor
"""
result = (value / 2**iterations) + 1
for _ in range(iterations):
result = result * result
return result | 9cfbb63d39d41e92b506366244ec6e77d52162b2 | 3,657,344 |
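
A quick sanity check of the limit approximation above, sketched with plain Python floats rather than MPC tensors (hypothetical usage, not part of the original snippet):

import math

def exp_float(value, iterations=8):
    # same scheme as above: compute (1 + x / 2**d) once, then square d times
    result = (value / 2 ** iterations) + 1
    for _ in range(iterations):
        result = result * result
    return result

print(exp_float(1.0))   # ~2.713 with the default 8 iterations
print(math.exp(1.0))    # 2.718281828459045
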
def isDllInCorrectPath():
"""
Returns True if the BUFFY DLL is present and in the correct location (...\<BTS>\Mods\<BUFFY>\Assets\).
"""
return IS_DLL_IN_CORRECT_PATH | ea31391d41ba04b27df70124a65fdb48791cce57 | 3,657,346 |
import time
def time_remaining(event_time):
"""
Args:
event_time (time.struct_time): Time of the event.
Returns:
        float: Time remaining between now and the event, in seconds.
"""
now = time.localtime()
time_remaining = time.mktime(event_time) - time.mktime(now)
return time_remaining | cb3dfcf916cffc3b45f215f7642aeac8a1d6fef7 | 3,657,347 |
import numpy as np
def _repeat(values, count):
"""Produces a list of lists suitable for testing interleave.
Args:
values: for each element `x` the result contains `[x] * x`
count: determines how many times to repeat `[x] * x` in the result
Returns:
A list of lists of values suitable for testing interleave.
"""
return [[value] * value for value in np.tile(values, count)] | 46aa7899e7ed536525b7a94675edf89958f6f37f | 3,657,348 |
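
For illustration, a hypothetical call (assuming the NumPy import added above and `_repeat` in scope):

# np.tile([1, 2], 2) -> array([1, 2, 1, 2]), and each x expands to [x] * x
print(_repeat([1, 2], 2))  # [[1], [2, 2], [1], [2, 2]]
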
from functools import reduce
def P2D_l_TAN(df, cond, attr): # P(attr | 'target', cond)
"""Calcule la probabilité d'un attribut sachant la classe et un autre attribut.
Parameters
----------
df : pandas.DataFrame
La base d'examples.
cond : str
Le nom de l'attribut conditionnant.
attr : str
Le nom de l'attribut conditionné.
Returns
-------
dict of (int, number): (dict of number: float)
Un dictionnaire associant au couple (`t`, `c`), de classe `t` et de valeur
d'attribut conditionnant `c`, un dictionnaire qui associe à
la valeur `a` de l'attribut conditionné la probabilité
.. math:: P(attr=a|target=t,cond=c).
"""
joint_target_cond_attr = getJoint(df, ['target', cond, attr])
joint_target_cond = getJoint(df, ['target', cond])
raw_dico = dict(divide(joint_target_cond_attr, joint_target_cond))
dicos = [{(k_t, k_c): {k_a: proba}}
for (k_t, k_c, k_a), proba in raw_dico.items()]
res = {}
reduce(reduce_update, [res] + dicos)
return res | 88affcaea0368c400ccd25356d97a25c9a88a15e | 3,657,349 |
def has_no_jump(bigram, peaks_groundtruth):
"""
    Tell whether the two components of the bigram are identical or successive in the sequence of valid peaks.
    For example, if groundtruth = [1,2,3], [1,1] and [2,3] have no jump but [1,3] has a jump.
bigram : the bigram to judge
peaks_groundtruth : the list of valid peaks
Return boolean
"""
assert len(bigram) == 2
if len(set(bigram)) == 1:
return True
sorted_groundtruth = sorted(peaks_groundtruth)
sorted_peaks = sorted(list(bigram))
begin = sorted_groundtruth.index(sorted_peaks[0])
end = begin+len(sorted_peaks)
return sorted_peaks == sorted_groundtruth[begin:end] | e334c389436d5cda2642f8ac7629b64074dcd0e0 | 3,657,350 |
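
A few hypothetical calls matching the docstring example (assuming `has_no_jump` from the snippet above):

groundtruth = [1, 2, 3]
print(has_no_jump((1, 1), groundtruth))  # True  (same peak twice)
print(has_no_jump((2, 3), groundtruth))  # True  (successive peaks)
print(has_no_jump((1, 3), groundtruth))  # False (skips peak 2)
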
import base64
def Base64WSDecode(s):
"""
Return decoded version of given Base64 string. Ignore whitespace.
Uses URL-safe alphabet: - replaces +, _ replaces /. Will convert s of type
unicode to string type first.
@param s: Base64 string to decode
@type s: string
@return: original string that was encoded as Base64
@rtype: bytes
@raise Base64DecodingError: If length of string (ignoring whitespace) is one
more than a multiple of four.
"""
s = RawString(s) # Base64W decode can only work with strings
s = ''.join(s.splitlines())
s = str(s.replace(" ", "")) # kill whitespace, make string (not unicode)
d = len(s) % 4
if d == 1:
raise kzr_errors.Base64DecodingError()
elif d == 2:
s += "=="
elif d == 3:
s += "="
s = RawBytes(s)
try:
return base64.urlsafe_b64decode(s)
except TypeError:
# Decoding raises TypeError if s contains invalid characters.
raise kzr_errors.Base64DecodingError() | 67db2d3f298e0220411f224299dcb20feeba5b3e | 3,657,351 |
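
The padding rule being reimplemented above can be illustrated with the standard library alone (a hedged sketch; `RawString`, `RawBytes` and `kzr_errors` belong to the original project):

import base64

s = "aGVsbG8"                      # URL-safe Base64 of b"hello" with padding stripped
d = len(s) % 4                     # 3 -> one '=' restores valid padding (d == 1 would be an error)
print(base64.urlsafe_b64decode(s + "=" * (4 - d) if d else s))  # b'hello'
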
from tkinter import Tk
def make_window():
    """create the window"""
    window = Tk()
window.title("Pac-Man")
window.geometry("%dx%d+%d+%d" % (
WINDOW_WIDTH,
WINDOW_HEIGHT,
X_WIN_POS,
Y_WIN_POS
)
)
return window | 1e9ecb5acf91e75797520c54be1087d24392f190 | 3,657,352 |
def hasf(e):
"""
Returns a function which if applied with `x` tests whether `x` has `e`.
Examples
--------
    >>> list(filter(hasf("."), ['statement', 'A sentence.']))
['A sentence.']
"""
return lambda x: e in x | ac9ce7cf2ed2ee8a050acf24a8d0a3b95b7f2d50 | 3,657,354 |
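
Note that in Python 3 `filter` returns an iterator, so the doctest above wraps it in `list`; a hypothetical usage:

print(list(filter(hasf("."), ['statement', 'A sentence.'])))  # ['A sentence.']
print(hasf("sent")("A sentence."))                            # True
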
def borehole_model(x, theta):
"""Given x and theta, return matrix of [row x] times [row theta] of values."""
return f | 9ccfd530ff162d5f2ec786757ec03917f3367635 | 3,657,355 |
def findNodesOnHostname(hostname):
"""Return the list of nodes name of a (non-dmgr) node on the given hostname, or None
Function parameters:
hostname - the hostname to check, with or without the domain suffix
"""
m = "findNodesOnHostname:"
nodes = []
for nodename in listNodes():
if hostname.lower() == getNodeHostname(nodename).lower():
sop(m, "Found node %s which is on %s" % (nodename, hostname))
nodes.append(nodename)
#endif
#endfor
    # Also try matching without the domain - z/OS systems might not have domain configured
shorthostname = hostname.split(".")[0].lower()
for nodename in listNodes():
shortnodehostname = getNodeHostname(nodename).split(".")[0].lower()
if shortnodehostname == shorthostname:
if nodename in nodes :
sop(m, "Node name %s was already found with the domain attached" % nodename)
else :
nodes.append(nodename)
sop(m, "Found node %s which is on %s" % (nodename, hostname))
#endif
#endif
#endfor
if len(nodes) == 0 :
sop(m,"WARNING: Unable to find any node with the hostname %s (not case-sensitive)" % hostname)
sop(m,"HERE are the hostnames that your nodes think they're on:")
for nodename in listNodes():
sop(m,"\tNode %s: hostname %s" % (nodename, getNodeHostname(nodename)))
#endfor
return None
else :
return nodes
#endif | 3a4f28d5fa8c72388cb81d40913e517d343834f0 | 3,657,356 |
def MakeControlClass( controlClass, name = None ):
"""Given a CoClass in a generated .py file, this function will return a Class
object which can be used as an OCX control.
This function is used when you do not want to handle any events from the OCX
control. If you need events, then you should derive a class from both the
activex.Control class and the CoClass
"""
if name is None:
name = controlClass.__name__
return new_type("OCX" + name, (Control, controlClass), {}) | 634544543027b1870bb72544517511d4f7b08e39 | 3,657,357 |
def obtenTipoNom(linea):
""" Obtiene por ahora la primera palabra del título, tendría que regresar de que se trata"""
res = linea.split('\t')
return res[6].partition(' ')[0] | 73edc42c5203b7ebd0086876096cdd3b7c65a54c | 3,657,358 |
import numpy as np
def histogramfrom2Darray(array, nbins):
"""
Creates histogram of elements from 2 dimensional array
:param array: input 2 dimensional array
:param nbins: number of bins so that bin size = (maximum value in array - minimum value in array) / nbins
the motivation for returning this array is for the purpose of easily plotting with matplotlib
:return: list of three elements:
list[0] = length nbins list of integers, a histogram of the array elements
list[1] = length nbins list of values of array element types, values of the lower end of the bins
list[2] = [minimum in list, maximum in list]
this is just good to know sometimes.
"""
#find minimum
minimum = np.min(array)
    #find maximum
maximum = np.max(array)
#compute bin size
binsize = (maximum - minimum) / nbins
#create bin array
bins = [minimum + binsize * i for i in range(nbins)]
histo = [0 for b in range(nbins)]
    for x in array:
        for y in x:
            # 0-based index of the bin containing y; clamp the maximum value into the last bin
            index = min(int((y - minimum) / binsize), nbins - 1)
            histo[index] += 1
return [histo, bins, [minimum, maximum]] | 2c377b926b4708b6a6b29d400ae82b8d2931b938 | 3,657,359 |
def build_pert_reg(unsupervised_regularizer, cut_backg_noise=1.0,
cut_prob=1.0, box_reg_scale_mode='fixed',
box_reg_scale=0.25, box_reg_random_aspect_ratio=False,
cow_sigma_range=(4.0, 8.0), cow_prop_range=(0.0, 1.0),):
"""Build perturbation regularizer."""
if unsupervised_regularizer == 'none':
unsup_reg = None
augment_twice = False
elif unsupervised_regularizer == 'mt':
unsup_reg = regularizers.IdentityRegularizer()
augment_twice = False
elif unsupervised_regularizer == 'aug':
unsup_reg = regularizers.IdentityRegularizer()
augment_twice = True
elif unsupervised_regularizer == 'cutout':
unsup_reg = regularizers.BoxMaskRegularizer(
cut_backg_noise, cut_prob, box_reg_scale_mode, box_reg_scale,
box_reg_random_aspect_ratio)
augment_twice = False
elif unsupervised_regularizer == 'aug_cutout':
unsup_reg = regularizers.BoxMaskRegularizer(
cut_backg_noise, cut_prob, box_reg_scale_mode, box_reg_scale,
box_reg_random_aspect_ratio)
augment_twice = True
elif unsupervised_regularizer == 'cowout':
unsup_reg = regularizers.CowMaskRegularizer(
cut_backg_noise, cut_prob, cow_sigma_range, cow_prop_range)
augment_twice = False
elif unsupervised_regularizer == 'aug_cowout':
unsup_reg = regularizers.CowMaskRegularizer(
cut_backg_noise, cut_prob, cow_sigma_range, cow_prop_range)
augment_twice = True
else:
        raise ValueError('Unknown unsupervised_regularizer \'{}\''.format(
            unsupervised_regularizer))
return unsup_reg, augment_twice | 37d60049146c876d423fea6615cf43975f1ae389 | 3,657,360 |
def part_5b_avg_std_dev_of_replicates_analysis_completed(*jobs):
"""Check that the initial job data is written to the json files."""
file_written_bool_list = []
all_file_written_bool_pass = False
for job in jobs:
data_written_bool = False
if job.isfile(
f"../../src/engines/gomc/averagesWithinReplicatez.txt"
) and job.isfile(f"../../src/engines/gomc/setAverages.txt"):
data_written_bool = True
file_written_bool_list.append(data_written_bool)
if False not in file_written_bool_list:
all_file_written_bool_pass = True
return all_file_written_bool_pass | f238382e18de32b86598d5daa13f92af01311d3d | 3,657,361 |
def exportFlatClusterData(filename, root_dir, dataset_name, new_row_header,new_column_header,xt,ind1,ind2,display):
""" Export the clustered results as a text file, only indicating the flat-clusters rather than the tree """
filename = string.replace(filename,'.pdf','.txt')
export_text = export.ExportFile(filename)
column_header = string.join(['UID','row_clusters-flat']+new_column_header,'\t')+'\n' ### format column-names for export
export_text.write(column_header)
column_clusters = string.join(['column_clusters-flat','']+ map(str, ind2),'\t')+'\n' ### format column-flat-clusters for export
export_text.write(column_clusters)
### The clusters, dendrogram and flat clusters are drawn bottom-up, so we need to reverse the order to match
#new_row_header = new_row_header[::-1]
#xt = xt[::-1]
try: elite_dir = getGOEliteExportDir(root_dir,dataset_name)
except Exception: elite_dir = None
elite_columns = string.join(['InputID','SystemCode'])
try: sy = systemCodeCheck(new_row_header)
except Exception: sy = None
### Export each row in the clustered data matrix xt
i=0
cluster_db={}
export_lines = []
for row in xt:
id = new_row_header[i]
if sy == '$En:Sy':
cluster = 'cluster-'+string.split(id,':')[0]
elif sy == 'S' and ':' in id:
cluster = 'cluster-'+string.split(id,':')[0]
elif sy == 'Sy' and ':' in id:
cluster = 'cluster-'+string.split(id,':')[0]
else:
cluster = 'c'+str(ind1[i])
try: cluster_db[cluster].append(new_row_header[i])
except Exception: cluster_db[cluster] = [new_row_header[i]]
export_lines.append(string.join([new_row_header[i],str(ind1[i])]+map(str, row),'\t')+'\n')
i+=1
### Reverse the order of the file
export_lines.reverse()
for line in export_lines:
export_text.write(line)
export_text.close()
### Export GO-Elite input files
allGenes={}
for cluster in cluster_db:
export_elite = export.ExportFile(elite_dir+'/'+cluster+'.txt')
if sy==None:
export_elite.write('ID\n')
else:
export_elite.write('ID\tSystemCode\n')
for id in cluster_db[cluster]:
if sy == '$En:Sy':
id = string.split(id,':')[1]
ids = string.split(id,' ')
if 'ENS' in ids[0]: id = ids[0]
else: id = ids[-1]
sc = 'En'
elif sy == 'Sy' and ':' in id:
id = string.split(id,':')[1]
ids = string.split(id,' ')
sc = 'Sy'
elif sy == 'En:Sy':
id = string.split(id,' ')[0]
sc = 'En'
elif sy == 'Ae':
l = string.split(id,':')
if len(l)==2:
id = string.split(id,':')[0] ### Use the Ensembl
if len(l) == 3:
id = string.split(id,':')[1] ### Use the Ensembl
sc = 'En'
else:
sc = sy
if sy == 'S':
if ':' in id:
id = string.split(id,':')[-1]
sc = 'Ae'
try: export_elite.write(id+'\t'+sc+'\n')
except Exception: export_elite.write(id+'\n') ### if no System Code known
allGenes[id]=[]
export_elite.close()
try:
if storeGeneSetName != None:
if len(storeGeneSetName)>0 and 'driver' not in justShowTheseIDs:
exportCustomGeneSet(storeGeneSetName,species,allGenes)
print 'Exported geneset to "StoredGeneSets"'
except Exception: pass
### Export as CDT file
filename = string.replace(filename,'.txt','.cdt')
if display:
try: exportJTV(filename, new_column_header, new_row_header)
except Exception: pass
export_cdt = export.ExportFile(filename)
column_header = string.join(['UNIQID','NAME','GWEIGHT']+new_column_header,'\t')+'\n' ### format column-names for export
export_cdt.write(column_header)
eweight = string.join(['EWEIGHT','','']+ ['1']*len(new_column_header),'\t')+'\n' ### format column-flat-clusters for export
export_cdt.write(eweight)
### Export each row in the clustered data matrix xt
i=0; cdt_lines=[]
for row in xt:
cdt_lines.append(string.join([new_row_header[i]]*2+['1']+map(str, row),'\t')+'\n')
i+=1
### Reverse the order of the file
cdt_lines.reverse()
for line in cdt_lines:
export_cdt.write(line)
export_cdt.close()
return elite_dir, filename | f9ade521b67c87518741fb56fb1c80df0961065a | 3,657,362 |
def indent_multiline(s: str, indentation: str = " ", add_newlines: bool = True) -> str:
"""Indent the given string if it contains more than one line.
Args:
s: String to indent
indentation: Indentation to prepend to each line.
add_newlines: Whether to add newlines surrounding the result
if indentation was added.
"""
lines = s.splitlines()
if len(lines) <= 1:
return s
lines_str = "\n".join(f"{indentation}{line}" for line in lines)
if add_newlines:
return f"\n{lines_str}\n"
else:
return lines_str | 62eb2fc7c3f3b493a6edc009692f472e50e960f7 | 3,657,363 |
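
A hypothetical usage sketch (assuming `indent_multiline` from the snippet above):

print(repr(indent_multiline("one line")))     # 'one line' -- single lines pass through unchanged
print(repr(indent_multiline("a\nb", "  ")))   # '\n  a\n  b\n' -- indented and wrapped in newlines
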
from typing import Optional
def _get_property(self, key: str, *, offset: int = 0) -> Optional[int]:
"""Get a property from the location details.
:param key: The key for the property
:param offset: Any offset to apply to the value (if found)
:returns: The property as an int value if found, None otherwise
"""
value = self.location_details.get(key)
if value is None:
return None
return int(value[0]) + offset | 8d2c35a88810db5255cfb0ca9d7bfa6345ff3276 | 3,657,364 |
import numpy as np
from sklearn.decomposition import PCA
def pca_normalization(points):
"""Projects points onto the directions of maximum variance."""
points = np.transpose(points)
pca = PCA(n_components=len(np.transpose(points)))
points = pca.fit_transform(points)
return np.transpose(points) | 753bea2546341fc0be3e7cf4fd444b3ee93378f9 | 3,657,365 |
def _reformTrend(percs, inits):
"""
Helper function to recreate original trend based on percent change data.
"""
trend = []
trend.append(percs[0])
for i in range(1, len(percs)):
newLine = []
newLine.append(percs[i][0]) #append the date
for j in range(1, len(percs[i])): #for each term on date
level = float(trend[i-1][j]) * percs[i][j].numerator / percs[i][j].denominator #level is the prev level * %change
newLine.append(level)
trend.append(newLine)
return trend | 1f6c8bbb4786b53ea2c06643108ff50691b6f89c | 3,657,366 |
def PET_initialize_compression_structure(N_axial,N_azimuthal,N_u,N_v):
"""Obtain 'offsets' and 'locations' arrays for fully sampled PET compressed projection data. """
descriptor = [{'name':'N_axial','type':'uint','value':N_axial},
{'name':'N_azimuthal','type':'uint','value':N_azimuthal},
{'name':'N_u','type':'uint','value':N_u},
{'name':'N_v','type':'uint','value':N_v},
{
'name':'offsets','type':'array','value':None,
'dtype':np.int32,'size':(N_azimuthal,N_axial),
'order':'F'
},
{
'name':'locations','type':'array','value':None,
'dtype':np.uint16,
'size':(3,N_u * N_v * N_axial * N_azimuthal),'order':'F'
},
]
r = call_c_function(niftyrec_c.PET_initialize_compression_structure,
descriptor)
if not r.status == status_success():
raise ErrorInCFunction(
"The execution of 'PET_initialize_compression_structure' was unsuccessful.",
r.status,
'niftyrec_c.PET_initialize_compression_structure')
return [r.dictionary['offsets'],r.dictionary['locations']] | 1f879517182462d8b66886aa43a4103a05a5b6f9 | 3,657,367 |
def get_client_from_user_settings(settings_obj):
"""Same as get client, except its argument is a DropboxUserSettingsObject."""
return get_client(settings_obj.owner) | 4b2c2e87310464807bf6f73d1ff8d7b7c21731ff | 3,657,368 |
def train_student(
model,
dataset,
test_data,
test_labels,
nb_labels,
nb_teachers,
stdnt_share,
lap_scale,
):
"""This function trains a student using predictions made by an ensemble of
teachers. The student and teacher models are trained using the same neural
network architecture.
:param dataset: string corresponding to mnist, cifar10, or svhn
:param nb_teachers: number of teachers (in the ensemble) to learn from
:return: True if student training went well
"""
# Call helper function to prepare student data using teacher predictions
stdnt_dataset = prepare_student_data(
model,
dataset,
test_data,
test_labels,
nb_labels,
nb_teachers,
stdnt_share,
lap_scale,
)
# Unpack the student dataset
stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels = stdnt_dataset
# Prepare checkpoint filename and path
filename = str(dataset) + "_" + str(nb_teachers) + "_student.ckpt"
stdnt_prep = PrepareData(stdnt_data, stdnt_labels)
stdnt_loader = DataLoader(stdnt_prep, batch_size=64, shuffle=False)
stdnt_test_prep = PrepareData(stdnt_test_data, stdnt_test_labels)
stdnt_test_loader = DataLoader(stdnt_test_prep, batch_size=64, shuffle=False)
# Start student training
train(model, stdnt_loader, stdnt_test_loader, ckpt_path, filename)
    # Compute student predictions from the trained checkpoint
student_preds = softmax_preds(
model, nb_labels, stdnt_test_loader, ckpt_path + filename
)
    # Compute the student's accuracy on the test data
precision = accuracy(student_preds, stdnt_test_labels)
print("\nPrecision of student after training: " + str(precision))
return True | de8db38bde151f5dd65b93a0c8a44c2289351f81 | 3,657,369 |
import numpy
import algopy
def create_transition_matrix_numeric(mu, d, v):
"""
Use numerical integration.
This is not so compatible with algopy because it goes through fortran.
Note that d = 2*h - 1 following Kimura 1957.
The rate mu is a catch-all scaling factor.
The finite distribution v is assumed to be a stochastic vector.
@param mu: scales the rate matrix
@param d: dominance (as opposed to recessiveness) of preferred states.
@param v: numpy array defining a distribution over states
@return: transition matrix
"""
# Construct the numpy matrix whose entries
# are differences of log equilibrium probabilities.
# Everything in this code block is pure numpy.
F = numpy.log(v)
e = numpy.ones_like(F)
S = numpy.outer(e, F) - numpy.outer(F, e)
# Create the rate matrix Q and return its matrix exponential.
# Things in this code block may use algopy if mu and d
# are bundled with truncated Taylor information.
D = d * numpy.sign(S)
pre_Q = numpy.vectorize(numeric_fixation)(0.5*S, D)
pre_Q = mu * pre_Q
Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
P = algopy.expm(Q)
return P | a60a3da34089fffe2a48cc282ea4cbb528454fd6 | 3,657,370 |
def channelmap(stream: Stream, *args, **kwargs) -> FilterableStream:
"""https://ffmpeg.org/ffmpeg-filters.html#channelmap"""
return filter(stream, channelmap.__name__, *args, **kwargs) | 8293e9004fd4dfb7ff830e477dcee4de5d163a5d | 3,657,372 |
def test_token(current_user: DBUser = Depends(get_current_user)):
"""
Test access-token
"""
return current_user | 1ceb90c1321e358124520ab5b1b1ecb07de4619d | 3,657,373 |
def process_label_imA(im):
"""Crop a label image so that the result contains
all labels, then return separate images, one for
each label.
Returns a dictionary of images and corresponding
labels (for choosing colours), also a scene bounding
box. Need to run shape statistics to determine
the number of labels and the IDs
"""
# stuff to figure out which way we slice, etc
isoidx = check_isotropy(im)
otheridx = [0, 1, 2]
otheridx.remove(isoidx)
direction = get_direction(im, isoidx)
sp = im.GetSpacing()
sp = str2ds(sp)
spacing = [sp[i] for i in otheridx]
slthickness = sp[isoidx]
labstats = sitk.LabelShapeStatisticsImageFilter()
labstats.Execute(im)
labels = labstats.GetLabels()
boxes = [labstats.GetBoundingBox(i) for i in labels]
# Need to compute bounding box for all labels, as
# this will set the row/colums
# boxes are corner and size - this code assumes 3D
corners = [(x[0], x[1], x[2]) for x in boxes]
othercorner = [(x[0] + x[3] - 1,
x[1] + x[4] - 1,
x[2] + x[5] - 1) for x in boxes]
sizes = [(x[3], x[4], x[5]) for x in boxes]
all_low_x = [C[0] for C in corners]
all_low_y = [C[1] for C in corners]
all_low_z = [C[2] for C in corners]
low_x = min(all_low_x)
low_y = min(all_low_y)
low_z = min(all_low_z)
lowcorner = (low_x, low_y, low_z)
all_high_x = [C[0] for C in othercorner]
all_high_y = [C[1] for C in othercorner]
all_high_z = [C[2] for C in othercorner]
high_x = max(all_high_x)
high_y = max(all_high_y)
high_z = max(all_high_z)
highcorner = (high_x, high_y, high_z)
allsize = (highcorner[0] - lowcorner[0] + 1,
highcorner[1] - lowcorner[1] + 1,
highcorner[2] - lowcorner[2] + 1)
# corners [otheridx] and size[otheridx] should be all the same
newcorners = [list(x) for x in corners]
newsizes = [list(x) for x in sizes]
a = otheridx[0]
b = otheridx[1]
for f in range(len(newcorners)):
newcorners[f][a] = lowcorner[a]
newcorners[f][b] = lowcorner[b]
newsizes[f][a] = allsize[a]
newsizes[f][b] = allsize[b]
ims = [sitk.RegionOfInterest(im, allsize,
lowcorner) == labels[i]
for i in range(len(labels))]
imcrop = sitk.RegionOfInterest(im, allsize, lowcorner)
return({'rois': ims, 'labels': labels,
'original': im, 'cropped': imcrop}) | 66e89e84d773d102c8fe7a6d10dd0604b52d9862 | 3,657,375 |
def render_graphs(csv_data, append_titles=""):
"""
Convenience function. Gets the aggregated `monthlies` data from
`aggregate_monthly_data(csv_data)` and returns a dict of graph
titles mapped to rendered SVGs from `monthly_total_precip_line()`
and `monthly_avg_min_max_temp_line()` using the `monthlies` data.
"""
monthlies = aggregate_monthly_data(csv_data)
return {
graph.config.title: graph.render()
for graph in [
monthly_total_precip_line(monthlies, append_titles),
monthly_avg_min_max_temp_line(monthlies, append_titles),
monthly_max_temps_box(monthlies, append_titles),
]
} | c2258faf759c2fd91e55fea06384d5f7ec030154 | 3,657,376 |
import traceback
def _get_location():
"""Return the location as a string, accounting for this function and the parent in the stack."""
return "".join(traceback.format_stack(limit=STACK_LIMIT + 2)[:-2]) | f36037a440d2e8f3613beed217a758bc0cfa752d | 3,657,377 |
def start_session():
"""do nothing here
"""
return Response.failed_response('Error') | b8c58ec837c5a77c35cb6682c6c405489cf512c0 | 3,657,379 |
import tensorflow as tf
import tensorflow_hub as hub
def _combine_keras_model_with_trill(embedding_tfhub_handle, aggregating_model):
"""Combines keras model with TRILL model."""
trill_layer = hub.KerasLayer(
handle=embedding_tfhub_handle,
trainable=False,
arguments={'sample_rate': 16000},
output_key='embedding',
output_shape=[None, 2048]
)
input1 = tf.keras.Input([None])
trill_output = trill_layer(input1)
final_out = aggregating_model(trill_output)
final_model = tf.keras.Model(
inputs=input1,
outputs=final_out)
return final_model | 97bf695e6b083dfefcad1d2c8ac24b54687047fd | 3,657,380 |
def phases(times, names=[]):
""" Creates named phases from a set of times defining the edges of hte intervals """
if not names: names = range(len(times)-1)
return {names[i]:[times[i], times[i+1]] for (i, _) in enumerate(times) if i < len(times)-1} | 0e56dcf57a736e4555cae02b8f79b827c17e1d38 | 3,657,381 |
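
For example (hypothetical values, assuming `phases` from the snippet above):

print(phases([0.0, 1.5, 3.0], names=['baseline', 'stimulus']))
# {'baseline': [0.0, 1.5], 'stimulus': [1.5, 3.0]}
print(phases([0, 10, 20]))
# {0: [0, 10], 1: [10, 20]}
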
def smesolve(H, rho0, times, c_ops=[], sc_ops=[], e_ops=[],
_safe_mode=True, args={}, **kwargs):
"""
Solve stochastic master equation. Dispatch to specific solvers
depending on the value of the `solver` keyword argument.
Parameters
----------
H : :class:`qutip.Qobj`, or time dependent system.
System Hamiltonian.
Can depend on time, see StochasticSolverOptions help for format.
rho0 : :class:`qutip.Qobj`
Initial density matrix or state vector (ket).
times : *list* / *array*
List of times for :math:`t`. Must be uniformly spaced.
c_ops : list of :class:`qutip.Qobj`, or time dependent Qobjs.
Deterministic collapse operator which will contribute with a standard
Lindblad type of dissipation.
Can depend on time, see StochasticSolverOptions help for format.
sc_ops : list of :class:`qutip.Qobj`, or time dependent Qobjs.
List of stochastic collapse operators. Each stochastic collapse
operator will give a deterministic and stochastic contribution
        to the equation of motion according to how the d1 and d2 functions
are defined.
Can depend on time, see StochasticSolverOptions help for format.
e_ops : list of :class:`qutip.Qobj`
single operator or list of operators for which to evaluate
expectation values.
kwargs : *dictionary*
Optional keyword arguments. See
:class:`qutip.stochastic.StochasticSolverOptions`.
Returns
-------
output: :class:`qutip.solver.Result`
An instance of the class :class:`qutip.solver.Result`.
"""
if "method" in kwargs and kwargs["method"] == "photocurrent":
print("stochastic solver with photocurrent method has been moved to "
"it's own function: photocurrent_mesolve")
return photocurrent_mesolve(H, rho0, times, c_ops=c_ops, sc_ops=sc_ops,
e_ops=e_ops, _safe_mode=_safe_mode,
args=args, **kwargs)
if isket(rho0):
rho0 = ket2dm(rho0)
if isinstance(e_ops, dict):
e_ops_dict = e_ops
e_ops = [e for e in e_ops.values()]
else:
e_ops_dict = None
sso = StochasticSolverOptions(True, H=H, state0=rho0, times=times,
c_ops=c_ops, sc_ops=sc_ops, e_ops=e_ops,
args=args, **kwargs)
if _safe_mode:
_safety_checks(sso)
if sso.solver_code == 120:
return _positive_map(sso, e_ops_dict)
sso.LH = liouvillian(sso.H, c_ops=sso.sc_ops + sso.c_ops) * sso.dt
if sso.method == 'homodyne' or sso.method is None:
if sso.m_ops is None:
sso.m_ops = [op + op.dag() for op in sso.sc_ops]
sso.sops = [spre(op) + spost(op.dag()) for op in sso.sc_ops]
if not isinstance(sso.dW_factors, list):
sso.dW_factors = [1] * len(sso.m_ops)
elif len(sso.dW_factors) != len(sso.m_ops):
raise Exception("The len of dW_factors is not the same as m_ops")
elif sso.method == 'heterodyne':
if sso.m_ops is None:
m_ops = []
sso.sops = []
for c in sso.sc_ops:
if sso.m_ops is None:
m_ops += [c + c.dag(), -1j * c - c.dag()]
sso.sops += [(spre(c) + spost(c.dag())) / np.sqrt(2),
(spre(c) - spost(c.dag())) * -1j / np.sqrt(2)]
sso.m_ops = m_ops
if not isinstance(sso.dW_factors, list):
sso.dW_factors = [np.sqrt(2)] * len(sso.sops)
elif len(sso.dW_factors) == len(sso.m_ops):
pass
elif len(sso.dW_factors) == len(sso.sc_ops):
dW_factors = []
for fact in sso.dW_factors:
dW_factors += [np.sqrt(2) * fact, np.sqrt(2) * fact]
sso.dW_factors = dW_factors
elif len(sso.dW_factors) != len(sso.m_ops):
raise Exception("The len of dW_factors is not the same as sc_ops")
elif sso.method == "photocurrent":
raise NotImplementedError("Moved to 'photocurrent_mesolve'")
else:
raise Exception("The method must be one of None, homodyne, heterodyne")
sso.ce_ops = [QobjEvo(spre(op)) for op in sso.e_ops]
sso.cm_ops = [QobjEvo(spre(op)) for op in sso.m_ops]
sso.LH.compile()
[op.compile() for op in sso.sops]
[op.compile() for op in sso.cm_ops]
[op.compile() for op in sso.ce_ops]
if sso.solver_code in [103, 153]:
sso.imp = 1 - sso.LH * 0.5
sso.imp.compile()
sso.solver_obj = SMESolver
sso.solver_name = "smesolve_" + sso.solver
res = _sesolve_generic(sso, sso.options, sso.progress_bar)
if e_ops_dict:
res.expect = {e: res.expect[n]
for n, e in enumerate(e_ops_dict.keys())}
return res | 4a27d54d2ca390bb3e4ac88ec2119633481df529 | 3,657,382 |
import numpy as np
def harmonic_vector(n):
"""
create a vector in the form [1,1/2,1/3,...1/n]
"""
return np.array([[1.0 / i] for i in range(1, n + 1)], dtype='double') | 6f2a94e0a54566db614bb3c4916e1a8538783862 | 3,657,383 |
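
A hypothetical call (assuming the NumPy import added above):

print(harmonic_vector(3))
# [[1.        ]
#  [0.5       ]
#  [0.33333333]]
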
import copy
def get_install_task_flavor(job_config):
"""
Pokes through the install task's configuration (including its overrides) to
figure out which flavor it will want to install.
Only looks at the first instance of the install task in job_config.
"""
project, = job_config.get('project', 'ceph'),
tasks = job_config.get('tasks', dict())
overrides = job_config.get('overrides', dict())
install_overrides = overrides.get('install', dict())
project_overrides = install_overrides.get(project, dict())
first_install_config = dict()
for task in tasks:
if task.keys()[0] == 'install':
first_install_config = task.values()[0] or dict()
break
first_install_config = copy.deepcopy(first_install_config)
deep_merge(first_install_config, install_overrides)
deep_merge(first_install_config, project_overrides)
return get_flavor(first_install_config) | 11fcefe3df17acfbce395949aa615d8292585fb6 | 3,657,384 |
def equalize_hist(image, nbins=256):
"""Return image after histogram equalization.
Parameters
----------
image : array
Image array.
nbins : int
Number of bins for image histogram.
Returns
-------
out : float array
Image array after histogram equalization.
Notes
-----
This function is adapted from [1]_ with the author's permission.
References
----------
.. [1] http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html
.. [2] http://en.wikipedia.org/wiki/Histogram_equalization
"""
image = img_as_float(image)
cdf, bin_centers = cumulative_distribution(image, nbins)
out = np.interp(image.flat, bin_centers, cdf)
return out.reshape(image.shape) | ea990cee9bef0e2edc41e2c5279f52b98d2a4d89 | 3,657,385 |
def add9336(rh):
"""
Adds a 9336 (FBA) disk to virtual machine's directory entry.
Input:
Request Handle with the following properties:
function - 'CHANGEVM'
subfunction - 'ADD9336'
userid - userid of the virtual machine
parms['diskPool'] - Disk pool
parms['diskSize'] - size of the disk in blocks or bytes.
parms['fileSystem'] - Linux filesystem to install on the disk.
parms['mode'] - Disk access mode
parms['multiPW'] - Multi-write password
parms['readPW'] - Read password
parms['vaddr'] - Virtual address
parms['writePW'] - Write password
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter changeVM.add9336")
results, blocks = generalUtils.cvtToBlocks(rh, rh.parms['diskSize'])
if results['overallRC'] != 0:
# message already sent. Only need to update the final results.
rh.updateResults(results)
if results['overallRC'] == 0:
parms = [
"-T", rh.userid,
"-v", rh.parms['vaddr'],
"-t", "9336",
"-a", "AUTOG",
"-r", rh.parms['diskPool'],
"-u", "1",
"-z", blocks,
"-f", "1"]
hideList = []
if 'mode' in rh.parms:
parms.extend(["-m", rh.parms['mode']])
else:
parms.extend(["-m", 'W'])
if 'readPW' in rh.parms:
parms.extend(["-R", rh.parms['readPW']])
hideList.append(len(parms) - 1)
if 'writePW' in rh.parms:
parms.extend(["-W", rh.parms['writePW']])
hideList.append(len(parms) - 1)
if 'multiPW' in rh.parms:
parms.extend(["-M", rh.parms['multiPW']])
hideList.append(len(parms) - 1)
results = invokeSMCLI(rh,
"Image_Disk_Create_DM",
parms,
hideInLog=hideList)
if results['overallRC'] != 0:
# SMAPI API failed.
rh.printLn("ES", results['response'])
rh.updateResults(results) # Use results from invokeSMCLI
if (results['overallRC'] == 0 and 'fileSystem' in rh.parms):
# Install the file system
results = installFS(
rh,
rh.parms['vaddr'],
rh.parms['mode'],
rh.parms['fileSystem'],
"9336")
if results['overallRC'] == 0:
results = isLoggedOn(rh, rh.userid)
if (results['overallRC'] == 0 and results['rs'] == 0):
# Add the disk to the active configuration.
parms = [
"-T", rh.userid,
"-v", rh.parms['vaddr'],
"-m", rh.parms['mode']]
results = invokeSMCLI(rh, "Image_Disk_Create", parms)
if results['overallRC'] == 0:
rh.printLn("N", "Added dasd " + rh.parms['vaddr'] +
" to the active configuration.")
else:
# SMAPI API failed.
rh.printLn("ES", results['response'])
rh.updateResults(results) # Use results from invokeSMCLI
rh.printSysLog("Exit changeVM.add9336, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | bb7168d5b0ee084b15e8ef91633d5554669cf83f | 3,657,386 |
def get_related(user, kwargs):
"""
Get related model from user's input.
"""
for item in user.access_extra:
if item[1] in kwargs:
related_model = apps.get_model(item[0], item[1])
kwargs[item[1]] = related_model.objects.get(pk=get_id(kwargs[item[1]]))
return kwargs | 6b2ce081d1f61da734d26ef6f3c25e4da871b9ee | 3,657,388 |
import numpy as np
def make_logical(n_tiles=1):
"""
Make a toy dataset with three labels that represent the logical functions: OR, XOR, AND
(functions of the 2D input).
"""
pat = np.array([
# X X Y Y Y
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[1, 0, 1, 1, 0],
[1, 1, 1, 0, 1]
], dtype=int)
N, E = pat.shape
D = 2
L = E - D
pat2 = np.zeros((N, E))
pat2[:, 0:L] = pat[:, D:E]
pat2[:, L:E] = pat[:, 0:D]
pat2 = np.tile(pat2, (n_tiles, 1))
np.random.shuffle(pat2)
Y = np.array(pat2[:, 0:L], dtype=float)
X = np.array(pat2[:, L:E], dtype=float)
return X, Y | e2d936db7ae0d9ea8b0f1654e89a32b5b8c247cc | 3,657,389 |
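
As a hedged illustration of the label semantics (assuming the NumPy import added above), each row of Y holds the OR, XOR and AND of the corresponding row of X:

X, Y = make_logical(n_tiles=1)
for x, y in zip(X, Y):
    print(x, y)   # e.g. [1. 0.] [1. 1. 0.]  (OR=1, XOR=1, AND=0)
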
def get_idmap_etl(
p_idmap: object,
p_etl_id: str,
p_source_table: object =None
):
"""
    Generates the ETL script for the Idmap table
    :param p_idmap: an object of the Idmap class
    :param p_etl_id: id of the ETL process
    :param p_source_table: the source table that should be loaded into the idmap
        (if not specified, a list with the ETL of all source tables is returned)
"""
l_source_table_id=None
if p_source_table:
l_source_table_id=p_source_table.id
l_etl=[]
l_idmap_nk_column=None
l_idmap_rk_column=None
l_etl_column=None
for i_attribute in _get_table_attribute_property(p_table=p_idmap):
if i_attribute.attribute_type==C_RK:
l_idmap_rk_column=i_attribute.id
if i_attribute.attribute_type==C_NK:
l_idmap_nk_column=i_attribute.id
if i_attribute.attribute_type==C_ETL_ATTR:
l_etl_column=i_attribute.id
for i_source_table in p_idmap.entity.source_table:
        if l_source_table_id and l_source_table_id!=i_source_table.id: # skip source tables other than the one specified
continue
l_column_nk_sql=""
        # build the SQL that concatenates the natural keys
        # sort the list of natural keys by name
l_source_attribute_nk=sorted(p_idmap.source_attribute_nk, key=lambda nk: nk.name)
for i_column_nk in l_source_attribute_nk:
if i_source_table.id==i_column_nk.source_table.id:
l_column_nk_sql=l_column_nk_sql+"CAST("+'"'+str(i_column_nk.id)+'"'+" AS VARCHAR(4000))\n\t\t||'@@'||\n\t\t"
l_column_nk_sql=l_column_nk_sql[:-14]
l_source_id=i_source_table.source.source_id
        # generate ETL for each source table
l_etl.append(
Connection().dbms.get_idmap_etl(
p_idmap_id=p_idmap.id,
p_idmap_rk_id=l_idmap_rk_column,
p_idmap_nk_id=l_idmap_nk_column,
p_etl_id=l_etl_column,
p_etl_value=p_etl_id,
p_source_table_id=i_source_table.id,
p_attribute_nk=l_column_nk_sql,
p_source_id=l_source_id,
p_max_rk=str(p_idmap.max_rk)
)
)
return l_etl | 0e24b4cbb5ea935c871cae3338094292c9ebfd02 | 3,657,390 |
def gs_tie(men, women, preftie):
"""
Gale-shapley algorithm, modified to exclude unacceptable matches
Inputs: men (list of men's names)
women (list of women's names)
            preftie (dictionary of preferences mapping names to list of sets of preferred names in sorted order)
Output: dictionary of stable matches
"""
rank = {}
for w in women:
rank[w] = {}
i = 1
for m in preftie[w]:
rank[w][tuple(m)] = i
i += 1
#print(rank)
prefpointer = {}
for m in men:
prefpointer[m] = 0
freemen = set(men)
S = {}
while(freemen) and prefpointer[m] < len(preftie[m]):
m = freemen.pop()
w = preftie[m][prefpointer[m]]
w = tuple(w)
#print(m + ' ' + str(w))
prefpointer[m] += 1
#print(m + ' ' + str(prefpointer[m]))
for i in range(len(w)):
if w[i] not in S:
S[w[i]] = m
#print(w[i])
else:
mprime = S[w[i]]
if m in rank[w[i]] and rank[w[i]][m] < rank[w[i]][mprime]:
S[w[i]] = m
freemen.add(mprime)
else:
freemen.add(m)
#print(S)
return S | b5dbe7047e3e6be7f0d288e49f8dae25a94db318 | 3,657,391 |
def is_iterable(value):
"""Return True if the object is an iterable type."""
return hasattr(value, '__iter__') | 55e1ecc9b264d39aaf5cfcbe89fdc01264191d95 | 3,657,392 |
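
A hypothetical usage note: strings also define `__iter__`, so they count as iterable here:

print(is_iterable([1, 2, 3]))  # True
print(is_iterable("abc"))      # True
print(is_iterable(42))         # False
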
def get_search_app_by_model(model):
"""
:returns: a single search app (by django model)
:param model: django model for the search app
:raises LookupError: if it can't find the search app
"""
for search_app in get_search_apps():
if search_app.queryset.model is model:
return search_app
raise LookupError(f'search app for {model} not found.') | 0670fe754df65b02d5dfc502ba3bd0a3a802370c | 3,657,393 |
def prct_overlap(adata, key_1, key_2, norm=False, ax_norm="row", sort_index=False):
"""
    % of cells or cell count corresponding to the overlap of different cell types
    between 2 sets of annotations/clusters.
    Parameters
    ----------
    adata: AnnData object
    key_1: observational key corresponding to one cell division/ one set of clusters
    key_2: observational key corresponding to one cell division/ one set of clusters
norm: normalise the ratio to the cell numbers given the total number of cells per
cluster in key_1
Return
------
Table containing the ratio of cells within a cluster
"""
data_1 = adata.obs[key_1].tolist()
data_2 = adata.obs[key_2].tolist()
count = {k:[] for k in list(set(data_1))}
#count = {k:[] for k in sorted(list(set(data_1)))}
i = 0
for index in data_1:
count[index].append(data_2[i])
i += 1
total_matrix = []
for key, value in count.items():
value = sorted(value)
curr_key_list = []
for element in sorted(list(set(data_2))):
curr_count = 0
for v in value:
if element == v:
curr_count += 1
curr_key_list.append(curr_count)
curr_sum = sum(curr_key_list)
#total_matrix.append([x/curr_sum for x in curr_key_list])
total_matrix.append(curr_key_list)
if norm and ax_norm == "row":
total_matrix = []
for key, value in count.items():
value = sorted(value)
curr_key_list = []
for element in sorted(list(set(data_2))):
curr_count = 0
for v in value:
if element == v:
curr_count += 1
curr_key_list.append(curr_count)
curr_sum = sum(curr_key_list)
total_matrix.append([x/curr_sum for x in curr_key_list])
elif norm:
print("""error in the argument ax_norm or it is col and
I haven't figure out how to make it for mow.
, here is the heatmap with no normalisation""")
if sort_index:
data_heatmap = pd.DataFrame(data=np.matrix(total_matrix),
index=list(set(data_1)),
columns=sorted(list(set(data_2)))).sort_index()
else:
data_heatmap = pd.DataFrame(data=np.matrix(total_matrix),
index=list(set(data_1)),
columns=sorted(list(set(data_2))))
return(data_heatmap) | 77a8382af77e8842a99211af58d6a6f85de6a50e | 3,657,394 |
def keep_category(df, colname, pct=0.05, n=5):
""" Keep a pct or number of every levels of a categorical variable
Parameters
----------
pct : float
Keep at least pct of the nb of observations having a specific category
n : int
Keep at least n of the variables having a specific category
Returns
--------
Returns an index of rows to keep
"""
tokeep = []
nmin = df.groupby(colname).apply(lambda x: x.sample(
max(1, min(x.shape[0], n, int(x.shape[0] * pct)))).index)
for index in nmin:
tokeep += index.tolist()
return pd.Index(tokeep) | 3db00aa6bdea797827a693c8e12bbf942a55ec35 | 3,657,395 |
def remove_scope_from_name(name, scope):
"""
Args:
name (str): full name of the tf variable with all the scopes
Returns:
(str): full name of the variable with the scope removed
"""
result = name.split(scope)[1]
result = result[1:] if result[0] == '/' else result
return result.split(":")[0] | aa70042a2f57185a0f5e401d182a02e5654eb2b0 | 3,657,396 |
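
For instance (hypothetical TF variable name, assuming `remove_scope_from_name` from the snippet above):

print(remove_scope_from_name("policy/hidden_1/kernel:0", "policy"))  # hidden_1/kernel
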
async def get_timers_matching(ctx, name_str, channel_only=True, info=False):
"""
Interactively get a guild timer matching the given string.
Parameters
----------
name_str: str
Name or partial name of a group timer in the current guild or channel.
channel_only: bool
Whether to match against the groups in the current channel or those in the whole guild.
info: bool
Whether to display some summary info about the timer in the selector.
Returns: Timer
Raises
------
cmdClient.lib.UserCancelled:
Raised if the user manually cancels the selection.
cmdClient.lib.ResponseTimedOut:
Raised if the user fails to respond to the selector within `120` seconds.
"""
# Get the full timer list
if channel_only:
timers = ctx.client.interface.get_channel_timers(ctx.ch.id)
else:
timers = ctx.client.interface.get_guild_timers(ctx.guild.id)
# If there are no timers, quit early
if not timers:
return None
# Build a list of matching timers
name_str = name_str.strip()
timers = [timer for timer in timers if name_str.lower() in timer.name.lower()]
if len(timers) == 0:
return None
elif len(timers) == 1:
return timers[0]
else:
if info:
select_from = [timer.oneline_summary() for timer in timers]
else:
select_from = [timer.name for timer in timers]
try:
selected = await ctx.selector("Multiple matching groups found, please select one.", select_from)
except ResponseTimedOut:
raise ResponseTimedOut("Group selection timed out.") from None
except UserCancelled:
raise UserCancelled("User cancelled group selection.") from None
return timers[selected] | 48e94d2930f48b47b033ec024246065206a2bebb | 3,657,397 |
from random import random
def comprehension_array(size=1000000):
"""Fills an array that is handled by Python via list comprehension."""
return [random() * i for i in range(size)] | e3ccdc992e5b741cf6f164c93d36f2e45d59a590 | 3,657,398 |
def alignment(alpha, p, treatment):
"""Alignment confounding function.
Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis
for causal effects." Political Analysis 22.2 (2014): 169-182.
https://www.mattblackwell.org/files/papers/causalsens.pdf
Args:
alpha (np.array): a confounding values vector
p (np.array): a propensity score vector between 0 and 1
treatment (np.array): a treatment vector (1 if treated, otherwise 0)
"""
assert p.shape[0] == treatment.shape[0]
adj = alpha * (1 - p) * treatment + alpha * p * (1 - treatment)
return adj | 8097dbcd62ba934b31b1f8a9e72fd906109b5181 | 3,657,399 |
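
A small numerical sketch of the adjustment (hypothetical inputs, assuming NumPy and `alignment` from the snippet above):

import numpy as np

p = np.array([0.2, 0.8])          # propensity scores
treatment = np.array([1, 0])      # treated / control
print(alignment(0.5, p, treatment))  # [0.4 0.4]
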
import numpy as np
from scipy import stats
def percentiles(a, pcts, axis=None):
"""Like scoreatpercentile but can take and return array of percentiles.
Parameters
----------
a : array
data
pcts : sequence of percentile values
percentile or percentiles to find score at
axis : int or None
if not None, computes scores over this axis
Returns
-------
scores: array
array of scores at requested percentiles
first dimension is length of object passed to ``pcts``
"""
scores = []
try:
n = len(pcts)
except TypeError:
pcts = [pcts]
n = 0
for i, p in enumerate(pcts):
if axis is None:
score = stats.scoreatpercentile(a.ravel(), p)
else:
score = np.apply_along_axis(stats.scoreatpercentile, axis, a, p)
scores.append(score)
scores = np.asarray(scores)
if not n:
scores = scores.squeeze()
return scores | 0e7217ec3e36a361a6747729543cd694912a2874 | 3,657,400 |
import json
def single_request(gh,kname='CVE exploit',page=1,per_page=50):
"""
    Parse a single page of repository data and extract the CVE and exploit markers.
:return cve_list:list, cve id in each page by searching github.com
"""
cve=dict()
url="https://api.github.com/search/repositories?q={key_name}&sort=updated&order=desc&page={page}&per_page={per_page}".format(key_name=kname,page=page,per_page=per_page)
r=gh.call_to_the_api(url)
if r:
content=r.text
js=json.loads(content)
items=js['items']
total_count=js['total_count']
cve_add=single_parser(gh,items)
if cve_add:
cve={**cve,**cve_add}
return total_count,cve
else:
return False,False | 5fdd3fe28f0e973fb9d854e20b8ce77ed109d3c6 | 3,657,401 |
def stuff_context(sites, rup, dists):
"""
Function to fill a rupture context with the contents of all of the
other contexts.
Args:
sites (SiteCollection): A SiteCollection object.
rup (RuptureContext): A RuptureContext object.
dists (DistanceContext): A DistanceContext object.
Returns:
RuptureContext: A new RuptureContext whose attributes are all of
the elements of the three inputs.
"""
ctx = RuptureContext()
for name in [name for name in vars(sites) if not name.startswith("__")]:
setattr(ctx, name, getattr(sites, name))
for name in [name for name in vars(rup) if not name.startswith("__")]:
setattr(ctx, name, getattr(rup, name))
for name in [name for name in vars(dists) if not name.startswith("__")]:
setattr(ctx, name, getattr(dists, name))
return ctx | 9c197a41414a875942a6df22c03899c3e936967f | 3,657,403 |
def number_to_float(value):
"""The INDI spec allows a number of different number formats, given any, this returns a float
:param value: A number string of a float, integer or sexagesimal
:type value: String
:return: The number as a float
:rtype: Float
"""
# negative is True, if the value is negative
negative = value.startswith("-")
if negative:
value = value.lstrip("-")
# Is the number provided in sexagesimal form?
if value == "":
parts = [0, 0, 0]
elif " " in value:
parts = value.split(" ")
elif ":" in value:
parts = value.split(":")
elif ";" in value:
parts = value.split(";")
else:
# not sexagesimal
parts = [value, "0", "0"]
# Any missing parts should have zero
if len(parts) == 2:
# assume seconds are missing, set to zero
parts.append("0")
assert len(parts) == 3
number_strings = list(x if x else "0" for x in parts)
# convert strings to integers or floats
number_list = []
for part in number_strings:
try:
num = int(part)
except ValueError:
num = float(part)
number_list.append(num)
    # degrees/hours + minutes/60 + seconds/3600
    floatvalue = number_list[0] + (number_list[1] / 60) + (number_list[2] / 3600)
if negative:
floatvalue = -1 * floatvalue
return floatvalue | 8b754a32848b3e697e0f82dbee4a1c35c560f1be | 3,657,404 |
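
A few hypothetical parses (assuming `number_to_float` from the snippet above):

print(number_to_float("-10 30 0"))   # -10.5
print(number_to_float("12:30"))      # 12.5
print(number_to_float("45.5"))       # 45.5
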
def spg_line_search_step_length(current_step_length, delta, f_old, f_new,
sigma_one=0.1, sigma_two=0.9):
"""Return next step length for line search."""
step_length_tmp = (-0.5 * current_step_length ** 2 * delta /
(f_new - f_old - current_step_length * delta))
next_step_length = 0
if sigma_one <= step_length_tmp <= sigma_two * current_step_length:
next_step_length = step_length_tmp
else:
next_step_length = 0.5 * current_step_length
return next_step_length | 844cccdfe1ec3f9c2c287384284ceb2ac3530e8e | 3,657,405 |
def calc_pv_invest(area, kw_to_area=0.125, method='EuPD'):
"""
Calculate PV investment cost in Euro
Parameters
----------
area : float
Photovoltaic area
kw_to_area : float , optional
Ratio of peak power to area (default: 0.125)
For instance, 0.125 means 0.125 kWp / m2 area
(http://www.solaranlagen-portal.com/photovoltaik/leistung)
method : str, optional
Method to calculate cost (default: 'EuPD')
Options:
- 'sap':
Based on: Solaranlagenportal
http://www.solaranlagen-portal.com/photovoltaik/kosten
- 'EuPD':
Based on: EuPD Research, Photovoltaik-Preismonitor Deutschland: German PV
ModulePriceMonitor.
Returns
-------
pv_invest : float
        Investment cost of the PV system in Euro
"""
assert method in ['sap', 'EuPD'], 'Unknown method'
assert area > 0, 'Area has to be larger than zero.'
assert kw_to_area > 0, 'kWp / area ratio has to be larger than zero.'
if method == 'sap':
kw_peak = area * kw_to_area # kW peak load
# kw_peak * (spec_price + spec_install_cost) + inverter cost
pv_invest = kw_peak * (1100 + 120) + 2000
if method == 'EuPD':
kw_peak = area * kw_to_area # kW peak load
# kw_peak * (spec_cost) + inverter cost
pv_invest = kw_peak * 1400 + 2000
return pv_invest | 2de9ee05580bc9d41522272a06cd97aaf3f5bc55 | 3,657,407 |
def samps2ms(samples: float, sr: int) -> float:
"""samples to milliseconds given a sampling rate"""
return (samples / sr) * 1000.0 | 49e07ee02984bf0e9a0a54715ef6b6e5a3c87798 | 3,657,409 |
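
For example, at a 44.1 kHz sampling rate (hypothetical values, assuming `samps2ms` from the snippet above):

print(samps2ms(441, 44100))    # 10.0
print(samps2ms(22050, 44100))  # 500.0
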
def nice_year(dt, lang=None, bc=False):
"""Format a datetime to a pronounceable year.
For example, generate 'nineteen-hundred and eighty-four' for year 1984
Args:
dt (datetime): date to format (assumes already in local timezone)
lang (string): the language to use, use Mycroft default language if
not provided
        bc (bool) put B.C. after the year (python does not support dates
B.C. in datetime)
Returns:
(str): The formatted year string
"""
return lingua_franca.format.nice_year(dt, lang, bc) | 641195195023ecca030f6cd8d12ff9a3fc9c989c | 3,657,410 |
def get_results(job_id):
"""
Get the result of the job based on its id
"""
try:
job = Job.fetch(job_id, connection=conn)
if job.is_finished:
return jsonify({
"status": "finished",
"data": job.result
}), 200
elif job.is_failed:
return jsonify({
"status": "failed"
}), 200
else:
return jsonify({
"status": "in-progress"
}), 200
except NoSuchJobError:
return jsonify({
"msg": "job id does not exist"
}), 404 | ada9042cd4d7961415ec274a68631f6e9af81fad | 3,657,411 |
def get_clean_dict(obj: HikaruBase) -> dict:
"""
Turns an instance of a HikaruBase into a dict without values of None
    This function returns a Python dict object that represents the hierarchy
    of objects starting at ``obj`` and recursing into any nested objects.
The returned dict **does not** include any key/value pairs where the value
of the key is None or empty.
If you wish to instead have a dict with all key/value pairs even when
there is no useful value then you should use the dataclasses module's
``asdict()`` function on obj.
    :param obj: an instance of a HikaruBase subclass
:return: a dict representation of the obj instance, but if any value
in the dict was originally None, that key:value is removed from the
returned dict, hence it is a minimal representation
:raises TypeError: if the supplied obj is not a HikaruBase (dataclass),
or if obj is not an instance of a HikaruBase subclass
"""
if not isinstance(obj, HikaruBase):
raise TypeError("obj must be a kind of HikaruBase")
initial_dict = asdict(obj)
clean_dict = _clean_dict(initial_dict)
return clean_dict | 3daca47b6d8c42fca8856221f39b635791eb0fce | 3,657,412 |
def generate_html_frieze(type, value):
"""
Gets the data to be able to generate the frieze.
Calls the function to actually generate HTML.
Input:
- Type (session or dataset) of the second input
- A SQLAlchemy DB session or a dataset (list of mappings)
Output:
- The HTML to be displayed
"""
if type == "session":
session = value
mappings = list(get_all_mappings(session))
elif type == "dataset":
mappings = value
holes_raw = calc_all_holes("dataset", mappings)
holes = []
for hole in holes_raw:
holes.append(
{
"devices_id": -1000,
"id": -1000,
"iova": None,
"phys_addr": hole[0],
"size": hole[1],
}
)
for hole in holes:
hole["devices_id"] = -1
try:
mappings = add_device_info(mappings, session)
except:
session = create_session()
mappings = add_device_info(mappings, session)
mappings_as_dict = []
for m in mappings:
mappings_as_dict.append(m.__dict__)
memory_state = sorted(
mappings_as_dict + holes, key=lambda mapping: mapping["phys_addr"]
)
memory_state = unify_common_space(memory_state)
html_frieze = create_html_from_memory_state(memory_state, session)
return html_frieze | ddf914d9d710e60af48a6dc687a9e3961ab0cf94 | 3,657,413 |
from typing import Optional
import re
def instantiate_model(model_to_train: str,
dataset_directory: str,
performance_directory: str,
gpu: Optional[bool] = None):
"""
A function to create the instance of the imported Class,
Classifier.
Args:
model_to_train (str): name of the pretrained model to train
dataset directory (str): Directory containing the data
        performance directory (str): The directory where the generated text, checkpoints,
            and model stats will be saved.
gpu (bool): Boolean indicating availability of a GPU
Returns:
None.
"""
file = get_latest_exp(performance_directory)
if file is not None:
filename = re.findall('\\\\([^\\\\]+)\.txt', file)
exp_no = int((re.findall('_([0-9]+)', filename[0]))[0])
exp_no += 1
else:
exp_no = 1
Model = Classifier(exp_no, model_to_train, dataset_directory, performance_directory, gpu=gpu)
return Model | 8053053b5e77f1c74404826e7335b05bece8b99f | 3,657,415 |
def generate_hmac_key():
"""
Generates a key for use in the :func:`~securitylib.advanced_crypto.hmac` function.
:returns: :class:`str` -- The generated key, in byte string.
"""
return generate_secret_key(HMAC_KEY_MINIMUM_LENGTH) | 877cf9fbe56b6715f1744839ce83ac1abf9d7da8 | 3,657,416 |
def uscensus(location, **kwargs):
"""US Census Provider
Params
------
:param location: Your search location you want geocoded.
:param benchmark: (default=4) Use the following:
> Public_AR_Current or 4
> Public_AR_ACSYYYY or 8
> Public_AR_Census2010 or 9
:param vintage: (default=4) Use the following:
> Current_Current or 4
> Census2010_Current or 410
> ACS2013_Current or 413
> ACS2014_Current or 414
> ACS2015_Current or 415
> Current_ACS2015 or 8
> Census2010_ACS2015 or 810
> ACS2013_ACS2015 or 813
> ACS2014_ACS2015 or 814
> ACS2015_ACS2015 or 815
> Census2010_Census2010 or 910
> Census2000_Census2010 or 900
:param method: (default=geocode) Use the following:
> geocode
> reverse
API Reference
-------------
https://geocoding.geo.census.gov/geocoder/Geocoding_Services_API.pdf
"""
return get(location, provider='uscensus', **kwargs) | bd73acb87f27e3f14d0b1e22ebd06b91fcec9d85 | 3,657,418 |
def reco_source_position_sky(cog_x, cog_y, disp_dx, disp_dy, focal_length, pointing_alt, pointing_az):
"""
Compute the reconstructed source position in the sky
Parameters
----------
cog_x: `astropy.units.Quantity`
cog_y: `astropy.units.Quantity`
disp: DispContainer
focal_length: `astropy.units.Quantity`
pointing_alt: `astropy.units.Quantity`
pointing_az: `astropy.units.Quantity`
Returns
-------
"""
src_x, src_y = disp_to_pos(disp_dx, disp_dy, cog_x, cog_y)
return camera_to_sky(src_x, src_y, focal_length, pointing_alt, pointing_az) | 14b7fee325bc8a571a13d257f046cd0e7bf838db | 3,657,420 |
def segment_annotations(table, num, length, step=None):
""" Generate a segmented annotation table by stepping across the audio files, using a fixed
step size (step) and fixed selection window size (length).
Args:
table: pandas DataFrame
Annotation table.
num: int
Number of segments
length: float
Selection length in seconds.
step: float
Selection step size in seconds. If None, the step size is set
equal to the selection length.
Returns:
df: pandas DataFrame
Annotations table
"""
if step is None:
step = length
segs = []
for n in range(num):
# select annotations that overlap with segment
t1 = n * step
t2 = t1 + length
a = table[(table.start < t2) & (table.end > t1)].copy()
if len(a) > 0:
# shift and crop annotations
a['start'] = a['start'].apply(lambda x: max(0, x - t1))
a['end'] = a['end'].apply(lambda x: min(length, x - t1))
a['sel_id'] = n #map to segment
segs.append(a)
df = pd.concat(segs)
df.set_index(keys=['sel_id'], inplace=True, append=True)
df = df.swaplevel()
df = df.sort_index()
return df | 4b1bb8298113b43716fcd5f7d2a27b244f63829c | 3,657,421 |
def get_vdw_style(vdw_styles, cut_styles, cutoffs):
"""Get the VDW_Style section of the input file
Parameters
----------
vdw_styles : list
list of vdw_style for each box, one entry per box
cut_styles : list
list of cutoff_style for each box, one entry per box. For a
box with vdw_style == 'none', the cutoff style is None
cutoffs : list
list with cutoffs for each box, one entry per box For a
box with vdw_style == 'none', the cutoff is None
"""
assert len(vdw_styles) == len(cut_styles)
assert len(vdw_styles) == len(cutoffs)
valid_vdw_styles = ["lj", "none"]
valid_cut_styles = {vstyle: [] for vstyle in valid_vdw_styles}
valid_cut_styles["lj"].append("cut")
valid_cut_styles["lj"].append("cut_tail")
valid_cut_styles["lj"].append("cut_switch")
valid_cut_styles["lj"].append("cut_shift")
valid_cut_styles["none"].append(None)
for vdw_style in vdw_styles:
if vdw_style not in valid_vdw_styles:
raise ValueError(
"Unsupported vdw_style: {}. Supported options "
"include {}".format(vdw_style, vdw_styles)
)
for cut_style, vdw_style in zip(cut_styles, vdw_styles):
if cut_style not in valid_cut_styles[vdw_style]:
raise ValueError(
"Unsupported cutoff style: {}. Supported "
"options for the selected vdw_style ({}) include "
"{}".format(cut_style, vdw_style, valid_cut_styles[vdw_style])
)
for cut_style, cutoff in zip(cut_styles, cutoffs):
if cut_style == "cut_switch":
if not isinstance(cutoff, np.ndarray) or len(cutoff) != 2:
raise ValueError(
'Style "cut_switch" requires an inner '
"and outer cutoff. Use the "
"cutoffs=[inner_cut,outer_cut] "
"kwargs option."
)
inp_data = """
# VDW_Style"""
for vdw_style, cut_style, cutoff in zip(vdw_styles, cut_styles, cutoffs):
if vdw_style == "none":
inp_data += """
{vdw_style}""".format(
vdw_style=vdw_style
)
else:
if cut_style == "cut_switch":
inner_cutoff = cutoff[0]
outer_cutoff = cutoff[1]
inp_data += """
{vdw_style} {cut_style} {inner_cutoff} {outer_cutoff}""".format(
vdw_style=vdw_style,
cut_style=cut_style,
inner_cutoff=inner_cutoff,
outer_cutoff=outer_cutoff,
)
else:
inp_data += """
{vdw_style} {cut_style} {cutoff}""".format(
vdw_style=vdw_style, cut_style=cut_style, cutoff=cutoff
)
inp_data += """
!------------------------------------------------------------------------------
"""
return inp_data | 5cd0825d73e11c4fcb8ecce0526493414842697c | 3,657,422 |
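# Hedged example for get_vdw_style() above: two boxes, Lennard-Jones with a
# tail correction in box 1 and no vdW interactions in box 2. The 12.0 cutoff
# is an illustrative value.
section = get_vdw_style(["lj", "none"], ["cut_tail", None], [12.0, None])
print(section)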
def freduce(x, axis=None):
"""
Reduces a spectrum to positive frequencies only
Works on the last dimension (contiguous in c-stored array)
:param x: numpy.ndarray
:param axis: axis along which to perform reduction (last axis by default)
:return: numpy.ndarray
"""
if axis is None:
axis = x.ndim - 1
siz = list(x.shape)
siz[axis] = int(np.floor(siz[axis] / 2 + 1))
return np.take(x, np.arange(0, siz[axis]), axis=axis) | 8d13e66a18ef950422af49a68012605cf0d03947 | 3,657,424 |
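# Quick check of freduce() above: for the FFT of a real signal it keeps only
# the non-negative frequency bins, matching the length of numpy's rfft output.
import numpy as np

sig = np.random.randn(8)
half = freduce(np.fft.fft(sig))
assert half.shape[-1] == np.fft.rfft(sig).shape[-1]  # floor(8 / 2 + 1) == 5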
import json
def sort_shipping_methods(request):
"""Sorts shipping methods after drag 'n drop.
"""
shipping_methods = request.POST.get("objs", "").split('&')
assert (isinstance(shipping_methods, list))
if len(shipping_methods) > 0:
priority = 10
for sm_str in shipping_methods:
sm_id = sm_str.split('=')[1]
sm_obj = ShippingMethod.objects.get(pk=sm_id)
sm_obj.priority = priority
sm_obj.save()
priority = priority + 10
result = json.dumps({
"message": _(u"The shipping methods have been sorted."),
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json') | 307ecef020ac296982a7006ce1392cb807461546 | 3,657,426 |
def appendRecordData(record_df, record):
"""
Args:
record_df (pd.DataFrame):
record (vcf.model._Record):
Returns:
(pd.DataFrame): record_df with an additional row of record (SNP) data.
"""
# Alternate allele bases
    if len(record.ALT) == 0:
        alt0, alt1 = np.nan, np.nan
    elif len(record.ALT) == 1:
        alt0, alt1 = record.ALT[0], np.nan
    else:
        alt0, alt1 = record.ALT[0], record.ALT[1]
varIdentifier = pd.Series(record.ID, name="varIdentifier")
df = pd.DataFrame(
data = {"refBase": record.REF, "altAllele0": alt0,
"altAllele1": alt1},
index = varIdentifier)
record_df = record_df.append(df, ignore_index=False)
return record_df | 0904b317e1925743ed9449e1fcb53aaafa2ffc81 | 3,657,427 |
def get_removed_channels_from_file(fn):
"""
Load a list of removed channels from a file.
Raises
------
* NotImplementedError if the file format isn't supported.
Parameters
----------
fn : str
Filename
Returns
-------
to_remove : list of str
List of channels to remove.
"""
assert isinstance(fn, str)
if fn.endswith('.mat'):
# try:
data = loadmat(fn)
# except: for old .mat files in hdf5 format...
assert('CHANNAMES' in data), f"{fn} must contain CHANNAMES!"
assert('CHANACTIVE' in data), f"{fn} must contain CHANACTIVE!"
channel_active = data['CHANACTIVE'].flatten()
channel_names = np.array(
[str(i[0]) for i in data['CHANNAMES'].flatten()],
)
idx = np.argwhere(channel_active == 0).flatten()
return channel_names[idx].tolist()
else:
raise NotImplementedError(f"Cannot load file: {fn}") | ac3cbeb83c7f1305adf343ce26be3f70f8ae48e8 | 3,657,428 |
def invertHomogeneous(M, range_space_homogeneous=False, A_property=None):
""" Return the inverse transformation of a homogeneous matrix.
A homogenous matrix :math:`M` represents the transformation :math:`y = A x + b`
in homogeneous coordinates. More precisely,
    .. math::
M \tilde{x} = \left[ \begin{matrix}
A & b \\
\end{matrix} \right]
\left[ \begin{matrix}
x \\
1
\end{matrix} \right]
Its inverse is the homogeneous matrix that represents the transformation
:math:`x = A^{-1} ( y - b )`.
Parameters
----------
M : numpy array of float, shape (num_dims, num_dims + 1) or (num_dims + 1, num_dims + 1)
Matrix representing an affine transformation in homogeneous coordinates.
if ``M.shape == (num_dims + 1, num_dims + 1)``, its last row is :math:`[0 1]`
so that its output is also in homogeneous coordinates.
range_space_homogeneous : bool, optional
If True, the output has an extra row :math:`[ 0 1 ]` appended to the bottom
so that its range space is also expressed in homogeneous coordinates.
A_property : {'diag', 'ortho'}, optional
Special property of the submatrix `A` that could make inversion easier.
If no argument is given, this function just calls `m.np.linalg.pinv`.
Returns
-------
M_inverse : numpy array of float, shape (num_dims, num_dims + 1) or (num_dims + 1, num_dims + 1)
Inverse transformation corresponding to input `M`.
"""
    if A_property is None:
        invert = m.np.linalg.pinv
    elif A_property == 'diag':
        def invert(x):
            return m.np.diag(1 / m.np.diag(x))
elif A_property == 'ortho':
invert = m.np.transpose
else:
err_str = f"Can't parse keyword argument 'A_property={A_property}'"
raise ValueError(err_str)
A, b = fromHomogeneous(M)
A_inverse = invert(A)
b_inverse = -A_inverse @ b
M_inverse = homogeneousMatrix(
A_inverse, b_inverse,
range_space_homogeneous=range_space_homogeneous
)
return M_inverse | ea9039c935c82686291145652f762eb79404e417 | 3,657,429 |
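# Standalone numpy illustration of the math invertHomogeneous() implements for
# the 'ortho' case (it does not call the snippet's m.np / fromHomogeneous
# helpers): given y = A x + b with orthogonal A, the inverse map is
# x = A^T (y - b).
import numpy as np

A = np.array([[0.0, -1.0], [1.0, 0.0]])  # 90-degree rotation (orthogonal)
b = np.array([2.0, 3.0])
A_inv = A.T
b_inv = -A_inv @ b

x = np.array([1.0, 5.0])
y = A @ x + b
assert np.allclose(A_inv @ y + b_inv, x)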
import requests
def show_department(department_id):
"""
Returns rendered template to show department with its employees.
:param department_id: department id
:return: rendered template to show department with its employees
"""
url = f'{HOST}api/department/{department_id}'
department = requests.get(url).json()
return render_template('department.html', department=department) | 170318ea40a4f7355fab77f2aeaaad682b9fab2f | 3,657,431 |
def archive_scan():
"""
    Returns a dictionary mapping archive_scan.py parameter names to the
    conversion and default-setting functions applied to them.
"""
# Dictionary of default values setter, type converters and other applied functions
d_applied_functions = {
'favor': [bool_converter, favor_default],
'cnn': [bool_converter],
'gpd': [bool_converter],
'model-name': [apply_default_model_name],
'weights': [apply_default_weights],
'features-number': [int_converter],
'waveform-duration': [float_converter],
'start': [utc_datetime_converter, start_date_default],
'end': [utc_datetime_converter, end_date_default],
'database': [database_filler],
'threshold': [threshold_converter],
'batch-size': [int_converter],
'frequency': [float_converter],
'trace-size': [float_converter, trace_size_converter],
'shift': [int_converter],
'generate-s-files': [string_trimmer],
'detections-for-event': [int_converter],
'generate-waveforms': [string_trimmer],
'register-events': [string_trimmer],
'no-filter': [bool_converter],
'no-detrend': [bool_converter],
'trace-normalization': [bool_converter],
'wavetool-waveforms': [bool_converter],
'detection-stations': [bool_converter],
'plot-positives': [bool_converter],
'silence-wavetool': [bool_converter],
'plot-positives-original': [bool_converter],
'print-scores': [bool_converter],
'print-precision': [int_converter],
'combine-events-range': [float_converter],
'time': [bool_converter],
'cpu': [bool_converter],
'print-files': [bool_converter],
'channel-order': [channel_order_converter],
'print-params': [bool_converter],
}
return d_applied_functions | 71aa3d2c17e880a152529de09b0614dfd619e7da | 3,657,432 |
def esOperador(o):
""""retorna true si 'o' es un operador"""
return o == "+" or o == "-" or o == "/" or o == "*" | 7e1088b641dee7cad2594159c4a34cf979362458 | 3,657,433 |
def valid_identity(identity):
"""Determines whether or not the provided identity is a valid value."""
valid = (identity == "homer") or (identity == "sherlock")
return valid | 9865d19802b596d1d5fdce6ff8d236678da29ee6 | 3,657,434 |
def is_align_flow(*args):
"""
is_align_flow(ea) -> bool
"""
return _ida_nalt.is_align_flow(*args) | 40aa1fb7d86083bc3ace94c6913eb9b4b5ab200e | 3,657,435 |
import time
def avro_rdd(ctx, sqlContext, hdir, date=None, verbose=None):
"""
Parse avro-snappy files on HDFS
:returns: a Spark RDD object
"""
if date == None:
date = time.strftime("year=%Y/month=%-m/day=%-d", time.gmtime(time.time()-60*60*24))
path = '%s/%s' % (hdir, date)
elif len(str(date)) == 8: # YYYYMMDD
ddd = dt.strptime(str(date), "%Y%m%d")
date = time.strftime("year=%Y/month=%-m/day=%-d", ddd.utctimetuple())
path = '%s/%s' % (hdir, date)
else:
path = hdir
if date:
path = '%s/%s' % (hdir, date)
print("### hdir", path, type(path))
if isinstance(path, list):
afiles = path
else:
# get avro files from HDFS
afiles = avro_files(path, verbose=verbose)
print("### avro_files", afiles)
# define newAPIHadoopFile parameters, java classes
aformat="org.apache.avro.mapreduce.AvroKeyInputFormat"
akey="org.apache.avro.mapred.AvroKey"
awrite="org.apache.hadoop.io.NullWritable"
aconv="org.apache.spark.examples.pythonconverters.AvroWrapperToJavaConverter"
rdd = []
# load data from HDFS
if len(afiles) == 0:
rdd = ctx.emptyRDD()
else:
rdd = ctx.union([ctx.newAPIHadoopFile(f, aformat, akey, awrite, aconv) for f in afiles])
# the records are stored as [(dict, None), (dict, None)], therefore we take first element
# and assign them to new rdd
avro_rdd = rdd.map(lambda x: x[0])
records = avro_rdd.take(1) # take function will return list of records
if verbose:
print("### avro records", records, type(records))
return avro_rdd | caa923e4b6186e106a59764cbb61f908858acd70 | 3,657,436 |
import random
def generate_gesture_trace(position):
"""
    Generate a gesture-CAPTCHA trace passing through the given anchor points.
    :param position: list of 'x,y' coordinate strings (four anchor points)
    :return: tuple of (horizontal distance covered, list of {'p': 'x,y', 't': timestamp} trace points)
"""
x = []
y = []
for i in position:
x.append(int(i.split(',')[0]))
y.append(int(i.split(',')[1]))
trace_x = []
trace_y = []
for _ in range(0, 2):
tepx = [x[_], x[_ + 1], x[_ + 2]]
tepy = [y[_], y[_ + 1], y[_ + 2]]
[a, b, c] = get_func(tepx, tepy)
if _ == 0:
for i in range(x[0], x[1]):
trace_x.append(i)
trace_y.append(a * i * i + b * i + c)
for i in range(x[1], x[2]):
trace_x.append(i)
if random.randint(1, 5) == 1:
trace_y.append((((float)(y[2] - y[1])) / (x[2] - x[1])) * (i - x[1]) + y[1] + random.randint(-1, 1))
else:
trace_y.append((((float)(y[2] - y[1])) / (x[2] - x[1])) * (i - x[1]) + y[1])
else:
for i in range(x[2], x[3]):
trace_x.append(i)
trace_y.append(a * i * i + b * i + c)
trace_x = [int(i) for i in trace_x]
trace_y = [int(i) for i in trace_y]
last_trace_x = []
last_trace_y = []
plot_line(trace_x, trace_y, [0, 280], [0, 158])
xx = 0
while xx < len(trace_x) - 1:
last_trace_x.append(trace_x[xx])
last_trace_y.append(trace_y[xx])
xx += random.randint(1, 4)
last_trace_x.append(trace_x[-1])
last_trace_y.append(trace_y[-1])
timestamp_list = []
timestamp = random.randint(180, 220)
for i in range(len(last_trace_x)):
t = random.randint(5, 10)
timestamp += t
timestamp_list.append(timestamp)
i += 1
trace = [{
'p': ','.join([str(last_trace_x[0]), str(last_trace_y[0])]),
't': 1
}]
for i in range(len(last_trace_x)):
trace.append({
'p': ','.join([str(last_trace_x[i]), str(last_trace_y[i])]),
't': timestamp_list[i]
})
trace.append({
'p': ','.join([str(last_trace_x[-1]), str(last_trace_y[-1])]),
't': timestamp_list[-1] + random.randint(50, 100)
})
return x[3] - x[0], trace | 3281cf9e99175190e2855ac98593f67473703c77 | 3,657,437 |
def mad_daub_noise_est(x, c=0.6744):
""" Estimate the statistical dispersion of the noise with Median Absolute
Deviation on the first order detail coefficients of the 1d-Daubechies
wavelets transform.
"""
try:
_, cD = pywt.wavedec(x, pywt.Wavelet('db3'), level=1)
except ValueError:
cD = pywt.wavedec(x, pywt.Wavelet('db3'), level=0)
return mad(cD, c=c) | 3811d490e344cd4029e5b7f018823ad02c27e3dd | 3,657,439 |
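# Hedged sanity check for mad_daub_noise_est() above: on pure Gaussian noise
# the MAD of the level-1 detail coefficients (scaled by 1/0.6744) should land
# near the true sigma. Assumes pywt and the snippet's mad() helper are
# importable at module level.
import numpy as np

rng = np.random.RandomState(0)
noise = 0.5 * rng.randn(4096)
print(mad_daub_noise_est(noise))  # expected to be roughly 0.5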
import unicodedata
import re
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
From Django's "django/template/defaultfilters.py".
Copied from: https://github.com/django/django/blob/a6b3938afc0204093b5356ade2be30b461a698c5/django/utils/text.py#L394
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value.lower()).strip()
return re.sub(r'[-\s]+', '-', value) | 3fc85ffec7faa3b4df2d1556dfd7b1d7c3e9920e | 3,657,440 |
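# Quick examples for slugify() above.
print(slugify("Hello, World!"))                      # -> 'hello-world'
print(slugify("Ünïcode Slug", allow_unicode=True))   # -> 'ünïcode-slug'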
import json
def get_categories() -> dict:
""" :return: dictionary with a hirachy of all categories """
with open("../src/categories.json", "r", encoding="utf-8") as f:
return json.load(f) | 90a442840550f3251137b2f9ff8fb5581d8d49e5 | 3,657,441 |
def get_ax(rows=1, cols=1, size=16):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Adjust the size attribute to control how big to render images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
### LOAD VALIDATION SET | 97acc81878076c030287840a0bbacccbde0e50a8 | 3,657,443 |
def createSynthModel():
"""Return the modeling mesh, the porosity distribution and the
parametric mesh for inversion.
"""
# Create the synthetic model
world = mt.createCircle(boundaryMarker=-1, segments=64)
tri = mt.createPolygon([[-0.8, -0], [-0.5, -0.7], [0.7, 0.5]],
isClosed=True, area=0.0015)
c1 = mt.createCircle(radius=0.2, pos=[-0.2, 0.5], segments=32,
area=0.0025, marker=3)
c2 = mt.createCircle(radius=0.2, pos=[0.32, -0.3], segments=32,
area=0.0025, marker=3)
poly = mt.mergePLC([world, tri, c1, c2])
poly.addRegionMarker([0.0, 0, 0], 1, area=0.0015)
poly.addRegionMarker([-0.9, 0, 0], 2, area=0.0015)
c = mt.createCircle(radius=0.99, segments=16, start=np.pi, end=np.pi*3)
[poly.createNode(p.pos(), -99) for p in c.nodes()]
mesh = pg.meshtools.createMesh(poly, q=34.4, smooth=[1, 10])
mesh.scale(1.0/5.0)
mesh.rotate([0., 0., 3.1415/3])
mesh.rotate([0., 0., 3.1415])
petro = pg.solver.parseArgToArray([[1, 0.9], [2, 0.6], [3, 0.3]],
mesh.cellCount(), mesh)
# Create the parametric mesh that only reflect the domain geometry
world = mt.createCircle(boundaryMarker=-1, segments=32, area=0.0051)
paraMesh = pg.meshtools.createMesh(world, q=34.0, smooth=[1, 10])
paraMesh.scale(1.0/5.0)
return mesh, paraMesh, petro | aa63ce6c8b633530efb17add4d902da30c62689c | 3,657,445 |
def edits_dir():
"""
Return the directory for the editable files (used by the
website).
"""
return _mkifnotexists("") | eb882c04e3269496a610103908453a73e4a7ae5f | 3,657,446 |
def convolve_hrf(X, onsets, durations, n_vol, tr, ops=100):
"""
Convolve each X's column iteratively with HRF and align with the timeline of BOLD signal
    Parameters
    ----------
X[array]: [n_event, n_sample]
onsets[array_like]: in sec. size = n_event
durations[array_like]: in sec. size = n_event
n_vol[int]: the number of volumes of BOLD signal
tr[float]: repeat time in second
ops[int]: oversampling number per second
Returns:
---------
X_hrfed[array]: the result after convolution and alignment
"""
assert np.ndim(X) == 2, 'X must be a 2D array'
assert X.shape[0] == len(onsets) and X.shape[0] == len(durations), \
'The length of onsets and durations should be matched with the number of events.'
assert ops in (10, 100, 1000), 'Oversampling rate must be one of the (10, 100, 1000)!'
# unify the precision
decimals = int(np.log10(ops))
onsets = np.round(np.asarray(onsets), decimals=decimals)
durations = np.round(np.asarray(durations), decimals=decimals)
tr = np.round(tr, decimals=decimals)
n_clipped = 0 # the number of clipped time points earlier than the start point of response
onset_min = onsets.min()
if onset_min > 0:
# The earliest event's onset is later than the start point of response.
# We supplement it with zero-value event to align with the response.
X = np.insert(X, 0, np.zeros(X.shape[1]), 0)
onsets = np.insert(onsets, 0, 0, 0)
durations = np.insert(durations, 0, onset_min, 0)
onset_min = 0
elif onset_min < 0:
print("The earliest event's onset is earlier than the start point of response.\n"
"We clip the earlier time points after hrf_convolution to align with the response.")
n_clipped = int(-onset_min * ops)
# do convolution in batches for trade-off between speed and memory
batch_size = int(100000 / ops)
bat_indices = np.arange(0, X.shape[-1], batch_size)
bat_indices = np.r_[bat_indices, X.shape[-1]]
vol_t = (np.arange(n_vol) * tr * ops).astype(int) # compute volume acquisition timing
n_time_point = int(((onsets + durations).max()-onset_min) * ops)
X_hrfed = np.zeros([n_vol, 0])
for idx, bat_idx in enumerate(bat_indices[:-1]):
X_bat = X[:, bat_idx:bat_indices[idx+1]]
# generate X raw time course
X_tc = np.zeros((n_time_point, X_bat.shape[-1]), dtype=np.float32)
for i, onset in enumerate(onsets):
onset_start = int(onset * ops)
onset_end = int(onset_start + durations[i] * ops)
X_tc[onset_start:onset_end, :] = X_bat[i, :]
# generate hrf kernel
hrf = spm_hrf(tr, oversampling=tr*ops)
hrf = hrf[:, np.newaxis]
# convolve X raw time course with hrf kernal
X_tc_hrfed = convolve(X_tc, hrf, method='fft')
X_tc_hrfed = X_tc_hrfed[n_clipped:, :]
# downsample to volume timing
X_hrfed = np.c_[X_hrfed, X_tc_hrfed[vol_t, :]]
print('hrf convolution: sample {0} to {1} finished'.format(bat_idx+1, bat_indices[idx+1]))
return X_hrfed | d035b47ffafe0ac3d7e1446d4d36dc2f707363bd | 3,657,448 |
def flatten(x, params):
"""
Plain ol' 2D flatten
:param x: input tensor
:param params: {dict} hyperparams (sub-selection)
:return: output tensor
"""
return layers.Flatten()(x) | 6db829641681ab48f75b23894f9a4a3250250cec | 3,657,449 |
def xml_unescape(text):
""" Do the inverse of `xml_escape`.
Parameters
----------
text: str
The text to be escaped.
Returns
-------
escaped_text: str
"""
return unescape(text, xml_unescape_table) | 2e53d8bc617ad70fd22bb5dd82cd34db366b80a4 | 3,657,450 |
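# Hedged example for xml_unescape() above, assuming xml_unescape_table maps the
# standard XML character references back to their characters (the inverse of
# the table used by xml_escape).
print(xml_unescape("a &lt; b &amp; c"))  # -> 'a < b & c'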
def tseb_pt(T_air, T_rad, u, p, z, Rs_1, Rs24, vza, zs,
aleafv, aleafn, aleafl, adeadv, adeadn, adeadl,
albedo, ndvi, lai, clump, hc, time, t_rise, t_end,
leaf_width, a_PT_in=1.32, iterations=35):
"""Priestley-Taylor TSEB
Calculates the Priestley Taylor TSEB fluxes using a single observation of
composite radiometric temperature and using resistances in series.
Parameters
----------
T_air : ee.Image
Air temperature (Kelvin).
T_rad : ee.Image
Radiometric composite temperature (Kelvin).
u : ee.Image
Wind speed above the canopy (m s-1).
p : ee.Image
Atmospheric pressure (kPa)
z : ee.Image
Elevation (m)
Rs_1 : ee.Image
Overpass insolation (w m-2)
Rs24 : ee.Image
Daily insolation (w m-2)
vza : float
View Zenith Angle (radians).
zs : ee.Image
Solar Zenith Angle (radians).
aleafv : ee.Image
aleafn : ee.Image
aleafl : ee.Image
adeadv : ee.Image
adeadn : ee.Image
adeadl : ee.Image
albedo : ee.Image
ndvi : ee.Image
Normalized Difference Vegetation Index
lai : ee.Image
Effective Leaf Area Index (m2 m-2).
clump : ee.Image
hc : ee.Image
Canopy height (m).
time
t_rise : ee.Image
t_end : ee.Image
leaf_width : ee.Image
Average/effective leaf width (m)
a_PT_in : float, optional
Priestley Taylor coefficient for canopy potential transpiration
(the default is 1.32).
iterations: int, optional
Number of iterations of main calculation
(the default is 35)
Returns
-------
ET : ee.Image
Evapotranspiration (mm).
References
----------
.. [Norman1995] J.M. Norman, W.P. Kustas, & K.S. Humes (1995),
Source approach for estimating soil and vegetation energy fluxes in
observations of directional radiometric surface temperature,
Agricultural and Forest Meteorology,
Volume 77, Issues 3-4, Pages 263-293,
http://dx.doi.org/10.1016/0168-1923(95)02265-Y.
.. [Kustas1999] W.P. Kustas, & J.M. Norman (1999), Evaluation of soil
and vegetation heat flux predictions using a simple two-source
model with radiometric temperatures for partial canopy cover,
Agricultural and Forest Meteorology, Volume 94, Issue 1, Pages 13-29,
http://dx.doi.org/10.1016/S0168-1923(99)00005-2.
"""
# print('\nINPUTS')
# print('T_rad: {:20.14f}'.format(float(utils.image_value(T_rad).values()[0])))
# print('T_air: {:20.14f}'.format(float(utils.image_value(T_air).values()[0])))
# print('u: {:20.14f}'.format(float(utils.image_value(u).values()[0])))
# print('Rs_1: {:20.14f}'.format(float(utils.image_value(Rs_1).values()[0])))
# print('Rs24: {:20.14f}'.format(float(utils.image_value(Rs24).values()[0])))
# # print('vza: {:20.14f}'.format(float(utils.image_value(vza).values()[0])))
# print('zs: {:20.14f}'.format(float(utils.image_value(zs).values()[0])))
# print('albedo: {:20.14f}'.format(float(utils.image_value(albedo).values()[0])))
# print('ndvi: {:20.14f}'.format(float(utils.image_value(ndvi).values()[0])))
# print('lai: {:20.14f}'.format(float(utils.image_value(lai).values()[0])))
# print('clump: {:20.14f}'.format(float(utils.image_value(clump).values()[0])))
# print('hc: {:20.14f}'.format(float(utils.image_value(hc).values()[0])))
# print('time: {:20.14f}'.format(float(utils.image_value(time).values()[0])))
# print('t_rise: {:20.14f}'.format(float(utils.image_value(t_rise).values()[0])))
# print('t_end: {:20.14f}'.format(float(utils.image_value(t_end).values()[0])))
# ************************************************************************
# Correct Clumping Factor
f_green = 1.
# LAI for leaf spherical distribution
F = lai.expression('lai * clump', {'lai': lai, 'clump': clump})
# Fraction cover at nadir (view=0)
fc = F.expression('1.0 - exp(-0.5 * F)', {'F': F}) \
.clamp(0.01, 0.9)
# LAI relative to canopy projection only
lai_c = lai.expression('lai / fc', {'lai': lai, 'fc': fc})
# Houborg modification (according to Anderson et al. 2005)
fc_q = lai \
.expression('1 - (exp(-0.5 * F / cos(vza)))', {'F': F, 'vza': vza}) \
.clamp(0.05, 0.90)
# Brutsaert (1982)
z0m = hc.expression('hc * 0.123', {'hc': hc})
# CGM - add(0) is to mimic numpy copy, check if needed
z0h = z0m.add(0)
d_0 = hc.expression('hc * (2.0 / 3.0)', {'hc': hc})
# Correction of roughness parameters for bare soils (F < 0.1)
d_0 = d_0.where(F.lte(0.1), 0.00001)
z0m = z0m.where(F.lte(0.1), 0.01)
z0h = z0h.where(F.lte(0.1), 0.0001)
# Correction of roughness parameters for water bodies
# (NDVI < 0 and albedo < 0.05)
water_mask = ndvi.lte(0).And(albedo.lte(0.05))
d_0 = d_0.where(water_mask, 0.00001)
z0m = z0m.where(water_mask, 0.00035)
z0h = z0h.where(water_mask, 0.00035)
# Check to avoid division by 0 in the next computations
z0h = z0h.where(z0h.eq(0), 0.001)
z0m = z0m.where(z0m.eq(0), 0.01)
# DEADBEEF
# z_u = ee.Number(50.0)
# z_t = ee.Number(50.0)
z_u = ee.Image.constant(50.0)
z_t = ee.Image.constant(50.0)
# z_u = lai.multiply(0).add(50)
# z_t = lai.multiply(0).add(50)
# Parameters for In-Canopy Wind Speed Extinction
leaf = lai.expression(
'(0.28 * (F ** (0.66667)) * (hc ** (0.33333)) * '
'(leaf_width ** (-0.33333)))',
{'F': F, 'hc': hc, 'leaf_width': leaf_width})
leaf_c = lai.expression(
'(0.28 * (lai_c ** (0.66667)) * (hc ** (0.33333)) * '
'(leaf_width ** (-0.33333)))',
{'lai_c': lai_c, 'hc': hc, 'leaf_width': leaf_width})
leaf_s = lai.expression(
'(0.28 * (0.1 ** (0.66667)) * (hc ** (0.33333)) * '
'(leaf_width ** (-0.33333)))',
{'hc': hc, 'leaf_width': leaf_width})
# ************************************************************************
# Atmospheric Parameters
# Saturation vapour pressure [kPa] (FAO56 3-8)
e_s = T_air.expression(
'0.6108 * exp((17.27 * (T_air - 273.16)) / ((T_air - 273.16) + 237.3))',
{'T_air': T_air})
# Slope of the saturation vapor pressure [kPa] (FAO56 3-9)
Ss = T_air.expression(
'4098. * e_s / (((T_air - 273.16) + 237.3) ** 2)',
{'e_s': e_s, 'T_air': T_air})
# Latent heat of vaporization (~2.45 at 20 C) [MJ kg-1] (FAO56 3-1)
lambda1 = T_air.expression(
'(2.501 - (2.361e-3 * (T_air - 273.16)))',
{'T_air': T_air})
# Psychrometric constant [kPa C-1] (FAO56 3-10)
g = p.expression('1.615E-3 * p / lambda1', {'p': p, 'lambda1': lambda1})
# ************************************************************************
# Initialization of
a_PT = albedo.multiply(0).add(a_PT_in)
# a_PT = ee.Image.constant(a_PT_in)
# a_PT = mask.multiply(a_PT)
# CGM - This was also being computed inside albedo_separation function below
# Commented out from here for now.
# e_atm = T_air.expression(
# '1.0 - (0.2811 * (exp(-0.0003523 * ((T_air - 273.16) ** 2))))',
# {'T_air': T_air})
Rs_c, Rs_s, albedo_c, albedo_s = tseb_utils.albedo_separation(
albedo, Rs_1, F, fc, aleafv, aleafn, aleafl, adeadv, adeadn, adeadl, zs)
# CGM - Moved emissivity calculation to separate function.
# I removed the Rs0 check.
e_atm = tseb_utils.emissivity(T_air)
# p = T_air.expression(
# '101.3 * (((T_air - (0.0065 * z)) / T_air) ** 5.26)',
# {'T_air': T_air, 'z': z})
# Density of air? (kg m-3)
r_air = T_air.expression(
'101.3 * (((T_air - (0.0065 * z)) / T_air) ** 5.26) / 1.01 / T_air / 0.287',
{'T_air': T_air, 'z': z})
cp = ee.Number(1004.16)
# cp = ee.Image.constant(1004.16)
# Assume neutral conditions on first iteration (use T_air for Ts and Tc)
# CGM - Using lai for F to match Python code
u_attr = tseb_utils.compute_u_attr(
u=u, d0=d_0, z0m=z0m, z_u=z_u, fm=0)
r_ah = tseb_utils.compute_r_ah(
u_attr=u_attr, d0=d_0, z0h=z0h, z_t=z_t, fh=0)
    # CGM - Why is this function passing "lai" to "F"?
r_s = tseb_utils.compute_r_s(
u_attr=u_attr, T_s=T_air, T_c=T_air, hc=hc, F=lai, d0=d_0, z0m=z0m,
leaf=leaf, leaf_s=leaf_s, fm_h=0)
r_x = tseb_utils.compute_r_x(
u_attr=u_attr, hc=hc, F=lai, d0=d_0, z0m=z0m, xl=leaf_width,
leaf_c=leaf_c, fm_h=0)
# r_ah, r_s, r_x, u_attr = tseb_utils.compute_resistance(
# u, T_air, T_air, hc, lai, d_0, z0m, z0h, z_u, z_t, leaf_width, leaf,
# leaf_s, leaf_c, 0, 0, 0)
T_c = T_air
# DEADBEEF - In IDL, this calculation is in C, not K?
T_s = lai.expression(
'((T_rad - 273.16) - (fc_q * (T_c - 273.16))) / (1 - fc_q) + 273.16',
{'T_rad': T_rad, 'T_c': T_c, 'fc_q': fc_q})
# T_s = lai.expression(
# '(T_rad - (fc_q * T_c)) / (1 - fc_q)',
# {'T_rad': T_rad, 'T_c': T_c, 'fc_q': fc_q})
# CGM - Initialize to match T_air shape
# This doesn't seem to do anything, commenting out for now
# H_iter = T_air.multiply(0).add(200.16)
EF_s = T_air.multiply(0)
# print('\nF: {:20.14f}'.format(float(utils.image_value(F).values()[0])))
# print('fc: {:20.14f}'.format(float(utils.image_value(fc).values()[0])))
# print('lai_c: {:20.14f}'.format(float(utils.image_value(lai_c).values()[0])))
# print('fc_q: {:20.14f}'.format(float(utils.image_value(fc_q).values()[0])))
# print('z0h: {:20.14f}'.format(float(utils.image_value(z0h).values()[0])))
# print('z0m: {:20.14f}'.format(float(utils.image_value(z0m).values()[0])))
# print('leaf: {:20.14f}'.format(float(utils.image_value(leaf).values()[0])))
# print('leaf_c: {:20.14f}'.format(float(utils.image_value(leaf_c).values()[0])))
# print('leaf_s: {:20.14f}'.format(float(utils.image_value(leaf_s).values()[0])))
# print('e_s: {:20.14f}'.format(float(utils.image_value(e_s).values()[0])))
# print('Ss: {:20.14f}'.format(float(utils.image_value(Ss).values()[0])))
# print('lambda1: {:20.14f}'.format(float(utils.image_value(lambda1).values()[0])))
# print('p: {:20.14f}'.format(float(utils.image_value(p).values()[0])))
# print('z: {:20.14f}'.format(float(utils.image_value(z).values()[0])))
# print('g: {:20.14f}'.format(float(utils.image_value(g).values()[0])))
# print('a_PT: {:20.14f}'.format(float(utils.image_value(a_PT).values()[0])))
# print('Rs_c: {:20.14f}'.format(float(utils.image_value(Rs_c).values()[0])))
# print('Rs_s: {:20.14f}'.format(float(utils.image_value(Rs_s).values()[0])))
# print('albedo_c: {:20.14f}'.format(float(utils.image_value(albedo_c).values()[0])))
# print('albedo_s: {:20.14f}'.format(float(utils.image_value(albedo_s).values()[0])))
# print('e_atm: {:20.14f}'.format(float(utils.image_value(e_atm).values()[0])))
# print('r_air: {:20.14f}'.format(float(utils.image_value(r_air).values()[0])))
# print('cp: {:20.14f}'.format(float(cp.getInfo())))
# print('d_0: {:20.14f}'.format(float(utils.image_value(d_0).values()[0])))
# print('z0m: {:20.14f}'.format(float(utils.image_value(z0m).values()[0])))
# print('z0h: {:20.14f}'.format(float(utils.image_value(z0h).values()[0])))
# print('u_attr: {:20.14f}'.format(float(utils.image_value(u_attr).values()[0])))
# print('r_ah: {:20.14f}'.format(float(utils.image_value(r_ah).values()[0])))
# print('r_s: {:20.14f}'.format(float(utils.image_value(r_s).values()[0])))
# print('r_x: {:20.14f}'.format(float(utils.image_value(r_x).values()[0])))
# print('T_c: {:20.14f}'.format(float(utils.image_value(T_c).values()[0])))
# print('T_s: {:20.14f}'.format(float(utils.image_value(T_s).values()[0])))
# print('EF_s: {:20.14f}'.format(float(utils.image_value(EF_s).values()[0])))
# print('Iterations: {}'.format(iterations))
# ************************************************************************
# Start Loop for Stability Correction and Water Stress
def iter_func(n, prev):
# Extract inputs from previous iteration
a_PT_iter = ee.Image(ee.Dictionary(prev).get('a_PT'))
EF_s_iter = ee.Image(ee.Dictionary(prev).get('EF_s'))
r_ah_iter = ee.Image(ee.Dictionary(prev).get('r_ah'))
r_s_iter = ee.Image(ee.Dictionary(prev).get('r_s'))
r_x_iter = ee.Image(ee.Dictionary(prev).get('r_x'))
T_c_iter = ee.Image(ee.Dictionary(prev).get('T_c'))
T_s_iter = ee.Image(ee.Dictionary(prev).get('T_s'))
u_attr_iter = ee.Image(ee.Dictionary(prev).get('u_attr'))
Rn_c = tseb_utils.compute_Rn_c(
albedo_c, T_air, T_c_iter, T_s_iter, e_atm, Rs_c, F)
Rn_s = tseb_utils.compute_Rn_s(
albedo_s, T_air, T_c_iter, T_s_iter, e_atm, Rs_s, F)
Rn = Rn_c.add(Rn_s)
# Rn_s, Rn_c, Rn = tseb_utils.compute_Rn(
# albedo_c, albedo_s, T_air, T_c_iter, T_s_iter, e_atm, Rs_c, Rs_s, F)
G = tseb_utils.compute_G0(
Rn, Rn_s, albedo, ndvi, t_rise, t_end, time, EF_s_iter)
LE_c = albedo \
.expression(
'f_green * (a_PT * Ss / (Ss + g)) * Rn_c',
{'f_green': f_green, 'a_PT': a_PT_iter, 'Ss': Ss, 'g': g,
'Rn_c': Rn_c}) \
.max(0)
H_c = albedo.expression(
'Rn_c - LE_c', {'Rn_c': Rn_c, 'LE_c': LE_c})
T_c_iter = tseb_utils.temp_separation_tc(
H_c, fc_q, T_air, T_rad, r_ah_iter, r_s_iter, r_x_iter, r_air, cp)
T_s_iter = tseb_utils.temp_separation_ts(T_c_iter, fc_q, T_air, T_rad)
T_ac = tseb_utils.temp_separation_tac(
T_c_iter, T_s_iter, fc_q, T_air, r_ah_iter, r_s_iter, r_x_iter)
# T_c_iter, T_s_iter, T_ac = tseb_utils.temp_separation(
# H_c, fc_q, T_air, T_rad, r_ah_iter, r_s_iter, r_x_iter, r_air, cp)
H_s = albedo.expression(
'r_air * cp * (T_s - T_ac) / r_s',
{'r_air': r_air, 'cp': cp, 'T_s': T_s_iter, 'T_ac': T_ac, 'r_s': r_s_iter})
H_c = albedo.expression(
'r_air * cp * (T_c - T_ac) / r_x',
{'r_air': r_air, 'cp': cp, 'T_c': T_c_iter, 'T_ac': T_ac, 'r_x': r_x_iter})
H = albedo.expression('H_s + H_c', {'H_s': H_s, 'H_c': H_c})
LE_s = albedo.expression(
'Rn_s - G - H_s', {'Rn_s': Rn_s, 'G': G, 'H_s': H_s})
LE_c = albedo.expression('Rn_c - H_c', {'Rn_c': Rn_c, 'H_c': H_c})
# CGM - Is there a reason this isn't up with the H calculation?
H = H.where(H.eq(0), 10.0)
        # CGM - This won't do anything at this position in the code.
# Commenting out for now.
# r_ah_iter = r_ah_iter.where(r_ah_iter.eq(0), 10.0)
# CGM - This doesn't seem to do anything, commenting out for now
# mask_iter = H_iter.divide(H).lte(1.05).And(H_iter.divide(H).gte(0.95))
# chk_iter = np.sum(mask_iter) / np.size(mask_iter)
fh = tseb_utils.compute_stability_fh(
H, T_rad, u_attr_iter, r_air, z_t, d_0, cp)
fm = tseb_utils.compute_stability_fm(
H, T_rad, u_attr_iter, r_air, z_u, d_0, z0m, cp)
fm_h = tseb_utils.compute_stability_fm_h(
H, T_rad, u_attr_iter, r_air, hc, d_0, z0m, cp)
# CGM - z0h is not used in this function, should it be?
# fm, fh, fm_h = tseb_utils.compute_stability(
# H, T_rad, r_air, cp, u_attr, z_u, z_t, hc, d_0, z0m, z0h)
u_attr_iter = tseb_utils.compute_u_attr(
u=u, d0=d_0, z0m=z0m, z_u=z_u, fm=fm)
r_ah_iter = tseb_utils.compute_r_ah(
u_attr=u_attr_iter, d0=d_0, z0h=z0h, z_t=z_t, fh=fh)
r_s_iter = tseb_utils.compute_r_s(
u_attr=u_attr_iter, T_s=T_s_iter, T_c=T_c_iter, hc=hc, F=lai,
d0=d_0, z0m=z0m, leaf=leaf, leaf_s=leaf_s, fm_h=fm_h)
        # CGM - Why is this function passing "lai" to "F"?
r_x_iter = tseb_utils.compute_r_x(
u_attr=u_attr_iter, hc=hc, F=lai, d0=d_0, z0m=z0m, xl=leaf_width,
leaf_c=leaf_c, fm_h=fm_h)
# r_ah_iter, r_s_iter, r_x_iter, u_attr_iter = tseb_utils.compute_resistance(
# u, T_s_iter, T_c_iter, hc, lai, d_0, z0m, z0h, z_u, z_t,
# leaf_width, leaf, leaf_s, leaf_c, fm, fh, fm_h)
a_PT_iter = a_PT_iter \
.where(LE_s.lte(0), a_PT_iter.subtract(0.05)) \
.where(a_PT_iter.lte(0), 0.01)
den_s = albedo.expression('Rn_s - G', {'Rn_s': Rn_s, 'G': G})
den_s = den_s.updateMask(den_s.neq(0))
# den_s[den_s == 0.] = np.nan
EF_s_iter = albedo.expression(
'LE_s / den_s', {'LE_s': LE_s, 'den_s': den_s})
return ee.Dictionary({
'a_PT': a_PT_iter, 'EF_s': EF_s_iter, 'G': G,
'H_c': H_c, 'H_s': H_s, 'LE_c': LE_c, 'LE_s': LE_s,
'Rn_c': Rn_c, 'Rn_s': Rn_s,
'r_ah': r_ah_iter, 'r_s': r_s_iter, 'r_x': r_x_iter,
'T_ac': T_ac, 'T_c': T_c_iter, 'T_s': T_s_iter,
'u_attr': u_attr_iter})
# Iterate the function n times
# CGM - Iteration count is an input to the function
input_images = ee.Dictionary({
'a_PT': a_PT, 'EF_s': EF_s, 'G': ee.Image(0),
'H_c': ee.Image(0), 'H_s': ee.Image(0),
'LE_c': ee.Image(0), 'LE_s': ee.Image(0),
'Rn_c': ee.Image(0), 'Rn_s': ee.Image(0),
'r_ah': r_ah, 'r_s': r_s, 'r_x': r_x,
'T_ac': ee.Image(0), 'T_c': T_c, 'T_s': T_s, 'u_attr': u_attr
})
iter_output = ee.Dictionary(
ee.List.sequence(1, iterations).iterate(iter_func, input_images))
# Unpack the iteration output
a_PT = ee.Image(iter_output.get('a_PT'))
Rn_c = ee.Image(iter_output.get('Rn_c'))
Rn_s = ee.Image(iter_output.get('Rn_s'))
G = ee.Image(iter_output.get('G'))
H_c = ee.Image(iter_output.get('H_c'))
H_s = ee.Image(iter_output.get('H_s'))
LE_c = ee.Image(iter_output.get('LE_c'))
LE_s = ee.Image(iter_output.get('LE_s'))
# T_ac = ee.Image(iter_output.get('T_ac'))
# T_c = ee.Image(iter_output.get('T_c'))
# T_s = ee.Image(iter_output.get('T_s'))
# r_ah = ee.Image(iter_output.get('r_ah'))
# r_s = ee.Image(iter_output.get('r_s'))
# r_x = ee.Image(iter_output.get('r_x'))
# print('\na_PT: {:20.14f}'.format(utils.image_value(a_PT).values()[0]))
# print('Rn_c: {:20.14f}'.format(utils.image_value(Rn_c).values()[0]))
# print('Rn_s: {:20.14f}'.format(utils.image_value(Rn_s).values()[0]))
# print('G: {:20.14f}'.format(utils.image_value(G).values()[0]))
# print('H_c: {:20.14f}'.format(utils.image_value(H_c).values()[0]))
# print('H_s: {:20.14f}'.format(utils.image_value(H_s).values()[0]))
# print('LE_c: {:20.14f}'.format(utils.image_value(LE_c).values()[0]))
# print('LE_s: {:20.14f}'.format(utils.image_value(LE_s).values()[0]))
# print('r_ah: {:20.14f}'.format(utils.image_value(r_ah).values()[0]))
# print('r_s: {:20.14f}'.format(utils.image_value(r_s).values()[0]))
# print('r_x: {:20.14f}'.format(utils.image_value(r_x).values()[0]))
# print('T_ac: {:20.14f}'.format(utils.image_value(T_ac).values()[0]))
# print('T_c: {:20.14f}'.format(utils.image_value(T_c).values()[0]))
# print('T_s: {:20.14f}'.format(utils.image_value(T_s).values()[0]))
# ************************************************************************
# Check Energy Balance Closure
ind = a_PT.lte(0.01)
LE_s = LE_s.where(ind, 1.0)
LE_c = LE_c.where(ind, 1.0)
G = G.where(ind, Rn_s.subtract(H_s))
ind = LE_s.gt(Rn_s)
LE_s = LE_s.where(ind, Rn_s)
H_s = H_s.where(ind, Rn_s.subtract(G).subtract(LE_s))
# CGM - Check order of operations
ind = LE_c.gt(Rn_c.add(100))
# CGM - Not used below since LE_c is recomputed
LE_c = LE_c.where(ind, Rn_c.add(100))
H_c = H_c.where(ind, -100)
LE_s = albedo.expression(
'Rn_s - G - H_s', {'Rn_s': Rn_s, 'G': G, 'H_s': H_s})
LE_c = albedo.expression('Rn_c - H_c', {'Rn_c': Rn_c, 'H_c': H_c})
# The latent heat of vaporization is 2.45 MJ kg-1
# Assume Rs24 is still in W m-2 day-1 and convert to MJ kg-1
# CGM - Leaving out scaling value for now
ET = albedo \
.expression(
'((LE_c + LE_s) / Rs_1) * (Rs24 / 2.45) * scaling',
{'LE_c': LE_c, 'LE_s': LE_s, 'Rs_1': Rs_1,
'Rs24': Rs24.multiply(0.0864 / 24.0),
'scaling': 1}) \
.max(0.01)
# print('\nRn_c: {:20.14f}'.format(utils.image_value(Rn_c).values()[0]))
# print('Rn_s: {:20.14f}'.format(utils.image_value(Rn_s).values()[0]))
# print('G: {:20.14f}'.format(utils.image_value(G).values()[0]))
# print('H_c: {:20.14f}'.format(utils.image_value(H_c).values()[0]))
# print('H_s: {:20.14f}'.format(utils.image_value(H_s).values()[0]))
# print('LE_c: {:20.14f}'.format(utils.image_value(LE_c).values()[0]))
# print('LE_s: {:20.14f}'.format(utils.image_value(LE_s).values()[0]))
# print('\nET: {:20.14f}'.format(utils.image_value(ET).values()[0]))
return ET | 6851b00f27b1819e79ce7ed625074c37ac35298f | 3,657,451 |
def GetPrivateIpv6GoogleAccessTypeMapper(messages, hidden=False):
"""Returns a mapper from text options to the PrivateIpv6GoogleAccess enum.
Args:
messages: The message module.
hidden: Whether the flag should be hidden in the choice_arg
"""
help_text = """
Sets the type of private access to Google services over IPv6.
PRIVATE_IPV6_GOOGLE_ACCESS_TYPE must be one of:
bidirectional
Allows Google services to initiate connections to GKE pods in this
cluster. This is not intended for common use, and requires previous
integration with Google services.
disabled
Default value. Disables private access to Google services over IPv6.
outbound-only
Allows GKE pods to make fast, secure requests to Google services
over IPv6. This is the most common use of private IPv6 access.
$ gcloud alpha container clusters create \
--private-ipv6-google-access-type=disabled
$ gcloud alpha container clusters create \
--private-ipv6-google-access-type=outbound-only
$ gcloud alpha container clusters create \
--private-ipv6-google-access-type=bidirectional
"""
return arg_utils.ChoiceEnumMapper(
'--private-ipv6-google-access-type',
messages.NetworkConfig.PrivateIpv6GoogleAccessValueValuesEnum,
_GetPrivateIPv6CustomMappings(),
hidden=hidden,
help_str=help_text) | 9aa87977be9d0888d572c70d07535c9ec0b9d8f4 | 3,657,452 |
def calc_director(moi):
""" Calculate the director from a moment of inertia.
The director is the dominant eigenvector of the MOI tensor
Parameters:
-----------
moi : list
3x3 array; MOItensor
Returns:
--------
director : list
3 element list of director vector
"""
w, v = np.linalg.eig(moi)
director = v[:, np.argmin(w)]
return director | 28f8b3446f83759704d426653dc8f7812e71e900 | 3,657,453 |
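# Hedged check for calc_director() above: for a rod-like body aligned with z,
# the axis with the smallest moment (the director) is the z axis.
import numpy as np

moi = np.diag([10.0, 10.0, 1.0])  # principal moments; smallest about z
director = calc_director(moi)
assert np.allclose(np.abs(director), [0.0, 0.0, 1.0])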
from scipy.linalg import solve_triangular
def _solve_upper_triangular(A, b):
""" Solves Ax=b when A is upper triangular. """
return solve_triangular(A, b, lower=False) | 5c33d5d10922172a133a478bdfdcb8cf7cd83120 | 3,657,454 |
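# Tiny check for _solve_upper_triangular() above.
import numpy as np

A = np.array([[2.0, 1.0],
              [0.0, 3.0]])           # upper triangular
b = np.array([5.0, 6.0])
x = _solve_upper_triangular(A, b)    # x == [1.5, 2.0]
assert np.allclose(A @ x, b)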
def check_create_account_key(key):
"""
Returns the user_id if the reset key is valid (matches a user_id and that
user does not already have an account). Otherwise returns None.
"""
query = sqlalchemy.text("""
SELECT user_id
FROM members
WHERE create_account_key = :k
AND user_id NOT IN (SELECT user_id FROM users)
""")
result = flask.g.db.execute(query, k=key).first()
if result is not None:
return result['user_id']
else:
return None | b02a710d443410b5b60c31a030d056f3282a5747 | 3,657,455 |
def _crc16(data, start = _CRC16_START) :
"""Compute CRC16 for bytes/bytearray/memoryview data"""
crc = start
for b in data :
crc ^= b << 8
for _ in range(8) :
crc = ((crc << 1) & 0xFFFF) ^ _CRC16_POLY if crc & 0x8000 else (crc << 1)
return crc | e6e33471601d3126ac7873b61e23f843349e8e90 | 3,657,457 |
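# Hedged check for _crc16() above. Assuming the module constant _CRC16_POLY is
# the CCITT polynomial 0x1021, the standard CRC-16/CCITT-FALSE check value for
# b'123456789' with an initial value of 0xFFFF is 0x29B1.
assert _crc16(b'123456789', start=0xFFFF) == 0x29B1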
import json
def load_json():
"""Load the translation dictionary."""
try:
with open(JSON_FILENAME, "r", encoding="utf8") as file:
known_names = json.load(file)
if "version" in known_names:
if known_names.get("version") < JSON_VERSION:
print("Unkown version: {}, current version: {}".format(
known_names.get("version"), JSON_VERSION))
raise Exception(
"Version mismatch. Backup the file and recreate.")
else:
print("No version number found")
known_names = {}
except FileNotFoundError:
known_names = {}
return known_names | d263411d0c0aae7bba30f92c5af22dd7ff596542 | 3,657,458 |
def get_username() -> str:
"""
Prompts the user to enter a username and then returns it
:return: The username entered by the user
"""
while True:
print("Please enter your username (without spaces)")
username = input().strip()
if ' ' not in username:
return username | 1a18a229908b86c32a0822c068b5b9081cc9fdc3 | 3,657,459 |
from functools import wraps
def condition(f):
"""
Decorator for conditions
"""
@wraps(f)
def try_execute(*args, **kwargs):
try:
res, m = f(*args, **kwargs)
m.conditions_results.append(res)
return m
except Exception as e:
raise ConditionError(e)
return try_execute | fb05645861c7aa234f894cc8eee3689e1f1293c9 | 3,657,460 |
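# Hedged usage sketch for the condition() decorator above. The message-like
# object carrying a 'conditions_results' list is an assumption inferred from
# the decorator body; ConditionError is the snippet's own exception type.
class _FakeMessage:
    def __init__(self):
        self.conditions_results = []

@condition
def has_subject(msg):
    return bool(getattr(msg, 'subject', None)), msg

msg = has_subject(_FakeMessage())
print(msg.conditions_results)  # -> [False]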
def get_spatial_anomalies(
coarse_obs_path, fine_obs_rechunked_path, variable, connection_string
) -> xr.Dataset:
"""Calculate the seasonal cycle (12 timesteps) spatial anomaly associated
with aggregating the fine_obs to a given coarsened scale and then reinterpolating
it back to the original spatial resolution. The outputs of this function are
dependent on three parameters:
* a grid (as opposed to a specific GCM since some GCMs run on the same grid)
* the time period which fine_obs (and by construct coarse_obs) cover
* the variable
Parameters
----------
    coarse_obs_path : str
        Path to obs coarsened to a GCM resolution. Chunked along time.
    fine_obs_rechunked_path : str
        Path to obs at the original spatial resolution. Chunked along time.
variable: str
The variable included in the dataset.
Returns
-------
seasonal_cycle_spatial_anomalies : xr.Dataset
Spatial anomaly for each month (i.e. of shape (nlat, nlon, 12))
"""
# interpolate coarse_obs back to the original scale
[coarse_obs, fine_obs_rechunked] = load_paths([coarse_obs_path, fine_obs_rechunked_path])
obs_interpolated, _ = regrid_dataset(
ds=coarse_obs,
ds_path=coarse_obs_path,
target_grid_ds=fine_obs_rechunked.isel(time=0),
variable=variable,
connection_string=connection_string,
)
# use rechunked fine_obs from coarsening step above because that is in map chunks so it
# will play nice with the interpolated obs
schema_maps_chunks.validate(fine_obs_rechunked[variable])
# calculate difference between interpolated obs and the original obs
spatial_anomalies = obs_interpolated - fine_obs_rechunked
# calculate seasonal cycle (12 time points)
seasonal_cycle_spatial_anomalies = spatial_anomalies.groupby("time.month").mean()
return seasonal_cycle_spatial_anomalies | 54dc830e9eb6b7440abf5857141ab369d8d45358 | 3,657,461 |