Dataset columns:
content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, values 0 to 3.66M
def _is_test_file(filesystem, dirname, filename):
    """Return true if the filename points to a test file."""
    return (_has_supported_extension(filesystem, filename) and
            not is_reference_html_file(filename))
ba161818a6f2497e1122519945f255d56488f231
3,955
def kitchen_door_device() -> Service:
    """Build the kitchen door device."""
    transitions: TransitionFunction = {
        "unique": {
            "open_door_kitchen": "unique",
            "close_door_kitchen": "unique",
        },
    }
    final_states = {"unique"}
    initial_state = "unique"
    return build_deterministic_service_from_transitions(transitions, initial_state, final_states)
700a1d92087ac91f5311b4c55380f1a6f18860b4
3,956
from django import http  # HttpRequest/JsonResponse live in Django's http module, not the stdlib "http"


def sql_connection_delete(
    request: http.HttpRequest,
    pk: int,
) -> http.JsonResponse:
    """AJAX processor for the delete SQL connection operation.

    :param request: AJAX request
    :param pk: primary key for the connection
    :return: AJAX response to handle the form
    """
    conn = models.SQLConnection.objects.filter(pk=pk).first()
    if not conn:
        # The view is not there. Redirect to workflow detail
        return http.JsonResponse({'html_redirect': reverse('home')})

    return services.delete(
        request,
        conn,
        reverse('connection:sqlconn_delete', kwargs={'pk': conn.id}))
754e7d7f15a0be843b89c89446a7d4f39bc1401f
3,957
from sage.all import solve import html def simpson_integration( title = text_control('<h2>Simpson integration</h2>'), f = input_box(default = 'x*sin(x)+x+1', label='$f(x)=$'), n = slider(2,100,2,6, label='# divisions'), interval_input = selector(['from slider','from keyboard'], label='Integration interval', buttons=True), interval_s = range_slider(-10,10,default=(0,10), label="slider: "), interval_g = input_grid(1,2,default=[[0,10]], label="keyboard: "), output_form = selector(['traditional','table','none'], label='Computations form', buttons=True)): """ Interact explaining the simpson method for definite integrals, based on work by Lauri Ruotsalainen, 2010 (based on the application "Numerical integrals with various rules" by Marshall Hampton and Nick Alexander) INPUT: - ``f`` -- function of variable x to integrate - ``n`` -- number of divisions (mult. of 2) - ``interval_input`` -- swithes the input for interval between slider and keyboard - ``interval_s`` -- slider for interval to integrate - ``interval_g`` -- input grid for interval to integrate - ``output_form`` -- the computation is formatted in a traditional form, in a table or missing EXAMPLES: Invoked in the notebook, the following command will produce the fully formatted interactive mathlet. In the command line, it will simply return the underlying HTML and Sage code which creates the mathlet:: sage: interacts.calculus.simpson_integration() <html>...</html> """ x = SR.var('x') f = symbolic_expression(f).function(x) if interval_input == 'from slider': interval = interval_s else: interval = interval_g[0] def parabola(a, b, c): A, B, C = SR.var("A, B, C") K = solve([A*a[0]**2+B*a[0]+C==a[1], A*b[0]**2+B*b[0]+C==b[1], A*c[0]**2+B*c[0]+C==c[1]], [A, B, C], solution_dict=True)[0] f = K[A]*x**2+K[B]*x+K[C] return f xs = []; ys = [] dx = float(interval[1]-interval[0])/n for i in range(n+1): xs.append(interval[0] + i*dx) ys.append(f(x=xs[-1])) parabolas = Graphics() lines = Graphics() for i in range(0, n-1, 2): p = parabola((xs[i],ys[i]),(xs[i+1],ys[i+1]),(xs[i+2],ys[i+2])) parabolas += plot(p(x=x), (x, xs[i], xs[i+2]), color="red") lines += line([(xs[i],ys[i]), (xs[i],0), (xs[i+2],0)],color="red") lines += line([(xs[i+1],ys[i+1]), (xs[i+1],0)], linestyle="-.", color="red") lines += line([(xs[-1],ys[-1]), (xs[-1],0)], color="red") html(r'Function $f(x)=%s$'%latex(f(x))) show(plot(f(x),x,interval[0],interval[1]) + parabolas + lines, xmin = interval[0], xmax = interval[1]) numeric_value = integral_numerical(f,interval[0],interval[1])[0] approx = dx/3 *(ys[0] + sum([4*ys[i] for i in range(1,n,2)]) + sum([2*ys[i] for i in range(2,n,2)]) + ys[n]) html(r'Integral value to seven decimal places is: $\displaystyle\int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x} = %.6f$'% (interval[0],interval[1], N(numeric_value,digits=7))) if output_form == 'traditional': sum_formula_html = r"\frac{d}{3} \cdot \left[ f(x_0) + %s + f(x_{%s})\right]" % ( ' + '.join([ r"%s \cdot f(x_{%s})" %(i%2*(-2)+4, i+1) for i in range(0,n-1)]), n ) sum_placement_html = r"\frac{%.2f}{3} \cdot \left[ f(%.2f) + %s + f(%.2f)\right]" % ( dx, N(xs[0],digits=5), ' + '.join([ r"%s \cdot f(%.2f)" %(i%2*(-2)+4, N(xk, digits=5)) for i, xk in enumerate(xs[1:-1])]), N(xs[n],digits=5) ) sum_values_html = r"\frac{%.2f}{3} \cdot \left[ %s %s %s\right]" %( dx, "%.2f + "%N(ys[0],digits=5), ' + '.join([ r"%s \cdot %.2f" %(i%2*(-2)+4, N(yk, digits=5)) for i, yk in enumerate(ys[1:-1])]), " + %.2f"%N(ys[n],digits=5) ) html(r''' <div class="math"> \begin{align*} \int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x} & 
\approx %s \\ & = %s \\ & = %s \\ & = %.6f \end{align*} </div> ''' % ( interval[0], interval[1], sum_formula_html, sum_placement_html, sum_values_html, N(approx,digits=7) )) elif output_form == 'table': s = [['$i$','$x_i$','$f(x_i)$','$m$','$m\cdot f(x_i)$']] for i in range(0,n+1): if i==0 or i==n: j = 1 else: j = (i+1)%2*(-2)+4 s.append([i, xs[i], ys[i],j,N(j*ys[i])]) s.append(['','','','$\sum$','$%s$'%latex(3/dx*approx)]) pretty_print(table(s, header_row=True)) html(r'$\int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x}\approx\frac {%.2f}{3}\cdot %s=%s$'% (interval[0], interval[1],dx,latex(3/dx*approx),latex(approx)))
45e575e9ebda475a613555dfcb43ae7d739131c9
3,958
def object_reactions_form_target(object):
    """
    Get the target URL for the object reaction form.

    Example::

        <form action="{% object_reactions_form_target object %}" method="post">
    """
    ctype = ContentType.objects.get_for_model(object)
    return reverse("comments-ink-react-to-object", args=(ctype.id, object.id))
5bcd4d9fa8db783c78668820326dd55038ef609e
3,959
def check_args(**kwargs):
    """
    Check arguments for the themis load function.

    Parameters:
        **kwargs : a dictionary of arguments
            Possible arguments are: probe, level
            Each argument can be a string or a list of strings.
            Invalid argument values are ignored (e.g. probe='g', level='l0', etc.)
            Invalid argument names are ignored (e.g. 'probes', 'lev', etc.)

    Returns:
        list
            Prepared arguments in the same order as the inputs

    Examples:
        res_probe = check_args(probe='a')
        (res_probe, res_level) = check_args(probe='a b', level='l2')
        (res_level, res_probe) = check_args(level='l1', probe=['a', 'b'])
        # With incorrect argument probes:
        res = check_args(probe='a', level='l2', probes='a b') : res = [['a'], ['l2']]
    """
    valid_keys = {'probe', 'level'}
    valid_probe = {'a', 'b', 'c', 'd', 'e'}
    valid_level = {'l1', 'l2'}

    # Return list of values from arg_list that are only included in valid_set
    def valid_list(arg_list, valid_set):
        valid_res = []
        for arg in arg_list:
            if arg in valid_set:
                valid_res.append(arg)
        return valid_res

    # Return list
    res = []
    for key, values in kwargs.items():
        if key.lower() not in valid_keys:
            continue

        # resulting list
        arg_values = []

        # convert string into list, or ignore the argument
        if isinstance(values, str):
            values = [values]
        elif not isinstance(values, list):
            continue

        for value in values:
            arg_values.extend(value.strip().lower().split())

        # simple validation of the arguments
        if key.lower() == 'probe':
            arg_values = valid_list(arg_values, valid_probe)
        if key.lower() == 'level':
            arg_values = valid_list(arg_values, valid_level)

        res.append(arg_values)

    return res
3e25dc43df0a80a9a16bcca0729ee0b170a9fb89
3,960
def make_theta_mask(aa):
    """ Gives the theta of the bond originating each atom. """
    mask = np.zeros(14)
    # backbone
    mask[0] = BB_BUILD_INFO["BONDANGS"]['ca-c-n']  # nitrogen
    mask[1] = BB_BUILD_INFO["BONDANGS"]['c-n-ca']  # c_alpha
    mask[2] = BB_BUILD_INFO["BONDANGS"]['n-ca-c']  # carbon
    mask[3] = BB_BUILD_INFO["BONDANGS"]['ca-c-o']  # oxygen
    # sidechain
    for i, theta in enumerate(SC_BUILD_INFO[aa]['angles-vals']):
        mask[4 + i] = theta
    return mask
f33c1b46150ed16154c9a10c92f30cf9f60c2f51
3,961
def create_keypoint(n, *args):
    """
    Parameters
    ----------
    n : int
        Keypoint number
    *args : tuple, int, float
        *args must be a tuple of (x,y,z) coordinates or x, y and z
        coordinates as arguments.

    ::

        # Example
        kp1 = 1
        kp2 = 2
        create_keypoint(kp1, (0, 0, 0))  # x,y,z as tuple
        create_keypoint(kp2, 1, 1, 1)    # x,y,z as arguments
    """
    if len(args) == 1 and isinstance(args[0], tuple):
        x, y, z = args[0][0], args[0][1], args[0][2]
    else:
        x, y, z = args[0], args[1], args[2]
    _kp = "K,%g,%g,%g,%g" % (n, x, y, z)
    return _kp
e498e36418ec19d2feef122d3c42a346f9de4af7
3,962
import time


def wait_for_sidekiq(gl):
    """
    Return a helper function to wait until there are no busy sidekiq processes.

    Use this with asserts for slow tasks (group/project/user creation/deletion).
    """
    def _wait(timeout=30, step=0.5):
        for _ in range(timeout):
            time.sleep(step)
            busy = False
            processes = gl.sidekiq.process_metrics()["processes"]
            for process in processes:
                if process["busy"]:
                    busy = True
            if not busy:
                return True
        return False

    return _wait
7fe98f13e9474739bfe4066f20e5f7d813ee4476
3,963
def insert_node_after(new_node, insert_after):
    """Insert new_node into buffer after insert_after."""
    next_element = insert_after['next']
    next_element['prev'] = new_node
    new_node['next'] = insert_after['next']
    insert_after['next'] = new_node
    new_node['prev'] = insert_after
    return new_node
e03fbd7bd44a3d85d36069d494464b9237bdd306
3,965
def apply_wavelet_decomposition(mat, wavelet_name, level=None):
    """
    Apply 2D wavelet decomposition.

    Parameters
    ----------
    mat : array_like
        2D array.
    wavelet_name : str
        Name of a wavelet. E.g. "db5"
    level : int, optional
        Decomposition level. It is constrained so that the coarsest
        output array is larger than 16 pixels.

    Returns
    -------
    list
        The first element is a 2D array, the following elements are tuples of
        three 2D arrays, i.e. [mat_n, (cH_level_n, cV_level_n, cD_level_n),
        ..., (cH_level_1, cV_level_1, cD_level_1)]
    """
    (nrow, ncol) = mat.shape
    max_level = int(
        min(np.floor(np.log2(nrow / 16.0)), np.floor(np.log2(ncol / 16.0))))
    if (level is None) or (level > max_level) or (level < 1):
        level = max_level
    return pywt.wavedec2(mat, wavelet_name, level=level)
d91f534d605d03c364c89383629a7142f4705ac8
3,966
import math


def ACE(img, ratio=4, radius=300):
    """The implementation of ACE"""
    global para
    para_mat = para.get(radius)
    if para_mat is not None:
        pass
    else:
        size = radius * 2 + 1
        para_mat = np.zeros((size, size))
        for h in range(-radius, radius + 1):
            for w in range(-radius, radius + 1):
                if not h and not w:
                    continue
                para_mat[radius + h, radius + w] = 1.0 / \
                    math.sqrt(h ** 2 + w ** 2)
        para_mat /= para_mat.sum()
        para[radius] = para_mat

    h, w = img.shape[:2]
    p_h, p_w = [0] * radius + list(range(h)) + [h - 1] * radius, \
               [0] * radius + list(range(w)) + [w - 1] * radius
    temp = img[np.ix_(p_h, p_w)]

    res = np.zeros(img.shape)
    for i in range(radius * 2 + 1):
        for j in range(radius * 2 + 1):
            if para_mat[i][j] == 0:
                continue
            res += (para_mat[i][j] *
                    np.clip((img - temp[i:i + h, j:j + w]) * ratio, -1, 1))
    return res
6809067ec1aed0f20d62d672fcfb554e0ab51f28
3,967
def classname(object, modname):
    """Get a class name and qualify it with a module name if necessary."""
    name = object.__name__
    if object.__module__ != modname:
        name = object.__module__ + '.' + name
    return name
af4e05b0adaa9c90bb9946edf1dba67a40e78323
3,968
import time def demc_block(y, pars, pmin, pmax, stepsize, numit, sigma, numparams, cummodels, functype, myfuncs, funcx, iortholist, fits, gamma=None, isGR=True, ncpu=1): """ This function uses a differential evolution Markov chain with block updating to assess uncertainties. PARAMETERS ---------- y: Array containing dependent data Params: Array of initial guess for parameters #Pmin: Array of parameter minimum values #Pmax: Array of parameter maximum values stepsize: Array of 1-sigma change in parameter per iteration Numit: Number of iterations to perform Sigma: Standard deviation of data noise in y Numparams: Number of parameters for each model Cummodels: Cumulative number of models used Functype: Define function type (eclipse, ramp, ip, etc), see models.py Myfuncs: Pointers to model functions Funcx: Array of x-axis values for myfuncs fit: List of fit objects gamma: Multiplcation factor in parameter differential, establishes acceptance rate OUTPUTS ------- This function returns an array of the best fitting parameters, an array of all parameters over all iterations, and numaccept. REFERENCES ---------- Cajo J. F. Ter Braak, "Genetic algorithms and Markov Chain Monte Carlo: Differential Evolution Markov Chain makes Bayesian computing easy," Biometrics, 2006. HISTORY ------- Adapted from mcmc.py Kevin Stevenson, UChicago August 2012 """ global fit fit = fits params = np.copy(pars) nchains, nump = params.shape nextp = np.copy(params) #Proposed parameters bestp = np.copy(params[0]) #Best-fit parameters pedit = np.copy(params) #Editable parameters numaccept = 0 allparams = np.zeros((nump, nchains, numit)) inotfixed = np.where(stepsize != 0)[0] ishare = np.where(stepsize < 0)[0] #ifree = np.where(stepsize > 0)[0] outside = np.zeros((nchains, nump)) numevents = len(fit) intsteps = np.min((numit/5,1e5)) isrednoise = False wavelet = None noisefunc = None #UPDATE PARAMTER(S) EQUAL TO OTHER PARAMETER(S) if (ishare.size > 0): for s in range(ishare.size): params[:,ishare[s]] = params[:,int(abs(stepsize[ishare[s]])-1)] #Define blocks blocks = [] for j in range(numevents): #Build list of blocks blocks = np.concatenate((blocks, fit[j].blocks)) for i in range(cummodels[j],cummodels[j+1]): if functype[i] == 'noise': # Set up for modified chi-squared calculation using correlated noise isrednoise = True wavelet = fit[j].etc[k] noisefunc = myfuncs[i] blocks = blocks.astype(int) iblocks = [] eps = [] numblocks = blocks.max() + 1 numbp = np.zeros(numblocks) ifree = [[] for i in range(numblocks)] for b in range(numblocks): #Map block indices whereb = np.where(blocks == b)[0] iblocks.append(whereb) #Locate indices of free parameters in each block for w in whereb: ifree[b] = np.concatenate((ifree[b],numparams[w]+np.where(stepsize[numparams[w]:numparams[w+1]] > 0)[0])).astype(int) #Calculate number of free parameters per block numbp[b] += len(ifree[b]) eps.append(npr.normal(0, stepsize[ifree[b]]/100., [numit,numbp[b]])) print("Number of free parameters per block:") print(numbp) numa = np.zeros(numblocks) if gamma == None: gamma = 2.38/np.sqrt(2.*numbp) print("gamma:") print(gamma) #Calc chi-squared for model type using current params currchisq = np.zeros(nchains) currmodel = [[] for i in range(numevents)] for j in range(numevents): currmodel[j], noisepars = calcModel(nchains, functype, myfuncs, pedit, params, iortholist[j], funcx, cummodels, numparams, j) currchisq += calcChisq(y[j], sigma[j], currmodel[j], nchains, params, j, noisepars, isrednoise, wavelet, noisefunc) bestchisq = currchisq[0] #GENERATE RANDOM 
NUMBERS FOR MCMC numnotfixed = len(inotfixed) unif = npr.rand(numit,nchains) randchains = npr.randint(0,nchains,[numit,nchains,2]) #START TIMER clock = timer.Timer(numit,progress = np.arange(0.05,1.01,0.05)) #Run Differential Evolution Monte Carlo algorithm 'numit' times for m in range(numit): #Select next event (block) to update b = m % numblocks #Remove model component(s) that are taking a step pedit = np.copy(params) nextmodel = currmodel[:] for j in range(numevents): ymodels, noisepars = calcModel(nchains, functype, myfuncs, pedit, params, iortholist[j], funcx, cummodels, numparams, j, iblocks[b]) nextmodel[j] = np.divide(currmodel[j],ymodels) #Generate next step using differential evolution for n in range(nchains): rand1, rand2 = randchains[m,n] while rand1 == n or rand2 == n or rand1 == rand2: rand1, rand2 = npr.randint(0,nchains,2) nextp[n,ifree[b]] = params[n,ifree[b]] + gamma[b]*(params[rand1,ifree[b]]-params[rand2,ifree[b]]) + eps[b][m] #CHECK FOR NEW STEPS OUTSIDE BOUNDARIES ioutside = np.where(np.bitwise_or(nextp[n] < pmin, nextp[n] > pmax))[0] if (len(ioutside) > 0): nextp[n,ioutside] = np.copy(params[n,ioutside]) outside[n,ioutside] += 1 #UPDATE PARAMTER(S) EQUAL TO OTHER PARAMETER(S) if (ishare.size > 0): for s in range(ishare.size): nextp[:,ishare[s]] = nextp[:,int(abs(stepsize[ishare[s]])-1)] #COMPUTE NEXT CHI SQUARED AND ACCEPTANCE VALUES pedit = np.copy(nextp) nextchisq = np.zeros(nchains) for j in range(numevents): ymodels, noisepars = calcModel(nchains, functype, myfuncs, pedit, params, iortholist[j], funcx, cummodels, numparams, j, iblocks[b]) nextmodel[j] = np.multiply(nextmodel[j],ymodels) nextchisq += calcChisq(y[j], sigma[j], nextmodel[j], nchains, params, j, noisepars, isrednoise, wavelet, noisefunc) #CALCULATE ACCEPTANCE PROBABILITY accept = np.exp(0.5 * (currchisq - nextchisq)) #print(b,currchisq[0], nextchisq[0], accept[0]) for n in range(nchains): if accept[n] >= 1: #ACCEPT BETTER STEP numaccept += 1 numa[b] += 1 params[n] = np.copy(nextp[n]) currchisq[n] = np.copy(nextchisq[n]) if (currchisq[n] < bestchisq): bestp = np.copy(params[n]) bestchisq = np.copy(currchisq[n]) elif unif[m,n] <= accept[n]: #ACCEPT WORSE STEP numaccept += 1 numa[b] += 1 params[n] = np.copy(nextp[n]) currchisq[n] = np.copy(nextchisq[n]) allparams[:,:,m] = params.T #PRINT INTERMEDIATE INFO if ((m+1) % intsteps == 0) and (m > 0): print("\n" + time.ctime()) #print("Number of times parameter tries to step outside its prior:") #print(outside) print("Current Best Parameters: ") print(bestp) #Apply Gelman-Rubin statistic if isGR: #Check for no accepted steps in each chain #stdev = np.std(allparams[inotfixed],axis=1) #ichain = np.where(stdev > 0.)[0] #Call test #psrf, meanpsrf = gr.convergetest(allparams[inotfixed,ichain,:m+1], len(ichain)) psrf, meanpsrf = gr.convergetest(allparams[inotfixed,:,:m+1], nchains) numconv = np.sum(np.bitwise_and(psrf < 1.01, psrf >= 1.00)) print("Gelman-Rubin statistic for free parameters:") print(psrf) if numconv == numnotfixed: #and m >= 1e4: print("All parameters have converged to within 1% of unity. Halting MCMC.") allparams = allparams[:,:,:m+1] break clock.check(m+1) print("Acceptance rate per block (%):") print(100.*numa*numblocks/numit/nchains) allparams = np.reshape(allparams,(nump, (m+1)*nchains)) return allparams, bestp, numaccept, (m+1)*nchains
414168976c732d66165e19c356800158b2056a1e
3,969
def shape5d(a, data_format="NDHWC"):
    """
    Ensure a 5D shape, to use with 5D symbolic functions.

    Args:
        a: an int or tuple/list of length 3

    Returns:
        list: of length 5. If ``a`` is an int, return ``[1, a, a, a, 1]``
            or ``[1, 1, a, a, a]`` depending on data_format "NDHWC" or "NCDHW".
    """
    s2d = shape3d(a)
    if data_format == "NDHWC":
        return [1] + s2d + [1]
    else:
        return [1, 1] + s2d
fe6d974791a219c45a543a4d853f5d44770d0c9a
3,970
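A brief usage sketch for shape5d() above. It assumes a tensorpack-style shape3d() helper that expands an int into a length-3 list; the outputs shown are illustrative under that assumption:

# assuming shape3d(3) == [3, 3, 3] and shape3d([2, 3, 4]) == [2, 3, 4]
shape5d(3)                   # -> [1, 3, 3, 3, 1]   (NDHWC default)
shape5d([2, 3, 4], "NCDHW")  # -> [1, 1, 2, 3, 4]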
def resnet152(pretrained=False, num_classes=1000, ifmask=True, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    block = Bottleneck
    model = ResNet(block, [3, 8, 36, 3], num_classes=1000, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    model.fc = nn.Linear(512 * block.expansion, num_classes)
    if ifmask:
        model.lmask = LearnableMaskLayer(feature_dim=512 * block.expansion,
                                         num_classes=num_classes)
    return model
8b72a8e284d098a089448e4a10d5f393345d7278
3,972
def register_driver(cls):
    """
    Registers a driver class

    Args:
        cls (object): Driver class.

    Returns:
        name: driver name
    """
    _discover_on_demand()

    if not issubclass(cls, BaseDriver):
        raise QiskitChemistryError(
            'Could not register class {}: it is not a subclass of BaseDriver'.format(cls))

    return _register_driver(cls)
82eca23a5cf5caf9a028d040ac523aa6e20ae01d
3,973
def ceil(array, value):
    """
    Return the smallest index i such that array[i] >= value, assuming array
    is sorted in ascending order. If every element is smaller than value,
    len(array) is returned.
    """
    l = 0
    r = len(array) - 1
    i = r + 1
    while l <= r:
        m = l + int((r - l) / 2)
        if array[m] >= value:
            # This mid index is a candidate for the index we are searching for
            # so save it, and continue searching for a smaller candidate on the
            # left side.
            i = m
            r = m - 1
        else:
            # This mid index is not a candidate so continue searching the right
            # side.
            l = m + 1

    return i
689148cebc61ee60c99464fde10e6005b5d901a9
3,974
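A short usage sketch for the ceil() binary search above; the list and expected indices are illustrative:

data = [1, 3, 3, 7, 9]
assert ceil(data, 3) == 1   # first index whose element is >= 3
assert ceil(data, 4) == 3   # first index whose element is >= 4
assert ceil(data, 10) == 5  # no element >= 10, so len(data) is returned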
import copy


def FindOrgByUnionEtIntersection(Orgs):
    """Given a set of organizations considers all the possible unions and
    intersections to find all the possible organizations"""
    NewNewOrgs = set([])
    KnownOrgs = copy.deepcopy(Orgs)
    for h in combinations(Orgs, 2):
        # checks only if one is not contained in the other
        NewNewOrgs |= frozenset([OrgLibrary.check(h[0] | h[1])])
        # checks only if one is not contained in the other
        NewNewOrgs |= frozenset([OrgLibrary.check(h[0] & h[1])])
    FoundOrgs = NewNewOrgs
    NewOrgs = NewNewOrgs - KnownOrgs
    while NewOrgs:
        NewNewOrgs = set([])
        for h in combinations(NewOrgs, 2):
            # checks only if one is not contained in the other
            NewNewOrgs |= frozenset([OrgLibrary.check(h[0] | h[1])])
            # checks only if one is not contained in the other
            NewNewOrgs |= frozenset([OrgLibrary.check(h[0] & h[1])])
        for h in NewOrgs:
            for t in KnownOrgs:
                # checks only if one is not contained in the other
                NewNewOrgs |= frozenset([OrgLibrary.check(h | t)])
                # checks only if one is not contained in the other
                NewNewOrgs |= frozenset([OrgLibrary.check(h & t)])
        KnownOrgs |= NewOrgs
        NewOrgs = NewNewOrgs - KnownOrgs  # NewOrgs is what we actually found
    KnownOrgs -= Orgs
    return KnownOrgs
6e2450f49522186094b205dd86d8e698aca708bc
3,975
def get_sf_fa(
        constraint_scale: float = 1
) -> pyrosetta.rosetta.core.scoring.ScoreFunction:
    """
    Get score function for full-atom minimization and scoring
    """
    sf = pyrosetta.create_score_function('ref2015')
    sf.set_weight(
        pyrosetta.rosetta.core.scoring.ScoreType.atom_pair_constraint,
        5.0 * constraint_scale)
    sf.set_weight(pyrosetta.rosetta.core.scoring.ScoreType.dihedral_constraint,
                  1.0 * constraint_scale)
    sf.set_weight(pyrosetta.rosetta.core.scoring.ScoreType.angle_constraint,
                  1.0 * constraint_scale)
    return sf
b82b352f3fc031cc18951b037779a71247e5095f
3,976
from typing import List
from typing import Optional


def make_keypoint(class_name: str, x: float, y: float,
                  subs: Optional[List[SubAnnotation]] = None) -> Annotation:
    """
    Creates and returns a keypoint, aka point, annotation.

    Parameters
    ----------
    class_name : str
        The name of the class for this ``Annotation``.
    x : float
        The ``x`` value of the point.
    y : float
        The ``y`` value of the point.
    subs : Optional[List[SubAnnotation]], default: None
        List of ``SubAnnotation``s for this ``Annotation``.

    Returns
    -------
    Annotation
        A point ``Annotation``.
    """
    return Annotation(AnnotationClass(class_name, "keypoint"), {"x": x, "y": y}, subs or [])
bc4a96c8376890eaaa2170ab1cc1401dcb2781a4
3,977
def plane_mean(window):
    """Plane mean kernel to use with convolution process on image

    Args:
        window: the window part to use from image

    Returns:
        Normalized residual error from mean plane

    Example:

    >>> from ipfml.filters.kernels import plane_mean
    >>> import numpy as np
    >>> window = np.arange(9).reshape([3, 3])
    >>> result = plane_mean(window)
    >>> (result < 0.0001)
    True
    """
    window = np.array(window)
    width, height = window.shape

    # prepare data
    nb_elem = width * height
    xs = [int(i / height) for i in range(nb_elem)]
    ys = [i % height for i in range(nb_elem)]
    zs = np.array(window).flatten().tolist()

    # get residual (error) from mean plane computed
    tmp_A = []
    tmp_b = []
    for i in range(len(xs)):
        tmp_A.append([xs[i], ys[i], 1])
        tmp_b.append(zs[i])

    b = np.matrix(tmp_b).T
    A = np.matrix(tmp_A)

    fit = (A.T * A).I * A.T * b
    errors = b - A * fit
    residual = np.linalg.norm(errors)

    return residual
7383078ec3c88ac52728cddca9a725f6211b2d2c
3,978
def _eval_field_amplitudes(lat, k=5, n=1, amp=1e-5, field='v',
                           wave_type='Rossby', parameters=Earth):
    """
    Evaluates the latitude dependent amplitudes at a given latitude point.

    Parameters
    ----------
    lat : Float, array_like or scalar
        latitude (radians)
    k : Integer, scalar
        spherical wave-number (dimensionless). Default : 5
    n : Integer, scalar
        wave-mode (dimensionless). Default : 1
    amp : Float, scalar
        wave amplitude (m/sec). Default : 1e-5
    field : str
        pick 'phi' for geopotential height, 'u' for zonal velocity and 'v'
        for meridional velocity. Default : 'v'
    wave_type : str
        choose Rossby waves or WIG waves or EIG waves. Default : Rossby
    parameters : dict
        planetary parameters dict with keys:
        angular_frequency: float, (rad/sec)
        gravitational_acceleration: float, (m/sec^2)
        mean_radius: float, (m)
        layer_mean_depth: float, (m)
        Default : Earth's parameters defined above

    Returns
    -------
    Either u_hat (m/sec), v_hat (m/sec) or p_hat (m^2/sec^2) : Float, array_like or scalar
        Evaluation of the amplitudes for the zonal velocity, the meridional
        velocity or the geopotential height respectively.

    Notes
    -----
    This function supports k>=1 and n>=1 inputs only.
    Special treatments are required for k=0 and n=-1,0.
    """
    if not isinstance(wave_type, str):
        raise TypeError(str(wave_type) + ' should be string...')

    # unpack dictionary into vars:
    OMEGA = _unpack_parameters(parameters, 'angular_frequency')
    G = _unpack_parameters(parameters, 'gravitational_acceleration')
    A = _unpack_parameters(parameters, 'mean_radius')
    H0 = _unpack_parameters(parameters, 'layer_mean_depth')

    # Lamb's parameter:
    Lamb = (2. * OMEGA * A)**2 / (G * H0)

    # evaluate wave frequency:
    all_omegas = _eval_omega(k, n, parameters)

    # check for validity of wave_type:
    if wave_type not in all_omegas:
        raise KeyError(wave_type + ' should be Rossby, EIG or WIG...')

    omega = all_omegas[wave_type]

    # evaluate the meridional velocity amp first:
    v_hat = _eval_meridional_velocity(lat, Lamb, n, amp)

    # evaluate functions for u and phi:
    v_hat_plus_1 = _eval_meridional_velocity(lat, Lamb, n + 1, amp)
    v_hat_minus_1 = _eval_meridional_velocity(lat, Lamb, n - 1, amp)

    # Eq. (6a) in the text
    if field == 'v':
        return v_hat

    # Eq. (6b) in the text
    elif field == 'u':
        u_hat = (- ((n + 1) / 2.0)**0.5 * (omega / (G * H0)**0.5 + k / A) *
                 v_hat_plus_1 -
                 ((n) / 2.0)**0.5 * (omega / (G * H0)**0.5 - k / A) *
                 v_hat_minus_1)
        # pre-factors
        u_hat = G * H0 * Lamb**0.25 / \
            (1j * A * (omega**2 - G * H0 * (k / A)**2)) * u_hat
        return u_hat

    # Eq. (6c) in the text
    elif field == 'phi':
        p_hat = (- ((n + 1) / 2.0)**0.5 * (omega + (G * H0)**0.5 * k / A) *
                 v_hat_plus_1 +
                 ((n) / 2.0)**0.5 * (omega - (G * H0)**0.5 * k / A) *
                 v_hat_minus_1)
        p_hat = G * H0 * Lamb**0.25 / \
            (1j * A * (omega**2 - G * H0 * (k / A)**2)) * p_hat
        return p_hat

    else:
        raise KeyError('field must be u, v or phi')
db74c50ef6328055ab2a59faecba72cc28afd136
3,979
def get_uframe_info():
    """
    Get uframe configuration information (uframe_url, uframe timeout_connect
    and timeout_read).
    """
    uframe_url = current_app.config['UFRAME_URL'] + current_app.config['UFRAME_URL_BASE']
    timeout = current_app.config['UFRAME_TIMEOUT_CONNECT']
    timeout_read = current_app.config['UFRAME_TIMEOUT_READ']
    return uframe_url, timeout, timeout_read
921f42d59af265152d7ce453a19cb8057af8415e
3,980
def yd_process_results( mentions_dataset, predictions, processed, sentence2ner, include_offset=False, mode='default', rank_pred_score=True, ): """ Function that can be used to process the End-to-End results. :return: dictionary with results and document as key. """ assert mode in ['best_candidate', 'remove_invalid', 'default'] res = {} for doc in mentions_dataset: if doc not in predictions: # No mentions found, we return empty list. continue pred_doc = predictions[doc] ment_doc = mentions_dataset[doc] text = processed[doc][0] res_doc = [] for pred, ment in zip(pred_doc, ment_doc): sent = ment["sentence"] idx = ment["sent_idx"] start_pos = ment["pos"] mention_length = int(ment["end_pos"] - ment["pos"]) if pred["prediction"] != "NIL": candidates = [ { 'cand_rank': cand_rank, 'cand_name': cand_name, 'cand_score': cand_score, } for cand_rank, (cand_name, cand_mask, cand_score) in enumerate(zip(pred['candidates'], pred['masks'], pred['scores'])) if float(cand_mask) == 1 ] if rank_pred_score: candidates = sorted(candidates, key=lambda x: float(x['cand_score']), reverse=True) # make sure that ed_model predict is always in the first place. for cand_index, candidate in enumerate(candidates): if candidate['cand_name'] == pred['prediction']: if cand_index != 0: candidates[0], candidates[cand_index] = candidates[cand_index], candidates[0] break if len(candidates) == 1: temp = ( start_pos, mention_length, pred["prediction"], ment["ngram"], pred["conf_ed"], ment["conf_md"] if "conf_md" in ment else 0.0, ment["tag"] if "tag" in ment else "NULL", [tmp_candidate['cand_name'] for tmp_candidate in candidates], ) res_doc.append(temp) else: if mode == 'best_candidate': for cand_index, candidate in enumerate(candidates): tmp_cand_name = candidate['cand_name'].replace('_', ' ') if sentence2ner is not None and \ tmp_cand_name in sentence2ner and \ ment["tag"] != sentence2ner[tmp_cand_name]: continue else: temp = ( start_pos, mention_length, candidate['cand_name'], ment["ngram"], pred["conf_ed"], ment["conf_md"] if "conf_md" in ment else 0.0, ment["tag"] if "tag" in ment else "NULL", [tmp_candidate['cand_name'] for tmp_candidate in candidates], ) res_doc.append(temp) break elif mode == 'remove_invalid': tmp_cand_name = pred["prediction"].replace('_', '') if sentence2ner is not None and \ tmp_cand_name in sentence2ner and \ ment["tag"] != sentence2ner[tmp_cand_name]: pass else: temp = ( start_pos, mention_length, pred["prediction"], ment["ngram"], pred["conf_ed"], ment["conf_md"] if "conf_md" in ment else 0.0, ment["tag"] if "tag" in ment else "NULL", [tmp_candidate['cand_name'] for tmp_candidate in candidates], ) res_doc.append(temp) elif mode == 'default': temp = ( start_pos, mention_length, pred["prediction"], ment["ngram"], pred["conf_ed"], ment["conf_md"] if "conf_md" in ment else 0.0, ment["tag"] if "tag" in ment else "NULL", [tmp_candidate['cand_name'] for tmp_candidate in candidates], ) res_doc.append(temp) res[doc] = res_doc return res
32352c6aabea6750a6eb410d62232c96ad6b7e7d
3,981
import re


def valid(f):
    """Formula f is valid if and only if it has no
    numbers with leading zero, and evals true."""
    try:
        return not re.search(r'\b0[0-9]', f) and eval(f) is True
    except ArithmeticError:
        return False
1303729dc53288ea157687f78d7266fa7cb2ce79
3,982
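A short usage sketch for valid() above, with illustrative formula strings:

assert valid('1 + 2 == 3') is True    # evaluates to True and has no leading zeros
assert valid('1 + 02 == 3') is False  # "02" has a leading zero, rejected before eval
assert valid('10 / 0 == 1') is False  # ZeroDivisionError is an ArithmeticError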
def user_info():
    """
    Render the personal center (user profile) page
    :return:
    """
    user = g.user
    if not user:
        return redirect('/')

    data = {
        "user_info": user.to_dict()
    }

    return render_template("news/user.html", data=data)
54c6c6122f28553f0550a744d5b51c26221f7c60
3,983
def _check_X(X, n_components=None, n_features=None, ensure_min_samples=1):
    """Check the input data X.

    See https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/_base.py .

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    n_components : integer

    Returns
    -------
    X : array, shape (n_samples, n_features)
    """
    X = check_array(X, dtype=[np.float64, np.float32],
                    ensure_min_samples=ensure_min_samples)
    if n_components is not None and X.shape[0] < n_components:
        raise ValueError('Expected n_samples >= n_components '
                         'but got n_components = %d, n_samples = %d'
                         % (n_components, X.shape[0]))
    if n_features is not None and X.shape[1] != n_features:
        raise ValueError("Expected the input data X have %d features, "
                         "but got %d features"
                         % (n_features, X.shape[1]))
    return X
429120092a963d1638e04cc96afdfe5979470fee
3,984
def read_viirs_geo (filelist, ephemeris=False, hgt=False): """ Read JPSS VIIRS Geo files and return Longitude, Latitude, SatelliteAzimuthAngle, SatelliteRange, SatelliteZenithAngle. if ephemeris=True, then return midTime, satellite position, velocity, attitude """ if type(filelist) is str: filelist = [filelist] if len(filelist) ==0: return None # Open user block to read Collection_Short_Name with h5py.File(filelist[0], 'r') as fn: user_block_size = fn.userblock_size with open(filelist[0], 'rU') as fs: ub_text = fs.read(user_block_size) ub_xml = etree.fromstring(ub_text.rstrip('\x00')) #print(ub_text) #print(etree.tostring(ub_xml)) CollectionName = ub_xml.find('Data_Product/N_Collection_Short_Name').text+'_All' #print(CollectionName) # read the data geos = [h5py.File(filename, 'r') for filename in filelist] if not ephemeris: Latitude = np.concatenate([f['All_Data'][CollectionName]['Latitude'][:] for f in geos]) Longitude = np.concatenate([f['All_Data'][CollectionName]['Longitude'][:] for f in geos]) SatelliteAzimuthAngle = np.concatenate([f['All_Data'][CollectionName]['SatelliteAzimuthAngle'][:] for f in geos]) SatelliteRange = np.concatenate([f['All_Data'][CollectionName]['SatelliteRange'][:] for f in geos]) SatelliteZenithAngle = np.concatenate([f['All_Data'][CollectionName]['SatelliteZenithAngle'][:] for f in geos]) Height = np.concatenate([f['All_Data'][CollectionName]['Height'][:] for f in geos]) if hgt: return Longitude, Latitude, SatelliteAzimuthAngle, SatelliteRange, SatelliteZenithAngle, Height else: return Longitude, Latitude, SatelliteAzimuthAngle, SatelliteRange, SatelliteZenithAngle if ephemeris: MidTime = np.concatenate([f['All_Data'][CollectionName]['MidTime'] [:] for f in geos]) SCPosition = np.concatenate([f['All_Data'][CollectionName]['SCPosition'][:] for f in geos]) SCVelocity = np.concatenate([f['All_Data'][CollectionName]['SCVelocity'][:] for f in geos]) SCAttitude = np.concatenate([f['All_Data'][CollectionName]['SCAttitude'][:] for f in geos]) return MidTime, SCPosition, SCVelocity, SCAttitude
1b8bbd34651e13aabe752fa8e3ac8c6679d757ca
3,985
def _in_terminal():
    """
    Detect if Python is running in a terminal.

    Returns
    -------
    bool
        ``True`` if Python is running in a terminal; ``False`` otherwise.
    """
    # Assume standard Python interpreter in a terminal.
    if "get_ipython" not in globals():
        return True
    ip = globals()["get_ipython"]()
    # IPython as a Jupyter kernel.
    if hasattr(ip, "kernel"):
        return False
    return True
9716c2a1809f21ed8b827026d29b4ad69045f8d5
3,986
import re


def create_text_pipeline(documents):
    """
    Create the full text pre-processing pipeline using spaCy that first cleans
    the texts using the cleaning utility functions and then also removes common
    stopwords and corpus specific stopwords. This function is used specifically
    on abstracts.

    :param documents: A list of textual documents to pre-process.
    :return cleaned_docs: Pre-processed textual documents.
    """
    # Load all the documents into a spaCy pipe.
    docs = nlp.pipe(documents, disable=["ner"])
    cleaned_docs = []
    # Lowercase + custom stopwords list + remove one character tokens +
    # remove symbolical and punctuation tokens.
    for doc in docs:
        lowercased_sents_without_stops = []
        for sent in doc.sents:
            lowercased_lemmas_one_sent = []
            for token in sent:
                if not token.pos_ in {"SYM", "PUNCT"} \
                        and len(token) > 1 \
                        and not has_links(token.lower_) \
                        and not check_for_mostly_numeric_string(token.lower_) \
                        and not re.sub(r'[^\w\s]', '', token.lemma_) in CUSTOM_STOPS:
                    lowercased_lemmas_one_sent.append(token.lower_)
            sentence = ' '.join(lowercased_lemmas_one_sent)
            lowercased_sents_without_stops.append(sentence)
        cleaned_docs.append([s for s in lowercased_sents_without_stops])
    return cleaned_docs
d31632c7c1d9a2c85362e05ae43f96f35993a746
3,987
def giou_dist(tlbrs1, tlbrs2):
    """Computes pairwise GIoU distance."""
    assert tlbrs1.ndim == tlbrs2.ndim == 2
    assert tlbrs1.shape[1] == tlbrs2.shape[1] == 4

    Y = np.empty((tlbrs1.shape[0], tlbrs2.shape[0]))
    for i in nb.prange(tlbrs1.shape[0]):
        area1 = area(tlbrs1[i, :])
        for j in range(tlbrs2.shape[0]):
            iou = 0.
            area_union = area1 + area(tlbrs2[j, :])
            iw = min(tlbrs1[i, 2], tlbrs2[j, 2]) - max(tlbrs1[i, 0], tlbrs2[j, 0]) + 1
            ih = min(tlbrs1[i, 3], tlbrs2[j, 3]) - max(tlbrs1[i, 1], tlbrs2[j, 1]) + 1
            if iw > 0 and ih > 0:
                area_inter = iw * ih
                area_union -= area_inter
                iou = area_inter / area_union
            ew = max(tlbrs1[i, 2], tlbrs2[j, 2]) - min(tlbrs1[i, 0], tlbrs2[j, 0]) + 1
            eh = max(tlbrs1[i, 3], tlbrs2[j, 3]) - min(tlbrs1[i, 1], tlbrs2[j, 1]) + 1
            area_encls = ew * eh
            giou = iou - (area_encls - area_union) / area_encls
            Y[i, j] = (1. - giou) * 0.5
    return Y
40dcd6b59f350f167ab8cf31be425e98671243d4
3,988
import datetime


def easter(date):
    """Calculate the date of the easter.

    Requires a datetime type object. Returns a datetime object with the
    date of easter for the passed object's year.
    """
    if 1583 <= date.year < 10000:
        # Delambre's method (Gregorian calendar); uses integer division throughout.
        b = date.year // 100  # Take the first two digits of the year.
        h = (((19 * (date.year % 19) + b - (b // 4)) -
              ((b - ((b + 8) // 25) + 1) // 3) + 15) % 30)
        k = ((32 + 2 * (b % 4) + 2 * ((date.year % 100) // 4) - h -
              ((date.year % 100) % 4)) % 7)
        m = ((date.year % 19) + 11 * h + 22 * k) // 451
        return datetime.date(date.year, (h + k - 7 * m + 114) // 31,
                             ((h + k - 7 * m + 114) % 31) + 1)
    elif 1 <= date.year < 1583:
        # Julian calendar
        d = (19 * (date.year % 19) + 15) % 30
        e = (2 * (date.year % 4) + 4 * (date.year % 7) - d + 34) % 7
        return datetime.date(date.year, (d + e + 114) // 31,
                             ((d + e + 114) % 31) + 1)
    else:
        # Negative value
        raise ValueError("Invalid year: %d." % date.year)
90bfaf56fb5164cdfb185f430ca11e7a5d9c2785
3,989
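A quick check of the easter() routine above, assuming the Python 3 integer-division fix applied to it:

import datetime

assert easter(datetime.date(2024, 1, 1)) == datetime.date(2024, 3, 31)   # Western Easter 2024
assert easter(datetime.date(2000, 6, 15)) == datetime.date(2000, 4, 23)  # Western Easter 2000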
from typing import Dict


def state_mahalanobis(od: Mahalanobis) -> Dict:
    """
    Mahalanobis parameters to save.

    Parameters
    ----------
    od
        Outlier detector object.
    """
    state_dict = {'threshold': od.threshold,
                  'n_components': od.n_components,
                  'std_clip': od.std_clip,
                  'start_clip': od.start_clip,
                  'max_n': od.max_n,
                  'cat_vars': od.cat_vars,
                  'ohe': od.ohe,
                  'd_abs': od.d_abs,
                  'clip': od.clip,
                  'mean': od.mean,
                  'C': od.C,
                  'n': od.n}
    return state_dict
7be602c5a0c89d67adc223c911abccd96d359664
3,990
def show_table(table, **options):
    """
    Displays a table without asking for input from the user.

    :param table: a :class:`Table` instance
    :param options: all :class:`Table` options supported, see :class:`Table`
        documentation for details
    :return: None
    """
    return table.show_table(**options)
ec040d4a68d2b3cb93493f336daf1aa63289756e
3,991
def create_client(name, func):
    """Creating resources/clients for all needed infrastructure: EC2, S3, IAM, Redshift

    Keyword arguments:
    name -- the name of the AWS service resource/client
    func -- the boto3 function object (e.g. boto3.resource/boto3.client)
    """
    print("Creating client for", name)
    return func(name,
                region_name=DWH_REGION,
                aws_access_key_id=KEY,
                aws_secret_access_key=SECRET)
a688c36918ebb4bc76ee1594c6f4cca638587d7d
3,992
def hamming(s0, s1):
    """
    >>> hamming('ABCD', 'AXCY')
    2
    """
    assert len(s0) == len(s1)
    return sum(c0 != c1 for c0, c1 in zip(s0, s1))
efaba3e6aca8349b0dc5df575b937ba67a148d0e
3,993
import pickle


def load_embeddings(topic):
    """
    Load TSNE 2D Embeddings generated from fitting BlazingText on the news articles.
    """
    print(topic)
    embeddings = pickle.load(
        open(f'covidash/data/{topic}/blazing_text/embeddings.pickle', 'rb'))
    labels = pickle.load(
        open(f'covidash/data/{topic}/blazing_text/labels.pickle', 'rb'))
    if '</s>' in labels:
        labels.remove('</s>')
        embeddings = embeddings[:len(labels), :]
    return embeddings, labels
de2f74c7e467e0f057c10a0bc15b79ee9eecb40f
3,994
def mosaic_cut(image, original_width, original_height, width, height, center, ptop, pleft, pbottom, pright, shiftx, shifty): """Generates a random center location to use for the mosaic operation. Given a center location, cuts the input image into a slice that will be concatenated with other slices with the same center in order to construct a final mosaicked image. Args: image: `Tensor` of shape [None, None, 3] that needs to be altered. original_width: `float` value indicating the original width of the image. original_height: `float` value indicating the original height of the image. width: `float` value indicating the final width of the image. height: `float` value indicating the final height of the image. center: `float` value indicating the desired center of the final patched image. ptop: `float` value indicating the top of the image without padding. pleft: `float` value indicating the left of the image without padding. pbottom: `float` value indicating the bottom of the image without padding. pright: `float` value indicating the right of the image without padding. shiftx: `float` 0.0 or 1.0 value indicating if the image is on the left or right. shifty: `float` 0.0 or 1.0 value indicating if the image is at the top or bottom. Returns: image: The cropped image in the same datatype as the input image. crop_info: `float` tensor that is applied to the boxes in order to select the boxes still contained within the image. """ def cast(values, dtype): return [tf.cast(value, dtype) for value in values] with tf.name_scope('mosaic_cut'): center = tf.cast(center, width.dtype) zero = tf.cast(0.0, width.dtype) cut_x, cut_y = center[1], center[0] # Select the crop of the image to use left_shift = tf.minimum( tf.minimum(cut_x, tf.maximum(zero, -pleft * width / original_width)), width - cut_x) top_shift = tf.minimum( tf.minimum(cut_y, tf.maximum(zero, -ptop * height / original_height)), height - cut_y) right_shift = tf.minimum( tf.minimum(width - cut_x, tf.maximum(zero, -pright * width / original_width)), cut_x) bot_shift = tf.minimum( tf.minimum(height - cut_y, tf.maximum(zero, -pbottom * height / original_height)), cut_y) (left_shift, top_shift, right_shift, bot_shift, zero) = cast([left_shift, top_shift, right_shift, bot_shift, zero], tf.float32) # Build a crop offset and a crop size tensor to use for slicing. crop_offset = [zero, zero, zero] crop_size = [zero - 1, zero - 1, zero - 1] if shiftx == 0.0 and shifty == 0.0: crop_offset = [top_shift, left_shift, zero] crop_size = [cut_y, cut_x, zero - 1] elif shiftx == 1.0 and shifty == 0.0: crop_offset = [top_shift, cut_x - right_shift, zero] crop_size = [cut_y, width - cut_x, zero - 1] elif shiftx == 0.0 and shifty == 1.0: crop_offset = [cut_y - bot_shift, left_shift, zero] crop_size = [height - cut_y, cut_x, zero - 1] elif shiftx == 1.0 and shifty == 1.0: crop_offset = [cut_y - bot_shift, cut_x - right_shift, zero] crop_size = [height - cut_y, width - cut_x, zero - 1] # Contain and crop the image. ishape = tf.cast(tf.shape(image)[:2], crop_size[0].dtype) crop_size[0] = tf.minimum(crop_size[0], ishape[0]) crop_size[1] = tf.minimum(crop_size[1], ishape[1]) crop_offset = tf.cast(crop_offset, tf.int32) crop_size = tf.cast(crop_size, tf.int32) image = tf.slice(image, crop_offset, crop_size) crop_info = tf.stack([ tf.cast(ishape, tf.float32), tf.cast(tf.shape(image)[:2], dtype=tf.float32), tf.ones_like(ishape, dtype=tf.float32), tf.cast(crop_offset[:2], tf.float32) ]) return image, crop_info
2874ea65a695d7ebebf218e5a290069a9f3c1e8e
3,996
import requests


def get_children_info(category_id: str) -> list[dict]:
    """Get information about children categories of the current category.

    :param: category_id: category id.
    :return: info about children categories.
    """
    # Create the URL
    url = f'{POINT}/resources/v2/title/domains/{DOMAIN}/' \
          f'categories/{category_id}/children'

    # Request
    response = requests.get(url, params=REQUEST_PARAMS,
                            headers=REQUEST_HEADERS)

    # If error
    if not response:
        # Raise exception to retry request by decorator
        raise RequestException()

    # Extract data
    children_data = response.json().get('data')
    if children_data:
        return children_data['categories']
    return []
f5a651c1f58c75ee56d1140ee41dc6dd39570f88
3,997
import datetime


def GetTypedValue(field_type, value):
    """Returns a typed value based on a schema description and string value.

    BigQuery's Query() method returns a JSON string that has all values stored
    as strings, though the schema contains the necessary type information. This
    method provides conversion services to make it easy to persist the data in
    your JSON as "typed" data.

    Args:
        field_type: The field type (as defined by BigQuery).
        value: The field value, typed as a string.

    Returns:
        A value of the appropriate type.

    Raises:
        NotSupportedError: Raised if the field type is not supported.
    """
    if value is None:
        return None
    if field_type == FieldTypes.STRING:
        return value
    if field_type == FieldTypes.INTEGER:
        if value == 'NaN':
            return None
        else:
            return int(value)
    if field_type == FieldTypes.FLOAT:
        if value == 'NaN':
            return None
        else:
            return float(value)
    if field_type == FieldTypes.TIMESTAMP:
        if value == 'NaN':
            return None
        else:
            dt = datetime.datetime.utcfromtimestamp(float(value))
            return dt.isoformat(' ')
    if field_type == FieldTypes.BOOLEAN:
        return value.lower() == 'true'
    else:
        raise NotSupportedError(
            'Type {field_type} is not supported.'.format(field_type=field_type))
8e6198d089bae4e1044b2998da97a8cbcf6130b2
3,998
def predict_from_file(audio_file, hop_length=None, fmin=50., fmax=MAX_FMAX, model='full', decoder=torchcrepe.decode.viterbi, return_harmonicity=False, return_periodicity=False, batch_size=None, device='cpu', pad=True): """Performs pitch estimation from file on disk Arguments audio_file (string) The file to perform pitch tracking on hop_length (int) The hop_length in samples fmin (float) The minimum allowable frequency in Hz fmax (float) The maximum allowable frequency in Hz model (string) The model capacity. One of 'full' or 'tiny'. decoder (function) The decoder to use. See decode.py for decoders. return_harmonicity (bool) [DEPRECATED] Whether to also return the network confidence return_periodicity (bool) Whether to also return the network confidence batch_size (int) The number of frames per batch device (string) The device used to run inference pad (bool) Whether to zero-pad the audio Returns pitch (torch.tensor [shape=(1, 1 + int(time // hop_length))]) (Optional) periodicity (torch.tensor [shape=(1, 1 + int(time // hop_length))]) """ # Load audio audio, sample_rate = torchcrepe.load.audio(audio_file) # Predict return predict(audio, sample_rate, hop_length, fmin, fmax, model, decoder, return_harmonicity, return_periodicity, batch_size, device, pad)
7e1f8036e5d0506f28a4b36b9e23c2d4a0237218
3,999
import unittest


def test():
    """Runs the unit tests without test coverage."""
    tests = unittest.TestLoader().discover('cabotage/tests', pattern='test*.py')
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        return 0
    return 1
bcb57638bea41f3823cd22aa7a43b159591ad99b
4,000
def nm_to_uh(s):
    """Get the userhost part of a nickmask.

    (The source of an Event is a nickmask.)
    """
    return s.split("!")[1]
5e6c07b7b287000a401ba81117bc55be47cc9a24
4,001
def upload_example_done(request, object_id):
    """
    This view is a callback that receives POST data from uploadify when the
    download is complete. See also /media/js/uploadify_event_handlers.js.
    """
    example = get_object_or_404(Example, id=object_id)

    #
    # Grab the post data sent by our OnComplete handler and parse it. Set the
    # fields on our example object as appropriate and save.
    #
    if request.method == 'POST':
        post_response = request.POST['s3_response']
        location_rexp = '<Location>(.*)</Location>'
        example.file_url = unquote_plus(re.search(location_rexp, post_response).group(1))
        example.file_name = request.POST['file_obj[name]']
        example.file_size = request.POST['file_obj[size]']
        example.file_upload_speed = request.POST['upload_data[speed]']
        example.file_uploaded = datetime.now()
        example.save()
        print(example.file_url)
        print(example.file_name)
        print(example.file_uploaded)

    return HttpResponse((reverse('examples_example_detail', args=[example.id])))
b748cb1708ddfdd2959f723902c45668c8774df2
4,002
def epoch_in_milliseconds(epoch):
    """
    >>> epoch_in_milliseconds(datetime_from_seconds(-12345678999.0001))
    -12345679000000
    """
    return epoch_in_seconds(epoch) * 1000
75ae0779ae2f6d1987c1fdae0a6403ef725a6893
4,003
def get_workspaces(clue, workspaces):
    """
    Imports all workspaces if none were provided.

    Returns list of workspace names
    """
    if workspaces is None:
        logger.info("no workspaces specified, importing all toggl workspaces...")
        workspaces = clue.get_toggl_workspaces()
    logger.info("The following workspaces will be imported: %s", str(workspaces))
    return workspaces
aae8b5f2585a7865083b433f4aaf9874d5ec500e
4,004
import pprint def create_hparams(hparams_string=None, hparams_json=None, verbose=True): """Create model hyperparameters. Parse nondefault from given string.""" hparams = tf.contrib.training.HParams( training_stage='train_style_extractor',#['train_text_encoder','train_style_extractor','train_style_attention','train_refine_layernorm'] full_refine=False, ################################ # Experiment Parameters # ################################ epochs=500, iters=1000000, iters_per_checkpoint=5000, log_per_checkpoint=1, seed=1234, dynamic_loss_scaling=True, fp16_run=False, distributed_run=False, dist_backend="nccl", dist_url="tcp://localhost:54321", cudnn_enabled=True, cudnn_benchmark=False, numberworkers=8, ignore_layers=['embedding.weight'], ################################ # Data Parameters # ################################ load_mel=True, training_files='../../../spk_ttsdatafull_libri500_unpacked/training_with_mel_frame.txt', mel_dir='../../../spk_ttsdatafull_libri500_unpacked/', text_cleaners=['english_cleaners'], is_partial_refine=False, is_refine_style=False, use_GAN=False, GAN_type='wgan-gp',#['lsgan', 'wgan-gp'] GAN_alpha=1.0, GP_beata=10.0, Generator_pretrain_step=1, add_noise=False, ################################ # Audio Parameters # ################################ max_wav_value=32768.0, num_mels=80, num_freq=1025, min_mel_freq=0, max_mel_freq=8000, sample_rate=16000, frame_length_ms=50, frame_shift_ms=12.5, preemphasize=0.97, min_level_db=-100, ref_level_db=0, # suggest use 20 for griffin-lim and 0 for wavenet max_abs_value=4, symmetric_specs=True, # if true, suggest use 4 as max_abs_value # Eval: griffin_lim_iters=60, power=1.5, # Power to raise magnitudes to prior to Griffin-Lim threshold=0.5, # for stop token minlenratio=0.0, # Minimum length ratio in inference. maxlenratio=50.0, # Maximum length ratio in inference. use_phone=True, phone_set_file="../../../spk_ttsdatafull_libri500_unpacked/phone_set.json", n_symbols=5000, # len(symbols), embed_dim=512, # Dimension of character embedding. pretrained_model=None, # VQVAE use_vqvae=False, aux_encoder_kernel_size=3, aux_encoder_n_convolutions=2, aux_encoder_embedding_dim=512, speaker_embedding_dim=256, commit_loss_weight=1.0, # Contribution of commitment loss, between 0.1 and 2.0 (default: 1.0) eprenet_conv_layers=3, # Number of encoder prenet convolution layers. eprenet_conv_chans=512, # Number of encoder prenet convolution channels. eprenet_conv_filts=5, # Filter size of encoder prenet convolution. dprenet_layers=2, # Number of decoder prenet layers. dprenet_units=256, # Number of decoder prenet hidden units. positionwise_layer_type="linear", # FFN or conv or (conv+ffn) in encoder after self-attention positionwise_conv_kernel_size=1, # Filter size of conv elayers=6, # Number of encoder layers. eunits=1536, # Number of encoder hidden units. adim=384, # Number of attention transformation dimensions. aheads=4, # Number of heads for multi head attention. dlayers=6, # Number of decoder layers. dunits=1536, # Number of decoder hidden units. duration_predictor_layers=2, duration_predictor_chans=384, duration_predictor_kernel_size=3, use_gaussian_upsampling=False, postnet_layers=5, # Number of postnet layers. postnet_chans=512, # Number of postnet channels. postnet_filts=5, # Filter size of postnet. use_scaled_pos_enc=True, # Whether to use trainable scaled positional encoding. use_batch_norm=True, # Whether to use batch normalization in posnet. 
encoder_normalize_before=True, # Whether to perform layer normalization before encoder block. decoder_normalize_before=True, # Whether to perform layer normalization before decoder block. encoder_concat_after=False, # Whether to concatenate attention layer's input and output in encoder. decoder_concat_after=False, # Whether to concatenate attention layer's input and output in decoder. reduction_factor=1, # Reduction factor. is_multi_speakers=True, is_spk_layer_norm=True, pretrained_spkemb_dim=512, n_speakers=8000, spk_embed_dim=128, # Number of speaker embedding dimenstions. spk_embed_integration_type="concat", # concat or add, How to integrate speaker embedding. use_ssim_loss=True, use_f0=False, log_f0=False, f0_joint_train=False, f0_alpha=0.1, stop_gradient_from_pitch_predictor=False, pitch_predictor_layers=2, pitch_predictor_chans=384, pitch_predictor_kernel_size=3, pitch_predictor_dropout=0.5, pitch_embed_kernel_size=9, pitch_embed_dropout=0.5, is_multi_styles=False, n_styles=6, style_embed_dim=128, # Number of style embedding dimenstions. style_embed_integration_type="concat", # concat or add, How to integrate style embedding. style_vector_type='mha',#gru or mha, How to generate style vector. style_query_level='sentence',#phone or sentence # value: pytorch, xavier_uniform, xavier_normal, kaiming_uniform, kaiming_normal transformer_init="pytorch", # How to initialize transformer parameters. initial_encoder_alpha=1.0, initial_decoder_alpha=1.0, transformer_enc_dropout_rate=0.1, # Dropout rate in encoder except attention & positional encoding. transformer_enc_positional_dropout_rate=0.1, # Dropout rate after encoder positional encoding. transformer_enc_attn_dropout_rate=0.1, # Dropout rate in encoder self-attention module. transformer_dec_dropout_rate=0.1, # Dropout rate in decoder except attention & positional encoding. transformer_dec_positional_dropout_rate=0.1, # Dropout rate after decoder positional encoding. transformer_dec_attn_dropout_rate=0.1, # Dropout rate in deocoder self-attention module. transformer_enc_dec_attn_dropout_rate=0.1, # Dropout rate in encoder-deocoder attention module. duration_predictor_dropout_rate=0.1, eprenet_dropout_rate=0.5, # Dropout rate in encoder prenet. dprenet_dropout_rate=0.5, # Dropout rate in decoder prenet. postnet_dropout_rate=0.5, # Dropout rate in postnet. use_masking=True, # Whether to apply masking for padded part in loss calculation. use_weighted_masking=False, # Whether to apply weighted masking in loss calculation. bce_pos_weight=1.0, # Positive sample weight in bce calculation (only for use_masking=true). loss_type="L2", # L1, L2, L1+L2, How to calculate loss. 
# Reference: # Efficiently Trainable Text-to-Speech System Based on Deep Convolutional Networks with Guided Attention # https://arxiv.org/abs/1710.08969 use_gst=False, use_mutual_information=False, mutual_information_lambda=0.1, mi_loss_type='unbias',#['bias','unbias'] style_extractor_presteps=300000, choosestl_steps=100000, gst_train_att=False, att_name='100k_noshuffle_gru', shuffle=False, gst_reference_encoder='multiheadattention',#'multiheadattention' or 'convs' gst_reference_encoder_mha_layers=4, gst_tokens=10, gst_heads=4, gst_conv_layers=6, gst_conv_chans_list=(32, 32, 64, 64, 128, 128), gst_conv_kernel_size=3, gst_conv_stride=2, gst_gru_layers=1, gst_gru_units=128, step_use_predicted_dur=20000, ################################ # Optimization Hyperparameters # ################################ learning_rate_decay_scheme='noam', use_saved_learning_rate=True, warmup_steps=10000, # Optimizer warmup steps. decay_steps=12500, # halves the learning rate every 12.5k steps decay_rate=0.5, # learning rate decay rate # decay_end=300000, # decay_rate=0.01, initial_learning_rate=0.5, # Initial value of learning rate. final_learning_rate=1e-5, weight_decay=1e-6, grad_clip_thresh=1.0, batch_criterion='utterance', batch_size=2, mask_padding=True # set model's padded outputs to padded values ) if hparams_json: print('Parsing hparams in json # {}'.format(hparams_json)) with open(hparams_json) as json_file: hparams.parse_json(json_file.read()) if hparams_string: print('Parsing command line hparams # {}'.format(hparams_string)) hparams.parse(hparams_string) # if hparams.use_phone: # from text.phones import Phones # phone_class = Phones(hparams.phone_set_file) # hparams.n_symbols = len(phone_class._symbol_to_id) # del phone_class if verbose: print('Final parsed hparams:') pprint(hparams.values()) return hparams
d4d255241b7322a10369bc313a0ddc971c0115a6
4,005
def ChromiumFetchSync(name, work_dir, git_repo, checkout='origin/master'):
    """Some Chromium projects want to use gclient for clone and dependencies."""
    if os.path.isdir(work_dir):
        print('%s directory already exists' % name)
    else:
        # Create Chromium repositories one deeper, separating .gclient files.
        parent = os.path.split(work_dir)[0]
        Mkdir(parent)
        proc.check_call(['gclient', 'config', git_repo], cwd=parent)
        proc.check_call(['git', 'clone', git_repo], cwd=parent)

    proc.check_call(['git', 'fetch'], cwd=work_dir)
    proc.check_call(['git', 'checkout', checkout], cwd=work_dir)
    proc.check_call(['gclient', 'sync'], cwd=work_dir)
    return (name, work_dir)
8bb0f593eaf874ab1a6ff95a913ef34b566a47bc
4,006
def kld_error(res, error='simulate', rstate=None, return_new=False, approx=False): """ Computes the `Kullback-Leibler (KL) divergence <https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`_ *from* the discrete probability distribution defined by `res` *to* the discrete probability distribution defined by a **realization** of `res`. Parameters ---------- res : :class:`~dynesty.results.Results` instance :class:`~dynesty.results.Results` instance for the distribution we are computing the KL divergence *from*. error : {`'jitter'`, `'resample'`, `'simulate'`}, optional The error method employed, corresponding to :meth:`jitter_run`, :meth:`resample_run`, and :meth:`simulate_run`, respectively. Default is `'simulate'`. rstate : `~numpy.random.RandomState`, optional `~numpy.random.RandomState` instance. return_new : bool, optional Whether to return the realization of the run used to compute the KL divergence. Default is `False`. approx : bool, optional Whether to approximate all sets of uniform order statistics by their associated marginals (from the Beta distribution). Default is `False`. Returns ------- kld : `~numpy.ndarray` with shape (nsamps,) The cumulative KL divergence defined *from* `res` *to* a random realization of `res`. new_res : :class:`~dynesty.results.Results` instance, optional The :class:`~dynesty.results.Results` instance corresponding to the random realization we computed the KL divergence *to*. """ # Define our original importance weights. logp2 = res.logwt - res.logz[-1] # Compute a random realization of our run. if error == 'jitter': new_res = jitter_run(res, rstate=rstate, approx=approx) elif error == 'resample': new_res, samp_idx = resample_run(res, rstate=rstate, return_idx=True) logp2 = logp2[samp_idx] # re-order our original results to match elif error == 'simulate': new_res, samp_idx = resample_run(res, rstate=rstate, return_idx=True) new_res = jitter_run(new_res) logp2 = logp2[samp_idx] # re-order our original results to match else: raise ValueError("Input `'error'` option '{0}' is not valid." .format(error)) # Define our new importance weights. logp1 = new_res.logwt - new_res.logz[-1] # Compute the KL divergence. kld = np.cumsum(np.exp(logp1) * (logp1 - logp2)) if return_new: return kld, new_res else: return kld
430ad6cb1c25c0489343717d7cf8a44bdfb5725d
4,007
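A minimal usage sketch for kld_error, assuming a small dynesty nested-sampling run supplies the Results object; the toy Gaussian likelihood, ndim and nlive values are illustrative only, and the jitter_run/resample_run helpers used above are assumed to be importable (e.g. from dynesty.utils).

import numpy as np
import dynesty

# Toy 2-D standard-normal likelihood with a flat prior on [-10, 10]^2.
def loglike(theta):
    return -0.5 * np.sum(theta ** 2)

def prior_transform(u):
    return 20.0 * u - 10.0

sampler = dynesty.NestedSampler(loglike, prior_transform, ndim=2, nlive=200)
sampler.run_nested(print_progress=False)
results = sampler.results

# Cumulative KL divergence from `results` to a simulated realization of it.
kld, realized = kld_error(results, error='simulate', return_new=True)
print(kld[-1])  # final cumulative KL divergence estimate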
def article_detail(request, slug): """ Show details of the article """ article = get_article_by_slug(slug=slug, annotate=True) comment_form = CommentForm() total_views = r.incr(f'article:{article.id}:views') return render(request, 'articles/post/detail.html', {'article': article, 'section': article.category, 'comment_form': comment_form, 'total_views': total_views})
079d31509fe573ef207600e007e51ac14e9121c4
4,008
def deactivate(userid, tfa_response): """ Deactivate 2FA for a specified user. Turns off 2FA by nulling-out the ``login.twofa_secret`` field for the user record, and clear any remaining recovery codes. Parameters: userid: The user for which 2FA should be disabled. tfa_response: User-supplied response. May be either the Google Authenticator (or other app) supplied code, or a recovery code. Returns: Boolean True if 2FA was successfully disabled, otherwise Boolean False if the verification of `tfa_response` failed (bad challenge-response or invalid recovery code). """ # Sanity checking for length requirement of recovery code/TOTP is performed in verify() function if verify(userid, tfa_response): # Verification passed, so disable 2FA force_deactivate(userid) return True else: return False
1ac0f5e716675ccb118c9656bcc7c9b4bd3f9606
4,009
import numpy as np

def hsi_normalize(data, max_=4096, min_=0, denormalize=False):
    """
    Custom normalizer for RGB and HSI images: scales values from [min_, max_]
    to the range [-1, 1]. With denormalize=True it applies the inverse mapping
    back to [min_, max_].
    """
    HSI_MAX = max_
    HSI_MIN = min_
    NEW_MAX = 1
    NEW_MIN = -1

    if denormalize:
        scaled = (data - NEW_MIN) * (HSI_MAX - HSI_MIN) / (NEW_MAX - NEW_MIN) + HSI_MIN
        return scaled.astype(np.float32)

    scaled = (data - HSI_MIN) * (NEW_MAX - NEW_MIN) / (HSI_MAX - HSI_MIN) + NEW_MIN
    return scaled.astype(np.float32)
7f276e90843c81bc3cf7715e54dadd4a78162f93
4,010
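A quick round-trip check of hsi_normalize on a synthetic array (the values are chosen arbitrarily):

import numpy as np

raw = np.array([[0, 2048, 4096]], dtype=np.float32)
norm = hsi_normalize(raw)                      # -> [-1., 0., 1.]
back = hsi_normalize(norm, denormalize=True)   # -> [0., 2048., 4096.]
print(norm, back)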
from typing import Optional
import xml.etree.ElementTree as ET

def safe_elem_text(elem: Optional[ET.Element]) -> str:
    """Return the stripped text of an element if available.
    If not available (element is None or has no text), return the empty string."""
    text = getattr(elem, "text", "") or ""
    return text.strip()
12c3fe0c96ffdb5578e485b064ee4df088192114
4,011
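For example, with the standard-library ElementTree API, safe_elem_text degrades gracefully when find() returns None:

import xml.etree.ElementTree as ET

root = ET.fromstring("<person><name>  Ada  </name></person>")
print(safe_elem_text(root.find("name")))     # "Ada"
print(safe_elem_text(root.find("missing")))  # "" (find() returned None)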
def resource(filename): """Returns the URL a static resource, including versioning.""" return "/static/{0}/{1}".format(app.config["VERSION"], filename)
b330c052180cfd3d1b622c18cec7e633fa7a7910
4,012
import csv def read_q_stats(csv_path): """Return list of Q stats from file""" q_list = [] with open(csv_path, newline='') as csv_file: reader = csv.DictReader(csv_file) for row in reader: q_list.append(float(row['q'])) return q_list
f5bee4859dc4bac45c4c3e8033da1b4aba5d2818
4,014
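A small self-contained check for read_q_stats, writing a temporary CSV with a 'q' column first:

import csv
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "stats.csv")
with open(path, "w", newline="") as fh:
    writer = csv.DictWriter(fh, fieldnames=["q"])
    writer.writeheader()
    writer.writerows([{"q": 0.1}, {"q": 0.25}])

print(read_q_stats(path))  # [0.1, 0.25]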
def _validate(config):
    """Validate the configuration.
    """
    diff = set(REQUIRED_CONFIG_KEYS) - set(config.keys())
    if len(diff) > 0:
        raise ValueError(
            "config is missing required keys: {}".format(diff))
    elif config['state_initial']['status'] not in config['status_values']:
        raise ValueError(
            "initial status '{}' is not among the allowed status values"
            .format(config['state_initial']['status']))
    else:
        return config
07c92e5a5cc722efbbdc684780b1edb66aea2532
4,015
def exp_by_squaring(x, n): """ Compute x**n using exponentiation by squaring. """ if n == 0: return 1 if n == 1: return x if n % 2 == 0: return exp_by_squaring(x * x, n // 2) return exp_by_squaring(x * x, (n - 1) // 2) * x
ef63d2bf6f42690fd7c5975af0e961e2a3c6172f
4,016
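exp_by_squaring needs only O(log n) multiplications; a few spot checks:

print(exp_by_squaring(2, 10))   # 1024
print(exp_by_squaring(3, 0))    # 1
print(exp_by_squaring(1.5, 3))  # 3.375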
def _compare(expected, actual): """ Compare SslParams object with dictionary """ if expected is None and actual is None: return True if isinstance(expected, dict) and isinstance(actual, SslParams): return expected == actual.__dict__ return False
4a82d1631b97960ecc44028df4c3e43dc664d3e5
4,017
def update_token(refresh_token, user_id):
    """
    Refresh the tokens for a given user

    :param refresh_token: Refresh token of the user
    :param user_id: ID of the user for whom the token is to be generated
    :returns: Generated JWT token
    """
    token = Token.query.filter_by(refresh_token=refresh_token).first()
    token.access_token = Token.encode_token(user_id, "access").decode("utf-8")
    token.refresh_token = Token.encode_token(user_id, "refresh").decode(
        "utf-8"
    )
    db.session.commit()
    return token
0b91cf19f808067a9c09b33d7497548743debe14
4,018
def minimax(board):
    """
    Returns the optimal action for the current player on the board.
    """
    def max_value(state, depth=0):
        if ttt.terminal(state):
            return (None, ttt.utility(state))
        v = (None, -2)
        for action in ttt.actions(state):
            v = max(v,
                    (action, min_value(ttt.result(state, action), depth+1)[1] - (depth/10)),
                    key=lambda x: x[1])
        return v

    def min_value(state, depth=0):
        if ttt.terminal(state):
            return (None, ttt.utility(state))
        v = (None, 2)
        for action in ttt.actions(state):
            v = min(v,
                    (action, max_value(ttt.result(state, action), depth+1)[1] + (depth/10)),
                    key=lambda x: x[1])
        return v

    # X and O are the player constants exposed by the same tic-tac-toe module
    # that provides the ttt helper functions.
    if ttt.player(board) == ttt.X:
        return max_value(board)[0]
    elif ttt.player(board) == ttt.O:
        return min_value(board)[0]
8de42db3ad40d597bf9600bcd5fec7c7f775f84d
4,019
import random def random_order_dic_keys_into_list(in_dic): """ Read in dictionary keys, and return random order list of IDs. """ id_list = [] for key in in_dic: id_list.append(key) random.shuffle(id_list) return id_list
d18ac34f983fbaff59bfd90304cd8a4a5ebad42e
4,020
from typing import Union from pathlib import Path from typing import Dict import json def read_json(json_path: Union[str, Path]) -> Dict: """ Read json file from a path. Args: json_path: File path to a json file. Returns: Python dictionary """ with open(json_path, "r") as fp: data = json.load(fp) return data
c0b55e5363a134282977ee8a01083490e9908fcf
4,021
import igraph
import scipy.io as sio
from scipy.sparse import csc_matrix

def igraph_to_csc(g, save=False, fn="csc_matlab"):
    """
    Convert an igraph to scipy.sparse.csc.csc_matrix

    Positional arguments:
    =====================
    g - the igraph graph

    Optional arguments:
    ===================
    save - save file to disk
    fn - the file name to be used when writing (appendmat = True by default)
    """
    assert isinstance(g, igraph.Graph), "Arg1 'g' must be an igraph graph"

    print("Creating CSC from igraph object ...")
    gs = csc_matrix(g.get_adjacency().data)  # Equiv of calling to_dense so may cause MemError
    print("CSC creation complete ...")

    if save:
        print("Saving to MAT file ...")
        sio.savemat(fn, {"data": gs}, True)  # save as MAT format only. No other options!
    return gs
12ea73531599cc03525e898e39b88f2ed0ad97c3
4,022
def xml2dict(data): """Turn XML into a dictionary.""" converter = XML2Dict() if hasattr(data, 'read'): # Then it's a file. data = data.read() return converter.fromstring(data)
0c73989b4ea83b2b1c126b7f1b39c6ebc9e18115
4,023
def balance_dataset(data, size=60000): """Implements upsampling and downsampling for the three classes (low, medium, and high) Parameters ---------- data : pandas DataFrame A dataframe containing the labels indicating the different nightlight intensity bins size : int The number of samples per classes for upsampling and downsampling Returns ------- pandas DataFrame The data with relabelled and balanced nightlight intensity classes """ bin_labels = data.label.unique() classes = [] for label in bin_labels: class_ = data[data.label == label].reset_index() if len(class_) >= size: sample = class_.sample( n=size, replace=False, random_state=SEED ) elif len(class_) < size: sample = class_.sample( n=size, replace=True, random_state=SEED ) classes.append(sample) data_balanced = pd.concat(classes) data_balanced = data_balanced.sample( frac=1, random_state=SEED ).reset_index(drop=True) data_balanced = data_balanced.iloc[:, 1:] return data_balanced
93cd5888c28f9e208379d7745790b7e1e0cb5b79
4,024
def updateStopList(userId, newStop): """ Updates the list of stops for the user in the dynamodb table """ response = dynamodb_table.query( KeyConditionExpression=Key('userId').eq(userId)) if response and len(response["Items"]) > 0: stops = response["Items"][0]['stops'] else: stops = {} if newStop['code'] in stops: existingStop = stops[newStop['code']] if 'buses' in existingStop: newStop['buses'] = list( set(existingStop['buses'] + newStop['buses'])) stops[newStop['code']] = newStop response = dynamodb_table.update_item( Key={ 'userId': userId }, UpdateExpression="set stops = :s", ExpressionAttributeValues={ ':s': stops } ) card_title = render_template('card_title') responseText = render_template( "add_bus_success", stop=newStop['code'], route=",".join(newStop['buses'])) return statement(responseText).simple_card(card_title, responseText)
433ae6c4562f6a6541fc262925ce0bba6fb742ec
4,025
import re def is_blacklisted_module(module: str) -> bool: """Return `True` if the given module matches a blacklisted pattern.""" # Exclude stdlib modules such as the built-in "_thread" if is_stdlib_module(module): return False # Allow user specified exclusions via CLI blacklist = set.union(MODULE_BLACKLIST_PATTERNS, config.excluded_imports) return any(re.fullmatch(p, module) for p in blacklist)
391d63a4d8a4f24d1d3ba355745ffe0079143e68
4,026
def _build_geojson_query(query): """ See usages below. """ # this is basically a translation of the postgis ST_AsGeoJSON example into sqlalchemy/geoalchemy2 return func.json_build_object( "type", "FeatureCollection", "features", func.json_agg(func.ST_AsGeoJSON(query.subquery(), maxdecimaldigits=5).cast(JSON)), )
dd7a0893258cf95e1244458ba9bd74c5239f65c5
4,029
import urllib def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None, separator='&'): """URL encode a dict/`MultiDict`. If a value is `None` it will not appear in the result string. Per default only values are encoded into the target charset strings. If `encode_keys` is set to ``True`` unicode keys are supported too. If `sort` is set to `True` the items are sorted by `key` or the default sorting algorithm. .. versionadded:: 0.5 `sort`, `key`, and `separator` were added. :param obj: the object to encode into a query string. :param charset: the charset of the query string. :param encode_keys: set to `True` if you have unicode keys. :param sort: set to `True` if you want parameters to be sorted by `key`. :param separator: the separator to be used for the pairs. :param key: an optional function to be used for sorting. For more details check out the :func:`sorted` documentation. """ if isinstance(obj, MultiDict): items = obj.lists() elif isinstance(obj, dict): items = [] for k, v in obj.iteritems(): if not isinstance(v, (tuple, list)): v = [v] items.append((k, v)) else: items = obj or () if sort: items.sort(key=key) tmp = [] for key, values in items: if encode_keys and isinstance(key, unicode): key = key.encode(charset) else: key = str(key) for value in values: if value is None: continue elif isinstance(value, unicode): value = value.encode(charset) else: value = str(value) tmp.append('%s=%s' % (urllib.quote(key), urllib.quote_plus(value))) return separator.join(tmp)
2106032d6a4cf895c525e9db702655af3439a95c
4,030
from datetime import datetime, timedelta

def create_export_and_wait_for_completion(name, bucket, prefix, encryption_config, role_arn=None):
    """
    Request QLDB to export the contents of the journal for the given time period and S3 configuration.
    Before calling this function the S3 bucket should be created, see
    :py:class:`pyqldbsamples.export_journal.create_s3_bucket_if_not_exists`

    :type name: str
    :param name: Name of the ledger to create a journal export for.

    :type bucket: str
    :param bucket: S3 bucket to write the data to.

    :type prefix: str
    :param prefix: S3 prefix to be prefixed to the files being written.

    :type encryption_config: dict
    :param encryption_config: Encryption configuration for S3.

    :type role_arn: str
    :param role_arn: The IAM role ARN to be used when exporting the journal.

    :rtype: dict
    :return: The result of the request.
    """
    if role_arn is None:
        role_arn = create_export_role(EXPORT_ROLE_NAME, encryption_config.get('KmsKeyArn'), ROLE_POLICY_NAME, bucket)
    try:
        start_time = datetime.utcnow() - timedelta(minutes=JOURNAL_EXPORT_TIME_WINDOW_MINUTES)
        end_time = datetime.utcnow()

        result = create_export(name, start_time, end_time, bucket, prefix, encryption_config, role_arn)
        wait_for_export_to_complete(Constants.LEDGER_NAME, result.get('ExportId'))
        logger.info('JournalS3Export for exportId {} is completed.'.format(result.get('ExportId')))
        return result
    except Exception as e:
        logger.exception('Unable to create an export!')
        raise e
9fb6f66dc02d70ffafe1c388188b99b9695a6900
4,031
def sample_student(user, **kwargs): """create and return sample student""" return models.Student.objects.create(user=user, **kwargs)
a70c3a181b1ee0627465f016953952e082a51c27
4,032
from datetime import datetime
from decimal import Decimal

def normalise_field_value(value):
    """ Converts a field value to a common type/format to make comparable to another. """
    if isinstance(value, datetime):
        return make_timezone_naive(value)
    elif isinstance(value, Decimal):
        return decimal_to_string(value)
    return value
3cbc4c4d7ae027c030e70a1a2bd268bdd0ebe556
4,033
from typing import Any from typing import Tuple from typing import List import collections import yaml def parse_yaml(stream: Any) -> Tuple[Swagger, List[str]]: """ Parse the Swagger specification from the given text. :param stream: YAML representation of the Swagger spec satisfying file interface :return: (parsed Swagger specification, parsing errors if any) """ # adapted from https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts # and https://stackoverflow.com/questions/13319067/parsing-yaml-return-with-line-number object_pairs_hook = collections.OrderedDict class OrderedLoader(yaml.SafeLoader): def compose_node(self, parent, index): # the line number where the previous token has ended (plus empty lines) node = Composer.compose_node(self, parent, index) node.__lineno__ = self.line + 1 return node def construct_mapping(loader, node, deep=False): loader.flatten_mapping(node) mapping = Constructor.construct_pairs(loader, node, deep=deep) ordered_hook = object_pairs_hook(mapping) # assert not hasattr(ordered_hook, "__lineno__"), \ # "Expected ordered mapping to have no __lineno__ attribute set before" # setattr(ordered_hook, "__lineno__", node.__lineno__) return RawDict(adict=ordered_hook, source=stream.name, lineno=node.__lineno__) OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping) raw_dict = yaml.load(stream, OrderedLoader) swagger = Swagger() errors = [] # type: List[str] adict = raw_dict.adict tag_exists: bool = False if 'tags' in adict: if len(adict['tags']) > 0: tag_exists = True for tag in adict['tags']: for key, value in tag.adict.items(): if key == 'name': swagger.name = value if swagger.name == '': if not (OptionKey.PermitAbsenseOfTagNameIfNoTagsExist in parse_options and not tag_exists): errors.append('missing tag "name" in the swagger specification') swagger.base_path = adict.get('basePath', '') for path_id, path_dict in adict.get('paths', RawDict()).adict.items(): path, path_errors = _parse_path(raw_dict=path_dict) path.identifier = path_id path.swagger = swagger errors.extend(['in path {!r}: {}'.format(path_id, error) for error in path_errors]) if not path_errors: swagger.paths[path_id] = path for def_id, def_dict in adict.get('definitions', RawDict()).adict.items(): typedef, def_errors = _parse_typedef(raw_dict=def_dict) errors.extend(['in definition {!r}: {}'.format(def_id, error) for error in def_errors]) adef = Definition() adef.swagger = swagger adef.identifier = def_id adef.typedef = typedef if not def_errors: swagger.definitions[def_id] = adef for param_id, param_dict in adict.get('parameters', RawDict()).adict.items(): param, param_errors = _parse_parameter(raw_dict=param_dict) errors.extend(['in parameter {!r}: {}'.format(param_id, error) for error in param_errors]) if not param_errors: swagger.parameters[param_id] = param swagger.raw_dict = raw_dict return swagger, errors
7155520db98bf1d884ba46f22b46297a901f4411
4,035
import itertools def dataset_first_n(dataset, n, show_classes=False, class_labels=None, **kw): """ Plots first n images of a dataset containing tensor images. """ # [(img0, cls0), ..., # (imgN, clsN)] first_n = list(itertools.islice(dataset, n)) # Split (image, class) tuples first_n_images, first_n_classes = zip(*first_n) if show_classes: titles = first_n_classes if class_labels: titles = [class_labels[cls] for cls in first_n_classes] else: titles = [] return tensors_as_images(first_n_images, titles=titles, **kw)
ed8394fc2a1b607597599f36545c9182a9bc8187
4,036
def unit_conversion(thing, units, length=False): """converts base data between metric, imperial, or nautical units""" if 'n/a' == thing: return 'n/a' try: thing = round(thing * CONVERSION[units][0 + length], 2) except TypeError: thing = 'fubar' return thing, CONVERSION[units][2 + length]
96bfb9cda575a8b2efc959b6053284bec1d286a6
4,037
import functools
from timeit import default_timer

def timed(func):
    """Decorate function to print elapsed time upon completion."""
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        t1 = default_timer()
        result = func(*args, **kwargs)
        t2 = default_timer()
        print('func:{} args:[{}, {}] took: {:.4f} sec'.format(
            func.__name__, args, kwargs, t2 - t1))
        return result
    return wrap
d572488c674607b94e2b80235103d6f0bb27738f
4,038
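A usage sketch for the timed decorator; the sleep is only there to make the elapsed time visible.

import time

@timed
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

slow_add(2, 3)
# prints something like: func:slow_add args:[(2, 3), {}] took: 0.1004 sec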
def fade_out(s, fade=cf.output.fade_out): """ Apply fade-out to waveform time signal. Arguments: ndarray:s -- Audio time series float:fade (cf.output.fade_out) -- Fade-out length in seconds Returns faded waveform. """ length = int(fade * sr) shape = [1] * len(s.shape) shape[0] = length win = np.hanning(length * 2)[length:] win = win.reshape(shape) if length < len(s): s[-length:] = s[-length:] * win return s
0b554dbb1da7253e39c651ccdc38ba91e67a1ee4
4,040
def create_arch(T, D, units=64, alpha=0, dr_rate=.3): """Creates the architecture of miint""" X = K.Input(shape=(T, D)) active_mask = K.Input(shape=(T, 1)) edges = K.Input(shape=(T, None)) ycell = netRNN(T=T, D=D, units=units, alpha=alpha, dr_rate=dr_rate) yrnn = K.layers.RNN(ycell, return_sequences=True) Y = yrnn((X, edges, active_mask)) return K.Model(inputs=[X, active_mask, edges], outputs=Y)
cc9723657a7a0822d73cc78f6e1698b33257f9e0
4,041
def redact(str_to_redact, items_to_redact): """ return str_to_redact with items redacted """ if items_to_redact: for item_to_redact in items_to_redact: str_to_redact = str_to_redact.replace(item_to_redact, '***') return str_to_redact
f86f24d3354780568ec2f2cbf5d32798a43fdb6a
4,042
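Example of redact in action; passing None or an empty list leaves the string untouched:

msg = "token=abc123 user=alice"
print(redact(msg, ["abc123"]))  # token=*** user=alice
print(redact(msg, None))        # token=abc123 user=alice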
import numpy as np

def FibreDirections(mesh):
    """
    Compute the fibre directions of the components at the integration points for
    the material model in Florence and for the auxiliary routines in this script.
    The first directions feed the rotation matrix, so they should always be
    present in this order: normal, tangential, axial.
    """
    ndim = mesh.InferSpatialDimension()
    nfibre = 2
    # Geometric definitions per element
    divider = mesh.elements.shape[1]
    directrix = [0., 1., 0.]
    fibre_direction = np.zeros((mesh.nelem, nfibre, ndim), dtype=np.float64)

    # Loop through the elements in the mesh
    for elem in range(mesh.nelem):
        # Geometric definitions per element
        center = np.sum(mesh.points[mesh.elements[elem, :], :], axis=0) / divider
        tangential = np.cross(directrix, center)
        tangential = tangential / np.linalg.norm(tangential)
        normal = np.cross(tangential, directrix)

        # Define the anisotropic orientations
        fibre_direction[elem, 0, :] = np.multiply(directrix, np.cos(np.pi/4.)) + np.multiply(tangential, np.sin(np.pi/4.))
        fibre_direction[elem, 1, :] = np.multiply(directrix, np.cos(np.pi/4.)) - np.multiply(tangential, np.sin(np.pi/4.))

    return fibre_direction
9408702e72dde7586f42137ad25a0a944ed28a93
4,043
def put(consul_url=None, token=None, key=None, value=None, **kwargs):
    """
    Put values into Consul

    :param consul_url: The Consul server URL.
    :param key: The key to use as the starting point for the list.
    :param value: The value to set the key to.
    :param flags: This can be used to specify an unsigned value
                  between 0 and 2^64-1. Clients can choose to use
                  this however makes sense for their application.
    :param cas: This flag is used to turn the PUT into a
                Check-And-Set operation.
    :param acquire: This flag is used to turn the PUT into a
                    lock acquisition operation.
    :param release: This flag is used to turn the PUT into a
                    lock release operation.
    :return: Boolean & message of success or failure.

    CLI Example:

    .. code-block:: bash

        salt '*' consul.put key='web/key1' value="Hello there"

        salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592'

        salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592'

    """
    ret = {}
    if not consul_url:
        consul_url = _get_config()
        if not consul_url:
            log.error("No Consul URL found.")
            ret["message"] = "No Consul URL found."
            ret["res"] = False
            return ret

    if not key:
        raise SaltInvocationError('Required argument "key" is missing.')

    # Invalid to specify these together
    conflicting_args = ["cas", "release", "acquire"]
    for _l1 in conflicting_args:
        for _l2 in conflicting_args:
            if _l1 in kwargs and _l2 in kwargs and _l1 != _l2:
                raise SaltInvocationError(
                    "Using arguments `{}` and `{}` together is invalid.".format(
                        _l1, _l2
                    )
                )

    query_params = {}

    available_sessions = session_list(consul_url=consul_url, return_list=True)
    _current = get(consul_url=consul_url, token=token, key=key)

    if "flags" in kwargs:
        if kwargs["flags"] >= 0 and kwargs["flags"] <= 2 ** 64:
            query_params["flags"] = kwargs["flags"]

    if "cas" in kwargs:
        if _current["res"]:
            if kwargs["cas"] == 0:
                ret["message"] = "Key {} exists, index must be non-zero.".format(key)
                ret["res"] = False
                return ret

            if kwargs["cas"] != _current["data"]["ModifyIndex"]:
                ret["message"] = "Key {} exists, but indexes do not match.".format(key)
                ret["res"] = False
                return ret
            query_params["cas"] = kwargs["cas"]
        else:
            ret[
                "message"
            ] = "Key {} does not exist, CAS argument can not be used.".format(key)
            ret["res"] = False
            return ret

    if "acquire" in kwargs:
        if kwargs["acquire"] not in available_sessions:
            ret["message"] = "{} is not a valid session.".format(kwargs["acquire"])
            ret["res"] = False
            return ret

        query_params["acquire"] = kwargs["acquire"]

    if "release" in kwargs:
        if _current["res"]:
            if "Session" in _current["data"]:
                if _current["data"]["Session"] == kwargs["release"]:
                    query_params["release"] = kwargs["release"]
                else:
                    ret["message"] = "{} locked by another session.".format(key)
                    ret["res"] = False
                    return ret
            else:
                ret["message"] = "{} is not a valid session.".format(kwargs["release"])
                ret["res"] = False
        else:
            log.error("Key %s does not exist. Skipping release.", key)

    data = value
    function = "kv/{}".format(key)
    method = "PUT"
    res = _query(
        consul_url=consul_url,
        token=token,
        function=function,
        method=method,
        data=data,
        query_params=query_params,
    )

    if res["res"]:
        ret["res"] = True
        ret["data"] = "Added key {} with value {}.".format(key, value)
    else:
        ret["res"] = False
        ret["data"] = "Unable to add key {} with value {}.".format(key, value)
    if "error" in res:
        ret["error"] = res["error"]

    return ret
3b80283da426e5515026fb8dc0db619b2a471f41
4,044
def prepare_w16(): """ Prepare a 16-qubit W state using sqrt(iswaps) and local gates, respecting linear topology """ ket = qf.zero_state(16) circ = w16_circuit() ket = circ.run(ket) return ket
74d0599e1520aab44088480616e2062153a789aa
4,045
import requests def get_All_Endpoints(config): """ :return: """ url = 'https://{}:9060/ers/config/endpoint'.format(config['hostname']) headers = { 'Content-Type': 'application/json', 'Accept': 'application/json' } body = {} response = requests.request('GET', url, headers=headers, data=body, auth=HTTPBasicAuth('Admin', 'C1sco12345'), verify=False) result = response.json() return result['SearchResult']['resources']
f74ad8ba7d65de11851b71318b2818776c5dc29b
4,046
import pandas import numpy def synthetic_peptides_by_subsequence( num_peptides, fraction_binders=0.5, lengths=range(8, 20), binding_subsequences=["A?????Q"]): """ Generate a toy dataset where each peptide is a binder if and only if it has one of the specified subsequences. Parameters ---------- num_peptides : int Number of rows in result fraction_binders : float Fraction of rows in result where "binder" col is 1 lengths : dict, Series, or list If a dict or Series, then this should map lengths to the fraction of the result to have the given peptide length. If it's a list of lengths then all lengths are given equal weight. binding_subsequences : list of string Peptides with any of the given subsequences will be considered binders. Question marks ("?") in these sequences will be replaced by random amino acids. Returns ---------- pandas.DataFrame, indexed by peptide sequence. The "binder" column is a binary indicator for whether the peptide is a binder. """ if not isinstance(lengths, dict): lengths = dict((length, 1.0) for length in lengths) lengths_series = pandas.Series(lengths) lengths_series /= len(lengths) num_binders = int(round(num_peptides * fraction_binders)) num_non_binders = num_peptides - num_binders print(num_binders, num_non_binders) peptides = [] # Generate non-binders for (length, weight) in lengths_series.iteritems(): peptides.extend( random_peptides(round(weight * num_non_binders), round(length))) for binding_core in binding_subsequences: # Generate binders lengths_binders = lengths_series.ix[ lengths_series.index >= len(binding_core) ] normalized_lengths_binders = ( lengths_binders / lengths_binders.sum() / len(binding_subsequences)) for (length, weight) in normalized_lengths_binders.iteritems(): if length >= len(binding_core): num_peptides_to_make = int(round(weight * num_binders)) if length == len(binding_core): start_positions = [0] * num_peptides_to_make else: start_positions = numpy.random.choice( length - len(binding_core), num_peptides_to_make) peptides.extend( "".join([ random_peptides(1, length=start_position)[0], binding_core, random_peptides(1, length=length - len( binding_core) - start_position)[0], ]) for start_position in start_positions) df = pandas.DataFrame(index=set(peptides)) df["binder"] = False for binding_core in binding_subsequences: df["binder"] = df["binder"] | df.index.str.contains( binding_core, regex=False) def replace_question_marks(s): while "?" in s: s = s.replace("?", numpy.random.choice(AMINO_ACIDS)) return s df.index = df.index.map(replace_question_marks) df_shuffled = df.sample(frac=1) return df_shuffled
d4cfa202043e3a98a7960246a7d8775ff147201c
4,049
def gce_zones() -> list: """Returns the list of GCE zones""" _bcds = dict.fromkeys(['us-east1', 'europe-west1'], ['b', 'c', 'd']) _abcfs = dict.fromkeys(['us-central1'], ['a', 'b', 'c', 'f']) _abcs = dict.fromkeys( [ 'us-east4', 'us-west1', 'europe-west4', 'europe-west3', 'europe-west2', 'asia-east1', 'asia-southeast1', 'asia-northeast1', 'asia-south1', 'australia-southeast1', 'southamerica-east1', 'asia-east2', 'asia-northeast2', 'europe-north1', 'europe-west6', 'northamerica-northeast1', 'us-west2', ], ['a', 'b', 'c'], ) _zones_combo = {**_bcds, **_abcfs, **_abcs} zones = [f'{loc}-{zone}' for loc, zones in _zones_combo.items() for zone in zones] return zones
10e684b2f458fe54699eb9886af148b092ec604d
4,050
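gce_zones simply expands the static region-to-suffix tables above; since dict insertion order is preserved, the first entries come from us-east1:

zones = gce_zones()
print(len(zones))  # 61 zone names in this static list
print(zones[:3])   # ['us-east1-b', 'us-east1-c', 'us-east1-d']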
def empty_netbox_query(): """Return an empty list to a list query.""" value = { "count": 0, "next": None, "previous": None, "results": [], } return value
9b017c34a3396a82edc269b10b6bfc6b7f878bc3
4,051
import psutil import time def get_process_metrics(proc): """ Extracts CPU times, memory infos and connection infos about a given process started via Popen(). Also obtains the return code. """ p = psutil.Process(proc.pid) max_cpu = [0, 0] max_mem = [0, 0] conns = [] while proc.poll() is None: try: cpu = list(p.cpu_times()) mem = list(p.memory_info()) conns = p.connections('all') for child in p.children(recursive=True): c_cpu = list(child.cpu_times()) c_mem = list(child.memory_info()) cpu[0] += c_cpu[0] cpu[1] += c_cpu[1] mem[0] += c_mem[0] mem[1] += c_mem[1] if max_cpu[0] < cpu[0]: max_cpu = cpu if max_mem[0] < mem[0]: max_mem = mem except (psutil.AccessDenied, psutil.NoSuchProcess): pass time.sleep(1) retcode = proc.wait() return retcode, max_cpu, max_mem, conns
7be8688debbde33bbcfb43b483d8669241e029d6
4,052
import numpy as np

def tau_from_T(Tobs, Tkin):
    """
    Line optical depth from observed temperature and excitation temperature in Kelvin
    """
    tau = -np.log(1. - (Tobs / Tkin))
    return tau
089cdc9ae3692037fa886b5c168e03ee2b6ec9ce
4,053
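A quick numeric check of tau_from_T: an observed 5 K line against a 10 K excitation temperature gives tau = -ln(1 - 0.5) ≈ 0.693.

print(tau_from_T(5.0, 10.0))  # ~0.6931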
def create_classes_names_list(training_set): """ :param training_set: dict(list, list) :return: (list, list) """ learn_classes_list = [] for k, v in training_set.items(): learn_classes_list.extend([str(k)] * len(v)) return learn_classes_list
0b30153afb730d4e0c31e87635c9ece71c530a41
4,054
from petastorm.spark import SparkDatasetConverter

def get_petastorm_dataset(cache_dir: str, partitions: int = 4):
    """
    This DataLoader assumes that the dataset has already been converted to a Delta table.
    The Delta table schema is:

    root
    |-- sample_id: string (nullable = true)
    |-- value: string (nullable = true)
    |-- sample_label: string (nullable = true)
    |-- filepath: string (nullable = true)
    |-- filename: string (nullable = true)
    |-- extension: string (nullable = true)
    |-- set: string (nullable = true)
    |-- label: integer (nullable = true)

    See: TBD to Load and convert the aclImdb dataset from the tf sample dataset lib

    Args:
        cache_dir: Cache directory for Petastorm
        partitions: Number of partitions for Petastorm; partitions need to match
                    num horovod threads / gpus (TO CHECK)

    Returns:
        df_train: spark df of training data
        df_val: spark df of val data
        df_test: spark df of test data
        size_train: size of the training dataset for use in batch step calcs
        size_val: size of the val dataset for use in validation batch step calcs
        size_test: size of the test dataset
    """
    spark.conf.set(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, cache_dir)

    train_frame = spark.sql("select value, `label` \
                        from brian_petastorm_datasets.aclImdb_label \
                        where `set` = 'train'")

    df_test = spark.sql("select value, `label` \
                        from brian_petastorm_datasets.aclImdb_label \
                        where `set` = 'test'")

    df_train, df_val = train_frame.randomSplit([0.8, 0.2], seed=12345)

    df_train.repartition(partitions)
    df_val.repartition(partitions)
    df_test.repartition(partitions)

    size_train = df_train.count()
    size_val = df_val.count()
    size_test = df_test.count()

    return df_train, df_val, df_test, size_train, size_val, size_test
27f6777b2c1cebad08f8a416d360a2b7096febec
4,056
def get_corners(square_to_edges, edge_to_squares): """Get squares ids of squares which place in grid in corner.""" return get_squares_with_free_edge(square_to_edges, edge_to_squares, 2)
1370f472aedf83f7aa17b64a813946a3a760968d
4,057
async def get_untagged_joke(): """ Gets an untagged joke from the jokes table and sends returns it :return: json = {joke_id, joke} """ df = jokes.get_untagged_joke() if not df.empty: response = {"joke": df["joke"][0], "joke_id": int(df["id"][0])} else: response = {"joke": "No more jokes to tag", "joke_id": -1} return response
a2dde1d5ddba47beb5e7e51b9f512f0a861336da
4,058
def map_parallel(function, xs): """Apply a remote function to each element of a list.""" if not isinstance(xs, list): raise ValueError('The xs argument must be a list.') if not hasattr(function, 'remote'): raise ValueError('The function argument must be a remote function.') # EXERCISE: Modify the list comprehension below to invoke "function" # remotely on each element of "xs". This should essentially submit # one remote task for each element of the list and then return the # resulting list of ObjectIDs. return [function.remote(x) for x in xs]
1fe75868d5ff12a361a6aebd9e4e49bf92c32126
4,059
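A usage sketch for map_parallel with Ray; the squaring task is illustrative only.

import ray

ray.init()

@ray.remote
def square(x):
    return x * x

object_ids = map_parallel(square, [1, 2, 3, 4])
print(ray.get(object_ids))  # [1, 4, 9, 16]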
def log_interp(x,y,xnew): """ Apply interpolation in logarithmic space for both x and y. Beyound input x range, returns 10^0=1 """ ynew = 10**ius(np.log10(x), np.log10(y), ext=3)(np.log10(xnew)) return ynew
16ef0cc494f61c031f9fd8f8e820a17bb6c83df8
4,060
def inten_sat_compact(args):
    """
    Memory-saving version of inten_scale followed by saturation.
    Useful for multiprocessing, where the arguments arrive packed in a single tuple.

    Parameters
    ----------
    args : tuple
        Tuple whose first element is an image of dtype np.uint8.

    Returns
    -------
    numpy.ndarray
        The squared product of the intensity scale and saturation of the input
        image, as np.float32.
    """
    return ((inten_scale(args[0]) * saturation(args[0])) ** 2).astype(np.float32)
9624891f9d09c13d107907fcd30e2f102ff00ee2
4,061
def masseuse_memo(A, memo, ind=0):
    """
    Return the maximum total value obtainable from the appointment list A without
    taking two adjacent appointments, using memoization.

    :param A: list of appointment values
    :param memo: dict used as the memoization cache
    :param ind: index of the current appointment
    :return: best achievable total from index `ind` onward
    """
    # Stop once we have run past the end of the list
    if ind > len(A) - 1:
        return 0

    if ind not in memo:
        memo[ind] = max(masseuse_memo(A, memo, ind + 2) + A[ind],
                        masseuse_memo(A, memo, ind + 1))

    return memo[ind]
03d108cb551f297fc4fa53cf9575d03af497ee38
4,062
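For example, with appointments [5, 1, 1, 5] the best non-adjacent selection is the first and last values:

appointments = [5, 1, 1, 5]
print(masseuse_memo(appointments, {}))  # 10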
import torch
import numpy as np

def unique_pairs(bonded_nbr_list):
    """
    Reduces the bonded neighbor list to only include unique pairs of bonds. For example,
    if atoms 3 and 5 are bonded, then `bonded_nbr_list` will have items [3, 5] and also [5, 3].
    This function will reduce the pairs only to [3, 5] (i.e. only the pair in which the first
    index is lower).

    Args:
        bonded_nbr_list (list): list of arrays of bonded pairs for each molecule.
    Returns:
        sorted_pairs (list): same as bonded_nbr_list but without duplicate pairs.
    """

    unique_pairs = []
    for pair in bonded_nbr_list:
        # sort according to the first item in the pair
        sorted_pair = torch.sort(pair)[0].numpy().tolist()
        if sorted_pair not in unique_pairs:
            unique_pairs.append(sorted_pair)

    # now make sure that the sorting is still good (this may be unnecessary but I added
    # it just to make sure)
    idx = list(range(len(unique_pairs)))
    # first_arg = list of the first node in each pair
    first_arg = [pair[0] for pair in unique_pairs]
    # sorted_idx = sort the indices of unique_pairs by the first node in each pair
    sorted_idx = [item[-1] for item in sorted(zip(first_arg, idx))]
    # re-arrange by sorted_idx
    sorted_pairs = torch.LongTensor(np.array(unique_pairs)[sorted_idx])

    return sorted_pairs
e974728ad831a956f1489b83bb77b15833ae9b82
4,063
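A small check of unique_pairs on a hand-built neighbor list, with both orderings of each bond included as described in the docstring:

import torch

bonded = [torch.tensor([3, 5]), torch.tensor([5, 3]),
          torch.tensor([1, 2]), torch.tensor([2, 1])]
print(unique_pairs(bonded))
# tensor([[1, 2],
#         [3, 5]])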
from functools import wraps
import discord

def permission(*perms: str):
    """
    Decorator that runs the command only if the author has the specified permissions.
    perms must be a string matching any property of discord.Permissions.

    NOTE: this function is deprecated. Use the command 'permissions' attribute instead.
    """
    def decorator(func):
        @wraps(func)
        async def wrapped(message: discord.Message, *args, **kwargs):
            member_perms = message.channel.permissions_for(message.author)

            if all(getattr(member_perms, perm, False) for perm in perms):
                await func(message, *args, **kwargs)

        return wrapped
    return decorator
b0ef0dfec36a243152dff4ca11ab779d2c417ab8
4,064
import sabnzbd
import sabnzbd.filesystem

def validate_script(value):
    """Check if value is a valid script"""
    # T() is SABnzbd's translation helper, assumed to be available in this module's namespace.
    if not sabnzbd.__INITIALIZED__ or (value and sabnzbd.filesystem.is_valid_script(value)):
        return None, value
    elif (value and value == "None") or not value:
        return None, "None"
    return T("%s is not a valid script") % value, None
d4a5d6922fb14524bc9d11f57807d9a7f0e937f1
4,065