Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def render_curve(name,
                 data,
                 x_range=None,
                 y_range=None,
                 x_label=None,
                 y_label=None,
                 legends=None,
                 legend_kwargs={},
                 img_height=None,
                 img_width=None,
                 dpi=300,
                 figsize=(2, 2),
                 **kwargs):
    """Plot 1D curves.

    Args:
        name (str): rendering identifier
        data (Tensor|np.ndarray): a rank-1 or rank-2 tensor/np.array. If rank-2,
            then each row represents an individual curve.
        x_range (tuple[float]): min/max for x values. If None, ``x`` is the index
            sequence of curve points. If provided, ``x`` is evenly spaced by
            ``(x_range[1] - x_range[0]) / (N - 1)``.
        y_range (tuple[float]): a tuple of ``(min_y, max_y)`` for showing on the
            figure. If None, then it will be decided according to the ``y`` values.
            Note that this range won't change ``y`` data; it's only used by
            matplotlib for drawing ``y`` limits.
        x_label (str): shown besides x-axis
        y_label (str): shown besides y-axis
        legends (list[str]): label for each curve. No legends are shown if None.
        legend_kwargs (dict): optional legend kwargs
        img_height (int): height of the output image
        img_width (int): width of the output image
        dpi (int): resolution of each rendered image
        figsize (tuple[int]): figure size. For the relationship between ``dpi``
            and ``figsize``, please refer to `this post
            <https://stackoverflow.com/questions/47633546/relationship-between-dpi-and-figure-size>`_.
        **kwargs: all other arguments to ``ax.plot()``.

    Returns:
        Image: an output image rendered for the tensor
    """
    assert len(data.shape) in (1, 2), "Must be rank-1 or rank-2!"
    if not isinstance(data, np.ndarray):
        array = data.cpu().numpy()
    else:
        array = data
    if len(array.shape) == 1:
        array = np.expand_dims(array, 0)
    fig, ax = plt.subplots(figsize=figsize)
    M, N = array.shape
    # use an ndarray so the elementwise rescaling below works
    x = np.arange(N)
    if x_range is not None:
        delta = (x_range[1] - x_range[0]) / float(N - 1)
        x = delta * x + x_range[0]
    for i in range(M):
        ax.plot(x, array[i], **kwargs)
    if legends is not None:
        ax.legend(legends, loc="best", **legend_kwargs)
    if y_range:
        ax.set_ylim(y_range)
    if x_label:
        ax.set_xlabel(x_label)
    if y_label:
        ax.set_ylabel(y_label)
    return _convert_to_image(name, fig, dpi, img_height, img_width)
f0f60bf64c195f82ec91513f2c79a7c72a25599d
3,658,600
def CreateBooleanUnion1(breps, tolerance, manifoldOnly, multiple=False):
    """
    Compute the Boolean Union of a set of Breps.

    Args:
        breps (IEnumerable<Brep>): Breps to union.
        tolerance (double): Tolerance to use for union operation.
        manifoldOnly (bool): If true, non-manifold input breps are ignored.

    Returns:
        Brep[]: An array of Brep results or None on failure.
    """
    url = "rhino/geometry/brep/createbooleanunion-breparray_double_bool"
    if multiple:
        url += "?multiple=true"
    args = [breps, tolerance, manifoldOnly]
    if multiple:
        args = list(zip(breps, tolerance, manifoldOnly))
    response = Util.ComputeFetch(url, args)
    response = Util.DecodeToCommonObject(response)
    return response
ae397d73b9acbcdd52e9e83592322274047d9915
3,658,601
def make_singleton_class(class_reference, *args, **kwargs):
    """
    Make the given class a singleton class.

    *class_reference* is a reference to a class type, not an instance of a class.

    *args* and *kwargs* are parameters used to instantiate a singleton instance.

    To use this, suppose we have a class called ``DummyClass`` and later
    instantiate a variable ``dummy_instance`` as an instance of class
    ``DummyClass``. ``class_reference`` will be ``DummyClass``, not
    ``dummy_instance``.

    Note that this method is not for direct use. Always use ``@singleton`` or
    ``@singleton_with``.
    """
    # Name of the attribute that stores the singleton instance
    singleton_attr_name = '_singleton_instance'

    # The static method to get the singleton instance of the referenced class
    @staticmethod
    def instance():
        """
        Get a singleton instance.

        .. note:: The class acts as a singleton class by invoking this method.
        """
        return class_reference._singleton_instance

    # Intercept if the class has already been made a singleton class.
    if singleton_attr_name in dir(class_reference):
        raise SingletonInitializationException(
            'The attribute _singleton_instance is already assigned as instance of %s.'
            % type(class_reference._singleton_instance)
        )

    # Instantiate an instance for a singleton class.
    class_reference._singleton_instance = class_reference(*args, **kwargs)
    class_reference.instance = instance

    return class_reference
c33b09f2eee16e23dd1a10a914a8735120efbbfe
3,658,602
def get_coaches(soup):
    """
    scrape head coaches

    :param soup: html

    :return: dict of coaches for game
    """
    coaches = soup.find_all('tr', {'id': "HeadCoaches"})

    # If it picks up nothing just return the empty list
    if not coaches:
        return coaches

    coaches = coaches[0].find_all('td')

    return {
        'Away': coaches[1].get_text(),
        'Home': coaches[3].get_text()
    }
784b355adb885b0eb4f26e72168475e1abbe4d1f
3,658,603
import logging


def create_app(config_name):
    """
    Factory to create Flask application context using config option found in
    app.config

    :param config_name: (string) name of the chosen config option
    :return app: (Flask application context)
    """
    logging.basicConfig(
        filename="app.log",
        filemode="w",
        format="%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=logging.INFO,
    )
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    logging.info("App initialized.")
    register_extensions(app)
    register_blueprints(app)
    configure_database(app)
    return app
8dea98c2393b575c7c353debe4b84eea67ff9353
3,658,604
import math


def _rectify_countdown_or_bool(count_or_bool):
    """
    Used by recursive functions to specify which level to turn a bool on in.
    Counting down yields True, True, ..., False.
    Counting up yields False, False, False, ..., True.

    Args:
        count_or_bool (bool or int): if positive will count down, if negative
            will count up, if bool will remain the same

    Returns:
        int or bool: count_or_bool_

    CommandLine:
        python -m utool.util_str --test-_rectify_countdown_or_bool

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_str import _rectify_countdown_or_bool  # NOQA
        >>> count_or_bool = True
        >>> a1 = (_rectify_countdown_or_bool(2))
        >>> a2 = (_rectify_countdown_or_bool(1))
        >>> a3 = (_rectify_countdown_or_bool(0))
        >>> a4 = (_rectify_countdown_or_bool(-1))
        >>> a5 = (_rectify_countdown_or_bool(-2))
        >>> a6 = (_rectify_countdown_or_bool(True))
        >>> a7 = (_rectify_countdown_or_bool(False))
        >>> result = [a1, a2, a3, a4, a5, a6, a7]
        >>> print(result)
        [1.0, 0.0, 0, 0.0, -1.0, True, False]
        [1.0, True, False, False, -1.0, True, False]
    """
    if count_or_bool is True or count_or_bool is False:
        count_or_bool_ = count_or_bool
    elif isinstance(count_or_bool, int):
        if count_or_bool == 0:
            return 0
        sign_ = math.copysign(1, count_or_bool)
        count_or_bool_ = int(count_or_bool - sign_)
        #if count_or_bool_ == 0:
        #    return sign_ == 1
    else:
        count_or_bool_ = False
    return count_or_bool_
63d02cfbd99652bc04cfbac57a7d9306465bbf2b
3,658,605
def POpen(inUV, access, err):
    """
    Open an image persistent (disk) form

    inUV   = Python UV object
    access = access 1=READONLY, 2=WRITEONLY, 3=READWRITE
    err    = Python Obit Error/message stack
    """
    ################################################################
    if ('myClass' in inUV.__dict__) and (inUV.myClass == 'AIPSUVData'):
        raise TypeError("Function unavailable for " + inUV.myClass)
    return inUV.Open(access, err)
    # end POpen
f365a9d5a4fc8a028203e8ea4a51b64d6d19f9bc
3,658,606
def geopad(lon, lat, data, /, nlon=1, nlat=0): """ Return array padded circularly along longitude and over the poles for finite difference methods. """ # Pad over longitude seams if nlon > 0: pad = ((nlon, nlon),) + (data.ndim - 1) * ((0, 0),) data = np.pad(data, pad, mode='wrap') lon = np.pad(lon, nlon, mode='wrap') # should be vector # Pad over poles if nlat > 0: if (data.shape[0] % 2) == 1: raise ValueError( 'Data must have even number of longitudes ' 'if you wish to pad over the poles.' ) append = np.roll( # descending in lat np.flip(data, axis=1), data.shape[0] // 2, axis=0 ) data = np.concatenate( ( append[:, -nlat:, ...], # -87.5, -88.5, -89.5 (crossover) data, # -89.5, -88.5, -87.5, ..., 87.5, 88.5, 89.5 (crossover) append[:, :nlat, ...], # 89.5, 88.5, 87.5 ), axis=1, ) lat = np.pad(lat, nlat, mode='symmetric') lat[:nlat] = 180 - lat[:nlat] # monotonic ascent lat[-nlat:] = 180 - lat[-nlat:] return lon, lat, data
8916dde690673b1d278ffab39ee3350f346a4182
3,658,607
def SL_EAKF(N,loc_rad,taper='GC',ordr='rand',infl=1.0,rot=False,**kwargs): """ Serial, covariance-localized EAKF. Ref: Karspeck, Alicia R., and Jeffrey L. Anderson. (2007): "Experimental implementation of an ensemble adjustment filter..." Used without localization, this should be equivalent (full ensemble equality) to the EnKF 'Serial'. """ def assimilator(stats,twin,xx,yy): f,h,chrono,X0 = twin.f, twin.h, twin.t, twin.X0 N1 = N-1 R = h.noise Rm12 = h.noise.C.sym_sqrt_inv E = X0.sample(N) stats.assess(0,E=E) for k,kObs,t,dt in progbar(chrono.forecast_range): E = f(E,t-dt,dt) E = add_noise(E, dt, f.noise, kwargs) if kObs is not None: stats.assess(k,kObs,'f',E=E) y = yy[kObs] inds = serial_inds(ordr, y, R, anom(E)[0]) locf_at = h.loc_f(loc_rad, 'y2x', t, taper) for i,j in enumerate(inds): hE = h(E,t) hx = mean(hE,0) Y = hE - hx mu = mean(E ,0) A = E-mu # Update j-th component of observed ensemble Yj = Rm12[j,:] @ Y.T dyj = Rm12[j,:] @ (y - hx) # skk = Yj@Yj # N1 * prior var su = 1/( 1/skk + 1/N1 ) # N1 * KG alpha = (N1/(N1+skk))**(0.5) # update contraction factor # dy2 = su*dyj/N1 # mean update Y2 = alpha*Yj # anomaly update if skk<1e-9: continue # Update state (regress update from observation space) # Localized local, coeffs = locf_at(j) if len(local) == 0: continue Regression = (A[:,local]*coeffs).T @ Yj/np.sum(Yj**2) mu[ local] += Regression*dy2 A[:,local] += np.outer(Y2 - Yj, Regression) # Without localization: #Regression = A.T @ Yj/np.sum(Yj**2) #mu += Regression*dy2 #A += np.outer(Y2 - Yj, Regression) E = mu + A E = post_process(E,infl,rot) stats.assess(k,kObs,E=E) return assimilator
e7ca69f71cf83a4389086d14791902eb5a661b9e
3,658,608
def CalculateNMaxNCharge(mol):
    """
    #################################################################
    Most negative charge on N atoms

    -->QNmin

    Usage:

        result = CalculateNMaxNCharge(mol)

        Input: mol is a molecule object.

        Output: result is a numeric value.
    #################################################################
    """
    return _CalculateElementMaxNCharge(mol, AtomicNum=7)
ae63c3f2c6faa8b0d9f7d6ae3b320a9c3b1002d6
3,658,609
def cnn_5l4(image, **kwargs):
    """
    :param image: (TensorFlow Tensor) Image input placeholder
    :param kwargs: (dict) Extra keywords parameters for the convolutional layers of the CNN
    :return: (TensorFlow Tensor) The CNN output layer
    """
    activ = tf.nn.relu
    layer_1 = activ(conv(image, 'c1', n_filters=222, filter_size=4, stride=1, pad='SAME', init_scale=np.sqrt(2), **kwargs))
    layer_2 = activ(conv(layer_1, 'c2', n_filters=222, filter_size=2, stride=1, pad='SAME', init_scale=np.sqrt(2), **kwargs))
    layer_3 = activ(conv(layer_2, 'c3', n_filters=222, filter_size=2, stride=1, pad='SAME', init_scale=np.sqrt(2), **kwargs))
    layer_4 = activ(conv(layer_3, 'c4', n_filters=222, filter_size=2, stride=1, pad='SAME', init_scale=np.sqrt(2), **kwargs))
    layer_5 = activ(conv(layer_4, 'c5', n_filters=222, filter_size=2, stride=1, pad='SAME', init_scale=np.sqrt(2), **kwargs))
    layer_lin = conv_to_fc(layer_5)
    return layer_lin
af059b9a2899c1adcc9f11f4742ffaac8a971dba
3,658,610
def read_dns_data(dns_fn): """ Read data in from a DNS file :param str dns_fn: The filename of the DNS """ fed = open(dns_fn, 'r') begin_data = False dns_data = {} for line in fed.readlines(): if begin_data: if "t = " in line: tc = float(line[3:]) dns_data.update({ tc:{'N':np.empty((0, 3)), 'MP':np.empty((0, 3))} }) else: data = [s.replace(',', '') for s in line.split()] typ = data[0] pos = np.array([float(data[i]) for i in range(2, 5)]) dns_data[tc][typ] = np.vstack([dns_data[tc][typ], pos]) if (line.strip() == "BEGIN DATA"): begin_data = True fed.close() return dns_data
2c73289c6284b47901a8f7c91bce6df75849c822
3,658,611
def arithmetic_mean(iterable):
    """Zero-length-safe arithmetic mean."""
    values = np.asarray(iterable)
    if not values.size:
        return 0
    return values.mean()
3972885d92654d842a163d64c47b585ad6865c98
3,658,612
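A short usage sketch for the arithmetic_mean row above (assumes that function is in scope and that np is numpy):

import numpy as np

# Empty input falls back to 0 instead of producing NaN or raising.
assert arithmetic_mean([]) == 0
# Non-empty inputs behave like np.mean, including multi-dimensional arrays.
assert arithmetic_mean([1, 2, 3]) == 2.0
assert arithmetic_mean(np.array([[1, 3], [5, 7]])) == 4.0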
def play_process(url):
    """Create and return process to read audio from url and send to analog output."""
    return FfmpegProcess(f'ffmpeg -i {url} -f alsa default')
2246f9385e48dda9398752ecd9fa70914d17c55f
3,658,613
from typing import Iterable


def iterable_to_wikitext(items: Iterable[object], *, prefix: str = "\n* ") -> str:
    """
    Convert iterable to wikitext.

    Pages are converted to links. All other objects use their string
    representation.

    :param items: Items to iterate
    :param prefix: Prefix for each item when there is more than one item
    """
    if not items:
        return ""
    if len(list(items)) == 1:
        prefix = ""
    text = ""
    for item in items:
        if isinstance(item, BasePage):
            item = item.title(as_link=True, textlink=True)
        text += f"{prefix}{item}"
    return text
775bed839d890ab40aeace76a82f881e076cafa2
3,658,614
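A small usage sketch for iterable_to_wikitext above, using plain strings only (the BasePage branch comes from pywikibot and is not exercised here):

assert iterable_to_wikitext([]) == ""
assert iterable_to_wikitext(["foo"]) == "foo"
assert iterable_to_wikitext(["foo", "bar"]) == "\n* foo\n* bar"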
def plot_timeSeries(df, col_name, divide=None, xlabel="Days", line=True, title="Time series values", figsize=(9,9)): """ Plot a column of the given time series DataFrame. Parameters ---------- df: pd.DataFrame DataFrame indexed by days (i.e. the index is a pd.DatetimeIndex). col_name: str Indicates the specified column to plot. divide: str Indicates if and how to divide the plotted values. It can either be None, "year", "month" or "season". (The meteorological seasons are considered, and not the astronomical ones). That division is simply made graphically using different colors. xlabel: str Label to put on the x axis. line: bool Indicates whether to connect the points with a line. title: str Title of the plot. figsize: tuple Dimensions of the plot. Returns ---------- matplotlib.axes.Axes The matplotlib Axes where the plot has been made. """ fig, ax = plt.subplots(figsize=figsize) if not divide: ax.plot(df.index, df[col_name], 'o:' if line else 'o') else: groups = group_days_by(df.index, criterion=divide) color = None for group in groups: if divide=="season": colors = {"Winter":"blue", "Spring":"green", "Summer":"yellow", "Fall":"red"} color = colors[group[0]] elif divide=="month": colors = {"January":"b", "February":"g", "March":"r", "April":"c", "May":"m", "June":"y", "July":"k", "August":"peru", "September":"crimson", "October":"orange", "November":"darkgreen", "December":"olivedrab"} color = colors[group[0]] ax.plot(group[1], df.loc[group[1],col_name], 'o:' if line else 'o', color=color , label=group[0]) ax.set_xlabel(xlabel) ax.set_ylabel(col_name) ax.set_title(title) ax.grid() if divide: ax.legend() return ax
279f74422ae6b186128347cc971a094c13f22c4b
3,658,615
import os


def save_bedtools(cluster_regions, clusters, assigned_dir):
    """
    Given cluster regions file saves all bedtools sanely and returns result

    :param cluster_regions:
    :return:
    """
    for region in cluster_regions:
        output_file = "%s.%s.real.BED" % (clusters, region)
        cluster_regions[region]['real'] = cluster_regions[region]['real'].sort().saveas(
            os.path.join(assigned_dir, output_file))

        if "rand" not in cluster_regions[region]:
            continue

        for n_rand in cluster_regions[region]['rand']:
            output_file = "%s.%s.rand.%s.BED" % (clusters, region, n_rand)
            cluster_regions[region]['rand'][n_rand] = cluster_regions[region]['rand'][n_rand].sort().saveas(
                os.path.join(assigned_dir, output_file))

    return cluster_regions
5ef6ee5fd56eb3e6c0659b9979defcab6d9acefb
3,658,616
def is_bv(a):
    """Return `True` if `a` is a Z3 bit-vector expression.

    >>> b = BitVec('b', 32)
    >>> is_bv(b)
    True
    >>> is_bv(b + 10)
    True
    >>> is_bv(Int('x'))
    False
    """
    return isinstance(a, BitVecRef)
7c1cd1d3d679cdceb12955e61f54861b248ff4a2
3,658,617
def bgsub_1D(raw_data, energy_axis, edge, **kwargs): """ Full background subtraction function for the 1D case- Optional LBA, log fitting, LCPL, and exponential fitting. For more information on non-linear fitting function, see information at https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html Inputs: raw_data - 1D spectrum energy_axis - corresponding energy axis edge - edge parameters defined by KEM convention **kawrgs: fit - choose the type of background fit, default == 'pl' == Power law. Can also use 'exp'== Exponential, 'lin' == Linear, 'lcpl' == LCPL. log - Boolean, if true, log transform data and fit using QR factorization, default == False. nstd - Standard deviation spread of r error from non-linear power law fitting. Default == 100. ftol - default to 0.0005, Relative error desired in the sum of squares. gtol - default to 0.00005, Orthogonality desired between the function vector and the columns of the Jacobian. xtol - default to None, Relative error desired in the approximate solution. maxfev - default to 50000, Only change if you are consistenly catching runtime errors and loosening gtol/ftols are not making a good enough fit. method - default is 'trf', see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares for description of methods Note: may need stricter tolerances on ftol/gtol for noisier data. Anecdotally, a stricter gtol (as low as 1e-8) has a larger effect on the quality of the bgsub. Outputs: bg_1D - background spectrum """ fit_start_ch = eVtoCh(edge[0], energy_axis) fit_end_ch = eVtoCh(edge[1], energy_axis) zdim = len(raw_data) ewin = energy_axis[fit_start_ch:fit_end_ch] esub = energy_axis[fit_start_ch:] bg_1D = np.zeros_like(raw_data) fy = np.zeros((1,zdim)) fy[0,:] = raw_data ## Either fast fitting -> log fitting, Or slow fitting -> non-linear fitting if 'log' in kwargs.keys(): log = kwargs['log'] else: log = False ## Fitting parameters for non-linear curve fitting if non-log based fitting if 'ftol' in kwargs.keys(): ftol = kwargs['ftol'] else: ftol = 1e-8 if 'gtol' in kwargs.keys(): gtol = kwargs['gtol'] else: gtol = 1e-8 if 'xtol' in kwargs.keys(): xtol = kwargs['xtol'] else: xtol = 1e-8 if 'maxfev' in kwargs.keys(): maxfev = kwargs['maxfev'] else: maxfev = 50000 if 'method' in kwargs.keys(): method = kwargs['method'] else: method = 'trf' ## Determine if fitting is power law or exponenetial if 'fit' in kwargs.keys(): fit = kwargs['fit'] if fit == 'exp': fitfunc = exponential bounds = ([0, 0], [np.inf, np.inf]) elif fit == 'pl': fitfunc = powerlaw elif fit == 'lcpl': fitfunc = lcpowerlaw elif fit == 'lin': fitfunc = linear else: print('Did not except fitting function, please use either \'pl\' for powerlaw, \'exp\' for exponential, \'lin\' for linear or \'lcpl\' for LCPL.') else: fitfunc = powerlaw ## If fast fitting linear background, find fit using qr factorization if fitfunc==linear: Blin = fy[:,fit_start_ch:fit_end_ch] Alin = np.zeros((len(ewin),2)) Alin[:,0] = np.ones(len(ewin)) Alin[:,1] = ewin Xlin = qrnorm(Alin,Blin.T) Elin = np.zeros((len(esub),2)) Elin[:,0] = np.ones(len(esub)) Elin[:,1] = esub bgndLINline = np.dot(Xlin.T,Elin.T) bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - bgndLINline ## If fast log fitting and powerlaw, find fit using qr factorization elif log & (fitfunc==powerlaw): Blog = fy[:,fit_start_ch:fit_end_ch] Alog = np.zeros((len(ewin),2)) Alog[:,0] = np.ones(len(ewin)) Alog[:,1] = np.log(ewin) Xlog = qrnorm(Alog,np.log(abs(Blog.T))) Elog = 
np.zeros((len(esub),2)) Elog[:,0] = np.ones(len(esub)) Elog[:,1] = np.log(esub) bgndPLline = np.exp(np.dot(Xlog.T,Elog.T)) bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - bgndPLline ## If fast log fitting and exponential, find fit using qr factorization elif log & (fitfunc==exponential): Bexp = fy[:,fit_start_ch:fit_end_ch] Aexp = np.zeros((len(ewin),2)) Aexp[:,0] = np.ones(len(ewin)) Aexp[:,1] = ewin Xexp = qrnorm(Aexp,np.log(abs(Bexp.T))) Eexp = np.zeros((len(esub),2)) Eexp[:,0] = np.ones(len(esub)) Eexp[:,1] = esub bgndEXPline = np.exp(np.dot(Xexp.T,Eexp.T)) bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - bgndEXPline ## Power law non-linear curve fitting using scipy.optimize.curve_fit elif ~log & (fitfunc==powerlaw): popt_pl,pcov_pl=curve_fit(powerlaw, ewin, raw_data[fit_start_ch:fit_end_ch],maxfev=maxfev,method=method, verbose = 0, ftol=ftol, gtol=gtol, xtol=xtol) c,r = popt_pl bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - powerlaw(energy_axis[fit_start_ch:],c,r) ## Exponential non-linear curve fitting using scipy.optimize.curve_fit elif ~log & (fitfunc==exponential): popt_exp,pcov_exp=curve_fit(exponential, ewin, raw_data[fit_start_ch:fit_end_ch],maxfev=maxfev,method=method, verbose = 0,p0=[0,0], ftol=ftol, gtol=gtol, xtol=xtol) a,b = popt_exp bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - exponential(energy_axis[fit_start_ch:],a,b) ## LCPL non-linear curve fitting using scipy.optimize.curve_fit elif fitfunc==lcpowerlaw: if 'nstd' in kwargs.keys(): nstd = kwargs['nstd'] else: nstd = 100 popt_pl,pcov_pl=curve_fit(powerlaw, ewin, raw_data[fit_start_ch:fit_end_ch],maxfev=maxfev,method=method, verbose = 0, ftol=ftol, gtol=gtol, xtol=xtol) c,r = popt_pl perr = np.sqrt(np.diag(pcov_pl)) rstd = perr[1] popt_lcpl,pcov_lcpl=curve_fit(lcpowerlaw, ewin, raw_data[fit_start_ch:fit_end_ch],maxfev=maxfev,method=method, verbose = 0,p0=[c/2,r-nstd*rstd,c/2,r+nstd*rstd], ftol=ftol, gtol=gtol, xtol=xtol) c1,r1,c2,r2 = popt_lcpl bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - lcpowerlaw(energy_axis[fit_start_ch:],c1,r1,c2,r2) return bg_1D
a3f273e55f49811ce9af4ee5c23d4078fe83535a
3,658,618
import random


def about_garble():
    """
    about_garble
    Returns one of several strings for the about page
    """
    garble = ["leverage agile frameworks to provide a robust synopsis for high level overviews.",
              "iterate approaches to corporate strategy and foster collaborative thinking to further the overall value proposition.",
              "organically grow the holistic world view of disruptive innovation via workplace change management and empowerment.",
              "bring to the table win-win survival strategies to ensure proactive and progressive competitive domination.",
              "ensure the end of the day advancement, a new normal that has evolved from epistemic management approaches and is on the runway towards a streamlined cloud solution.",
              "provide user generated content in real-time will have multiple touchpoints for offshoring."]
    return garble[random.randint(0, len(garble) - 1)]
c391891f97a7bc6df5287173aa160713cdfff675
3,658,619
def parse_term_5_elems(expr_list, idx):
    """
    Try to parse a terminal node from five elements of {expr_list}, starting
    from {idx}.
    Return the new expression list on success, None on error.
    """
    # The only five-element terminal node is pk_h
    if expr_list[idx : idx + 2] != [OP_DUP, OP_HASH160]:
        return
    if not isinstance(expr_list[idx + 2], bytes):
        return
    if len(expr_list[idx + 2]) != 20:
        return
    if expr_list[idx + 3 : idx + 5] != [OP_EQUAL, OP_VERIFY]:
        return

    node = Node().construct_pk_h(expr_list[idx + 2])
    expr_list[idx : idx + 5] = [node]
    return expr_list
8c0c365483c44a767b3e254f957af175125da2d6
3,658,620
def display_clusters():
    """
    Method to display the clusters
    """
    offset = int(request.args.get('offset', '0'))
    limit = int(request.args.get('limit', '50'))
    clusters_id_sorted = sorted(clusters, key=lambda x: -len(clusters[x]))
    batches = chunks(range(len(clusters_id_sorted)), size=limit)
    return render_template('clusters.html',
                           offset=offset,
                           limit=limit,
                           batches=batches,
                           ordered_list=clusters_id_sorted[offset:offset + limit + 1],
                           idx_to_path=idx_to_path,
                           clusters=clusters)
e3d578cff54e66ee4b096bcf1e7181a3bac1c845
3,658,621
def densify_sampled_item_predictions(tf_sample_predictions_serial, tf_n_sampled_items, tf_n_users):
    """
    Turns the serial predictions of the sample items in to a dense matrix of shape
    [ n_users, n_sampled_items ]

    :param tf_sample_predictions_serial:
    :param tf_n_sampled_items:
    :param tf_n_users:
    :return:
    """
    densified_shape = tf.cast(tf.stack([tf_n_users, tf_n_sampled_items]), tf.int32)
    densified_predictions = tf.reshape(tf_sample_predictions_serial, shape=densified_shape)
    return densified_predictions
e1dbe0e74c791e1d9b7613fbe52b034a60376497
3,658,622
def get_market_book(symbols=None, **kwargs):
    """
    Top-level function to obtain Book data for a symbol or list of symbols

    Parameters
    ----------
    symbols: str or list, default None
        A symbol or list of symbols
    kwargs:
        Additional Request Parameters (see base class)
    """
    return Book(symbols, **kwargs).fetch()
8b1bc8ed07a611cef490f616996aae05ce445ff1
3,658,623
def ndarange(*args, shape: tuple = None, **kwargs):
    """Generate arange arrays of arbitrary dimensions."""
    arr = np.array([np.arange(*args[i], **kwargs) for i in range(len(args))])
    return arr.reshape(shape) if shape is not None else arr.T
42a5070e653386a71a9be7949f5e9341bfbc50c9
3,658,624
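A quick usage sketch for ndarange above (assumes numpy is imported as np): each positional argument is a tuple of arange parameters; the stacked result is transposed unless an explicit shape is given.

import numpy as np

out = ndarange((0, 3), (3, 6))
# Two length-3 ranges are stacked and transposed into shape (3, 2).
assert out.tolist() == [[0, 3], [1, 4], [2, 5]]

out = ndarange((0, 3), (3, 6), shape=(2, 3))
assert out.tolist() == [[0, 1, 2], [3, 4, 5]]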
def runningSum(self, nums):
    """
    :type nums: List[int]
    :rtype: List[int]

    5% faster 100% less memory
    """
    sum = 0
    runningSum = [0] * len(nums)
    for i in range(len(nums)):
        for j in range(i + 1):
            runningSum[i] += nums[j]
    return runningSum
393849c4aa1d23b15717748066e21abceaf6d5d9
3,658,625
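The nested loops in the runningSum row above are O(n^2); a linear-time prefix-sum variant is sketched below (an alternative, not part of the original snippet):

from itertools import accumulate


def running_sum_linear(nums):
    # Single pass: each output element is the sum of all inputs up to and including it.
    return list(accumulate(nums))


assert running_sum_linear([1, 2, 3, 4]) == [1, 3, 6, 10]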
import warnings def _select_features_1run(df, target, problem_type="regression", verbose=0): """ One feature selection run. Inputs: - df: nxp pandas DataFrame with n data points and p features; to avoid overfitting, only provide data belonging to the n training data points. The variables have to be scaled to have 0 mean and unit variance. - target: n dimensional array with targets corresponding to the data points in df - problem_type: str, either "regression" or "classification" (default: "regression") - verbose: verbosity level (int; default: 0) Returns: - good_cols: list of column names for df with which a prediction model can be trained """ if df.shape[0] <= 1: raise ValueError("n_samples = {}".format(df.shape[0])) # initial selection of too few but (hopefully) relevant features if problem_type == "regression": model = lm.LassoLarsCV(cv=5, eps=1e-8) elif problem_type == "classification": model = lm.LogisticRegressionCV(cv=5, penalty="l1", solver="saga", class_weight="balanced") else: print("[featsel] WARNING: Unknown problem_type %r - not performing feature selection!" % problem_type) return [] with warnings.catch_warnings(): warnings.simplefilter("ignore") # TODO: remove if sklearn least_angle issue is fixed try: model.fit(df, target) except ValueError: # try once more with shuffled data, if it still doesn't work, give up rand_idx = np.random.permutation(df.shape[0]) model.fit(df.iloc[rand_idx], target[rand_idx]) # model.fit(df, target) if problem_type == "regression": coefs = np.abs(model.coef_) else: # model.coefs_ is n_classes x n_features, but we need n_features coefs = np.max(np.abs(model.coef_), axis=0) # weight threshold: select at most 0.2*n_train initial features thr = sorted(coefs, reverse=True)[min(df.shape[1]-1, df.shape[0]//5)] initial_cols = list(df.columns[coefs > thr]) # noise filter initial_cols = _noise_filtering(df[initial_cols].to_numpy(), target, initial_cols, problem_type) good_cols = set(initial_cols) if verbose > 0: print("[featsel]\t %i initial features." 
% len(initial_cols)) # add noise features X_w_noise = _add_noise_features(df[initial_cols].to_numpy()) # go through all remaining features in splits of n_feat <= 0.5*n_train other_cols = list(np.random.permutation(list(set(df.columns).difference(initial_cols)))) if other_cols: n_splits = int(np.ceil(len(other_cols)/max(10, 0.5*df.shape[0]-len(initial_cols)))) split_size = int(np.ceil(len(other_cols)/n_splits)) for i in range(n_splits): current_cols = other_cols[i*split_size:min(len(other_cols), (i+1)*split_size)] X = np.hstack([df[current_cols].to_numpy(), X_w_noise]) if problem_type == "regression": model = lm.LassoLarsCV(cv=5, eps=1e-8) else: model = lm.LogisticRegressionCV(cv=5, penalty="l1", solver="saga", class_weight="balanced") with warnings.catch_warnings(): warnings.simplefilter("ignore") # TODO: remove if sklearn least_angle issue is fixed try: model.fit(X, target) except ValueError: rand_idx = np.random.permutation(X.shape[0]) model.fit(X[rand_idx], target[rand_idx]) # model.fit(X, target) current_cols.extend(initial_cols) if problem_type == "regression": coefs = np.abs(model.coef_) else: # model.coefs_ is n_classes x n_features, but we need n_features coefs = np.max(np.abs(model.coef_), axis=0) weights = dict(zip(current_cols, coefs[:len(current_cols)])) # only include features that are more important than our known noise features noise_w_thr = np.max(coefs[len(current_cols):]) good_cols.update([c for c in weights if abs(weights[c]) > noise_w_thr]) if verbose > 0: print("[featsel]\t Split %2i/%i: %3i candidate features identified." % (i+1, n_splits, len(good_cols)), end="\r") # noise filtering on the combination of features good_cols = list(good_cols) good_cols = _noise_filtering(df[good_cols].to_numpy(), target, good_cols, problem_type) if verbose > 0: print("\n[featsel]\t Selected %3i features after noise filtering." % len(good_cols)) return good_cols
a85fcc34ad5f49d202e120697960ecaf36a6d0ca
3,658,626
import argparse def main(): """Console script for github_terminal.""" parser = argparse.ArgumentParser() group = parser.add_mutually_exclusive_group() group.add_argument("-v", "--verbose", action="store_true", help="Show verbose information") group.add_argument("-q", "--quiet", action="store_true", help="Display less information") parser.add_argument( 'category', help='Use the task you want to create like issue, pr, repo ', choices=["issue", "pr", "repo"]) parser.add_argument( 'action', help='Use the action to perform in the category.', choices=["create", "list", "edit", "delete", "close", "status"]) parser.add_argument("-t", "--title", help="Title of issue or PR or name of repository") parser.add_argument("-d", "--description", help="Description of issue or PR or repo.") parser.add_argument("-c", "--config", help="Configuration file to use.") parser.add_argument("-T", "--token", help="Personal access token for github.") parser.add_argument("-u", "--username", help="Username of the user") parser.add_argument("-a", "--assignee", help="Filter by assignee or set assignee") parser.add_argument("-b", "--base", help="Filter by base branch the pull request are being merged to (ONLY FOR PR AND REPO)") parser.add_argument("-A", "--author", help="Filter by or set author") parser.add_argument("-l", "--label", help="Filter or set label separated by comma") parser.add_argument("-L", "--limit", help="Maximum number to fetch") parser.add_argument("-s", "--state", help="Filter by state") parser.add_argument( "-S", "--since", help="List issues that have been updated at or after the given date." " (You can also use value like 2 weeks ago)") parser.add_argument("-r", "--repo", help="Repository to perform action on.") args = parser.parse_args() category_specific_action = handle_category_action(args) category_specific_action(args) return 0
1598085f30a42559dc20415425057edebb797993
3,658,627
def edit_recovery(request, recovery_id): """This view is used to edit/update existing tag recoveries.""" clip_codes = sorted(list(CLIP_CODE_CHOICES), key=lambda x: x[0]) tag_types = sorted(list(TAG_TYPE_CHOICES), key=lambda x: x[0]) tag_origin = sorted(list(TAG_ORIGIN_CHOICES), key=lambda x: x[0]) tag_colours = sorted(list(TAG_COLOUR_CHOICES), key=lambda x: x[0]) tag_position = sorted(list(TAG_POSITION_CHOICES), key=lambda x: x[0]) recovery = get_object_or_404(Recovery, id=recovery_id) report = recovery.report form = RecoveryForm( report_id=report.id, instance=recovery, data=request.POST or None ) if request.method == "POST": if form.is_valid(): recovery = form.save(report) return redirect("tfat:recovery_detail", recovery_id=recovery.id) return render( request, "tfat/recovery_form.html", { "form": form, "action": "edit", "clip_codes": clip_codes, "tag_types": tag_types, "tag_origin": tag_origin, "tag_colours": tag_colours, "tag_position": tag_position, }, )
f9da1a4377efd436e93cf2be0af2c2e09cc3e31d
3,658,628
def e(string, *args):
    """Function which formats error messages."""
    return string.format(*[pformat(arg) for arg in args])
8734d01544211fde3f8ee24f0f91dc06763d4a1f
3,658,629
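Usage sketch for the e() helper above, assuming pformat is pprint.pformat (the snippet does not show the import):

from pprint import pformat


def e(string, *args):
    """Format an error message, pretty-printing each argument."""
    return string.format(*[pformat(arg) for arg in args])


print(e("unexpected config: {}", {"retries": 3}))
# unexpected config: {'retries': 3}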
def membership_ending_task(user):
    """
    :return: Next task that will end the membership of the user
    """
    task = (UserTask.q
            .filter_by(user_id=user.id,
                       status=TaskStatus.OPEN,
                       type=TaskType.USER_MOVE_OUT)
            # Casting jsonb -> bool directly is only supported since PG v11
            .filter(UserTask.parameters_json['end_membership'].cast(String).cast(Boolean) == True)
            .order_by(UserTask.due.asc())).first()

    return task
2043c87eaabbf3360f1bec331a03e1c7db8bc783
3,658,630
import warnings def hmsstr_to_rad(hmsstr): """Convert HH:MM:SS.SS sexigesimal string to radians. """ hmsstr = np.atleast_1d(hmsstr) hours = np.zeros(hmsstr.size) for i,s in enumerate(hmsstr): # parse string using regular expressions match = hms_re.match(s) if match is None: warnings.warn("Input is not a valid sexigesimal string: %s" % s) hours[i] = np.nan continue d = match.groupdict(0) # default value is 0 # Check sign of hms string if d['sign'] == '-': sign = -1 else: sign = 1 hour = float(d['hour']) + \ float(d['min'])/60.0 + \ float(d['sec'])/3600.0 hours[i] = sign*hour return hour_to_rad(hours)
e57266c43e3b0f8893f9c71cfbea609cf7c93709
3,658,631
def find_optimum_transformations(init_trans, s_pts, t_pts, template_spacing, e_func, temp_tree, errfunc): """ Vary the initial transformation by a translation of up to three times the grid spacing and compute the transformation with the smallest least square error. Parameters: ----------- init_trans : 4-D transformation matrix Initial guess of the transformation matrix from the subject brain to the template brain. s_pts : Vertex coordinates in the subject brain. t_pts : Vertex coordinates in the template brain. template_spacing : float Grid spacing of the vertices in the template brain. e_func : str Error function to use. Either 'balltree' or 'euclidian'. temp_tree : BallTree(t_pts) if e_func is 'balltree'. errfunc : The error function for the computation of the least squares error. Returns: -------- poss_trans : list of 4-D transformation matrices List of one transformation matrix for each variation of the intial transformation with the smallest least squares error. """ # template spacing in meters tsm = template_spacing / 1e3 # Try different initial translations in space to avoid local minima # No label should require a translation by more than 3 times the grid spacing (tsm) auto_match_iters = np.array([[0., 0., 0.], [0., 0., tsm], [0., 0., tsm * 2], [0., 0., tsm * 3], [tsm, 0., 0.], [tsm * 2, 0., 0.], [tsm * 3, 0., 0.], [0., tsm, 0.], [0., tsm * 2, 0.], [0., tsm * 3, 0.], [0., 0., -tsm], [0., 0., -tsm * 2], [0., 0., -tsm * 3], [-tsm, 0., 0.], [-tsm * 2, 0., 0.], [-tsm * 3, 0., 0.], [0., -tsm, 0.], [0., -tsm * 2, 0.], [0., -tsm * 3, 0.]]) # possible translation matrices poss_trans = [] for p, ami in enumerate(auto_match_iters): # vary the initial translation value by adding ami tx, ty, tz = init_trans[0, 3] + ami[0], init_trans[1, 3] + ami[1], init_trans[2, 3] + ami[2] sx, sy, sz = init_trans[0, 0], init_trans[1, 1], init_trans[2, 2] rx, ry, rz = 0, 0, 0 # starting point for finding the transformation matrix trans which # minimizes the error between np.dot(s_pts, trans) and t_pts x0 = np.array([tx, ty, tz, rx, ry, rz]) def error(x): tx_, ty_, tz_, rx_, ry_, rz_ = x trans0 = np.zeros([4, 4]) trans0[:3, :3] = rotation3d(rx_, ry_, rz_) * [sx, sy, sz] trans0[0, 3] = tx_ trans0[1, 3] = ty_ trans0[2, 3] = tz_ # rotate and scale estim = np.dot(s_pts, trans0[:3, :3].T) # translate estim += trans0[:3, 3] if e_func == 'balltree': err = errfunc(estim[:, :3], temp_tree) else: # e_func == 'euclidean' err = errfunc(estim[:, :3], t_pts) return err est, _, info, msg, _ = leastsq(error, x0, full_output=True) est = np.concatenate((est, (init_trans[0, 0], init_trans[1, 1], init_trans[2, 2]) )) trans = _trans_from_est(est) poss_trans.append(trans) return poss_trans
bbc4786827c22158eee33ff9a5e4aaa2939b9705
3,658,632
def execute_transaction(query):
    """Execute Transaction"""
    return Neo4jHelper.run_single_query(query)
51e8e58bb4cad30b9ae9c7b7d7901ee212c9d26a
3,658,633
from scipy.linalg import null_space from angle_set import create_theta, get_n_linear, perturbe_points def generate_linear_constraints(points, verbose=False): """ Given point coordinates, generate angle constraints. """ N, d = points.shape num_samples = get_n_linear(N) * 2 if verbose: print('N={}, generating {}'.format(N, num_samples)) M = int(N * (N - 1) * (N - 2) / 2) thetas = np.empty((num_samples, M + 1)) for i in range(num_samples): points_pert = perturbe_points(points, magnitude=0.0001) theta, __ = create_theta(points_pert) thetas[i, :-1] = theta thetas[i, -1] = -1 CT = null_space(thetas) A = CT[:-1, :].T b = CT[-1, :] return A, b
b98354cd6b57d7a33c6e8a43da80b358e358138c
3,658,634
def add_node_to_parent(node, parent_node):
    """
    Add given object under the given parent preserving its local transformations

    :param node: str
    :param parent_node: str
    """
    return maya.cmds.parent(node, parent_node, add=True, s=True)
1f264b7e30c6ebc2285faa987ffc6142ec62d87f
3,658,635
def coerce(from_, to, **to_kwargs):
    """
    A preprocessing decorator that coerces inputs of a given type by passing
    them to a callable.

    Parameters
    ----------
    from_ : type or tuple of types
        Inputs types on which to call ``to``.
    to : function
        Coercion function to call on inputs.
    **to_kwargs
        Additional keywords to forward to every call to ``to``.

    Examples
    --------
    >>> @preprocess(x=coerce(float, int), y=coerce(float, int))
    ... def floordiff(x, y):
    ...     return x - y
    ...
    >>> floordiff(3.2, 2.5)
    1

    >>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
    ... def add_binary_strings(x, y):
    ...     return bin(x + y)[2:]
    ...
    >>> add_binary_strings('101', '001')
    '110'
    """
    def preprocessor(func, argname, arg):
        if isinstance(arg, from_):
            return to(arg, **to_kwargs)
        return arg
    return preprocessor
61ccce8b9ffbec3e76aa9e78face469add28437e
3,658,636
import os
import logging


def get_module_config_filename():
    """Returns the path of the module configuration file (e.g. 'app.yaml').

    Returns:
        The path of the module configuration file.

    Raises:
        KeyError: The MODULE_YAML_PATH environment variable is not set.
    """
    module_yaml_path = os.environ['MODULE_YAML_PATH']
    logging.info('Using module_yaml_path from env: %s', module_yaml_path)
    return module_yaml_path
b46b7d51817b93dd6b5e026b4e8b52503b2b432a
3,658,637
def Binary(value):
    """construct an object capable of holding a binary (long) string value."""
    return value
2a33d858b23ac2d72e17ea8ede294c5311cb74be
3,658,638
def _get_domain_session(token, domain_name=None):
    """ Return v3 session for token """
    domain_name = domain_name or 'default'
    auth = v3.Token(auth_url=get_auth_url(),
                    domain_id=domain_name,
                    token=token)
    return session.Session(auth=auth,
                           user_agent=USER_AGENT,
                           verify=verify_https())
1ad7dcd8a9b6ea12e1a73886581c86252602a438
3,658,639
from typing import Tuple from typing import Any import os import pickle import logging def load_model(name: str, root: str = "") -> Tuple[Model, Any]: """Load the trained model (structure, weights) and vectorizer from files.""" json_file, h5_file, vec_file = ( os.path.join(root, "{}.{}".format(name, ext)) for ext in ("json", "h5", "pkl") ) with open(json_file) as fp: model = model_from_json(fp.read()) # type: Model model.load_weights(h5_file) with open(vec_file, "rb") as bfp: # type: BinaryIO vectorizer = pickle.load(bfp) logging.info("Model loaded from {}".format(root + "/")) return model, vectorizer
d1728b892669b7e17942a5992a71eb27841c82a7
3,658,640
import six


def fix_troposphere_references(template):
    """Traverse the troposphere ``template`` looking for missing references.
    Fix them by adding a new parameter for those references."""

    def _fix_references(value):
        if isinstance(value, troposphere.Ref):
            name = value.data['Ref']
            if name not in (list(template.parameters.keys()) + list(template.resources.keys())) \
                    and not name.startswith('AWS::'):
                template.add_parameter(
                    troposphere.Parameter(
                        name,
                        Type=getattr(value, '_type', 'String'),
                    )
                )
        elif isinstance(value, troposphere.Join):
            for v in value.data['Fn::Join'][1]:
                _fix_references(v)
        elif isinstance(value, troposphere.BaseAWSObject):
            for _, v in six.iteritems(value.properties):
                _fix_references(v)

    for _, resource in six.iteritems(template.resources):
        for _, value in six.iteritems(resource.properties):
            _fix_references(value)

    return template
9570e10262d7293a79b76f78508e57289d9b1e2d
3,658,641
import configparser


def parse_config_to_dict(cfg_file, section):
    """
    Reads config file and returns a dict of parameters.

    Args:
        cfg_file: <String> path to the configuration ini-file
        section: <String> section of the configuration file to read

    Returns:
        cfg: <dict> configuration parameters of 'section' as a dict
    """
    cfg = configparser.ConfigParser()
    cfg.read(cfg_file)
    if cfg.has_section(section):
        return dict(cfg.items(section))
    else:
        print("Section '%s' not found in file %s!" % (section, cfg_file))
        return None
021e3594f3130e502934379c0f5c1ecea228017b
3,658,642
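A runnable usage sketch for parse_config_to_dict above; the section name and keys are made up for illustration:

import os
import tempfile

cfg_text = "[model]\nlearning_rate = 0.01\nepochs = 20\n"
with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as fp:
    fp.write(cfg_text)

print(parse_config_to_dict(fp.name, "model"))
# {'learning_rate': '0.01', 'epochs': '20'}  -- values come back as strings
os.remove(fp.name)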
def cnn_net(data, dict_dim, emb_dim=128, hid_dim=128, hid_dim2=96, class_dim=2, win_size=3): """ Conv net """ # embedding layer emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim]) # convolution layer conv_3 = fluid.nets.sequence_conv_pool( input=emb, num_filters=hid_dim, filter_size=win_size, act="tanh", pool_type="max") # full connect layer fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2) # softmax layer prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act="softmax") return prediction, fc_1
47127d5124f48b2be187d15291c2f2bc63f072d7
3,658,643
def get_commands(servo):
    """Get specific flash commands for the build target.

    Each board needs specific commands including the voltage for Vref, to turn
    on and turn off the SPI flash. The get_*_commands() functions provide a
    board-specific set of commands for these tasks. The voltage for this board
    needs to be set to 1.8 V.

    Args:
        servo (servo_lib.Servo): The servo connected to the target DUT.

    Returns:
        list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]
        dut_control* = 2d arrays formatted like [["cmd1", "arg1", "arg2"],
                                                 ["cmd2", "arg3", "arg4"]]
                       where cmd1 will be run before cmd2
        flashrom_cmd = command to flash via flashrom
        futility_cmd = command to flash via futility
    """
    dut_control_on = []
    dut_control_off = []
    # TODO: Add the supported servo cases and their commands.
    if servo:
        programmer = ''
    else:
        raise Exception('%s not supported' % servo.version)
    flashrom_cmd = ['flashrom', '-p', programmer, '-w']
    futility_cmd = ['futility', 'update', '-p', programmer, '-i']
    return [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]
727101bd4cf87dba14f02639a44f05e2047b5da2
3,658,644
def FormatRow(Cn, Row, COLSP):
    """Format one row of values into fixed-width columns, where each column is
    sized from the matching column name in Cn plus COLSP spaces of padding."""
    fRow = ""
    for i, c in enumerate(Row):
        sc = str(c)
        lcn = len(Cn[i])
        sc = sc[0:min(len(sc), lcn + COLSP - 2)]
        fRow += sc + " " * (COLSP + lcn - len(sc))
    return fRow
53d43fc897d1db5ed3c47d6046d90548939b1298
3,658,645
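Usage sketch for FormatRow above: each cell is truncated to len(column name) + COLSP - 2 characters and padded to len(column name) + COLSP columns.

cols = ["name", "score"]
row = FormatRow(cols, ["alice", 97.5], COLSP=4)
# "alice" occupies len("name") + 4 = 8 columns, "97.5" occupies len("score") + 4 = 9.
assert len(row) == 17
print(row)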
def handle_release(pin, evt):
    """ Clears the last tone/light when a button is released. """
    if pin > 4:
        return False
    pin -= 1
    explorerhat.light[pin].off()
    tone.power_off()
f4833bb289c9dfc45cd572ad754bd270c758ed09
3,658,646
from typing import List


def makeRoute(start: str, end: str) -> List[str]:
    """Find the shortest route between two systems.

    :param str start: string name of the starting system. Must exist in bbData.builtInSystemObjs
    :param str end: string name of the target system. Must exist in bbData.builtInSystemObjs
    :return: list of string system names where the first element is start, the last element is end,
             and all intermediary systems are adjacent
    :rtype: list[str]
    """
    return bbAStar(start, end, bbData.builtInSystemObjs)
6045b07ff5ceceacea4ad43ae2d52a67a0f46ec9
3,658,647
from typing import List from typing import Optional from typing import Dict def update_node_categories( target_graph: BaseGraph, clique_graph: nx.MultiDiGraph, clique: List, category_mapping: Optional[Dict[str, str]], strict: bool = True, ) -> List: """ For a given clique, get category for each node in clique and validate against Biolink Model, mapping to Biolink Model category where needed. For example, If a node has ``biolink:Gene`` as its category, then this method adds all of its ancestors. Parameters ---------- target_graph: kgx.graph.base_graph.BaseGraph The original graph clique_graph: networkx.Graph The clique graph clique: List A list of nodes from a clique category_mapping: Optional[Dict[str, str]] Mapping for non-Biolink Model categories to Biolink Model categories strict: bool Whether or not to merge nodes in a clique that have conflicting node categories Returns ------- List The clique """ updated_clique_graph_properties = {} updated_target_graph_properties = {} for node in clique: # For each node in a clique, get its category property data = clique_graph.nodes()[node] if 'category' in data: categories = data['category'] else: categories = get_category_from_equivalence(target_graph, clique_graph, node, data) # differentiate between valid and invalid categories ( valid_biolink_categories, invalid_biolink_categories, invalid_categories, ) = check_all_categories(categories) log.debug( f"valid biolink categories: {valid_biolink_categories} invalid biolink categories: {invalid_biolink_categories} invalid_categories: {invalid_categories}" ) # extend categories to have the longest list of ancestors extended_categories: List = [] for x in valid_biolink_categories: ancestors = get_biolink_ancestors(x) if len(ancestors) > len(extended_categories): extended_categories.extend(ancestors) log.debug(f"Extended categories: {extended_categories}") clique_graph_update_dict: Dict = {'category': list(extended_categories)} target_graph_update_dict: Dict = {} if invalid_biolink_categories: if strict: clique_graph_update_dict['_excluded_from_clique'] = True target_graph_update_dict['_excluded_from_clique'] = True clique_graph_update_dict['invalid_biolink_category'] = invalid_biolink_categories target_graph_update_dict['invalid_biolink_category'] = invalid_biolink_categories if invalid_categories: clique_graph_update_dict['_invalid_category'] = invalid_categories target_graph_update_dict['_invalid_category'] = invalid_categories updated_clique_graph_properties[node] = clique_graph_update_dict updated_target_graph_properties[node] = target_graph_update_dict nx.set_node_attributes(clique_graph, updated_clique_graph_properties) target_graph.set_node_attributes(target_graph, updated_target_graph_properties) return clique
965d1c3076e6fac67ef0fcd00f7f178a2a519be5
3,658,648
from copy import deepcopy


def norm_error(series):
    """Normalize time series.
    """
    # return series
    new_series = deepcopy(series)
    new_series[:, 0] = series[:, 0] - np.mean(series[:, 0])
    return 2 * (new_series) / max(abs(new_series[:, 0]))
a7af6be8b8ddc800609c3385a96f5a80dfd02853
3,658,649
def f1d(x):
    """Non-linear function for simulation"""
    return 1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x)
75e3bd8a90fe41dfded9b6063868b6766351a8b0
3,658,650
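A tiny evaluation sketch for f1d above (assumes numpy as np): the function is a steep logistic step centred at x = 0.5 plus a linear trend.

import numpy as np

# At the midpoint the logistic term is exactly 0.5, so f1d(0.5) = 1.7 * (0.5 + 0.375).
assert round(float(f1d(0.5)), 4) == 1.4875
print(np.round(f1d(np.linspace(0, 1, 5)), 3))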
def get_field_map(src, flds):
    """
    Returns a field map for an arcpy data item from a list or dictionary.
    Useful for operations such as renaming columns or merging feature classes.

    Parameters:
    -----------
    src: str, arcpy data item or arcpy.mp layer or table
        Source data item containing the desired fields.
    flds: dict <str: str>
        Mapping between old (keys) and new field names (values).

    Returns:
    --------
    arcpy.FieldMappings
    """
    mappings = arcpy.FieldMappings()
    if isinstance(flds, list):
        flds = {n: n for n in flds}
    for old_name, new_name in flds.items():
        fm = arcpy.FieldMap()
        fm.addInputField(src, old_name)
        out_f = fm.outputField
        out_f.name = new_name
        out_f.aliasName = new_name
        fm.outputField = out_f
        fm.outputField.name = new_name
        mappings.addFieldMap(fm)
    return mappings
18e6bbae491659b7819aa3584eb40242dea93f11
3,658,651
def b32qlc_decode(value):
    """
    Decodes a value in qlc encoding to bytes using base32 algorithm with a
    custom alphabet: '13456789abcdefghijkmnopqrstuwxyz'

    :param value: the value to decode
    :type: bytes

    :return: decoded value
    :rtype: bytes

    >>> b32qlc_decode(b'fxop4ya=')
    b'okay'
    """
    return b32decode(value.translate(QLC_DECODE_TRANS))
8b5bbb0f1900a3b89486c81561fd4c253604287e
3,658,652
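The snippet above relies on a QLC_DECODE_TRANS table that is not shown. A plausible reconstruction (an assumption, not taken from the source) maps the custom alphabet positionally onto the RFC 4648 base32 alphabet, which reproduces the doctest:

from base64 import b32decode

# Hypothetical translation table: custom qlc alphabet -> standard base32 alphabet, position by position.
_QLC_ALPHABET = b'13456789abcdefghijkmnopqrstuwxyz'
_B32_ALPHABET = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
QLC_DECODE_TRANS = bytes.maketrans(_QLC_ALPHABET, _B32_ALPHABET)

assert b32decode(b'fxop4ya='.translate(QLC_DECODE_TRANS)) == b'okay'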
def createPreProcessingLayers():
    """
    Creates a model with the initial pre-processing layers.
    """
    model = Sequential()
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
    model.add(Cropping2D(cropping=((50, 20), (0, 0))))
    return model
1e087ae4bdd1a942845f4f7554e1b27436c6783e
3,658,653
def get_random_atoms(a=2.0, sc_size=2, numbers=[6, 8], set_seed: int = None): """Create a random structure.""" if set_seed: np.random.seed(set_seed) cell = np.eye(3) * a positions = np.array([[0, 0, 0], [a/2, a/2, a/2]]) unit_cell = Atoms(cell=cell, positions=positions, numbers=numbers, pbc=True) multiplier = np.identity(3) * sc_size atoms = make_supercell(unit_cell, multiplier) atoms.positions += (2 * np.random.rand(len(atoms), 3) - 1) * 0.1 flare_atoms = FLARE_Atoms.from_ase_atoms(atoms) return flare_atoms
710592af7db3e24529b68b84e112641b5da63a98
3,658,654
from re import DEBUG import os def pocsense(kspace, sensitivities, i=None, r=None, l=None, g=None, o=None, m=None): """ Perform POCSENSE reconstruction. :param kspace array: :param sensitivities array: :param i int: max. number of iterations :param r float: regularization parameter :param l int: toggle l1-wavelet or l2 regularization :param g bool: () :param o float: () :param m float: () """ usage_string = "pocsense [-i d] [-r f] [-l d] kspace sensitivities output" cmd_str = f'{BART_PATH} ' cmd_str += 'pocsense ' flag_str = '' opt_args = f'' multituples = [] if i is not None: flag_str += f'-i {i} ' if r is not None: flag_str += f'-r {r} ' if l is not None: flag_str += f'-l {l} ' if g is not None: flag_str += f'-g ' if o is not None: flag_str += f'-o {o} ' if m is not None: flag_str += f'-m {m} ' cmd_str += flag_str + opt_args + ' ' cmd_str += f"{' '.join([' '.join([str(x) for x in arg]) for arg in zip(*multituples)]).strip()} {NAME}kspace {NAME}sensitivities {NAME}output " cfl.writecfl(NAME + 'kspace', kspace) cfl.writecfl(NAME + 'sensitivities', sensitivities) if DEBUG: print(cmd_str) os.system(cmd_str) outputs = cfl.readcfl(NAME + 'output') return outputs
a9bde663258ee106357724f7641df68880f3fc03
3,658,655
def vgg16_bn(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
    return model
34f8e4965555ed4cb046c8ab4e5cde799d887040
3,658,656
import numpy
from numpy import median


def tau(x, cval):
    """Robust estimators of location and scale, with breakdown points of 50%.

    Also referred to as: Tau measure of location by Yohai and Zamar

    Source: Yohai and Zamar JASA, vol 83 (1988), pp 406-413 and
    Maronna and Zamar Technometrics, vol 44 (2002), pp. 307-317"""
    med = median(x)
    mad = median(numpy.abs(x - med))
    zscore = 0.675  # Z-score of the 75th percentile of the normal distribution

    s = zscore * mad
    wnom = 0
    wden = 0
    for i in range(len(x)):
        y = (x[i] - med) / s
        temp = (1 - (y / cval)**2)**2
        if abs(temp) <= cval:
            wnom += temp * x[i]
            wden += temp
    return wnom / wden
6f75ee23f50e94d1ee2754949f5c102d63ac4cab
3,658,657
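A usage sketch for tau above (assumes that function and numpy are in scope): with one gross outlier the tau location stays near the bulk of the data while the plain mean is dragged far away.

import numpy

rng = numpy.random.default_rng(0)
data = numpy.concatenate([rng.normal(10.0, 1.0, 500), [1e6]])  # one gross outlier
print(tau(data, cval=4.5))   # close to 10
print(data.mean())           # pulled toward the outlier (roughly 2000)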
def shn_gis_location_represent(id, showlink=True): """ Represent a location given its id """ table = db.gis_location try: location = db(table.id == id).select(table.id, table.name, table.level, table.parent, table.lat, table.lon, cache=(cache.ram, 60), limitby=(0, 1)).first() return shn_gis_location_represent_row(location, showlink) except: try: # "Invalid" => data consistency wrong represent = location.id except: represent = NONE return represent
758dfb8e32178e864f838a790eadf598f65ae6ec
3,658,658
def de_pearson_dataframe(df, genes, pair_by='type', gtex=True, tcga=True): """ PearsonR scores of gene differential expression between tumor and normal types. 1. Calculate log2FC of genes for TCGA tumor samples with matching TCGA normal types 2. Compare log2fc to tumor type compared to all other normal types 3. Calculate PearsonR and save :param pd.DataFrame df: Exp/TPM dataframe containing "type"/"tissue/tumor/label" metadata columns :param list genes: Genes to use in differential expression calculation :param str pair_by: How to pair tumors/normals. Either by "type" or "tissue" :param bool gtex: If True, includes GTEx in normal set :param bool tcga: If True, includes TCGA in normal set :return: PearsonR dataframe :rtype: pd.DataFrame """ # Subset by Tumor/Normal tumor = df[df.label == 'tcga-tumor'] tcga_n = df[df.label == 'tcga-normal'] # Determine normal comparison group based on options if gtex and tcga: normal = df[df.tumor == 'no'] elif gtex: normal = df[df.label == 'gtex'] else: normal = tcga_n # Identify tumor types with paired tcga-normal tum_types = [x for x in sorted(tumor[pair_by].unique()) if x in sorted(df[df.label == 'tcga-normal'][pair_by].unique())] norm_types = [] # For all paired tumor_types, calculate l2fc, then PearsonR of l2fc to all normal tumor types pearson_l2fc = defaultdict(list) for tum_type in tum_types: # First calculate TCGA tumor/normal prior for comparison t_med = tumor[tumor[pair_by] == tum_type][genes].median() n_med = tcga_n[tcga_n[pair_by] == tum_type][genes].median() prior_l2fc = log2fc(t_med, n_med) # For every normal type, calculate pearsonR correlation for (norm_type, label), _ in normal.groupby(pair_by).label.value_counts().iteritems(): if tum_type == norm_type: l2fc = prior_l2fc else: n_med = normal[normal[pair_by] == norm_type][genes].median() l2fc = log2fc(t_med, n_med) # Calculate PearsonR of l2fc and comparison tissue/type pearson_r = round(pearsonr(prior_l2fc, l2fc)[0], 2) pearson_l2fc[tum_type[:20]].append(pearson_r) norm_label = '{}_{}'.format(label, norm_type[:20]) if norm_label not in norm_types: norm_types.append(norm_label) return pd.DataFrame(pearson_l2fc, index=norm_types)
29423402b24acc67a278cbdee03916add4228d7d
3,658,659
def load_YUV_as_dic_tensor(path_img):
    """
    Construct a dict with 3 entries ('y', 'u', 'v'), each of them is a tensor
    and is loaded from path_img + key + '.png'.

    ! Return a dictionary of 3D tensors (i.e. without a dummy batch index)
    """
    dic_res = {}
    key = ['y', 'u', 'v']

    for k in key:
        img = Image.open(path_img + '_' + k + '.png')
        # check if image mode is correct: it should be a one
        # channel uint8 image (i.e. mode L)
        if img.mode != 'L':
            img = img.convert('L')
        dic_res[k] = to_tensor(img)

    return dic_res
b0fe081b36c70ba8a185f151b13c5f046ef26ad6
3,658,660
def tensor_log10(t1, out_format, dtype=None): """ Takes the log base 10 of each input in the tensor. Note that this is applied to all elements in the tensor not just non-zeros. Warnings --------- The log10 of 0 is undefined and is performed on every element in the tensor regardless of sparsity. Parameters ------------ t1: tensor, array_like input tensor or array_like object out_format: format, mode_format, optional * If a :class:`format` is specified, the result tensor is stored in the format out_format. * If a :class:`mode_format` is specified, the result the result tensor has a with all of the dimensions stored in the :class:`mode_format` passed in. dtype: Datatype The datatype of the output tensor. Examples ---------- >>> import pytaco as pt >>> pt.tensor_log10([10, 100], out_format=pt.compressed, dtype=pt.float32).to_array() array([1., 2.], dtype=float32) Returns -------- log10: tensor The element wise log10 of the input tensor. """ t1 = as_tensor(t1, copy=False) cast_val = _cm.max_type(_cm.float32, t1.dtype) f = lambda x: _cm.log10(_cm.cast(x, cast_val)) return _compute_unary_elt_eise_op(f, t1, out_format, dtype)
ff5c1a2f4cee9bc287ac81d3d3e524c1292fa2a7
3,658,661
def get_file_phenomena_i(index): """ Return file phenomena depending on the value of index. """ if index <= 99: return [phen[0]] elif index >= 100 and index <= 199: return [phen[1]] elif index >= 200 and index <= 299: return [phen[2]] elif index >= 300 and index <= 399: return [phen[3]] elif index >= 400 and index <= 499: return phen[0:2] elif index >= 500 and index <= 599: return phen[0:3] elif index >= 600 and index <= 699: tmp_l = phen[0:2] tmp_l.append(phen[3]) return tmp_l
18beac08b59aec18b33f6472866a50decd01db30
3,658,662
def resource_cache_map(resource_id, flush=True):
    """cache resource info"""
    if flush:
        map_resources(resource_ids=[resource_id, ])
    if resource_id not in CDNRESOURCE:
        raise InvalidArgument('Resource not exit')
    return CDNRESOURCE[resource_id]
5e67546db9008e805b80c1ed7545d3787444c402
3,658,663
def _preprocess_html(table_html):
    """Parses HTML with bs4 and fixes some glitches."""
    table_html = table_html.replace("<br />", "<br /> ")
    table = bs4.BeautifulSoup(table_html, "html5lib")
    table = table.find("table")
    # Delete hidden style annotations.
    for tag in table.find_all(attrs={"style": "display:none"}):
        tag.decompose()
    # Make sure "rowspan" is not set to an illegal value.
    for tag in table.find_all("td"):
        for attr in list(tag.attrs):
            if attr == "rowspan":
                tag.attrs[attr] = ""
    return table
1062c5cdbb058ea36b1c877d7787aebbde87c642
3,658,664
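A small usage sketch for the helper above, assuming bs4 and html5lib are installed; the toy table markup is invented for illustration.

html = ('<table>'
        '<tr><td rowspan="2">a</td><td style="display:none">hidden</td></tr>'
        '<tr><td>b</td></tr>'
        '</table>')
table = _preprocess_html(html)
print(table.find(attrs={"style": "display:none"}))  # None: the hidden cell was removed
print(table.find("td").attrs["rowspan"])            # '': rowspan reset to an empty value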
def parse_campus_hours(data_json, eatery_model):
    """Parses a Cornell Dining json dictionary.

    Returns 1) a list of tuples of CampusEateryHour objects for a corresponding CampusEatery object
    and their unparsed menu 2) an array of the items an eatery serves.

    Args:
        data_json (dict): a valid dictionary from the Cornell Dining json
        eatery_model (CampusEatery): the CampusEatery object to which to link the hours.
    """
    eatery_hours_and_menus = []
    dining_items = []
    for eatery in data_json["data"]["eateries"]:
        eatery_slug = eatery.get("slug", "")
        if eatery_model.slug == eatery_slug:
            dining_items = get_trillium_menu() if eatery_slug == TRILLIUM_SLUG else parse_dining_items(eatery)
            hours_list = eatery["operatingHours"]
            for hours in hours_list:
                new_date = hours.get("date", "")
                hours_events = hours["events"]
                if hours_events:
                    for event in hours_events:
                        start, end = format_time(event.get("start", ""), event.get("end", ""), new_date)
                        eatery_hour = CampusEateryHour(
                            eatery_id=eatery_model.id,
                            date=new_date,
                            event_description=event.get("descr", ""),
                            event_summary=event.get("calSummary", ""),
                            end_time=end,
                            start_time=start,
                        )
                        eatery_hours_and_menus.append((eatery_hour, event.get("menu", [])))
                else:
                    eatery_hour = CampusEateryHour(
                        eatery_id=eatery_model.id,
                        date=new_date,
                        event_description=None,
                        event_summary=None,
                        end_time=None,
                        start_time=None,
                    )
                    eatery_hours_and_menus.append((eatery_hour, []))
    return eatery_hours_and_menus, dining_items
95e7bbc898f4516b9812d3f68749651a32f3535f
3,658,665
from typing import Dict
from typing import Tuple


def _change_relationships(edge: Dict) -> Tuple[bool, bool]:
    """Validate relationship."""
    if 'increases' in edge[1]['relation'] or edge[1]['relation'] == 'positive_correlation':
        return True, True
    elif 'decreases' in edge[1]['relation'] or edge[1]['relation'] == 'negative_correlation':
        return True, False
    return False, False
b826eb1eb7bd1e7eed7fd8577b5c04d827a75e56
3,658,666
def extract_behaviour_sync(sync, chmap=None, display=False, tmax=np.inf):
    """
    Extract wheel positions and times from sync fronts dictionary

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for all 16 chans
    :param chmap: dictionary containing channel index. Default to constant.
        chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
    :param display: bool or matplotlib axes: show the full session sync pulses display
        defaults to False
    :return: trials dictionary
    """
    bpod = _get_sync_fronts(sync, chmap['bpod'], tmax=tmax)
    if bpod.times.size == 0:
        raise err.SyncBpodFpgaException('No Bpod event found in FPGA. No behaviour extraction. '
                                        'Check channel maps.')
    frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'], tmax=tmax)
    audio = _get_sync_fronts(sync, chmap['audio'], tmax=tmax)
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _assign_events_bpod(
        bpod['times'], bpod['polarities'])
    t_ready_tone_in, t_error_tone_in = _assign_events_audio(
        audio['times'], audio['polarities'])
    trials = Bunch({
        'goCue_times': _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
        'errorCue_times': _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valveOpen_times': _assign_events_to_trial(t_trial_start, t_valve_open),
        'stimFreeze_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take=-2),
        'stimOn_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take='first'),
        'stimOff_times': _assign_events_to_trial(t_trial_start, frame2ttl['times']),
        'itiIn_times': _assign_events_to_trial(t_trial_start, t_iti_in)
    })
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = np.copy(trials['valveOpen_times'])
    ind_err = np.isnan(trials['valveOpen_times'])
    trials['feedback_times'][ind_err] = trials['errorCue_times'][ind_err]
    trials['intervals'] = np.c_[t_trial_start, trials['itiIn_times']]
    if display:
        width = 0.5
        ymax = 5
        if isinstance(display, bool):
            plt.figure("Ephys FPGA Sync")
            ax = plt.gca()
        else:
            ax = display
        r0 = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
        plots.squares(bpod['times'], bpod['polarities'] * 0.4 + 1, ax=ax, color='k')
        plots.squares(frame2ttl['times'], frame2ttl['polarities'] * 0.4 + 2, ax=ax, color='k')
        plots.squares(audio['times'], audio['polarities'] * 0.4 + 3, ax=ax, color='k')
        plots.squares(r0['times'], r0['polarities'] * 0.4 + 4, ax=ax, color='k')
        plots.vertical_lines(t_ready_tone_in, ymin=0, ymax=ymax, ax=ax,
                             label='goCue_times', color='b', linewidth=width)
        plots.vertical_lines(t_trial_start, ymin=0, ymax=ymax, ax=ax,
                             label='start_trial', color='m', linewidth=width)
        plots.vertical_lines(t_error_tone_in, ymin=0, ymax=ymax, ax=ax,
                             label='error tone', color='r', linewidth=width)
        plots.vertical_lines(t_valve_open, ymin=0, ymax=ymax, ax=ax,
                             label='valveOpen_times', color='g', linewidth=width)
        plots.vertical_lines(trials['stimFreeze_times'], ymin=0, ymax=ymax, ax=ax,
                             label='stimFreeze_times', color='y', linewidth=width)
        plots.vertical_lines(trials['stimOff_times'], ymin=0, ymax=ymax, ax=ax,
                             label='stim off', color='c', linewidth=width)
        plots.vertical_lines(trials['stimOn_times'], ymin=0, ymax=ymax, ax=ax,
                             label='stimOn_times', color='tab:orange', linewidth=width)
        c = _get_sync_fronts(sync, chmap['left_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 5, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['right_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 6, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['body_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 7, ax=ax, color='k')
        ax.legend()
        ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
        ax.set_yticks([0, 1, 2, 3, 4, 5])
        ax.set_ylim([0, 5])
    return trials
b02ec14a5714f1387acb12f1ec2d5bbbc1684f67
3,658,667
import logging
import os
from pathlib import Path

import gdown
import torch
from torch.jit import RecursiveScriptModule

# NOTE: the origin of `logger` is not shown in the snippet; a standard-library
# logger is assumed here.
logger = logging.getLogger(__name__)


def download_and_load_model(model_files) -> RecursiveScriptModule:
    """
    Downloads and torch.jit.load the model from google drive. The downloaded model is
    saved in /tmp since on heroku we get /tmp to save all our stuff; if the app is not
    running in production the model must be saved in local storage, hence the model is
    directly loaded.

    Args:
        model_files: the dict containing the model information

    Returns:
        (RecursiveScriptModule): the loaded torch.jit model
    """
    if "PRODUCTION" in os.environ:
        logger.info(
            f"=> Downloading Model {model_files['model_file']} from {model_files['model_url']}"
        )
        # heroku gives you `/tmp` to store files, which can be cached
        model_path: Path = Path("/tmp") / f"{model_files['model_file']}.pt"
        if not model_path.exists():
            gdown.cached_download(url=model_files["model_url"], path=model_path)
        logger.info(f"=> Loading {model_files['model_file']} from download_cache")
        model: RecursiveScriptModule = torch.jit.load(str(model_path))
    else:
        logger.info(f"=> Loading {model_files['model_file']} from Local")
        model = torch.jit.load(
            str((Path("models") / (model_files["model_file"] + ".pt")))
        )
    return model
7f00cd1a79ec87eb553b44045c5e4752851bbb2a
3,658,668
def is_attr_defined(attrs, dic):
    """
    Check if the sequence of attributes is defined in dictionary 'dic'.
    Valid 'attrs' sequence syntax:

    <attr>                Return True if a single attribute is defined.
    <attr1>,<attr2>,...   Return True if one or more attributes are defined.
    <attr1>+<attr2>+...   Return True if all the attributes are defined.
    """
    if OR in attrs:
        for a in attrs.split(OR):
            if dic.get(a.strip()) is not None:
                return True
        else:
            return False
    elif AND in attrs:
        for a in attrs.split(AND):
            if dic.get(a.strip()) is None:
                return False
        else:
            return True
    else:
        return dic.get(attrs.strip()) is not None
542388846fabc79e126203d80a63db6901a71897
3,658,669
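A quick usage sketch for the check above. The `OR` and `AND` separators are module-level constants not shown in the snippet; ',' and '+' are assumed here to match the docstring syntax.

OR, AND = ',', '+'  # assumed values of the module constants

settings = {'host': 'localhost', 'port': 8080}
print(is_attr_defined('host', settings))       # True  - single attribute defined
print(is_attr_defined('host,user', settings))  # True  - at least one of them defined
print(is_attr_defined('host+user', settings))  # False - 'user' is missing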
def c_str_repr(str_):
    """Returns representation of string in C (wrapped in double quotes)"""
    def byte_to_repr(char_):
        """Converts byte to C code string representation"""
        char_val = ord(char_)
        if char_ in ['"', '\\', '\r', '\n']:
            return '\\' + chr(char_val)
        elif (ord(' ') <= char_val <= ord('^') or char_val == ord('_') or
              ord('a') <= char_val <= ord('~')):
            return chr(char_val)
        else:
            return '\\x%02x' % char_val

    return '"%s"' % ''.join((byte_to_repr(x) for x in str_))
e7cce729a00a7d2a35addf95eb097a3caa06bedd
3,658,670
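Two example calls for the helper above, showing how quotes are escaped and how non-printable bytes become \xNN escapes.

print(c_str_repr('say "hi"'))   # "say \"hi\""
print(c_str_repr('tab\there'))  # "tab\x09here"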
from datetime import datetime, timedelta
import os


def hour_paths_for_range(hours_path, start, end):
    """Generate a list of hour paths to check when looking for segments between start and end."""
    # truncate start and end to the hour
    def truncate(dt):
        return dt.replace(microsecond=0, second=0, minute=0)
    current = truncate(start)
    end = truncate(end)
    # Begin in the hour prior to start, as there may be a segment that starts in that hour
    # but contains the start time, eg. if the start time is 01:00:01 and there's a segment
    # at 00:59:59 which goes for 3 seconds.
    # Checking the entire hour when in most cases it won't be needed is wasteful, but it's also
    # pretty quick and the complexity of only checking this case when needed just isn't worth it.
    # (The original snippet imported only the datetime class and called datetime.timedelta,
    # which does not exist; timedelta is imported directly here.)
    current -= timedelta(hours=1)
    while current <= end:
        yield os.path.join(hours_path, current.strftime("%Y-%m-%dT%H"))
        current += timedelta(hours=1)
2ba8c2ccb914fcf0eb7bbbddb8e0e5ecad6adec0
3,658,671
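A usage sketch for the generator above (with the corrected timedelta import), using a made-up /segments base path; note the extra hour before the start time.

start = datetime(2021, 6, 1, 1, 30)
end = datetime(2021, 6, 1, 2, 10)
print(list(hour_paths_for_range('/segments', start, end)))
# ['/segments/2021-06-01T00', '/segments/2021-06-01T01', '/segments/2021-06-01T02']  (POSIX paths)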
def getActiveTeamAndID():
    """Returns the Team ID and CyTeam for the active player."""
    return getActiveTeamID(), getActiveTeam()
edf58aee8d9126ddc25afd94becf641330e13ca2
3,658,672
from typing import Union
from typing import BinaryIO
from typing import Tuple
from typing import Optional


def is_nitf(
        file_name: Union[str, BinaryIO],
        return_version=False) -> Union[bool, Tuple[bool, Optional[str]]]:
    """
    Test whether the given input is a NITF 2.0 or 2.1 file.

    Parameters
    ----------
    file_name : str|BinaryIO
    return_version : bool

    Returns
    -------
    is_nitf_file: bool
        Is the file a NITF file, based solely on checking initial bytes.
    nitf_version: None|str
        Only returned if `return_version=True`. Will be `None` in the event that
        `is_nitf_file=False`.
    """
    header = _fetch_initial_bytes(file_name, 9)
    if header is None:
        if return_version:
            return False, None
        else:
            return False

    ihead = header[:4]
    vers = header[4:]
    if ihead == b'NITF':
        try:
            vers = vers.decode('utf-8')
            return (True, vers) if return_version else True
        except ValueError:
            pass
    return (False, None) if return_version else False
6e28baa09d6b8e173db00671e1ed08023630110b
3,658,673
import argparse
import os
import sys


def main():
    """A simple main for testing via command line."""
    parser = argparse.ArgumentParser(
        description='A manual test for ros-pull-request-builder access '
                    'to a GitHub repo.')
    parser.add_argument('user', type=str)
    parser.add_argument('repo', type=str)
    parser.add_argument('--callback-url', type=str,
                        default='http://build.ros.org/ghprbhook/')
    parser.add_argument('--hook-user', type=str,
                        default='ros-pull-request-builder')
    parser.add_argument('--password-env', type=str,
                        default='ROSGHPRB_TOKEN')
    args = parser.parse_args()

    password = os.getenv(args.password_env)
    if not password:
        parser.error(
            'OAUTH Token with hook and organization read access '
            'required in ROSGHPRB_TOKEN environment variable')

    errors = []
    result = check_hooks_on_repo(
        args.user, args.repo, errors, args.hook_user, args.callback_url, password)
    if errors:
        print('Errors detected:', file=sys.stderr)
        for e in errors:
            print(e, file=sys.stderr)
    if result:
        return 0
    return 1
df0ab1cf1fe7f048a35673280002efc26fe483cc
3,658,674
def get_xlsx_filename() -> str:
    """
    Get the name of the excel file.

    Example filename: kesasetelihakemukset_2021-01-01_23-59-59.xlsx
    """
    local_datetime_now_as_str = timezone.localtime(timezone.now()).strftime(
        "%Y-%m-%d_%H-%M-%S"
    )
    filename = f"kesasetelihakemukset_{local_datetime_now_as_str}.xlsx"
    return filename
fb8715f30bd91f39d9836bf59504ad85c205bdf3
3,658,675
from pathlib import Path


def get_content_directory() -> Path:
    """
    Get the path of the markdown `content` directory.
    """
    return get_base_directory() / "content"
2b6f7a9c676e8128fafd43b26cf62aa736aa957c
3,658,676
import math


def mag_inc(x, y, z):
    """
    Given *x* (north intensity), *y* (east intensity), and *z* (vertical
    intensity) all in [nT], return the magnetic inclination angle [deg].
    """
    h = math.sqrt(x**2 + y**2)
    return math.degrees(math.atan2(z, h))
f4036358625dd9d032936afc373e53bef7c1e6e1
3,658,677
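Two quick checks of the formula above: equal horizontal and vertical intensity dips at 45 degrees, and a purely horizontal field has zero inclination.

print(mag_inc(20000.0, 0.0, 20000.0))  # 45.0
print(mag_inc(30000.0, 5000.0, 0.0))   # 0.0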
import torch


def rgb_to_rgba(image, alpha_val):
    """
    Convert an image from RGB to RGBA.
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}")

    if len(image.shape) < 3 or image.shape[-3] != 3:
        raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")

    if not isinstance(alpha_val, (float, torch.Tensor)):
        raise TypeError(f"alpha_val type is not a float or torch.Tensor. Got {type(alpha_val)}")

    # add one channel
    r, g, b = torch.chunk(image, image.shape[-3], dim=-3)

    # A float fills a constant alpha plane; a tensor is used as-is.
    # (The original snippet accepted a tensor alpha_val in the type check but never
    # assigned `a` for that case, which raised a NameError.)
    if isinstance(alpha_val, float):
        a = torch.full_like(r, fill_value=float(alpha_val))
    else:
        a = alpha_val

    return torch.cat([r, g, b, a], dim=-3)
5bab73c37ff81c431ed88ce7d39743cce6c15c56
3,658,678
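A usage sketch for the conversion above with a random channels-first image and a constant alpha value.

img = torch.rand(3, 4, 5)               # RGB image, channels first
rgba = rgb_to_rgba(img, 0.5)
print(rgba.shape)                        # torch.Size([4, 4, 5])
print(float(rgba[-1].min()), float(rgba[-1].max()))  # 0.5 0.5: constant alpha plane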
import os

import tensorflow as tf


def has_labels(dataset_dir, filename=LABELS_FILENAME):
    """Specifies whether or not the dataset directory contains a label map file.

    Args:
        dataset_dir: The directory in which the labels file is found.
        filename: The filename where the class names are written.

    Returns:
        `True` if the labels file exists and `False` otherwise.
    """
    return tf.io.gfile.exists(os.path.join(dataset_dir, filename))
e9ff2dcb52559ec88d45e3350ec22a6f9f421f27
3,658,679
def get(identifier):
    """get the activation function"""
    if identifier is None:
        return linear
    if callable(identifier):
        return identifier
    if isinstance(identifier, str):
        activations = {
            "relu": relu,
            "sigmoid": sigmoid,
            "tanh": tanh,
            "linear": linear,
        }
        return activations[identifier]
005789e8cdadff97875f002b9776d8d8bdb22d56
3,658,680
def df_add_column_codelines(self, key):
    """Generate code lines to add new column to DF"""
    func_lines = df_set_column_index_codelines(self)  # provide res_index = ...

    results = []
    for i, col in enumerate(self.columns):
        col_loc = self.column_loc[col]
        type_id, col_id = col_loc.type_id, col_loc.col_id
        res_data = f'res_data_{i}'
        func_lines += [
            f'  data_{i} = self._data[{type_id}][{col_id}]',
            f'  {res_data} = pandas.Series(data_{i}, index=res_index, name="{col}")',
        ]
        results.append((col, res_data))

    res_data = 'new_res_data'
    literal_key = key.literal_value
    func_lines += [f'  {res_data} = pandas.Series(value, index=res_index, name="{literal_key}")']
    results.append((literal_key, res_data))

    data = ', '.join(f'"{col}": {data}' for col, data in results)
    func_lines += [f'  return pandas.DataFrame({{{data}}}, index=res_index)']

    return func_lines
742241d973bb46da2a75b40bf9a76c91ba759d98
3,658,681
import torch


def resize_bilinear_nd(t, target_shape):
    """Bilinear resizes a tensor t to have shape target_shape.

    This function bilinearly resizes an n-dimensional tensor by iteratively
    applying a 2D bilinear resize (which can only resize 2 dimensions at a time).
    For bilinear interpolation, the order in which it is applied does not matter.

    Args:
        t: tensor to be resized
        target_shape: the desired shape of the new tensor.

    Returns:
        The resized tensor
    """
    shape = list(t.shape)
    target_shape = list(target_shape)
    assert len(shape) == len(target_shape)

    # We progressively move through the shape, resizing dimensions...
    d = 0
    while d < len(shape):
        # If we don't need to deal with the next dimension, step over it
        if shape[d] == target_shape[d]:
            d += 1
            continue

        # Otherwise, we'll resize the next two dimensions...
        # If d+2 doesn't need to be resized, this will just be a null op for it
        new_shape = shape[:]
        new_shape[d:d+2] = target_shape[d:d+2]

        # The helper collapse_shape() makes our shapes 4-dimensional with
        # the two dimensions we want to deal with on the outside.
        shape_ = collapse_shape(shape, d, d+2)
        new_shape_ = collapse_shape(new_shape, d, d+2)

        # We can then reshape and use torch.nn.Upsample() on the
        # outer two dimensions.
        t_ = t.view(shape_)
        # transpose [0, 1, 2, 3] to [0, 3, 1, 2]
        t_ = torch.transpose(t_, 1, 3)
        t_ = torch.transpose(t_, 2, 3)

        upsample = torch.nn.Upsample(size=new_shape_[1:3], mode='bilinear', align_corners=True)
        t_ = upsample(t_)

        t_ = torch.transpose(t_, 2, 3)
        t_ = torch.transpose(t_, 1, 3)

        # And then reshape back to our uncollapsed version, having finished resizing
        # two more dimensions in our shape.
        t = t_.reshape(new_shape)
        shape = new_shape
        d += 2

    return t
005266983cca744437826673ff8dd379afb699e2
3,658,682
def _parse_disambiguate(disambiguatestatsfilename):
    """Parse disambiguation stats from given file.
    """
    disambig_stats = [-1, -1, -1]
    with open(disambiguatestatsfilename, "r") as in_handle:
        header = in_handle.readline().strip().split("\t")
        if header == ['sample', 'unique species A pairs',
                      'unique species B pairs', 'ambiguous pairs']:
            disambig_stats_tmp = in_handle.readline().strip().split("\t")[1:]
            if len(disambig_stats_tmp) == 3:
                disambig_stats = [int(x) for x in disambig_stats_tmp]
    return disambig_stats
bb05ec857181f032ae9c0916b4364b772ff7c412
3,658,683
def clean_vigenere(text):
    """Convert text to a form compatible with the preconditions imposed by Vigenere cipher."""
    return ''.join(ch for ch in text.upper() if ch.isupper())
d7c3fc656ede6d07d6e9bac84a051581364c63a0
3,658,684
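Two example calls for the helper above: everything except letters is dropped and the rest is upper-cased.

print(clean_vigenere('Attack at dawn!'))  # ATTACKATDAWN
print(clean_vigenere('3 p.m., Gate B'))   # PMGATEB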
def select_artist(df_by_artists, df_rate):
    """This method selects artists which perform the same genres as the artists that were given.

    :param df_by_artists:
    :param df_rate:
    """
    # save the indices of artists which include any of the genres in the genre profile
    list_of_id = []
    for index, row in df_by_artists.iterrows():
        for genre in row["genres"]:
            if(genre in df_rate.index):
                list_of_id.append(index)
    # find the unique indices
    list_of_id = list(set(list_of_id))
    # select the artists and genres columns of the artists including any of the genres in the genre profile
    df_select_columns = df_by_artists.iloc[list_of_id, [col(df_by_artists, "artists"), col(df_by_artists, "genres")]]
    df_select = df_select_columns.copy()
    # create the artist-genre-matrix of new artists
    for index, row in df_select_columns.iterrows():
        for genre in row['genres']:
            # artist includes genre: 1
            df_select.at[index, genre] = 1
    # artist does not include genre: 0
    df_select = df_select.fillna(0)[df_rate.index]
    return df_select
85c09b62553a3257b4f325dd28d26335c9fcb033
3,658,685
import uuid


def generate_uuid(class_name: str, identifier: str) -> str:
    """
    Generate a uuid based on an identifier

    :param identifier: characters used to generate the uuid
    :type identifier: str, required
    :param class_name: classname of the object to create a uuid for
    :type class_name: str, required
    """
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, class_name + identifier))
10e85effbce04dec62cc55ee709247afa0fb0da7
3,658,686
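Because uuid5 is deterministic, the same class name and identifier always map to the same id, as this small check with made-up inputs shows.

a = generate_uuid('Person', 'alice')
b = generate_uuid('Person', 'alice')
c = generate_uuid('Person', 'bob')
print(a == b, a == c)  # True False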
def fetch(model, key):
    """Fetch by ID."""
    return db.session.query(model).get(key)
4c3008bec5ed5eac593f2ad8ba2816f121362677
3,658,687
from datetime import date
from typing import Optional


def construct_filename(prefix: str, suffix: Optional[str] = '.csv') -> str:
    """Construct a filename containing the current date.

    Examples
    --------
    .. code:: python

        >>> filename = construct_filename('my_file', '.txt')
        >>> print(filename)
        'my_file_31_May_2019.txt'

    Parameters
    ----------
    prefix : :class:`str`
        A prefix for the to-be returned filename.
        The current date will be appended to this prefix.

    suffix : :class:`str`, optional
        An optional suffix for the to-be returned filename.
        No suffix will be attached if ``None``.

    Returns
    -------
    :class:`str`
        A filename consisting of **prefix**, the current date and **suffix**.
    """
    today = date.today()
    suffix = suffix or ''
    return prefix + today.strftime('_%d_%b_%Y') + suffix
8269947952d4c8d81cc2855a5776c3677c6a5c57
3,658,688
def make_friedman_model(point1, point2):
    """
    Makes a vtk line source from two set points

    :param point1: one end of the line
    :param point2: other end of the line
    :returns: The line
    """
    line = vtkLineSource()
    line.SetPoint1(point1)
    line.SetPoint2(point2)
    return line
f33046307c7c0c2bfeadfbdb4e0815bc5d42d73f
3,658,689
import re


def breadcrumbs_pcoa_plot(pcl_fname, output_plot_fname, **opts):
    """Use breadcrumbs `scriptPcoa.py` script to produce principal
    coordinate plots of pcl files.

    :param pcl_fname: String; file name of the pcl-formatted taxonomic profile
                      to visualize via `scriptPcoa.py`.
    :param output_plot_fname: String; file name of the resulting image file.
    :keyword **opts: Any additional keyword arguments are passed to `scriptPcoa.py`
                     as command line flags. By default, it passes `meta=None`,
                     `id=None` and `noShape=None`, which are converted into
                     `--meta`, `--id`, and `--noShape`, respectively.

    External dependencies
      - Breadcrumbs: https://bitbucket.org/biobakery/breadcrumbs
    """
    pcoa_cmd = ("scriptPcoa.py ")

    default_opts = {
        "meta": True,
        "id": True,
        "noShape": True,
        "outputFile": output_plot_fname
    }
    default_opts.update(opts)

    def sample_id(fname):
        id_ = str()
        with open(fname) as f:
            for line in f:
                if line.startswith("#"):
                    id_ = line.split('\t')[0]
                    continue
                else:
                    return id_ or line.split('\t')[0]

    def last_meta_name(fname):
        prev_line = str()
        with open(fname) as f:
            for line in f:
                if re.search(r'[Bb]acteria|[Aa]rchaea.*\s+\d', line):
                    return prev_line.split('\t')[0]
                prev_line = line
        return prev_line.split('\t')[0]

    def run(pcoa_cmd=pcoa_cmd):
        if default_opts['meta'] is True or not default_opts['meta']:
            default_opts['meta'] = last_meta_name(pcl_fname)
        if default_opts['id'] is True or not default_opts['id']:
            default_opts['id'] = sample_id(pcl_fname)
        pcoa_cmd += dict_to_cmd_opts(default_opts)
        pcoa_cmd += " " + pcl_fname + " "
        return CmdAction(pcoa_cmd, verbose=True).execute()

    targets = [output_plot_fname]
    if 'CoordinatesMatrix' in default_opts:
        targets.append(default_opts['CoordinatesMatrix'])

    yield {
        "name": "breadcrumbs_pcoa_plot: " + output_plot_fname,
        "actions": [run],
        "file_dep": [pcl_fname],
        "targets": targets
    }
06fc9511b21ec3c0111ba91cea8c08852eb2bcaf
3,658,690
def _parse_xml(buff):
    """\
    Parses XML and returns the root element.
    """
    buff.seek(0)
    return etree.parse(buff).getroot()
fa3876f93c0a71b9e4bf6d95dfadbf0714e7c17c
3,658,691
def After(interval):
    """
    After waits for the duration to elapse and then sends the current time
    on the returned channel. It is equivalent to Timer(interval).c
    """
    return Timer(interval).c
1011151471f839b3e9f7edad369699d76d9f7601
3,658,692
def f_score(r: float, p: float, b: int = 1):
    """
    Calculate f-measure from recall and precision.

    Args:
        r: recall score
        p: precision score
        b: weight of precision in harmonic mean

    Returns:
        val: value of f-measure
    """
    try:
        val = (1 + b ** 2) * (p * r) / (b ** 2 * p + r)
    except ZeroDivisionError:
        val = 0
    return val
d12af20e30fd80cb31b2cc119d5ea79ce2507c4b
3,658,693
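A few example calls for the measure above: the balanced F1 score, a recall-weighted variant, and the guarded zero case.

print(f_score(r=0.5, p=1.0))        # 0.666... (F1)
print(f_score(r=0.5, p=1.0, b=2))   # 0.555... (recall weighted more heavily)
print(f_score(r=0.0, p=0.0))        # 0 (division by zero is caught)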
import os


def is_container_system_config_file(file):
    """Determine whether a given file is one of the files created by
    setup_container_system_config().

    @param file: Absolute file path as string.
    """
    if not file.startswith("/etc/"):
        return False
    return file in [os.path.join("/etc", f.decode()) for f in CONTAINER_ETC_FILE_OVERRIDE]
8c0c87b6925044f31eb89ed47be5ff39d54139b4
3,658,694
def show_inventory():
    """Show the user what is in stock."""
    context = {
        'inventory': [
            # Could contain any items
            {'name': 'apple', 'price': 1.00},
            {'name': 'banana', 'price': 1.20},
            {'name': 'carrot', 'price': 2.00},
        ]
    }
    return render_template('show_inventory.html', **context)
be2b67abb1ebd60bacfad117dab166a08d6915b1
3,658,695
def grid_optimizer(
        data,
        params,
        args,
        xset,
        yset=None,
        verbose=False,
        visualize=False,
        save_path=None):
    """
    This function optimizes the ESN parameters, x and y, over a specified range
    of values. The optimal values are determined by minimizing the mean squared
    error. Those optimal values are returned.

    Parameters
    ----------
    data : numpy array
        This is the dataset that the ESN should train and predict. If the
        training length plus the future total exceed the length of the data,
        an error will be thrown.
        **The shape of the transpose of the data will determine the number of
        inputs and outputs.**
    params : dictionary
        A dictionary containing all of the parameters required to initialize
        an ESN. Required parameters are:
        * "n_reservoir" : int, the reservoir size
        * "sparsity" : float, the sparsity of the reservoir
        * "rand_seed" : int or None, specifies the initial seed
        * "rho" : float, the spectral radius
        * "noise" : the noise used for regularization
        * "trainlen" : int, the training length
        * "future" : int, the total prediction length
        * "window" : int or None, the window size
    args : list or tuple
        The list of variables you want to optimize. Must be less than or equal
        to two.
    xset : numpy array
        The first set of values to be tested. Cannot be None.
    yset : numpy array or None
        The second set of values to be tested at the same time as the xset.
        Can be None.
    verbose : boolean
        Specifies if the simulation outputs should be printed. Useful for
        debugging.
    visualize : boolean, string
        Specifies if the results should be visualized.
        * 'surface' will plot a 3D error surface.
    save_path : string
        Specifies where the data should be saved. Default is None.

    Returns
    -------
    loss : numpy array
        The array or matrix of loss values.
    """
    assert (len(args) <= 2), "Too many variables to optimize. Pick two or fewer."
    for variable in args:
        assert (variable in list(params.keys())), f"{variable} not in parameters"
    if len(args) > 1:
        assert (yset is not None), "Two variables specified, two sets not given."

    xvar = args[0]
    loss = np.zeros(len(xset))

    if yset is not None:
        assert (len(args) > 1), "Second parameter set given, but not specified."
        yvar = args[1]
        loss = np.zeros([len(xset), len(yset)])

    if verbose:
        print(f"Optimizing over {args}:")

    predictLen = params['future']
    for x, xvalue in enumerate(xset):
        params[xvar] = xvalue
        if yset is not None:
            for y, yvalue in enumerate(yset):
                params[yvar] = yvalue
                predicted = esn_prediction(data, params)
                loss[x, y] = MSE(predicted, data[-predictLen:])
                if verbose:
                    print(f"{variables[xvar]} = {xvalue}, "
                          f"{variables[yvar]} = {yvalue}, MSE={loss[x][y]}")
        else:
            predicted = esn_prediction(data, params)
            loss[x] = MSE(predicted, data[-predictLen:])
            if verbose:
                print(f"{xvar} = {xvalue}, MSE={loss[x]}")

    # =======================================================================
    # Visualization
    # =======================================================================
    if visualize is True and yset is not None:
        plt.figure(figsize=(16, 9), facecolor='w', edgecolor='k')
        plt.title(f"Hyper-parameter Optimization over {variables[xvar]} "
                  f"and {variables[yvar]}")
        im = plt.imshow(loss.T,
                        vmin=abs(loss).min(), vmax=abs(loss).max(),
                        origin='lower', cmap='PuBu')
        plt.xticks(np.linspace(0, len(xset) - 1, len(xset)), xset)
        plt.yticks(np.linspace(0, len(yset) - 1, len(yset)), yset)
        plt.xlabel(f'{variables[xvar]}', fontsize=16)
        plt.ylabel(f'{variables[yvar]}', fontsize=16)
        cb = plt.colorbar(im)
        cb.set_label(label="Mean Squared Error", fontsize=16,
                     rotation=-90, labelpad=25)
    elif visualize is True and yset is None:
        plt.figure(figsize=(16, 9), facecolor='w', edgecolor='k')
        plt.plot(xset, loss, '-ok', alpha=0.6)
        plt.title(f'MSE as a Function of {variables[xvar]}', fontsize=20)
        plt.xlabel(f'{variables[xvar]}', fontsize=18)
        plt.ylabel('MSE', fontsize=18)
    # compare with ==, not identity, when testing for the 'surface' option
    elif visualize == 'surface' and yset is not None:
        fig = plt.figure(figsize=(16, 9), facecolor='w', edgecolor='k')
        ax = plt.axes(projection='3d')
        X = np.array(xset)
        Y = np.array(yset)
        Z = np.array(loss).T
        print(f"Shape X {X.shape}")
        print(f"Shape Y {Y.shape}")
        print(f"Shape Z {Z.shape}")
        mappable = plt.cm.ScalarMappable()
        mappable.set_array(Z)
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                        cmap=mappable.cmap, norm=mappable.norm)
        ax.set_xlabel(f'{variables[xvar]}', fontsize=18)
        ax.set_ylabel(f'{variables[yvar]}', fontsize=18)
        ax.set_zlabel('MSE', fontsize=18)
        cb = plt.colorbar(mappable)
        cb.set_label(label="Mean Squared Error", fontsize=16,
                     rotation=-90, labelpad=25)
        fig.tight_layout()
        plt.show()

    # =======================================================================
    # Save data
    # =======================================================================
    if save_path is not None:
        if yset is not None:
            fname = f"_{xvar}_{yvar}_loss"
            np.save('./data/' + save_path + fname, loss)
        else:
            fname = f"_{xvar}_loss"
            np.save('./data/' + save_path + fname, loss)

    return loss
c3c9749827997793ab0ee40e454143d26ed37cc9
3,658,696
import numpy as np
import re


def rebuild_schema(doc, r, df):
    """Rebuild the schema for a resource based on a dataframe"""

    # Re-get the resource in the doc, since it may be different.
    try:
        r = doc.resource(r.name)
    except AttributeError:
        # Maybe r is actually a resource name
        r = doc.resource(r)

    def alt_col_name(name, i):
        if not name:
            return 'col{}'.format(i)
        return re.sub('_+', '_', re.sub('[^\w_]', '_', str(name)).lower()).rstrip('_')

    df_types = {
        np.dtype('O'): 'text',
        np.dtype('int64'): 'integer',
        np.dtype('float64'): 'number'
    }

    try:
        df_index_frame = df.index.to_frame()
    except AttributeError:
        df_index_frame = None

    def get_col_dtype(c):
        c = str(c)
        try:
            return df_types[df[c].dtype]
        except KeyError:
            # Maybe it is in the index?
            pass
        try:
            return df_types[df_index_frame[c].dtype]
        except TypeError:
            # Maybe not a multi-index
            pass
        if c == 'id' or c == df.index.name:
            return df_types[df.index.dtype]
        return 'unknown'

    columns = []
    schema_term = r.schema_term[0]

    if schema_term:
        old_cols = {c['name'].value: c.properties for c in schema_term.children}
        for c in schema_term.children:
            schema_term.remove_child(c)
        schema_term.children = []
    else:
        old_cols = {}
        schema_term = doc['Schema'].new_term('Table', r.schema_name)

    index_names = [n if n else "id" for n in df.index.names]

    for i, col in enumerate(index_names + list(df.columns)):
        acn = alt_col_name(col, i) if alt_col_name(col, i) != str(col) else ''
        d = {'name': col, 'datatype': get_col_dtype(col), 'altname': acn}

        if col in old_cols.keys():
            lookup_name = col
        elif acn in old_cols.keys():
            lookup_name = acn
        else:
            lookup_name = None

        if lookup_name and lookup_name in old_cols:
            for k, v in schema_term.properties.items():
                old_col = old_cols.get(lookup_name)
                for k, v in old_col.items():
                    if k != 'name' and v:
                        d[k] = v

        columns.append(d)

    for c in columns:
        name = c['name']
        del c['name']
        datatype = c['datatype']
        del c['datatype']
        altname = c['altname']
        del c['altname']

        schema_term.new_child('Column', name, datatype=datatype, altname=altname, **c)
ed212e5cff26dcfece99e3361df9d61823c2bfde
3,658,697
def compute_similarity(image, reference):
    """Compute a similarity index for an image compared to a reference image.

    Similarity index is based on the general algorithm used in the AmphiIndex
    algorithm.

        - identify slice of image that is a factor of 256 in size
        - rebin image slice down to a (256,256) image
        - rebin same slice from reference down to a (256,256) image
        - sum the differences of the rebinned slices
        - divide absolute value of difference scaled by reference slice sum

    .. note:: This index will typically return values < 0.1 for similar images,
              and values > 1 for dis-similar images.

    Parameters
    ----------
    image : ndarray
        Image (as ndarray) to measure

    reference : ndarray
        Image which serves as the 'truth' or comparison image.

    Returns
    -------
    similarity_index : float
        Value of similarity index for `image`
    """
    # Insure NaNs are replaced with 0
    image = np.nan_to_num(image[:], nan=0)
    reference = np.nan_to_num(reference[:], nan=0)

    imgshape = (min(image.shape[0], reference.shape[0]),
                min(image.shape[1], reference.shape[1]))
    minsize = min(imgshape[0], imgshape[1])

    # determine largest slice that is a power of 2 in size
    window_bit = maxBit(minsize)
    window = 2**window_bit

    # Define how big the rebinned image should be for computing the sim index
    # Insure a minimum rebinned size of 64x64
    sim_bit = (window_bit - 2) if (window_bit - 2) > 6 else window_bit
    sim_size = 2**sim_bit

    # rebin image and reference
    img = rebin(image[:window, :window], (sim_size, sim_size))
    ref = rebin(reference[:window, :window], (sim_size, sim_size))

    # Compute index
    diffs = np.abs((img - ref).sum())
    sim_indx = diffs / img.sum()
    return sim_indx
0b49009bfdd0697999e61825390a8f883ae8dd79
3,658,698
def _create_npu_quantization(
        scale,
        zero_point,
):
    """This is a helper function to capture a list of arguments
    to create a Vela NpuQuantization object.
    """
    # Scale could be an ndarray if per-channel quantization is available
    if not isinstance(scale, tvm.tir.expr.Load):
        if isinstance(scale.value, float):
            scale = np.single(scale.value)
        else:
            assert isinstance(scale.value.value, float)
            scale = np.single(scale.value.value)
    q_params = vapi.NpuQuantization(scale_f32=scale, zero_point=zero_point.value)
    return q_params
71f7e20a760940e6d46301ccd9130265de140b29
3,658,699