Dataset schema:
content: string (length 35 to 762k characters)
sha1: string (length 40 characters)
id: int64 (0 to 3.66M)
import numpy as np


def cal_covered_users(positions, heat_map, radius):
    """
    :param positions: $k$ positions, array of (y, x)
    :param heat_map: grid data with count
    :param radius: 0 (1 grid), 1 (9 grids), 2 (25 grids)
    :return: coverage score
    """
    row_num, col_num = heat_map.shape
    mask = np.zeros(heat_map.shape, dtype=int)
    for position in positions:
        center_x = position[1]
        center_y = position[0]
        max_x = center_x + radius if center_x + radius < col_num else col_num - 1
        min_x = center_x - radius if center_x - radius >= 0 else 0
        max_y = center_y + radius if center_y + radius < row_num else row_num - 1
        min_y = center_y - radius if center_y - radius >= 0 else 0
        for x in range(min_x, max_x + 1):
            for y in range(min_y, max_y + 1):
                mask[y, x] = 1
    return np.sum(np.multiply(mask, heat_map))
52e3fec6b7aa01c9882c15ca3331b3199fa554a2
3,656,200
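A minimal usage sketch for the cal_covered_users snippet above, assuming the function and its numpy import are in scope; the heat map and site positions are made-up illustration values:

import numpy as np

heat_map = np.arange(16).reshape(4, 4)   # 4x4 grid of per-cell user counts
positions = [(1, 1), (3, 3)]             # (y, x) coordinates of two candidate sites
print(cal_covered_users(positions, heat_map, radius=1))  # sum of counts in the covered cells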
from typing import Union
import pathlib

import OpenSSL.crypto

# ``TS`` (type aliases) and ``certificate_from_string`` are provided by the surrounding module.


def certificate_from_file(
    filename: Union[str, pathlib.Path],
    format=OpenSSL.crypto.FILETYPE_PEM,
) -> TS.X509:
    """Load an X509 certificate from ``filename``.

    :param filename: The path to the certificate on disk.
    :param format: The format of the certificate, from :doc:`OpenSSL:api/crypto`.
    """
    with open(filename, 'r') as handle:
        return certificate_from_string(handle.read(), format)
1cc3cb514454118ed6af9257b35aa39586bce31b
3,656,201
def get_welcome_response(session): """ Welcome the user to my python skill """ card_title = "Welcome" speech_output = "Welcome to my python skill. You can search for GitHub repositories. " # If the user either does not reply to the welcome message or says something # that is not understood, they will be prompted again with this text. reprompt_text = "Ask me to search GitHub for a repository. " session_attributes = session.get('attributes', {}) speechlet_response = build_speechlet_response( card_title, speech_output, reprompt_text ) return build_response(session_attributes, speechlet_response)
d90bbd14bef29f1d7400042bbc593e4bb63b8713
3,656,202
import numpy as np def rotate_quaternion ( angle, axis, old ): """Returns a quaternion rotated by angle about axis relative to old quaternion.""" # Note that the axis vector should be normalized and we test for this # In general, the old quaternion need not be normalized, and the same goes for the result # although in our applications we only ever use unit quaternions (to represent orientations) assert old.size==4, 'Error in old quaternion dimension' assert axis.size==3, 'Error in axis dimension' assert np.isclose (np.sum(axis**2),1.0), 'axis normalization error {} {} {}'.format(*axis) # Standard formula for rotation quaternion, using half angles rot = np.sin(0.5*angle) * axis rot = np.array([np.cos(0.5*angle),rot[0],rot[1],rot[2]],dtype=np.float_) e = quatmul ( rot, old ) # Apply rotation to old quaternion return e
ccc67dbcd2153b40a4e4c560d423d4c495912d8e
3,656,203
import os def apogeeSpectroReduxDirPath(dr=None): """ NAME: apogeeSpectroReduxDirPath PURPOSE: returns the path of the spectro dir INPUT: dr= return the path corresponding to this data release OUTPUT: path string HISTORY: 2014-11-25 - Written - Bovy (IAS) """ if dr is None: dr= _default_dr() if dr.lower() == 'current': return os.path.join(_APOGEE_DATA,'apogeework', 'apogee','spectro','redux') else: return os.path.join(_APOGEE_DATA,'dr%s' % dr, 'apogee','spectro','redux')
172a9919b0b7aef0dcdd9486f51900a9a8c2b28f
3,656,204
def rochepot_dl(x, y, z, q): """ Dimensionless Roche potential (:math:`\\Phi_n`, synchronous rotation) More massive component (:math:`m_1`) is centered at (x,y,z) = (0,0,0). Less massive component (:math:`m_2`) is at (1,0,0). The unit of length is the distance between the objects. Both objects are in the x,y plane (x-axis along the connecting line and z perpendicular to the orbital plane). Parameters ---------- x, y, z : float or array Location(s) at which to calculate the potential. Unit of length is the distance between the masses m1 and m2. q : float Mass ratio (0 <= m2/m1 <= 1) Returns ------- Potential : float or array The potential at the specified location(s) """ _checkq(q) r1, r2 = _r1r2_dl(x, y, z) p = 2/((1+q)*r1) + 2*q/((1+q)*r2) + (x - q/(1+q))**2 + y**2 return p
f3d15ea27e6b4c476d345fa8af254b2a14cbdfbc
3,656,205
def health_check(config):
    """ Tests the API to ensure it is working. """
    # ITGlue client class is provided by the surrounding module.
    itglue = ITGlue(config['api_key'], config['itglue_host'])
    try:
        itglue._make_request('organizations', {})
        return True
    except Exception:
        return False
02b9a582b506f590adcdcdbd661abbc7aec52d26
3,656,206
import io
import time

import picamera
from PIL import Image


def capture_image(resolution=(1024, 768), size=(320, 240), sleep=2):
    """ Captures image from raspberry pi camera
    resolution -- resolution of capture
    size -- size of output
    sleep -- sleep time in seconds
    """
    stream = io.BytesIO()
    with picamera.PiCamera() as camera:
        #camera.led = False
        camera.resolution = resolution
        camera.start_preview()
        time.sleep(sleep)
        camera.capture(stream, format='jpeg', resize=size)
    # "Rewind" the stream to the beginning so we can read its content
    stream.seek(0)
    image = Image.open(stream)
    return image
c8967d6bce5f953d11878fb31fa02dbffbe4e283
3,656,207
import numpy def MLVR(XDATA,YDATA,xreference=0,residual=1,xlabel='',ylabel='',title='',alpha = 0.01,iters = 1000,plot=1): """Does Multivariant Linear Regression properties: XDATA = The Feature Dataframe YDATA = The Target Dataframe xreference = 1/0 -> The column index in XDATA for ploting graph xlabel = Label for X in Graph ylabel = Label for Y in Graph title = title for graph] alpha = Learning rate for model iters = the number of iteration to train the model """ XDATA.conv_type('float',change_self=True) xpure = XDATA[xreference] XDATA.normalize(change_self=True) YDATA.conv_type('float',change_self=True) ypure = YDATA.tolist[0] YDATA.normalize(change_self=True) X=XDATA y=YDATA df =DataFrame() ones = df.new(X.shape[0],1,elm=1.) X = df.concat(ones,X,axis=1) theta = DataFrame().new(1,length(X.columns),elm=0.) def computeCost(X,y,theta): dot_product = DataFrame().dot(X,theta.T) return float( ( (dot_product - y)**2 ).sum(axis=0) )/(2 * X.shape[0]) def gradientDescent(X,y,theta,iters,alpha): #cost = np.zeros(iters) cost = [] for i in range(iters): dot_product = DataFrame().dot(X,theta.T) derivative = DataFrame(dataframe = [[(alpha/X.shape[0])]]) * ( X*(dot_product - y) ).sum(axis = 0 ) theta = theta - derivative cost.append( computeCost(X, y, theta) ) #cost[i] = computeCost(X, y, theta) return theta,cost def print_equation(g): stra = "Estimated equation, y = %s"%g[0] g0 = g[0] del g[0] for c in range(length(g)): stra += " + %s*x%s"%(g[c],c+1) print(stra) def predict_li(XDATA,g): g0 = g[0] del g[0] y_pred = [] for row in range(XDATA.shape[0]): suma = 0 suma += sum(list_multiplication( g , XDATA.row(row) ) ) yres = g0 + suma y_pred.append(yres) return y_pred g,cost = gradientDescent(X,y,theta,iters,alpha) finalCost = computeCost(X,y,g) #g = g.T g = g.two2oneD() print("Thetas = %s"%g) #print("cost = ",cost) print("finalCost = %s" % finalCost) gN = g[:] print_equation(gN) gN = g[:] y_pred = predict_li(XDATA,gN) y_PRED = reference_reverse_normalize(ypure,y_pred) emin,emean,emax = minResidual(ypure , y_PRED),meanResidual(ypure , y_PRED),maxResidual(ypure , y_PRED) print("Min,Mean,Max residual = %s, %s, %s"%( emin,emean,emax ) ) print("Residual Min - Max Range = %s"%(emax-emin)) print("Residual range percentage = %s" %((emax-emin)/(max(ypure) - min(ypure))) ) print("Residual mean percentage = %s" %(emean/ArithmeticMean(ypure)) ) #-- If finalcost is lowest mean Residual or mean Error distance also will be lowest #y_pred = [g[0] + g[1]*my_data[0][c] + g[2]*my_data[1][c] for c in range(my_data.shape[0])] y_actual = YDATA.tolist[0] x = XDATA[xreference] if plot == 1: fig, ax = plt.subplots() ax.plot(numpy.arange(iters), cost, 'r') ax.set_xlabel('Iterations') ax.set_ylabel('Cost') ax.set_title('Error vs. Training Epoch') plt.show() x_a, y_a = give_time_series(xpure,y_PRED)#give_time_series(x,y_pred) plt.plot(x_a,y_a,color='r',marker='.',label='Prediction') x_a, y_a = give_time_series(xpure,ypure)#give_time_series(x,y_actual) plt.plot(x_a,y_a,color='g',marker='.',label='Real') if residual == 1: plot_error_distance(xpure,y_PRED,ypure) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) plt.legend() plt.show() else: print('plot off') return finalCost
84509c2e8ccc9b4f52b5d90432e74a18da226b0a
3,656,208
def FilterAndTagWrapper(target, dontRemoveTag=False): """\ Returns a component that wraps a target component, tagging all traffic going into its inbox; and filtering outany traffic coming out of its outbox with the same unique id. """ if dontRemoveTag: Filter = FilterButKeepTag else: Filter = FilterTag return Graphline( TAGGER = UidTagger(), FILTER = Filter(), TARGET = target, linkages = { ("TARGET", "outbox") : ("FILTER", "inbox"), # filter data coming from target ("FILTER", "outbox") : ("self", "outbox"), ("TAGGER", "uid") : ("FILTER", "uid"), # ensure filter uses right uid ("self", "inbox") : ("TAGGER", "inbox"), # tag data going to target ("TAGGER", "outbox") : ("TARGET", "inbox"), ("self", "control") : ("TARGET", "control"), # shutdown signalling path ("TARGET", "signal") : ("TAGGER", "control"), ("TAGGER", "signal") : ("FILTER", "control"), ("FILTER", "signal") : ("self", "signal"), }, )
045cdd4f0716ba187211fbb1a4536f1f4c863bc1
3,656,209
from typing import Union

import numpy as np


def format_time(seconds: Union[int, float]) -> str:
    """Convert the seconds to human readable string with days, hours, minutes and seconds."""
    s = int(np.rint(seconds))
    if s < 60:
        return "{0}s".format(s)
    elif s < 60 * 60:
        return "{0}m {1:02}s".format(s // 60, s % 60)
    elif s < 24 * 60 * 60:
        return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
    else:
        return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
f50b7d96a91e6e261169f0f0c9d71186e3c208fe
3,656,210
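A quick usage sketch for the format_time snippet above (function assumed in scope):

print(format_time(42))        # 42s
print(format_time(3725))      # 1h 02m 05s
print(format_time(200000))    # 2d 07h 33m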
import numpy as np
import pandas as pd
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import precision_recall_curve, precision_score, recall_score
from sklearn.model_selection import train_test_split


def run_model(df, i, name, gscv, calibrate=True):
    """Given customercode values in dict_folds,
    1. create balanced dataset
    2. split into train, test sets
    3. run grid search
    4. get probability scores
    5. calibrate as directed
    6. find optimal cutoff from precision-recall
    7. return predictions, data, scores
    """
    df_undersampled = pd.concat([
        df.query("target == 0").sample(frac=0.3, random_state=0),
        df.query("target == 1")
    ])
    X = df_undersampled.drop("target", axis=1).copy()
    y = df_undersampled.loc[:, "target"].copy()
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, train_size=0.7, stratify=y)
    model = gscv.fit(X_tr, y_tr)

    # Probabilities
    y_scores = model.predict_proba(X_te)[:, 1]
    if calibrate:
        sigmoid = CalibratedClassifierCV(model, cv=2, method="sigmoid")
        sigmoid.fit(X_tr, y_tr)
        y_probs = sigmoid.predict_proba(X_te)[:, 1]
    else:
        y_probs = np.array(y_scores)

    # Cutoff
    p, r, t = precision_recall_curve(y_te, y_probs, pos_label=1)
    df_pr = (pd.DataFrame(data=zip(p, r, t),
                          columns=["precision", "recall", "threshold"])
             .set_index("threshold"))
    cutoff = (pd.Series(data=np.abs(df_pr["precision"] - df_pr["recall"]),
                        index=df_pr.index)
              .idxmin()
              .round(2))

    # Predictions
    y_pred = (y_probs >= cutoff).astype(int)

    dict_data = {
        "X_tr": X_tr, "X_te": X_te,
        "y_tr": y_tr, "y_te": y_te,
        "y_scores": y_scores,
        "y_probs": y_probs,
        "y_pred": y_pred,
    }
    dict_scores = {
        "precision": precision_score(y_te, y_pred),
        "recall": recall_score(y_te, y_pred),
    }
    payload = {
        "name": name,
        "model": model,
        "data": dict_data,
        "scores": dict_scores
    }
    return payload
0f5513b7e4117580dd297ee5e9b7a88afc691b3a
3,656,211
import os from functools import reduce def prepare_runkos(main_dir, discard_file=None): """Identify the positions with variations between 0.2 to 0.8 in the training population and calculate the mean and std for the variation. """ THRESHOLD_DROPNA = 32 # more than 40 columns should have a value not a nan. file_count = 0 list_of_dfs = [] list_dfs_var = [] file_csv_list = [] for file_csv in os.listdir(main_dir): # Ignore the file that is given as the validation dataset if discard_file and file_csv == discard_file: continue file_path = "%s/%s" % (main_dir, file_csv) dataframe = [] dataframe = pd.read_csv(file_path) # Ignore Insertions by getting only the first entity of the nucleotide at each row dataframe['mut'] = dataframe.apply(lambda row: row['mut'][0], axis=1) # Ignore insertion and deletions by merging the rows with equal pos and mut dataframe = dataframe.groupby(['pos', 'mut']).sum().reset_index() dataframe_pivot = [] dataframe_pivot = pd.pivot_table(dataframe, index='pos', columns='mut', values='freq', aggfunc='sum') # Rename columns dataframe_pivot.rename(lambda x: "%s_%s" % (x, file_count), axis='columns', inplace=True) list_dfs_var.append(dataframe_pivot.copy()) file_count += 1 file_csv_list.append(file_csv) df_merged = reduce(lambda x, y: pd.merge(x, y, how='outer', right_index=True, left_index=True), list_dfs_var) df_merged_filtered = df_merged.dropna(thresh=THRESHOLD_DROPNA) df_mean_std = calc_mean_std_population(df_merged_filtered, discard_file) return df_mean_std, list_of_dfs
91ec730e31ea90d47239c00a00d444fbb99e3f69
3,656,212
import re

import pandas as pd


def extract_date(db):
    """Extract Release Date from metadata and convert it into YYYY MM format"""
    date_pattern = r'releaseDate\":(\d{9,10})'

    def format_date(x):
        """Takes epoch time as argument and returns date in YYYY MM format"""
        date = re.search(date_pattern, x)
        if date:
            val = pd.to_datetime(date.group(1), unit='s')
            val = val.strftime('%Y %b')
            return val
        else:
            return 'No Date'

    db['date'] = db['meta'].apply(format_date)
    db = db.drop('meta', axis=1)
    return db
9d4d8c19846a49967f9e3deb3be8808df9d69812
3,656,213
import numpy as np


def split_test_image(aa):
    """ Separate image created by mk_test_image into x,y components """
    if aa.dtype.kind == 'f':
        y = np.round((aa % 1) * 1024)
        x = np.floor(aa)
    else:
        nshift = (aa.dtype.itemsize * 8) // 2
        mask = (1 << nshift) - 1
        y = aa & mask
        x = aa >> nshift
    return x, y
4a06a0c0fb80dfcb8a58d9509971bfdc0b026d27
3,656,214
from numpy import arcsin, cos, deg2rad, rad2deg, sin, sqrt


def sphdist(ra1, dec1, ra2, dec2):
    """measures the spherical distance between 2 points

    Inputs:
        (ra1, dec1) in degrees
        (ra2, dec2) in degrees
    Outputs:
        returns a distance in degrees
    """
    dec1_r = deg2rad(dec1)
    dec2_r = deg2rad(dec2)
    return 2. * rad2deg(
        arcsin(
            sqrt(
                (sin((dec1_r - dec2_r) / 2)) ** 2
                + cos(dec1_r) * cos(dec2_r) * (sin((deg2rad(ra1 - ra2)) / 2)) ** 2)))
517f7c67370c6e065c8860b2be59470a2801567d
3,656,215
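A small sanity check for the sphdist snippet above; the coordinates are arbitrary example values, one degree apart:

print(sphdist(10.0, 0.0, 11.0, 0.0))    # ~1.0 deg along the equator
print(sphdist(10.0, 45.0, 10.0, 46.0))  # ~1.0 deg along a meridian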
def parse_kwargs(kwargs, a_list): """ extract values from kwargs or set default """ if a_list is not None: num_colors = len(a_list) default_colors = generate_colors(num_colors) else: num_colors = 1 default_colors = 'k' logscale = kwargs.get('logscale', [False, False]) Range = kwargs.get('Range', [[], []]) colors = kwargs.get('colors', default_colors) figure_name = kwargs.get('figure_name', None) show = kwargs.get('show', True) dist = kwargs.get('dist', None) values = [logscale, Range, colors, figure_name, show, dist] return values
3f1006a8f638b3304ec6aa975346be1a4b6e8189
3,656,216
import pandas as pd
import talib


def talib_WCLPRICE(DataFrame):
    """WCLPRICE - Weighted Close Price"""
    res = talib.WCLPRICE(DataFrame.high.values, DataFrame.low.values, DataFrame.close.values)
    return pd.DataFrame({'WCLPRICE': res}, index=DataFrame.index)
6e2d4530fcb33d64b9fbe8a3f0a8a5d64c8f8107
3,656,217
def is_pi_parallel(ring1_center: np.ndarray, ring1_normal: np.ndarray, ring2_center: np.ndarray, ring2_normal: np.ndarray, dist_cutoff: float = 8.0, angle_cutoff: float = 30.0) -> bool: """Check if two aromatic rings form a parallel pi-pi contact. Parameters ---------- ring1_center, ring2_center: np.ndarray Positions of centers of the two rings. Can be computed with the compute_ring_center function. ring1_normal, ring2_normal: np.ndarray Normals of the two rings. Can be computed with the compute_ring_normal function. dist_cutoff: float Distance cutoff. Max allowed distance between the ring center (Angstroms). angle_cutoff: float Angle cutoff. Max allowed deviation from the ideal (0deg) angle between the rings (in degrees). Returns ------- bool True if two aromatic rings form a parallel pi-pi. """ dist = np.linalg.norm(ring1_center - ring2_center) angle = angle_between(ring1_normal, ring2_normal) * 180 / np.pi if ((angle < angle_cutoff or angle > 180.0 - angle_cutoff) and dist < dist_cutoff): return True return False
def4eaba9e25e9034fce7559041e5142f82fc3c8
3,656,218
def _fetch_alleninf_coords(*args, **kwargs): """ Gets updated MNI coordinates for AHBA samples, as shipped with `alleninf` Returns ------- coords : :class:`pandas.DataFrame` Updated MNI coordinates for all AHBA samples References ---------- Updated MNI coordinates taken from https://github.com/chrisfilo/alleninf, which is licensed under the BSD-3 (reproduced here): Copyright (c) 2018, Krzysztof Gorgolewski All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ coords = resource_filename('abagen', 'data/corrected_mni_coordinates.csv') coords = pd.read_csv(coords).rename(dict(corrected_mni_x='mni_x', corrected_mni_y='mni_y', corrected_mni_z='mni_z'), axis=1) return coords.set_index('well_id')
dae30a0f5404151a3e7d82f129ff36cfec14caa0
3,656,219
from typing import List from typing import Union from typing import DefaultDict from typing import Dict from typing import Tuple def get_classes_for_mol_network(can: canopus.Canopus, hierarchy: List[str], npc_hierarchy: List[str], class_p_cutoff: float, max_class_depth: Union[int, None]) -> \ DefaultDict[str, List[Union[str, Dict[str, List[Tuple[ Union[str, float]]]]]]]: """Loop through mol network and gather CF and NPC classes :param can: Canopus object of canopus results with gnps mol network data :param hierarchy: the CF class level names to be included in output in order of hierarchy :param npc_hierarchy: the NPC class level names to be included in output in order of hierarchy :param class_p_cutoff: probability cutoff for including a class :param max_class_depth: max class depth for finding CF class :return: classes output - dict of lists of {componentindex: [cluster index, formula, {CF_level: [(class, prob)]}, {NPC_level: [(class, prob)]}]} CF classes are found by looking for the class at deepest depth (or max_class_depth) and then ordering these deepest classes based on priority. Then, the classes are traced back to higher hierarchy and sorted in output, again based on priority of deepest classes. """ results = defaultdict(list) for node_id, node in can.gnps.nodes.items(): # get canopus compound obj compound = can.sirius.compounds.get(node_id) if compound: cf_classes_dict = get_cf_classes(can, compound, hierarchy, class_p_cutoff, max_class_depth) npc_classes_dict = get_npc_classes(can, compound, npc_hierarchy) formula = compound.formula comp_id = node.componentId if comp_id == '-1': # handling of singleton -1 components comp_id += f"_{node_id}" results[comp_id].append( [node_id, formula, cf_classes_dict, npc_classes_dict]) return results
6808a751ed1873b7fb573bb3ecc55586d94590b1
3,656,220
def list_books(books):
    """Creates a string that, on each line, informs about a book."""
    return '\n'.join([f'+ {book.name}: {book.renew_count}: {book.return_date}'
                      for book in books])
fce770a39def7f40ed12820a578b4e327df7da43
3,656,221
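A tiny usage sketch for the list_books snippet above; Book here is a hypothetical stand-in, since the function only relies on the .name, .renew_count and .return_date attributes:

from collections import namedtuple

Book = namedtuple("Book", ["name", "renew_count", "return_date"])
books = [Book("Dune", 2, "2024-01-15"), Book("Hyperion", 0, "2024-02-01")]
print(list_books(books))
# + Dune: 2: 2024-01-15
# + Hyperion: 0: 2024-02-01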
def getHSPLNamespace():
    """
    Retrieve the namespace of the HSPL XML.
    @return: The namespace of the HSPL XML.
    """
    return HSPL_NAMESPACE
481db5781ff9d0b4a4e4702cccafb088379e38a4
3,656,222
def add_lead_zero(num,digit,IgnoreDataManipulation=False,RaiseDataManipulationError=False,DigitMustAtLeastTwo=False): """Add leading the letters '0' to inputted integer 'num' according to defined 'digit' and return as string. Required keyword arguments: - num (int) : Integer (can be positive, zero, or negative) - digit (int) : How much digits of number should be in returned string. Optional keyword arguments: - IgnoreDataManipulation (bool) : Avoid raising acceptable data manipulation warning. - RaiseDataManipulationError (bool) : Raise every data manipulation warning as error exception. (IgnoreDataManipulation must be False.) - DigitMustAtLeastTwo (bool) : Raise warning or error if defined digit is less than 2. Data manipulation error: - Digit should be at least 2. (Ignore by default) - Amount of defined digits is less than digits of number in inputted integer. """ if type(num) is not int or type(digit) is not int: raise TypeError('parameters \'num\', \'digit\' should be integer.') if type(IgnoreDataManipulation) is not bool or type(RaiseDataManipulationError) is not bool or type(DigitMustAtLeastTwo) is not bool: raise TypeError('parameters \'IgnoreDataManipulation\', \'RaiseDataManipulationError\', and \'DigitMustAtLeastTwo\' should be boolean.') if IgnoreDataManipulation: RaiseDataManipulationError=False if digit<1: raise ValueError('Digit should be at least one.') if digit<2 and DigitMustAtLeastTwo: msg='Amount of digits should be at least 2.' if not IgnoreDataManipulation and not RaiseDataManipulationError: alternative_warn(msg,ValueWarning,'add_lead_zero') if RaiseDataManipulationError: raise ValueError(msg) # Reuse variable 'digit' if num>=0: num=str(num) IsNegative=False else: num=str(abs(num)) IsNegative=True digit=digit-len(num) if digit>0: for x in range(0,digit): # Reuse variable 'num' num='0'+num if not IsNegative: return num else: return '-'+num elif digit==0: if not IsNegative: return num else: return '-'+num else: msg='Defined digits amount is less than digits of number in inputted integer. It possibly means that some of used data has been manipulated incorrectly.' if not IgnoreDataManipulation and not RaiseDataManipulationError: alternative_warn(msg,ValueWarning,'add_lead_zero') if RaiseDataManipulationError: raise ValueError(msg) if not IsNegative: return num else: return '-'+num
ae3cffa2470a2acf5900a41b342366fb7c6e92da
3,656,223
def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers): """ Attaches servers to a monitoring policy. """ try: attach_servers = [] for _server_id in servers: server_id = get_server(oneandone_conn, _server_id) attach_server = oneandone.client.AttachServer( server_id=server_id ) attach_servers.append(attach_server) if module.check_mode: if attach_servers: return True return False monitoring_policy = oneandone_conn.attach_monitoring_policy_server( monitoring_policy_id=monitoring_policy_id, servers=attach_servers) return monitoring_policy except Exception as ex: module.fail_json(msg=str(ex))
bf096804ec6be47fa4e41c9f4e50d51313f8ef3f
3,656,224
from typing import Union def get_generator_contingency_fcas_availability_term_2(data, trader_id, trade_type, intervention) -> Union[float, None]: """Get generator contingency FCAS term 2""" # Parameters lower_slope_coefficient = get_lower_slope_coefficient(data, trader_id, trade_type) if lower_slope_coefficient == 0: return None enablement_min = lookup.get_trader_quantity_band_attribute(data, trader_id, trade_type, '@EnablementMin', float) reg_target = lookup.get_trader_solution_attribute(data, trader_id, '@L5RegTarget', float, intervention) energy_target = lookup.get_trader_solution_attribute(data, trader_id, '@EnergyTarget', float, intervention) return (energy_target - reg_target - enablement_min) / lower_slope_coefficient
8ec9c76c1941713511f8b472c4649954fd725d58
3,656,225
def format_pvalue(p_value, alpha=0.05, include_equal=True):
    """
    If p-value is lower than alpha, replace it with "<alpha" (e.g., "<0.05"); otherwise, round it to three decimals
    :param p_value: input p-value as a float
    :param alpha: significance level
    :param include_equal: include equal sign ('=') to pvalue (e.g., '=0.06') or not (e.g., '0.06')
    :return: p_value: processed p-value (replaced by "<0.05" or rounded to three decimals) as a str
    """
    if p_value < alpha:
        p_value = "<" + str(alpha)
    else:
        if include_equal:
            p_value = '=' + str(round(p_value, 3))
        else:
            p_value = str(round(p_value, 3))
    return p_value
aa6506b14b68746f4fa58d951f246321e8b5a627
3,656,226
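Example calls for the format_pvalue snippet above, covering its three branches:

print(format_pvalue(0.001))                         # <0.05
print(format_pvalue(0.0612))                        # =0.061
print(format_pvalue(0.0612, include_equal=False))   # 0.061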
import numpy as np


def _compute_y(x, ll):
    """Computes y = sqrt(1 - ll**2 * (1 - x**2))."""
    return np.sqrt(1 - ll ** 2 * (1 - x ** 2))
773a0695676e43984bb0ca8c1d8af2e0bc3bb4fd
3,656,227
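A short vectorised call to the _compute_y snippet above, with arbitrary example inputs:

import numpy as np

x = np.linspace(-1.0, 1.0, 5)
print(_compute_y(x, ll=0.5))   # 1.0 at x = +/-1, sqrt(0.75) at x = 0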
def create_axis(length=1.0, use_neg=True): """ Create axis. :param length: :param use_neg: If False, Only defined in Positive planes :return: Axis object """ # Defining the location and colors of each vertex of the shape vertices = [ # positions colors -length * use_neg, 0.0, 0.0, 1.0, 0.0, 0.0, length, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -length * use_neg, 0.0, 0.0, 1.0, 0.0, 0.0, length, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -length * use_neg, 0.0, 0.0, 1.0, 0.0, 0.0, length, 0.0, 0.0, 1.0] # Defining connections among vertices # We have a triangle every 3 indices specified indices = [ 0, 1, 2, 3, 4, 5] return Shape(vertices, indices)
fe9c9d49de786147a382e1fda1e6ab92d26a1fe9
3,656,228
def genmatrix(list, combinfunc, symmetric=False, diagonal=None):
    """
    Takes a list and generates a 2D-matrix using the supplied combination
    function to calculate the values.

    PARAMETERS
        list        - the list of items
        combinfunc  - the function that is used to calculate the value in a
                      cell. It has to cope with two arguments.
        symmetric   - Whether it will be a symmetric matrix along the diagonal.
                      For example, if the list contains integers, and the
                      combination function is abs(x-y), then the matrix will
                      be symmetric.
                      Default: False
        diagonal    - The value to be put into the diagonal. For some
                      functions, the diagonal will stay constant. An example
                      could be the function "x-y". Then each diagonal cell
                      will be "0". If this value is set to None, then the
                      diagonal will be calculated.
                      Default: None
    """
    matrix = []
    row_index = 0
    for item in list:
        row = []
        col_index = 0
        for item2 in list:
            if diagonal is not None and col_index == row_index:
                # if this is a cell on the diagonal
                row.append(diagonal)
            elif symmetric and col_index < row_index:
                # if the matrix is symmetric and we are "in the lower left triangle"
                row.append(matrix[col_index][row_index])
            else:
                # if this cell is not on the diagonal
                row.append(combinfunc(item, item2))
            col_index += 1
        matrix.append(row)
        row_index += 1
    return matrix
b7d8ebc916f57621a20c371139162cb0504470cd
3,656,229
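A usage sketch for the genmatrix snippet above, reusing the abs(x-y) example from its own docstring:

items = [1, 4, 9]
dist = genmatrix(items, lambda x, y: abs(x - y), symmetric=True, diagonal=0)
print(dist)   # [[0, 3, 8], [3, 0, 5], [8, 5, 0]]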
def get_all_raw_codes_by_area(area: EmisPermArea) -> list: """ Returns a list of code names for all permissions within a logical area, for all possible modes. """ return get_raw_codes_by_area( area, EmisPermMode.CREATE | EmisPermMode.UPDATE | EmisPermMode.VIEW )
d5887af92ba5fb7c373078dca84a8f9e74a089dc
3,656,230
import pandas as pd


def cartesian_pair(df1, df2, **kwargs):
    """
    Make a cross join (cartesian product) between two dataframes by using a constant temporary key.
    Also sets a MultiIndex which is the cartesian product of the indices of the input dataframes.
    See: https://github.com/pydata/pandas/issues/5401
    :param df1 dataframe 1
    :param df2 dataframe 2
    :param kwargs keyword arguments that will be passed to pd.merge()
    :return cross join of df1 and df2
    """
    df1['_tmpkey'] = 1
    df2['_tmpkey'] = 1

    res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)

    df1.drop('_tmpkey', axis=1, inplace=True)
    df2.drop('_tmpkey', axis=1, inplace=True)

    return res
e4ec1526f7a7906c5349bff20f5d4f83244c8878
3,656,231
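A small usage sketch for the cartesian_pair snippet above; the two frames are toy data:

import pandas as pd

left = pd.DataFrame({"color": ["red", "blue"]})
right = pd.DataFrame({"size": ["S", "M", "L"]})
print(cartesian_pair(left, right))   # 2 x 3 = 6 rows, every color/size combination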
def get_cases_by_landkreise_3daysbefore():
    """ Return aggregated cases per Landkreis from 3 days before """
    cases_aggregated = db.session.query(CasesPerLandkreis3DaysBefore).all()
    return jsonify(__as_feature_collection(cases_aggregated)), 200
0442f66ff78549617dd582bc0d1529c0041e7edb
3,656,232
import tensorflow as tf


def shape_list(x, out_type=tf.int32):
    """Deal with dynamic shape in tensorflow cleanly."""
    static = x.shape.as_list()
    dynamic = tf.shape(x, out_type=out_type)
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
80eea7ccdd4ebfa5a3318fb6070ec996df5b4972
3,656,233
import ast import os import re import subprocess # noqa import sys def coerce_file(fn): """Coerce content of given file to something useful for setup(), turn : .py into mock object with description and version fields, .md into rst text. Remove images with "nopypi" alt text along the way. :url: https://github.com/Kraymer/setupgoon """ text = open(os.path.join(os.path.dirname(__file__), fn)).read() if fn.endswith('.py'): # extract version, docstring etc out of python file mock = type('mock', (object,), {})() for attr in ('version', 'author', 'author_email', 'license'): regex = r'^__%s__\s*=\s*[\'"]([^\'"]*)[\'"]$' % attr m = re.search(regex, text, re.MULTILINE) setattr(mock, attr, m.group(1) if m else None) mock.docstring = ast.get_docstring(ast.parse(text)) return mock if fn.endswith('md') and 'upload' in sys.argv: # convert md to rest on pypi package upload text = '\n'.join([l for l in text.split('\n') if '![nopypi' not in l]) p = subprocess.Popen(['pandoc', '-t', 'rst'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) text, stderr = p.communicate(text) return text
4fdd44f476186c62d704bcf549534f154fb75c35
3,656,234
import json from typing import cast def load_config( config_file: str, print_warnings: bool = False ) -> InfestorConfiguration: """ Loads an infestor configuration from file and validates it. """ try: with open(config_file, "r") as ifp: raw_config = json.load(ifp) except: raise ConfigurationError(f"Could not read configuration: {config_file}") configuration, warnings, errors = parse_config(raw_config) if print_warnings: warning_items = "\n".join([f"- {warning}" for warning in warnings]) if warnings: print( f"Warnings when loading configuration file ({config_file}):\n{warning_items}" ) if errors: error_items = "\n".join([f"- {error}" for error in errors]) error_message = ( f"Errors loading configuration file ({config_file}):\n{error_items}" ) raise ConfigurationError(error_message) return cast(InfestorConfiguration, configuration)
b1d4a1385bb8855530f7043ddff5cc8d2f48be79
3,656,235
def what_do_you_mean_response(ctx: Context) -> REPLY_TYPE: """Generate response when we are asked about subject of the dialog Returns: template phrase based on previous skill or intent or topic confidence (can be 0.0, DONTKNOW_CONF, UNIVERSAL_RESPONSE_CONF, SUPER_CONF) human attributes (empty), bot attributes (empty), attributes (empty or MUST_CONTINUE) """ dialog = ctx.misc["agent"]["dialog"] attr = {} try: what_do_you_mean_intent = get_what_do_you_mean_intent(dialog["human_utterances"][-1]) if not (what_we_talk_about(dialog["human_utterances"][-1]) or what_do_you_mean_intent): reply, confidence = "", 0 elif len(dialog.get("human_utterances", [])) < 2: reply, confidence = DONTKNOW_PHRASE, DONTKNOW_CONF else: reply = get_bot_based_on_skill_reply(dialog.get("bot_utterances", [])) if reply is None: reply = get_bot_based_on_topic_or_intent_reply( dialog["human_utterances"][-2] if len(dialog["human_utterances"]) > 1 else [] ) if reply is None: reply, confidence = DONTKNOW_PHRASE, DONTKNOW_CONF else: if what_we_talk_about(dialog["human_utterances"][-1]): confidence = SUPER_CONF attr = {"can_continue": MUST_CONTINUE} else: # what_do_you_mean_intent but not regexp confidence = UNIVERSAL_RESPONSE_CONF except Exception as e: logger.exception("exception in grounding skill") logger.info(str(e)) sentry_sdk.capture_exception(e) reply = "" confidence = 0 return reply, confidence, {}, {}, attr
694b693d5ed1595781fdfe975f716cca4ff2dcd2
3,656,236
import numpy as np
from numpy import linalg


def procrustes(X, Y):
    """Finds the optimal affine transformation T to minimize ||X-TY||_F

    Parameters
    ----------
    X - reference, shape(X)=nxd where n is number of samples and d is dimension
    Y - to be aligned, shape(Y)=nxd

    Returns
    -------
    Z - the transformed Y

    TODO: return T - the transformation
    TODO: make scaling, reflection, centering optional
    TODO: allow different dimension
    """
    assert X.shape == Y.shape

    # Center
    muX = np.mean(X, axis=0)
    muY = np.mean(Y, axis=0)
    X0 = X - muX
    Y0 = Y - muY

    # Scale
    varX = np.var(X0, axis=0)
    varY = np.var(Y0, axis=0)

    # Rotate: the optimal rotation is V @ U.T from the SVD X0.T @ Y0 = U d Vh,
    # i.e. m.T @ l.T below (without the transpose on m the alignment fails even for Y == X)
    l, d, m = linalg.svd(X0.T.dot(Y0))
    Z = np.sqrt(np.sum(varX) / np.sum(varY)) * Y0.dot(m.T).dot(l.T) + muX
    return Z
eae8b396c190286a426e3eed1726c4c0f75f2c49
3,656,237
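A quick check of the procrustes snippet above: align a rotated, scaled and shifted copy of a random point cloud back onto the original (fixed seed, made-up data):

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(20, 2)                          # reference point cloud
theta = np.pi / 6
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
Y = 2.5 * X.dot(R) + np.array([1.0, -3.0])   # rotated, scaled, translated copy
Z = procrustes(X, Y)
print(np.abs(Z - X).max())                   # ~1e-15: Y is mapped back onto X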
import warnings


def get_market_tops(symbols=None, **kwargs):
    """
    MOVED to iexfinance.iexdata.get_tops
    """
    warnings.warn(WNG_MSG % ("get_market_tops", "iexdata.get_tops"))
    return TOPS(symbols, **kwargs).fetch()
4c94e35f447762a3d3ed9c076708450f1d1f200b
3,656,238
def get_query_results(query_execution_id): """Retrieve result set from Athena query""" athena_client = SESSION.client('athena') result_set = [] query = athena_client.get_query_execution(QueryExecutionId=query_execution_id) logger.debug(query) query_state = query['QueryExecution']['Status']['State'] logger.debug(query_state) if query_state in ['FAILED', 'CANCELLED']: raise QueryFailed("Query failed to execute") if query_state in ['QUEUED', 'RUNNING']: raise QueryStillRunning("Query still running") try: results = athena_client.get_query_results(QueryExecutionId=query_execution_id) logger.debug(results) for result in results["ResultSet"]["Rows"][1:]: result_set.append(result["Data"]) logger.debug(result_set) except ClientError as cle: logger.debug(cle) if not result_set: raise NoResults("Athena ResultSet {result_set}".format(result_set=result_set)) return result_set
c0288ceca458a6805c5f8a4e5da5e8e7a1f36b69
3,656,239
from pathlib import Path def reduce_output_path(path=None, pdb_name=None): """Defines location of Reduce output files relative to input files.""" if not path: if not pdb_name: raise NameError( "Cannot save an output for a temporary file without a PDB" "code specified") pdb_name = pdb_name.lower() output_path = Path(global_settings['structural_database']['path'], pdb_name[1:3].lower(), pdb_name[:4].lower(), 'reduce', pdb_name + '_reduced.mmol') else: input_path = Path(path) if len(input_path.parents) > 1: output_path = input_path.parents[1] / 'reduce' / \ (input_path.stem + '_reduced' + input_path.suffix) else: output_path = input_path.parent / \ (input_path.stem + '_reduced' + input_path.suffix) return output_path
0add37e0d5b71998112045af34aba4c0a17310f9
3,656,240
import os

import requests


def get_recommended_meals():
    """Fetch a random selection of meals from TheMealDB API.

    Returns:
        tuple: a list of meal names and a list of the matching thumbnail image URLs.
    """
    url = "https://themealdb.p.rapidapi.com/randomselection.php"
    headers = {
        "x-rapidapi-host": "themealdb.p.rapidapi.com",
        "x-rapidapi-key": os.getenv("RAPIDAPI"),
    }
    response = requests.request("GET", url, headers=headers).json()
    list_of_food = []
    list_of_image = []
    for food in response["meals"]:
        list_of_food.append(food["strMeal"])
    for image in response["meals"]:
        list_of_image.append(image["strMealThumb"])
    return list_of_food, list_of_image
6d7376e94f4bad9767d81537b8ddb4808d71ca01
3,656,241
def link_discord(request: HttpRequest): """Page to prompt user to link their discord account to their user account.""" skip_confirmation = request.GET.get("skip-confirm") if skip_confirmation and skip_confirmation == "true": return redirect("discord_register") return render(request, "link_discord.html")
05aba45b508e5a23cf62f5791a04e2525bbbbae0
3,656,242
import six import functools def rpc(f=None, **kwargs): """Marks a method as RPC.""" if f is not None: if isinstance(f, six.string_types): if 'name' in kwargs: raise ValueError('name option duplicated') kwargs['name'] = f else: return rpc(**kwargs)(f) return functools.partial(_rpc, **kwargs)
37ac21bd800bb202a78542636e9249ac3519c54e
3,656,243
def fig_fits_h(fig, y):
    """Determine whether the figure *fig* fits vertically on the screen
    when placed at position *y*.
    """
    _, h = _get_max_width()
    win_h = fig.window.winfo_height()
    result = (y + win_h) < h
    return result
4e3254d7a4fad2d8de816b36aacbfd069378c1fc
3,656,244
import os


def find_executable(name):
    """
    Find executable by ``name`` by inspecting PATH environment variable, return
    ``None`` if nothing found.
    """
    for dir in os.environ.get('PATH', '').split(os.pathsep):
        if not dir:
            continue
        fn = os.path.abspath(os.path.join(dir, name))
        if os.path.exists(fn):
            return os.path.abspath(fn)
dd4b10e4b043715d211bb9be2d2c78d0218f6a86
3,656,245
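Example use of the find_executable snippet above; "python3" is just an illustrative program name:

print(find_executable("python3"))   # e.g. /usr/bin/python3, or None if not on PATH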
def index(): """ Handler for the root url. Loads all movies and renders the first page. """ if path_set(): load_movies() return flask.render_template('main.html')
8f5c3295175cfd45b3604d523ac6b7de086702e9
3,656,246
def __hitScore__(srcMZ, targetMZ, srcRT, targetRT, parameters): # type: (float, float, float, float, LFParameters) -> float """Return the hit score of the target frame for the given source frame. Keyword Arguments: srcMZ -- source m/z targetMZ -- target m/z srcRT -- source retention time targetRT -- target retention time parameters -- LipidFinder's Amalgamator parameters instance """ mzDelta = mz_delta(srcMZ, parameters['mzFixedError'], parameters['mzPPMError']) mzDiff = abs(srcMZ - targetMZ) rtDelta = rt_delta(parameters['maxRTDiffAdjFrame']) rtDiff = abs(srcRT - targetRT) return sqrt(min(mzDiff / mzDelta, 1.0) ** 2 \ + min(rtDiff / rtDelta, 1.0) ** 2)
1b35cfbb2f1e028ccbb53d4fed16459d5a469ac1
3,656,247
def compute_propeller_nonuniform_freestream(prop, upstream_wake,conditions): """ Computes the inflow velocities in the frame of the rotating propeller Inputs: prop. SUAVE propeller tip_radius - propeller radius [m] rotation - propeller rotation direction [-] thrust_angle - thrust angle of prop [rad] number_radial_stations - number of propeller radial stations [-] number_azimuthal_stations - number of propeller azimuthal stations [-] upstream_wake. u_velocities - Streamwise velocities from upstream wake v_velocities - Spanwise velocities from upstream wake w_velocities - Downwash velocities from upstream wake VD - Vortex distribution from upstream wake conditions. frames Outputs: Va Axial velocities at propeller [m/s] Vt Tangential velocities at propeller [m/s] Vr Radial velocities at propeller [m/s] """ # unpack propeller parameters Vv = conditions.frames.inertial.velocity_vector R = prop.tip_radius rotation = prop.rotation c = prop.chord_distribution Na = prop.number_azimuthal_stations Nr = len(c) ua_wing = upstream_wake.u_velocities uv_wing = upstream_wake.v_velocities uw_wing = upstream_wake.w_velocities VD = upstream_wake.VD # Velocity in the Body frame T_body2inertial = conditions.frames.body.transform_to_inertial T_inertial2body = orientation_transpose(T_body2inertial) V_body = orientation_product(T_inertial2body,Vv) body2thrust = prop.body_to_prop_vel() T_body2thrust = orientation_transpose(np.ones_like(T_body2inertial[:])*body2thrust) V_thrust = orientation_product(T_body2thrust,V_body) # azimuth distribution psi = np.linspace(0,2*np.pi,Na+1)[:-1] psi_2d = np.tile(np.atleast_2d(psi),(Nr,1)) # 2 dimensiona radial distribution non dimensionalized chi = prop.radius_distribution /R # Reframe the wing induced velocities: y_center = prop.origin[0][1] # New points to interpolate data: (corresponding to r,phi locations on propeller disc) points = np.array([[VD.YC[i], VD.ZC[i]] for i in range(len(VD.YC))]) ycoords = np.reshape((R*chi*np.cos(psi_2d).T).T,(Nr*Na,)) zcoords = prop.origin[0][2] + np.reshape((R*chi*np.sin(psi_2d).T).T,(Nr*Na,)) xi = np.array([[y_center+ycoords[i],zcoords[i]] for i in range(len(ycoords))]) ua_w = sp.interpolate.griddata(points,ua_wing,xi,method='linear') uv_w = sp.interpolate.griddata(points,uv_wing,xi,method='linear') uw_w = sp.interpolate.griddata(points,uw_wing,xi,method='linear') ua_wing = np.reshape(ua_w,(Nr,Na)) uw_wing = np.reshape(uw_w,(Nr,Na)) uv_wing = np.reshape(uv_w,(Nr,Na)) if rotation == [1]: Vt_2d = V_thrust[:,0]*( -np.array(uw_wing)*np.cos(psi_2d) + np.array(uv_wing)*np.sin(psi_2d) ) # velocity tangential to the disk plane, positive toward the trailing edge eqn 6.34 pg 165 Vr_2d = V_thrust[:,0]*( -np.array(uw_wing)*np.sin(psi_2d) - np.array(uv_wing)*np.cos(psi_2d) ) # radial velocity , positive outward Va_2d = V_thrust[:,0]* np.array(ua_wing) # velocity perpendicular to the disk plane, positive downward eqn 6.36 pg 166 else: Vt_2d = V_thrust[:,0]*( np.array(uw_wing)*np.cos(psi_2d) - np.array(uv_wing)*np.sin(psi_2d) ) # velocity tangential to the disk plane, positive toward the trailing edge Vr_2d = V_thrust[:,0]*( -np.array(uw_wing)*np.sin(psi_2d) - np.array(uv_wing)*np.cos(psi_2d) ) # radial velocity , positive outward Va_2d = V_thrust[:,0]* np.array(ua_wing) # velocity perpendicular to the disk plane, positive downward # Append velocities to propeller prop.tangential_velocities_2d = Vt_2d prop.radial_velocities_2d = Vr_2d prop.axial_velocities_2d = Va_2d return prop
c7dc48066356e4d79e512812976a3e1a80b16749
3,656,248
def _expect_const(obj):
    """Return a Constant, or raise TypeError."""
    if obj in (0, "0"):
        return ZERO
    if obj in (1, "1"):
        return ONE
    if obj in ("x", "X"):
        return LOGICAL
    if obj == "?":
        return ILLOGICAL
    if isinstance(obj, Constant):
        return obj
    raise TypeError("Expected obj to be a Constant")
33aff48ff285b89f36d28a99148afeea97302a05
3,656,249
def _eval_input_receiver_fn(tf_transform_output, schema, label_key): """Build everything needed for the tf-model-analysis to run the model. Args: tf_transform_output: A TFTransformOutput. schema: the schema of the input data. label_key: the name of the transformed label Returns: EvalInputReceiver function, which contains: - Tensorflow graph which parses raw untransformed features, applies the tf-transform preprocessing operators. - Set of raw, untransformed features. - Label against which predictions will be compared. """ # Notice that the inputs are raw features, not transformed features here. raw_feature_spec = _get_raw_feature_spec(schema) raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn( raw_feature_spec, default_batch_size=None) serving_input_receiver = raw_input_fn() features = serving_input_receiver.features.copy() transformed_features = tf_transform_output.transform_raw_features(features) # NOTE: Model is driven by transformed features (since training works on the # materialized output of TFT, but slicing will happen on raw features. features.update(transformed_features) return tfma.export.EvalInputReceiver( features=features, receiver_tensors=serving_input_receiver.receiver_tensors, labels=transformed_features[label_key])
60f0a6cf9a87894f7e37495b8b4e9f7bd9e85e22
3,656,250
def get_lpar_names(adp): """Get a list of the LPAR names. :param adp: A pypowervm.adapter.Adapter instance for the PowerVM API. :return: A list of string names of the PowerVM Logical Partitions. """ return [x.name for x in pvm_lpar.LPAR.search(adp, is_mgmt_partition=False)]
4009ed95b23ba6a35cbe38e1354f109e29fb7fc7
3,656,251
def init_mlp(in_dim, out_dim, hidden_dim, num_layers, non_linearity=None, bias=True): """Initializes a MultilayerPerceptron. Args: in_dim: int out_dim: int hidden_dim: int num_layers: int non_linearity: differentiable function (tanh by default) bias (bool) Returns: a MultilayerPerceptron with the architecture x -> Linear(in_dim, hidden_dim) -> non_linearity -> ... Linear(hidden_dim, hidden_dim) -> non_linearity -> Linear(hidden_dim, out_dim) -> y where num_layers = 0 corresponds to x -> Linear(in_dim, out_dim) -> y """ if non_linearity is None: non_linearity = nn.Tanh() dims = [in_dim] + [hidden_dim for _ in range(num_layers)] + [out_dim] return MultilayerPerceptron(dims, non_linearity, bias)
a2d5b8535af5d363df459cf0d2138b29b2356f30
3,656,252
def c_grad_curry_regularized(data, target): """A closure constructor with regularization term for functional.""" def loss(layerweight): model = (lambda x: layerweight @ x.t()) reg = 1e-3 * (layerweight**2).sum()/2 return criterion(model(data).t(), target) + reg return loss
4571c8849bb1643b4d27bad7d2d0ed88ed23c2fa
3,656,253
from typing import Counter def convert_examples_to_features_yake(examples, label_list, max_seq_length, tokenizer, output_mode, cls_token_at_end=False, pad_on_left=False, cls_token='[CLS]', sep_token='[SEP]', noi_token='[NOI]', pad_token=0, sequence_a_segment_id=0, cls_token_segment_id=1, pad_token_segment_id=0, mask_padding_with_zero=True, args=None): """ Loads a data file into a list of `InputBatch`s `cls_token_at_end` define the location of the CLS token: - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) """ noi_token_id = tokenizer.convert_tokens_to_ids(noi_token) num_exm = len(examples) idf_dict = {} for (ex_index, example) in enumerate(examples): if ex_index % 100000 == 0: logger.info("Writing idf example %d of %d" % (ex_index, len(examples))) if args.model_name_or_path == 'bert-base-uncased' or args.model_name_or_path == 'bert-large-uncased': tokens_a = tokenizer.tokenize(example.text_a) elif args.model_name_or_path == 'bert-base-cased': tokens_a = example.text_a.split() tokens = ["[CLS]"] + tokens_a + ["[SEP]"] for t in tokens: idf_dict[t] = idf_dict.get(t, 0) + 1 for t in idf_dict.keys(): idf_dict[t] = idf_dict[t] / num_exm stop_words = set(stopwords.words('english') ) for t in stop_words: if t in idf_dict: idf_dict[t] *= 0.001 inp = " ".join(idf_dict.keys()) spacy_nlp = spacy.load('en_core_web_sm') inp_results = [(token.text, token.tag_) for token in spacy_nlp(inp)] allowed_tags = ['VB','NN','JJ','RB'] # UH for "yes", "no", etc. ignored_words = ['was','were','be','is','are','am',"'s","'re"] + ['do','did','done','does'] # verb of no info for word, tag in inp_results: if word in idf_dict.keys(): if len(tag)>=2 and tag[:2] in allowed_tags and (word not in ignored_words): if tag[:2] in ['VB','NN']: idf_dict[word] *= 4 else: idf_dict[word] *= 2 features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) tokens_a = tokenizer.tokenize(example.text_a) # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens = tokens_a + [sep_token] segment_ids = [sequence_a_segment_id] * len(tokens) if cls_token_at_end: tokens = tokens + [cls_token] segment_ids = segment_ids + [cls_token_segment_id] else: tokens = [cls_token] + tokens segment_ids = [cls_token_segment_id] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) tf = Counter(tokens) tokens_len = float(len(tokens)) # score: higher will be more likely to be keeped prob_list = np.array([idf_dict[t] * tf[t] / tokens_len for t in tokens]) # prob_list = np.array([idf_dict[t] for t in tokens]) # add yake key_word_len = 100 kw_extractor = yake.KeywordExtractor() keywords = kw_extractor.extract_keywords(" ".join(tokens)) key_word_len = len(keywords) for i, t in enumerate(tokens): if t in keywords: prob_list[i] *= 100 # Repeat words for i, t in enumerate(tokens): if t in tokens[:i]: prob_list[i] /= 10 prob_list = max(prob_list) - prob_list N = len(tokens) lm_label_ids = [noi_token_id] * max_seq_length # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. padding_length = max_seq_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids else: input_ids = input_ids + ([pad_token] * padding_length) input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("lm_label_ids: %s" % " ".join([str(x) for x in lm_label_ids])) # logger.info("label: %s (id = %d)" % (example.label, label_id)) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, lm_label_ids=lm_label_ids)) while N > 1: mask_pos = np.array(house_robber(prob_list)) unmask_pos = np.setdiff1d(np.arange(N), mask_pos) tokens = [t for i,t in enumerate(tokens) if i in unmask_pos] N = len(tokens) # mask_lm_label_ids = input_ids lm_label_ids = [pad_token] * max_seq_length j=0 i = 1 while i < len(prob_list): if i in mask_pos: lm_label_ids[j] = input_ids[i] i += 2 else: lm_label_ids[j] = noi_token_id i += 1 j += 1 # print(i,j) while j < len(unmask_pos): lm_label_ids[j] = noi_token_id # no input for last token of new sequence j+= 1 prob_list = prob_list[unmask_pos] # Zero-pad up to the sequence length. 
padding_length = max_seq_length - len(unmask_pos) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) segment_ids = [sequence_a_segment_id] * len(tokens) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids else: input_ids = input_ids + ([pad_token] * padding_length) input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("lm_label_ids: %s" % " ".join([str(x) for x in lm_label_ids])) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, lm_label_ids=lm_label_ids)) return features
4af89357339a2a63ff765f9da8660ca3895ba8b5
3,656,254
def sq_to_hr(bins, rho, S_k, k, axis=1): """ Takes the structure factor s(q) and computes the real space total correlation function h(r) """ # setup scales dr = np.pi / (k[0] * bins) radius = dr * np.arange(1, bins + 1, dtype=np.float) # Rearrange to find total correlation function from structure factor H_k = (S_k - 1.) / rho # # Transform back to real space iFT = idst(H_k * k[:bins], type=1, axis=axis) normalisation = bins * k[0] / (4 * np.pi**2 * radius) / (bins + 1) h_r = normalisation * iFT return h_r, radius
870e535ee3cdec3b138da1c205b000292eaee8ba
3,656,255
def scale17(data, factor):
    """Solution to exercise C-1.17.

    Had we implemented the scale function (page 25) as follows, does it work
    properly?

        def scale(data, factor):
            for val in data:
                val *= factor

    Explain why or why not.

    --------------------------------------------------------------------------
    Solution:
    --------------------------------------------------------------------------
    No, it doesn't work. Per the text, page 21:

    "It is worth noting that val is treated as a standard identifier. If the
    element of the original data happens to be mutable, the val identifier can
    be used to invoke its methods. But a reassignment of identifier val to a
    new value has no affect on the original data, nor on the next iteration of
    the loop."

    The code above fails because it tries to assign a new value to the "val"
    identifier. This merely breaks the alias without changing the list.
    """
    for val in data:
        val *= factor
    return data
84ac4012e0c839b78cb8617b6b9b7c2e8c54caa2
3,656,256
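A small demonstration of the point made in the scale17 docstring above: rebinding val leaves the original list untouched, while assigning through an index mutates it:

data = [1, 2, 3]
for val in data:
    val *= 10          # rebinds the loop variable only
print(data)            # [1, 2, 3] -- unchanged

for i in range(len(data)):
    data[i] *= 10      # assigns through the index, mutating the list
print(data)            # [10, 20, 30]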
import sqlite3


def initialize_database() -> sqlite3.Connection:
    """Create the bank_buds sqlite3 database with tables to hold users,
    records and challenge history. Returns the connection to the created
    database."""
    with sqlite3.connect("bank_buds.db") as conn:
        conn.execute("""CREATE TABLE IF NOT EXISTS user(
            customer_id TEXT NOT NULL,
            firstName TEXT NOT NULL,
            lastName TEXT NOT NULL,
            userName TEXT NOT NULL,
            userPass TEXT NOT NULL,
            balance INTEGER NOT NULL)""")
        conn.execute("""CREATE TABLE IF NOT EXISTS user_record(
            rec_id TEXT REFERENCES user NOT NULL,
            wins INTEGER NOT NULL,
            losses INTEGER NOT NULL)""")
        conn.execute("""CREATE TABLE IF NOT EXISTS challenge_history(
            challenge_id INTEGER NOT NULL,
            challenge_starter TEXT REFERENCES user NOT NULL,
            challenge_opponent TEXT REFERENCES user NOT NULL,
            challenge_winner TEXT REFERENCES user NOT NULL,
            challenge_loser TEXT REFERENCES user NOT NULL,
            is_active INTEGER NOT NULL,
            goal INTEGER NOT NULL)""")
    return conn
c3e32534de39a53686672c5c537a2c277fa2d06d
3,656,257
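A usage sketch for the initialize_database snippet above, inserting one made-up row to confirm the schema:

conn = initialize_database()
conn.execute(
    "INSERT INTO user VALUES (?, ?, ?, ?, ?, ?)",
    ("cust-001", "Ada", "Lovelace", "ada", "hunter2", 100),
)
print(conn.execute("SELECT userName, balance FROM user").fetchall())
conn.close()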
def stateless_multinomial(logits, num_samples, seed, output_dtype=dtypes.int64, name=None): """Draws deterministic pseudorandom samples from a multinomial distribution. This is a stateless version of `tf.random.categorical`: if run twice with the same seeds, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.stateless_categorical( tf.math.log([[0.5, 0.5]]), 5, seed=[7, 17]) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A shape [2] integer Tensor of seeds to the random number generator. output_dtype: integer type to use for the output. Defaults to int64. name: Optional name for the operation. Returns: The drawn samples of shape `[batch_size, num_samples]`. """ with ops.name_scope(name, "stateless_multinomial", [logits, seed]): return stateless_multinomial_categorical_impl(logits, num_samples, output_dtype, seed)
da750b8a33348b4f6ff0b47897b4421a8099f12e
3,656,258
def calc_kss(tag,vj): """ calculate Kolmogorov-Smirnov statistics as in CMap; Lamb J, Science, 2006 Parameters ---------- tag: tuple tuple of up-/down-gene lists; (up,down) sorted with the values in the descending order vj: dict dictionary corresponding to V(j) in CMap; Lamb J, Science, 2006 key, gene; val, rank """ a_up,b_up = _ab(tag[0],vj) a_dn,b_dn = _ab(tag[1],vj) if a_up > b_up: ks_up = a_up else: ks_up = -1*b_up if a_dn > b_dn: ks_dn = a_dn else: ks_dn = -1*b_dn if ks_up*ks_dn > 0: ks = 0 else: ks = ks_up - ks_dn n = len(vj) tu = len(tag[0]) td = len(tag[1]) kssmax = _kss_max(n,tu,td) return ks/kssmax
8dbb6233fb82a65a3ffad347f8444d3c16f8f4a9
3,656,259
def encode(elem): """This is the general function to call when you wish to encode an element and all its children and sub-children. Encode in this context means to convert from pymm elements to xml.etree.ElementTree elements. Typically this is called by pymm.write() """ converter = ConversionHandler() return converter.convert_element_hierarchy(elem, 'encode')
13578267efb0a6e21b61d86a6c60f5ecd9235b05
3,656,260
def register_blueprints(app: "Flask") -> "Flask":
    """Register Flask blueprints on the application instance.

    To register additional blueprints, add them as in the example.

    Example usage:

        from app.blueprints import blueprint

        app.register_blueprint(blueprint)

    Args:
        app (Flask): Flask Application instance

    Returns:
        Flask: Flask Application instance
    """
    app.register_blueprint(api_v1_bp)

    return app
13564aa6f95d995362a56e9be02a51e50475e446
3,656,261
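A minimal sketch of wiring this helper into an application factory, assuming `api_v1_bp` is importable at module level as the function expects:

from flask import Flask

def create_app() -> Flask:
    app = Flask(__name__)
    return register_blueprints(app)   # api_v1_bp is now registered on the app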
def build_history_class( cls: declarative.DeclarativeMeta, prop: T_PROPS, schema: str = None) -> nine.Type[TemporalProperty]: """build a sqlalchemy model for given prop""" class_name = "%s%s_%s" % (cls.__name__, 'History', prop.key) table = build_history_table(cls, prop, schema) base_classes = ( TemporalProperty, declarative.declarative_base(metadata=table.metadata), ) class_attrs = { '__table__': table, 'entity': orm.relationship( lambda: cls, backref=orm.backref('%s_history' % prop.key, lazy='dynamic'), ), } if isinstance(prop, orm.RelationshipProperty): class_attrs[prop.key] = orm.relationship(prop.argument, lazy='noload') model = type(class_name, base_classes, class_attrs) return model
696b379172c57145c215b64e3e3dc4648b42e535
3,656,262
def geo_distance(left, right): """ Compute distance between two geo spatial data Parameters ---------- left : geometry or geography right : geometry or geography Returns ------- distance : double scalar """ op = ops.GeoDistance(left, right) return op.to_expr()
8a7f1bc14eacf38cecda874d8b16d6c38d9d2049
3,656,263
def svn_dirent_local_style(*args): """svn_dirent_local_style(char dirent, apr_pool_t pool) -> char""" return _core.svn_dirent_local_style(*args)
afe170a321713c9d0f671303fa71d86bc93d8167
3,656,264
import tensorflow as tf


def make_generator_model():
    """
    The Generator
    The generator uses `tf.keras.layers.Conv2DTranspose` (upsampling) layers
    to produce an image from a seed (random noise). Start with a `Dense` layer
    that takes this seed as input, then upsample several times until you reach
    the desired image size of 28x28x1. Notice the `tf.keras.layers.LeakyReLU`
    activation for each layer, except the output layer which uses tanh.
    :return: a `tf.keras.Sequential` model mapping a length-100 noise vector to a 28x28x1 image
    """
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU())
    model.add(tf.keras.layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)  # Note: None is the batch size
    model.add(
        tf.keras.layers.Conv2DTranspose(
            128, (5, 5), strides=(1, 1), padding="same", use_bias=False
        )
    )
    assert model.output_shape == (None, 7, 7, 128)
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU())
    model.add(
        tf.keras.layers.Conv2DTranspose(
            64, (5, 5), strides=(2, 2), padding="same", use_bias=False
        )
    )
    assert model.output_shape == (None, 14, 14, 64)
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU())
    model.add(
        tf.keras.layers.Conv2DTranspose(
            1, (5, 5), strides=(2, 2), padding="same", use_bias=False, activation="tanh"
        )
    )
    assert model.output_shape == (None, 28, 28, 1)
    return model
e02fa5487805a85aaa5830ce90a6cc26cb2f27a4
3,656,265
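A short sketch of generating one image from random noise with the model above:

generator = make_generator_model()
noise = tf.random.normal([1, 100])            # one seed vector of length 100
generated_image = generator(noise, training=False)
print(generated_image.shape)                  # (1, 28, 28, 1), values in [-1, 1] from tanh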
import networkx as nx


def find_simple_cycles(dg):
    """ Find all simple cycles with more than two segments in a networkx directed graph.

    Args:
       dg (obj): a networkx directed graph

    Returns:
       simple_cycles (list of lists): a list of simple cycles; sorting by
       number of segments is left commented out below.
    """
    simple_cycles = [c for c in nx.simple_cycles(dg) if len(c) > 2]
    #simple_cycles.sort(key=lambda cycle: len(cycle), reverse=True)  # sort by number of segments
    return simple_cycles
4ed18ec26df80631c415086b99e470567e2641ae
3,656,266
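A small usage sketch on a toy directed graph containing one 3-cycle and one 2-cycle (only the former passes the length filter):

import networkx as nx

dg = nx.DiGraph([(1, 2), (2, 3), (3, 1), (4, 5), (5, 4)])
print(find_simple_cycles(dg))   # e.g. [[1, 2, 3]]; the 2-node cycle is filtered out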
from typing import Optional


def augment_edge(edge_index: np.ndarray, nodes: np.ndarray,
                 edge_weight: np.ndarray = None, *,
                 nbrs_to_link: Optional[np.ndarray] = None,
                 common_nbrs: Optional[np.ndarray] = None,
                 fill_weight: float = 1.0) -> tuple:
    """Augment a set of edges by connecting nodes to elements in ``nbrs_to_link``.

    Parameters
    ----------
    edge_index: shape [M, 2] or [2, M]
        edge indices of a Scipy sparse adjacency matrix.
    nodes: the nodes that will be linked to the graph.
        list or np.array: the nodes connected to `nbrs_to_link`
        int: newly added nodes connected to ``nbrs_to_link``,
            node ids [num_nodes, ..., num_nodes+nodes-1].
    edge_weight: shape [M,]
        edge weights of a Scipy sparse adjacency matrix.
    nbrs_to_link: a list of N elements, where N is the length of 'nodes'.
        the specified neighbor(s) for each added node.
        if `None`, it will be set to `[0, ..., N-1]`.
    common_nbrs: shape [None,].
        specified common neighbors for each added node.
    fill_weight: edge weight for the augmented edges.

    NOTE:
    -----
    Both ``nbrs_to_link`` and ``common_nbrs`` should NOT be specified together.

    See Also
    --------
    graphgallery.functional.augment_adj

    """
    if nbrs_to_link is not None and common_nbrs is not None:
        raise RuntimeError("Only one of them should be specified.")

    edge_index = asedge(edge_index, shape="col_wise")

    if edge_weight is None:
        edge_weight = np.ones(edge_index.shape[1], dtype=gg.floatx())

    num_nodes = edge_index.max() + 1

    if gg.is_intscalar(nodes):
        # int, add nodes to the graph
        nodes = np.arange(num_nodes, num_nodes + nodes, dtype=edge_index.dtype)
    else:
        # array-like, link nodes to the graph
        nodes = np.asarray(nodes, dtype=edge_index.dtype)

    if common_nbrs is None and nbrs_to_link is None:
        nbrs_to_link = np.arange(nodes.size, dtype=edge_index.dtype)

    # Only check the length when neighbors were given per node;
    # the original check raised a TypeError when 'common_nbrs' was used instead.
    if nbrs_to_link is not None and nodes.size != len(nbrs_to_link):
        raise ValueError("The length of 'nbrs_to_link' should equal the length of 'nodes'.")

    if nbrs_to_link is not None:
        edges_to_link = np.hstack([np.vstack([np.tile(node, get_length(nbr)), nbr])
                                   for node, nbr in zip(nodes, nbrs_to_link)])
    else:
        num_repeat = len(common_nbrs)
        edges_to_link = np.hstack([np.vstack([np.tile(node, num_repeat), common_nbrs])
                                   for node in nodes])
    edges_to_link = np.hstack([edges_to_link, edges_to_link[[1, 0]]])
    added_edge_weight = np.zeros(edges_to_link.shape[1], dtype=edge_weight.dtype) + fill_weight

    augmented_edge_index = np.hstack([edge_index, edges_to_link])
    augmented_edge_weight = np.hstack([edge_weight, added_edge_weight])

    return augmented_edge_index, augmented_edge_weight
9b128dfd4bcefa7912af857de6998183ef4da3c2
3,656,267
import sys


def _get_str(j_data, key, default=None, range_val=None):
    """
    Get data as str
    :param j_data: Result of loading JSON
    :param key: The value key to retrieve
    :param default: Default value if not set
    :param range_val: Range of values that can be set
    :return: The value, or None if it is missing, not a string, or out of range
    """
    value = j_data.get(key, default)
    if value is None:
        sys.stderr.write('"%s" is required\n' % key)
        return None
    if not isinstance(value, unicode):
        sys.stderr.write('"%s" must be a string\n' % key)
        return None
    if value not in range_val:
        sys.stderr.write('"%s" choose from %s\n' % (key, range_val))
        return None
    return value
bdde6d8c223d875c6189bad3e2a79aee297587f6
3,656,268
def status(proc):
    """Check a process's status by calling ``proc.is_alive()``."""
    # Note: is_alive is a method; comparing the bound method itself to True/False
    # (as the original code did) is always False.
    if proc.is_alive():
        return 'alive'
    else:
        return 'dead'
e257385f06979643e19fd9facc2118f4ae07c909
3,656,269
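A quick sketch with a multiprocessing.Process, which provides the is_alive() method the helper expects:

import multiprocessing
import time

if __name__ == "__main__":
    proc = multiprocessing.Process(target=time.sleep, args=(1,))
    proc.start()
    print(status(proc))   # 'alive' while sleeping
    proc.join()
    print(status(proc))   # 'dead' after it finishes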
import pandas as pd


def is_plumed_file(filename):
    """
    Check if given file is in PLUMED format.

    Parameters
    ----------
    filename : string
        PLUMED output file

    Returns
    -------
    bool
        whether it is a PLUMED output file
    """
    headers = pd.read_csv(filename, sep=" ", skipinitialspace=True, nrows=0)
    is_plumed = True if " ".join(headers.columns[:2]) == "#! FIELDS" else False
    return is_plumed
b6fca7c82efb2b07779406f06c15bf195bb4b5e9
3,656,270
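A minimal sketch that writes a file with a PLUMED-style header line and checks it; the file name and column names are illustrative:

with open("COLVAR", "w") as f:           # hypothetical file name
    f.write("#! FIELDS time d1 d2\n")
    f.write("0.0 1.23 4.56\n")

print(is_plumed_file("COLVAR"))          # True: first two header tokens are '#! FIELDS'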
def detect_llj_xarray(da, inverse=False): """ Identify local maxima in wind profiles. args: - da : xarray.DataArray with wind profile data - inverse : to flip the array if the data is stored upside down returns: : xarray.Dataset with vertical dimension removed containing: - falloff : 0 or largest difference between local max and subseq min - strength : 0 or wind speed at jet height - index : -1 or index along <axis> Note: vertical dimension should be labeled 'level' and axis=1 """ # Move <axis> to first dimension, to easily index and iterate over it. xv = np.rollaxis(da.values, 1) if inverse: xv = xv[::-1, ...] # Set initial arrays min_elem = xv[-1].copy() max_elem = np.zeros(min_elem.shape) max_diff = np.zeros(min_elem.shape) max_idx = np.ones(min_elem.shape, dtype=int) * (-1) # Start at end of array and search backwards for larger differences. for i, elem in reversed(list(enumerate(xv))): min_elem = np.minimum(elem, min_elem) new_max_identified = elem - min_elem > max_diff max_diff = np.where(new_max_identified, elem - min_elem, max_diff) max_elem = np.where(new_max_identified, elem, max_elem) max_idx = np.where(new_max_identified, i, max_idx) # Combine the results in a dataframe get_height = lambda i: np.where(i > 0, da.level.values[i], da.level.values[ -1]) dims = da.isel(level=0).drop('level').dims coords = da.isel(level=0).drop('level').coords lljs = xr.Dataset( { 'falloff': (dims, max_diff), 'strength': (dims, max_elem), 'level': (dims, get_height(max_idx)), }, coords=coords) print( 'Beware! Level is also filled if no jet is detected! ' 'Use ds.sel(level=lljs.level).where(lljs.falloff>0) to get rid of them' ) return lljs
3fbe444e5eed6ff1ec4f525145276e2bc974050c
3,656,271
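A sketch of calling the jet detector on synthetic wind-speed data with the required 'level' dimension on axis 1; the values are random and purely illustrative:

import numpy as np
import xarray as xr

wind = xr.DataArray(
    np.random.rand(3, 8) * 15.0,                      # 3 profiles, 8 vertical levels
    dims=("time", "level"),
    coords={"time": range(3), "level": np.arange(8)},
)
lljs = detect_llj_xarray(wind)
print(lljs.where(lljs.falloff > 0))                   # keep only profiles with a detected jet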
def gen_blinds(depth, width, height, spacing, angle, curve, movedown): """Generate genblinds command for genBSDF.""" nslats = int(round(height / spacing, 0)) slat_cmd = "!genblinds blindmaterial blinds " slat_cmd += "{} {} {} {} {} {}".format( depth, width, height, nslats, angle, curve) slat_cmd += "| xform -rz -90 -rx -90 -t " slat_cmd += f"{-width/2} {-height/2} {-movedown}\n" return slat_cmd
2e8a2751f2bb2be0c2ffdff8218961b0b1c0191b
3,656,272
def dev_Sonic(Mach, gamma=defg._gamma):
    """Computes the deviation angle for a downstream SONIC Mach number.

    Args:
        Mach: upstream Mach number
        gamma: ratio of specific heats (Default value = defg._gamma)

    Returns:
        the deviation angle for a downstream sonic Mach number

    """
    return deflection_Mach_sigma(Mach, sigma_Sonic(Mach, gamma=gamma), gamma=gamma)
a29f90ec1de25a3b86c2dcc1a1a6becedbfbf696
3,656,273
import json


def query(request):
    """Takes a query string and returns the ids of the matching articles.

    Args:
        request (GET):
            queryString: String, the query string
            categories: String/Int, the fields the articles belong to; multiple fields
                are separated by commas, e.g. "math.CO,quant-ph"
            timeStart: String yyyy-mm, earliest publication date, both included
            timeEnd: String yyyy-mm, latest publication date, both included
            offset: int, starting position (e.g. with offset=100 and the default of 20
                results per page, items 100-119 of the search results are returned,
                which makes pagination on the front end easier.)

    Returns:
        json
        A list sorted by relevance from high to low, with at most count items.
        An int giving the total number of results.
        Example:
        {[(arxiv_id, title, abstract, authors, update_date)*20],50}
        means there are 50 search results in total, and the 20 results returned by
        this query are the 20 shown above
    """
    ret_list = []
    ret_dict = {'ret_list': ret_list, 'num': 0}

    # Parse the request info
    query_string_raw = request.GET.get("queryString")
    categories_raw = request.GET.get("categories")
    time_start_raw = request.GET.get("timeStart")
    time_end_raw = request.GET.get("timeEnd")
    offset = int(request.GET.get("offset"))

    # Extract the time range
    time_start_year = time_start_raw[:4]
    time_start_month = time_start_raw[-2:]
    time_end_year = time_end_raw[:4]
    time_end_month = time_end_raw[-2:]

    # category info extraction
    categories = categories_raw.split(',')

    # preprocess and stemming
    query_string_list_1 = [stem(query) for query in preprocess(query_string_raw)]
    query_string_list_1.sort()
    query_string_list = tuple(query_string_list_1)

    # return arxiv_ids by search words
    arxiv_ids, wc = search_by_words(word_list=query_string_list)
    # return arxiv_docs by arxiv_ids
    arxiv_docs = get_arxiv_document_by_ids(arxiv_ids)

    # Filter by conditions
    for doc in arxiv_docs:
        flag = True
        # Filter by article category
        if judge_category(categories, doc.categories):
            flag = flag and True
        else:
            flag = False
        # Filter by publication year and month
        # TODO: what to do if doc.update_date is empty
        doc_year = doc.update_date.split('-')[0]
        doc_month = doc.update_date.split('-')[1]
        if (time_start_year == doc_year) and (time_start_month <= doc_month):
            flag = flag and True
        elif (time_end_year == doc_year) and (doc_month <= time_end_month):
            flag = flag and True
        elif time_start_year <= doc_year <= time_end_year:
            flag = flag and True
        else:
            flag = False
        if flag:
            ret_list.append((doc.arxiv_id, doc.title, doc.abstract, doc.authors, doc.update_date))
    ret_dict['num'] = len(ret_list)
    ret_dict['total'] = wc
    # Boundary conditions
    if len(ret_list) <= offset:
        ret_dict['ret_list'] = ret_list[:]
    elif offset < len(ret_list) <= (offset + 20):
        ret_dict['ret_list'] = ret_list[offset:]
    else:
        ret_dict['ret_list'] = ret_list[offset:offset + 20]
    return HttpResponse(json.dumps(ret_dict))
33de1daca6d0edb197d2c6caeb69b6596915ea2a
3,656,274
def get_prev_and_next_lexemes(request, current_lexeme): """Get the previous and next lexeme from the same language, ordered by meaning and then alphabetically by form""" lexemes = list(Lexeme.objects.filter( language=current_lexeme.language).order_by( "meaning", "phon_form", "romanised", "id")) ids = [l.id for l in lexemes] try: current_idx = ids.index(current_lexeme.id) except ValueError: current_idx = 0 prev_lexeme = lexemes[current_idx - 1] try: next_lexeme = lexemes[current_idx + 1] except IndexError: next_lexeme = lexemes[0] return (prev_lexeme, next_lexeme)
ca33f582049d055d35196595fd0c23a06fb0d791
3,656,275
def _sanitize_and_check(indexes): """ Verify the type of indexes and convert lists to Index. Cases: - [list, list, ...]: Return ([list, list, ...], 'list') - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...]) Lists are sorted and converted to Index. - [Index, Index, ...]: Return ([Index, Index, ...], TYPE) TYPE = 'special' if at least one special type, 'array' otherwise. Parameters ---------- indexes : list of Index or list objects Returns ------- sanitized_indexes : list of Index or list objects type : {'list', 'array', 'special'} """ kinds = list({type(index) for index in indexes}) if list in kinds: if len(kinds) > 1: indexes = [ Index(com.try_sort(x)) if not isinstance(x, Index) else x for x in indexes ] kinds.remove(list) else: return indexes, "list" if len(kinds) > 1 or Index not in kinds: return indexes, "special" else: return indexes, "array"
1c158934d49270fb17d99477082c49b7839c1fbb
3,656,276
def get_tetranuc_freqs(given_seq):
    """ Returns dictionary mapping each of the 4^4 = 256 possible tetranucleotides
    to its observed frequency in the given sequence.

    Args:
        given_seq: the nucleotide sequence to analyze

    Returns:
        dict mapping each tetranucleotide to its observed frequency in given_seq
    """
    return {tetranuc : get_observed_oligonuc_freq(given_seq, tetranuc)
            for tetranuc in TETRANUCLEOTIDES}
b2279248961b747526bedb14a7fcddf7015fde45
3,656,277
import math


def calculate3_onemetric(pred_ccm, pred_ad, truth_ccm, truth_ad, rnd=0.01,
                         method="orig_nc", verbose=False, full_matrix=True, in_mat=2):
    """Calculate the score for subchallenge 3 using the given metric

    :param pred_ccm: predicted co-clustering matrix
    :param pred_ad: predicted ancestor-descendant matrix
    :param truth_ccm: true co-clustering matrix
    :param truth_ad: true ancestor-descendant matrix
    :param rnd: small value passed through to the pseudoV-family metrics
        (pseudoV, simpleKL, sym_pseudoV)
    :param method: method to use when evaluating the submission
    :param verbose: boolean for whether to display information about the score calculations
    :param full_matrix: boolean for whether to use the full CCM/AD matrix when calculating the score
    :param in_mat: number representing which matrices to use in calculating the SC3 scoring metric
        Options:
            1 - use all input matrices i.e. CCM, ADM, ADM^T and CM
            2 - use all except co-clustering matrix (CCM)
            3 - use all except ancestor descendant matrix (ADM)
            4 - use all except ADM^T
            5 - use all except cousin matrix (CM)
    :return: score for the given submission to subchallenge 3 using the given metric
    """
    # Get the cousin matrices
    truth_cous = 1 - truth_ccm - truth_ad - truth_ad.T
    pred_cous = 1 - pred_ccm - pred_ad - pred_ad.T

    if verbose:
        if(np.amax(truth_cous) > 1 or np.amin(truth_cous) < 0):
            Warning("Cousin Truth is wrong. Maximum matrix entry is greater than 1 or minimum matrix entry is less than 0")
        if(np.amax(pred_cous) > 1 or np.amin(pred_cous) < 0):
            Warning("Cousin Predicted is wrong. Maximum matrix entry is greater than 1 or minimum matrix entry is less than 0")

    # Calculate the metric measure for each specified matrix
    func = method_funcs[method]
    results = []
    ccm_res, ad_res, ad_res_t, cous_res = [float('nan')] * 4
    if method in ("pseudoV", "simpleKL", "sym_pseudoV"):
        if in_mat != 2:
            ccm_res = func(pred_ccm, truth_ccm, rnd, full_matrix=full_matrix)
            results.append(ccm_res)
        if in_mat != 3:
            ad_res = func(pred_ad, truth_ad, rnd, full_matrix=full_matrix)
            results.append(ad_res)
        if in_mat != 4:
            ad_res_t = func(np.transpose(pred_ad), np.transpose(truth_ad), rnd, full_matrix=full_matrix)
            results.append(ad_res_t)
        if in_mat != 5:
            cous_res = func(pred_cous, truth_cous, rnd, full_matrix=full_matrix)
            results.append(cous_res)
    else:
        if in_mat != 2:
            ccm_res = func(pred_ccm, truth_ccm, full_matrix=full_matrix)
            results.append(ccm_res)
        if in_mat != 3:
            ad_res = func(pred_ad, truth_ad, full_matrix=full_matrix)
            results.append(ad_res)
        if in_mat != 4 or method in ('mcc', 'pearson', 'spearman'):
            ad_res_t = func(np.transpose(pred_ad), np.transpose(truth_ad), full_matrix=full_matrix)
            results.append(ad_res_t)
        if in_mat != 5:
            cous_res = func(pred_cous, truth_cous, full_matrix=full_matrix)
            results.append(cous_res)

    res = 0
    n = 0
    for r in results:
        # TODO: fix the NA's
        if not math.isnan(r):
            n += 1
            res += r
    if n > 0:
        res = res / float(n)

    if verbose:
        print("%s for Matrices\nCC: %s, AD: %s, AD Transpose: %s, Cousin: %s\nResult: %s" %
              (method, str(ccm_res), str(ad_res), str(ad_res_t), str(cous_res), str(res)))

    return res
6655447568d9a7c6a94790c8d52a159874b34b65
3,656,278
from typing import Optional


def _prv_keyinfo_from_wif(
    wif: String, network: Optional[str] = None, compressed: Optional[bool] = None
) -> PrvkeyInfo:
    """Return private key tuple(int, compressed, network) from a WIF.

    A WIF always encodes whether the key is compressed and which network
    it belongs to: here the 'network' and 'compressed' input parameters
    are passed only to allow consistency checks.
    """
    if isinstance(wif, str):
        wif = wif.strip()

    payload = b58decode(wif)

    net = network_from_key_value("wif", payload[:1])
    if net is None:
        raise BTClibValueError(f"invalid wif prefix: {payload[:1]!r}")
    if network is not None and net != network:
        raise BTClibValueError(f"not a {network} wif: {wif!r}")

    ec = NETWORKS[net].curve

    if len(payload) == ec.n_size + 2:  # compressed WIF
        compr = True
        if payload[-1] != 0x01:  # must have a trailing 0x01
            raise BTClibValueError("not a compressed WIF: missing trailing 0x01")
        prv_key = payload[1:-1]
    elif len(payload) == ec.n_size + 1:  # uncompressed WIF
        compr = False
        prv_key = payload[1:]
    else:
        raise BTClibValueError(f"wrong WIF size: {len(payload)}")

    if compressed is not None and compr != compressed:
        raise BTClibValueError("compression requirement mismatch")

    q = int.from_bytes(prv_key, byteorder="big")
    if not 0 < q < ec.n:
        raise BTClibValueError(f"private key {hex(q)} not in [1, n-1]")

    return q, net, compr
d9eef56ea212fafcd7aa5af718aa0b1280e9555d
3,656,279
def build_cmake_defines(args, dirs, env_vars, stage): """ Generate cmake defines :param args: The args variable generated by parse_parameters :param dirs: An instance of the Directories class with the paths to use :param env_vars: An instance of the EnvVars class with the compilers/linker to use :param stage: What stage we are at :return: A set of defines """ defines = {} # Get slim defines if we are not building a full toolchain if not args.full_toolchain: defines.update(slim_cmake_defines()) # Add compiler/linker defines, which change based on stage defines.update(cc_ld_cmake_defines(dirs, env_vars, stage)) # Add distribution specific defines defines.update(distro_cmake_defines()) # Add project and target defines, which change based on stage defines.update(project_cmake_defines(args, stage)) defines.update(target_cmake_defines(args, stage)) # Add other stage specific defines defines.update(stage_specific_cmake_defines(args, dirs, stage)) # Add the vendor string if necessary if args.clang_vendor: defines['CLANG_VENDOR'] = args.clang_vendor # Removes system dependency on terminfo to keep the dynamic library dependencies slim defines['LLVM_ENABLE_TERMINFO'] = 'OFF' return defines
227fb680e42786356adbace344cea98433a29aab
3,656,280
def server() -> None:
    """Start the gRPC server"""
    class PredictionServicer(predictions_pb2_grpc.PredictionServicer):
        def PredictIris(self, request, context):
            response = predictions_pb2.PredictResponse()
            response.iris_type = predictions.predict_iris(request.sepal_length,
                                                          request.sepal_width,
                                                          request.petal_length,
                                                          request.petal_width)
            return response

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    predictions_pb2_grpc.add_PredictionServicer_to_server(PredictionServicer(), server)

    print('Server started. Listening on port 50051.')
    server.add_insecure_port('[::]:50051')

    # Stop with CTRL+C
    server.start()
    server.wait_for_termination()
eaa71a36763ffee0d6b201e0900b4f1fcf397fe9
3,656,281
def wasLastResponseHTTPError(): """ Returns True if the last web request resulted in an erroneous HTTP code (like 500) """ threadData = getCurrentThreadData() return threadData.lastHTTPError and threadData.lastHTTPError[0] == threadData.lastRequestUID
cbe2a21752387cfb5b0cba41ecc3bdbacbcdcbb3
3,656,282
from sqlmodel import select  # the stdlib ``select`` module would not provide the query construct used below


async def update_rates(
    user_id: str = None,
    client_id: str = None,
    new_amount: str = None,
    session: Session = Depends(get_session),
):
    """Update a rate."""
    statement = (
        select(Rate)
        .where(Rate.user_id == user_id)
        .where(Rate.client_id == client_id)
        .where(Rate.is_active == True)
    )
    rate_to_update = session.exec(statement).one()

    rate_to_update.amount = new_amount

    session.add(rate_to_update)
    session.commit()
    session.refresh(rate_to_update)

    return True
c5ef142dda27f27217d71ed811ce8b6f049a0d98
3,656,283
import sys


def setup_page(choice, pagepanel, frame):
    """
    Creates a :class:`Page` inside a :class:`Notebook`.

    :Args:
        - choice (tuple)
            A tuple of (name, module path, module alias)
        - pagepanel
    """
    if isinstance(choice.module, str):
        try:
            __import__(choice.module)
        except ImportError as e:
            wx.MessageBox('%s' % e, 'Info', wx.OK | wx.ICON_ERROR)
            return False
        else:
            class_aliases, class_obj = _get_classes(sys.modules[choice.module],
                                                    class_order=choice.order)
    else:
        class_aliases, class_obj = _get_classes(choice.module, class_order=choice.order)
    nb = wx.Notebook(pagepanel)
    for class_alias, class_obj in class_aliases:
        nb.AddPage(Page(nb, class_obj, choice.alias, class_alias, frame, pagepanel), class_alias)
    panelsizer = wx.BoxSizer()
    panelsizer.Add(nb, 1, wx.EXPAND|wx.ALL)
    pagepanel.SetSizer(panelsizer)
    pagepanel.Layout()
    pagepanel.Fit()
    return True
6923a44f0a82543ba8ba3be2a4381c7fa56d05ed
3,656,284
def taillight_detect(image):
    """
    Takes in an image, re-sizes it for the model, predicts a detection mask
    with the CNN stored in 'full_CNN_model.h5', intersects that mask with the
    Canny edges of the histogram-equalized image, fills the enclosed regions,
    and returns the resulting binary mask.
    """
    model = load_model('full_CNN_model.h5')
    #image1=image
    #image1=np.array(image1)
    #objects=np.squeeze(image,2)
    #rows,cols=objects.shape
    rows, cols,_ = image.shape
    #cols, rows = image.size
    #cols=160
    #rows=80
    # Get image ready for feeding into model
    small_img = cv2.resize(image, (160, 80))
    #img_y_cr_cb = cv2.cvtColor(small_img, cv2.COLOR_BGR2YCrCb)
    #y, cr, cb = cv2.split(img_y_cr_cb)
    # Applying equalize Hist operation on Y channel.
    #y_eq = cv2.equalizeHist(y)
    #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    #y_eq = clahe.apply(y)
    #img_y_cr_cb_eq = cv2.merge((y_eq, cr, cb))
    #small_img = cv2.cvtColor(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR)
    #small_img = imresize(image, (80, 160, 3))
    small_img = np.array(small_img)
    small_img = small_img[None,:,:,:]
    # Make prediction with neural network (un-normalize value by multiplying by 255)
    prediction = model.predict(small_img)[0] * 255
    #new_image = imresize(prediction, (rows, cols, 3))
    mask = cv2.resize(prediction, (cols, rows))
    img_y_cr_cb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    y, cr, cb = cv2.split(img_y_cr_cb)
    # Applying equalize Hist operation on Y channel.
    #y_eq = cv2.equalizeHist(y)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    y_eq = clahe.apply(y)
    img_y_cr_cb_eq = cv2.merge((y_eq, cr, cb))
    image_he = cv2.cvtColor(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR)
    gray = cv2.cvtColor(image_he, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    auto = auto_canny(blurred)
    for i in range(rows):
        for j in range(cols):
            if auto[i,j] >0 and mask [i,j]>100:
                auto[i,j]=255
            else:
                auto[i,j]=0
    cv2.imshow('histogram equalisation', auto)
    cv2.waitKey(0)
    #h, w = edges.shape[:2]
    filled_from_bottom = np.zeros((rows, cols))
    for col in range(cols):
        for row in reversed(range(rows)):
            if auto[row][col] < 255:
                filled_from_bottom[row][col] = 255
            else:
                break
    filled_from_top = np.zeros((rows, cols))
    for col in range(cols):
        for row in range(rows):
            if auto[row][col] < 255:
                filled_from_top[row][col] = 255
            else:
                break
    filled_from_left = np.zeros((rows, cols))
    for row in range(rows):
        for col in range(cols):
            if auto[row][col] < 255:
                filled_from_left[row][col] = 255
            else:
                break
    filled_from_right = np.zeros((rows, cols))
    for row in range(rows):
        for col in reversed(range(cols)):
            if auto[row][col] < 255:
                filled_from_right[row][col] = 255
            else:
                break
    for i in range(rows):
        for j in range(cols):
            if filled_from_bottom[i,j] ==0 and filled_from_top[i,j]==0 and filled_from_right[i,j] ==0 and filled_from_left[i,j]==0:
                auto[i,j]=255
            else:
                auto[i,j]=0
    kernel = np.ones((5,5),np.uint8)
    opening = cv2.morphologyEx(auto, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(auto, cv2.MORPH_CLOSE, kernel)
    mask = np.expand_dims(mask, 2)
    mask = np.repeat(mask, 3, axis=2) # give the mask the same shape as your image
    colors = {"red": [0.0,1.0,1.0], "blue": [0.,0.,0.1]} # a dictionary for your colors, experiment with the values
    colored_mask = np.multiply(mask, colors["red"]) # broadcast multiplication (thanks to the multiplication by 0, you'll end up with values different from 0 only on the relevant channels and the right regions)
    image = image+colored_mask # element-wise sum (since img and mask have the same shape)
    #return image.astype(float) / 255
    #return new_image
    return auto
ee8849b59e94f8c395211af3537310ad7d2d8999
3,656,285
def generate_random_number(rng, length): """Return random number with predefined length.""" return crypto.random_generate(rng, length)
2f3f5f290948c3eb063b46353a01a5edc17599e4
3,656,286
import ftplib
import tarfile


def update_old_names():
    """Fetches the list of old tz names and returns a mapping"""
    url = urlparse(ZONEINFO_URL)
    log.info('Connecting to %s' % url.netloc)
    ftp = ftplib.FTP(url.netloc)
    ftp.login()
    gzfile = BytesIO()

    log.info('Fetching zoneinfo database')
    ftp.retrbinary('RETR ' + url.path, gzfile.write)
    gzfile.seek(0)

    log.info('Extracting backwards data')
    archive = tarfile.open(mode="r:gz", fileobj=gzfile)

    backward = {}
    for line in archive.extractfile('backward').readlines():
        if line[0] == '#':
            continue
        if len(line.strip()) == 0:
            continue
        parts = line.split()
        if parts[0] != b'Link':
            continue

        backward[parts[2].decode('ascii')] = parts[1].decode('ascii')

    return backward
a10f5985ea6fe6709816e757ee764138735eb077
3,656,287
from typing import Optional


def get_namespace(location: Optional[str] = None,
                  namespace_id: Optional[str] = None,
                  project: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceResult:
    """
    Gets a namespace.
    """
    __args__ = dict()
    __args__['location'] = location
    __args__['namespaceId'] = namespace_id
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:servicedirectory/v1:getNamespace', __args__, opts=opts, typ=GetNamespaceResult).value

    return AwaitableGetNamespaceResult(
        labels=__ret__.labels,
        name=__ret__.name)
70f59b6eb48e4952d19c5b96b9579f13c0e569fd
3,656,288
def build_headers(access_token, client_id): """ :param access_token: Access token granted when the user links their account :param client_id: This is the api key for your own app :return: Dict of headers """ return {'Content-Type': 'application/json', 'Authorization': f'Bearer {access_token}', 'trakt-api-version': '2', 'trakt-api-key': client_id}
5cd8ae3e06f67b7a4fdb1644ae82c62cb54479cb
3,656,289
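A short sketch of using the headers against the Trakt API; the token, key, and endpoint path are placeholders:

import requests

headers = build_headers("YOUR_ACCESS_TOKEN", "YOUR_CLIENT_ID")   # placeholder credentials
resp = requests.get("https://api.trakt.tv/users/settings", headers=headers)
print(resp.status_code)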
import argparse


def get_args():
    """Get command-line arguments"""

    parser = argparse.ArgumentParser(
        description='sum numbers',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Positional arg
    parser.add_argument('int',
                        metavar='INT',
                        type=int,
                        nargs='+',
                        help='Numbers to add')

    return parser.parse_args()
09a3cec8ac25861c1e04164082a77c2a77c1c703
3,656,290
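A minimal sketch of a script entry point built around get_args, assuming the parser above (run e.g. as `python sum.py 1 2 3`):

def main():
    args = get_args()
    nums = args.int                       # list of ints from the positional argument
    print(f"{' + '.join(map(str, nums))} = {sum(nums)}")

if __name__ == "__main__":
    main()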
def odd_subgraph_centrality(i, lam, u): """ Calculates the number of odd length closed walks that a node participates in :cite:`estrada2005spectral`. Used in the calculation of spectral scaling and generalized robustness index. :param i: node index :param lam: largest eigenvalue :param u: largest eigenvector :return: a float """ sc = 0 for j in range(len(lam)): sc += np.power(u[i, j], 2) * np.sinh(lam[j]) return sc
eeb141ac56d9b70294bbf62a24739c73f3e4755e
3,656,291
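A sketch of how lam and u would typically be obtained before calling the function, interpreting them as the eigenvalues and eigenvectors of the graph's adjacency matrix (an assumption based on the cited subgraph-centrality formula):

import numpy as np

A = np.array([[0, 1, 1],
              [1, 0, 1],
              [1, 1, 0]], dtype=float)     # adjacency matrix of a triangle graph
lam, u = np.linalg.eigh(A)                 # eigenvalues and orthonormal eigenvectors
print(odd_subgraph_centrality(0, lam, u))  # odd-walk contribution for node 0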
def PolyAreasToModel(polyareas, bevel_amount, bevel_pitch, quadrangulate): """Convert a PolyAreas into a Model object. Assumes polyareas are in xy plane. Args: polyareas: geom.PolyAreas bevel_amount: float - if > 0, amount of bevel bevel_pitch: float - if > 0, angle in radians of bevel quadrangulate: bool - should n-gons be quadrangulated? Returns: geom.Model """ m = geom.Model() if not polyareas: return m polyareas.points.AddZCoord(0.0) m.points = polyareas.points for pa in polyareas.polyareas: PolyAreaToModel(m, pa, bevel_amount, bevel_pitch, quadrangulate) return m
c2243bca3d3cfa1168bde94dfe078d6cf3e86ad4
3,656,292
def preprocessing(train_data, test_data): """ * The method at first eliminates constant features from both train and test data. * Then, it splits training data into features and labels. * Finally, the method performs pca on training and testing data sets to reduce the dimension and overcome curse of dimensionality problem. Parameters ---------- train_data: training data set in data frame format test_data: testing data set in data frame format """ # constant feature elimination train_data = train_data.drop(['X3', 'X31', 'X32', 'X127', 'X128', 'X590'], axis=1) train_data = np.asarray(train_data) test_data = test_data.drop(['X3', 'X31', 'X32', 'X127', 'X128', 'X590'], axis=1) test_data = np.asarray(test_data) # training data is split into features and labels train_x = train_data[:, :train_data.shape[1] - 1] train_y = train_data[:, train_data.shape[1] - 1] train_y.shape = (np.size(train_y), 1) # principal component analysis pca = PCA(n_components=60) train_x_pca = pca.fit_transform(train_x) test_pca = pca.transform(test_data) return train_x_pca, train_y, test_pca
9f6c01d64d393c9c9fe51925f11842b63098471f
3,656,293
def generate_videos_from_events(response, video_model): """Creates the video containers/representations for this given response. We should only really invoke this as part of a migration as of right now (2/8/2019), but it's quite possible we'll have the need for dynamic upsertion later. """ seen_ids = set() video_objects = [] Video = video_model # Using a constructive approach here, but with an ancillary seen_ids list b/c Django models without # primary keys are unhashable for some dumb reason (even though they have unique fields...) for frame_id, event_data in response.exp_data.items(): if event_data.get("videoList", None) and event_data.get("videoId", None): # We've officially captured video here! events = event_data.get("eventTimings", []) for event in events: video_id = event["videoId"] pipe_name = event["pipeId"] # what we call "ID" they call "name" stream_time = event["streamTime"] if ( video_id not in seen_ids and pipe_name and stream_time and stream_time > 0 ): # Try looking for the regular ID first. file_obj = S3_RESOURCE.Object( settings.BUCKET_NAME, f"{video_id}.mp4" ) try: s3_response = file_obj.get() except ClientError: try: # If that doesn't work, use the pipe name. file_obj = S3_RESOURCE.Object( settings.BUCKET_NAME, f"{pipe_name}.mp4" ) s3_response = file_obj.get() except ClientError: logger.warning( f"could not find {video_id} or {pipe_name} in S3!" ) continue # Read first 32 bytes from streaming body (file header) to get actual filetype. streaming_body = s3_response["Body"] file_header_buffer: bytes = streaming_body.read(32) file_info = fleep.get(file_header_buffer) streaming_body.close() video_objects.append( Video( pipe_name=pipe_name, created_at=date_parser.parse(event["timestamp"]), date_modified=s3_response["LastModified"], # Can't get the *actual* pipe id property, it's in the webhook payload... frame_id=frame_id, full_name=f"{video_id}.{file_info.extension[0]}", study=response.study, response=response, is_consent_footage=frame_id in VALID_CONSENT_FRAMES, ) ) seen_ids.add(video_id) return Video.objects.bulk_create(video_objects)
f5669fbc6466bf3cf1671d04a48bad4c5975f216
3,656,294
def datetime_at_midnight(dt: DateTime, tz: TimeZone) -> DateTime: """ Returns a DateTime for the requested DateTime at midnight in the specified time zone. Args: dt (DateTime): the DateTime for which the new value at midnight should be calculated tz (TimeZone): the TimeZone to use when interpreting the DateTime Returns: DateTime Raises: DHError """ try: return _JDateTimeUtils.dateAtMidnight(dt, tz.value) except Exception as e: raise DHError(e) from e
141988c9943911d165f5f3f8ade5536ae65881f2
3,656,295
import os


def count_dcm(logger, top):
    """
    This function recursively walks through a given directory (`top`) using
    depth-first search (bottom up) and counts the number of .dcm files present.

    Parameters
    ----------
    logger : {logging.Logger}
        Logger for error reporting (currently unused; errors are printed).
    top : {str}
        The directory to count.

    Returns
    -------
    count : {int}
        The number of .dcm files in `top`.
    """
    try:
        count = 0
        # Count number of .dcm files under `top`.
        for _, _, files in os.walk(top):
            for f in files:
                if f.endswith(".dcm"):
                    count += 1
    except Exception as e:
        # logger.error(f'Unable to count_dcm!\n{e}')
        print((f"Unable to count_dcm!\n{e}"))

    return count
5da9be183c7a00b3ef3fbc1f7e79d7df0f5502b8
3,656,296
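A small usage sketch; the directory path is a placeholder and the logger argument is unused by the current implementation:

import logging

logger = logging.getLogger(__name__)
n = count_dcm(logger, "/path/to/dicom/folder")   # placeholder path
print(f"Found {n} .dcm files")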
def convert2sametype(dict_, formula): """Utility function for internal use. Convert string/dict/DataFrame to dict Parameters ---------- dict_ : dict formula : string/dict/DataFrame Returns ------- type(formula) """ return convert2type(dict_, type(formula))
d7393668e5bd22e8482bf4b99c6a789d322b80fb
3,656,297
from typing import List
import gzip


def from_sdf(sdf_content: str = None, file_path: str = None, ignore_hydrogens=False) -> List[Graph]:
    """
    Parse graphs from SDF data.

    Reads chemical files and parses them into instances of `Graph`. As this
    function is not meant to be called in a loop, only helper functions
    related to chemical-file parsing are declared as inner functions.

    Type Aliases :
        Atom = str
        Bond = List[str]
    """
    if file_path:
        if (file_path.endswith('.gz')):
            fp = gzip.open(file_path, 'rt', encoding='utf-8')
            sdf_content = fp.read()
        else:
            with open(file_path, 'r') as content_file:
                sdf_content = content_file.read()
    return [
        Mol_to_Graph(mol[0], mol[1])
        for mol in [
            parse_Mol(mol_file, ignore_hydrogens)
            for mol_file in [
                part[0]
                for part in [
                    compound.split('M END')
                    for compound in sdf_content.split("$$$$")
                    if (compound.strip(' \t\n\r') != '')
                ]
                if is_Mol(part)
            ]
        ]
    ]
5676b98a699cfed00767f4d51dec27a7dc1a94ad
3,656,298
from typing import Callable


def dispatcher_connect(
    opp: OpenPeerPower, signal: str, target: Callable[..., None]
) -> Callable[[], None]:
    """Connect a callable function to a signal."""
    async_unsub = run_callback_threadsafe(
        opp.loop, async_dispatcher_connect, opp, signal, target
    ).result()

    def remove_dispatcher() -> None:
        """Remove signal listener."""
        run_callback_threadsafe(opp.loop, async_unsub).result()

    return remove_dispatcher
3dca8d6cf1f581a409c2b64e6c9a88e543fe0615
3,656,299