content (string, lengths 22 to 815k)
id (int64, 0 to 4.91M)
def identityMatrix(nrow, ncol):
    """
    Create an identity matrix of the given dimensions.

    Works for square matrices only.
    Returns a Matrix object.
    """
    if nrow == ncol:
        t = []
        for i in range(nrow):
            t.append([])
            for j in range(ncol):
                if i == j:
                    t[i].append(1)
                else:
                    t[i].append(0)
        s = Matrix(nrow=nrow, ncol=ncol, data=t)
        s.matrix.symmetry = True
        s.matrix.trace = nrow
        s.matrix.invertibility = True
        setattr(s.matrix, "identityMatrix", True)
        return s
    else:
        raise incompaitableTypeException
5,353,400
def extract_packages(matched, package_source): """ Extract packages installed in the "Successfully installed" line e.g. Successfully installed Abjad Jinja2-2.10 MarkupSafe-1.0 PyPDF2-1.26.0 Pygments-2.2.0 alabaster-0.7.10 \ babel-2.5.1 bleach-2.1.2 decorator-4.1.2 docutils-0.14 entrypoints-0.2.3 html5lib-1.0.1 imagesize-0.7.1 \ ipykernel-4.7.0 ipython-6.2.1 ipython-genutils-0.2.0 ipywidgets-7.1.0 jedi-0.11.1 jsonschema-2.6.0 \ jupyter-1.0.0 jupyter-client-5.2.1 jupyter-console-5.2.0 jupyter-core-4.4.0 mistune-0.8.3 nbconvert-5.3.1 \ nbformat-4.4.0 notebook-5.2.2 pandocfilters-1.4.2 parso-0.1.1 pexpect-4.3.1 pickleshare-0.7.4 \ prompt-toolkit-1.0.15 .... """ result = [] package_list = matched.groups()[0].split(' ') for package in package_list: package, version = split_package_and_version(package) if not version or not package: continue else: source = package_source.get(package) if source is None: continue # The following line is recommended when developing # assert source == PACKAGE_SOURCE_INDEX result.append('{}=={}'.format(package, version)) return result
5,353,401
def nested_set(d: JSONDict, ks: Tuple[Any, ...], v: Any) -> None: """Set value in nested dictionary. Parameters ---------- d : JSONDict ks : Tuple[str] v : Any Notes ----- Adapted from: https://stackoverflow.com/a/13688108/2528668 """ for k in ks[:-1]: d = d.setdefault(k, {}) d[ks[-1]] = v
5,353,402
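A minimal usage sketch for the nested_set helper above; the dictionary and key tuple are illustrative:

d = {}
nested_set(d, ("a", "b", "c"), 42)
# d == {"a": {"b": {"c": 42}}}
nested_set(d, ("a", "x"), 1)
# d == {"a": {"b": {"c": 42}, "x": 1}}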
def chunks(l, n): """ Yield successive n-sized chunks from l. :param l: list to split :param n: number of elements wanted in each list split """ n = 1 if n <= 0 else n for i in range(0, len(l), n): yield l[i:i+n]
5,353,403
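A short usage sketch of the chunks generator above; the input list and chunk sizes are illustrative:

data = [1, 2, 3, 4, 5]
print(list(chunks(data, 2)))  # [[1, 2], [3, 4], [5]]
print(list(chunks(data, 0)))  # n <= 0 is clamped to 1: [[1], [2], [3], [4], [5]]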
def launch_job(cfg, init_method, func, daemon=False): """ Run 'func' on one or more GPUs, specified in cfg Args: cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py init_method (str): initialization method to launch the job with multiple devices. func (function): job to run on GPU(s) daemon (bool): The spawned processes’ daemon flag. If set to True, daemonic processes will be created """ if cfg.NUM_GPUS > 1: torch.multiprocessing.spawn( run_job, nprocs=cfg.NUM_GPUS, args=( cfg.NUM_GPUS, func, init_method, cfg.SHARD_ID, cfg.NUM_SHARDS, cfg.DIST_BACKEND, cfg, ), daemon=daemon, ) else: func(cfg=cfg)
5,353,404
def _handle_get_static(handler, path_match, data): """ Returns a static file for the frontend. """ req_file = util.sanitize_path(path_match.group('file')) # Strip md5 hash out fingerprinted = _FINGERPRINT.match(req_file) if fingerprinted: req_file = "{}.{}".format(*fingerprinted.groups()) path = os.path.join(os.path.dirname(__file__), 'www_static', req_file) handler.write_file(path)
5,353,405
def create_relationship( relationship_type: str, created_by: Identity, source: _DomainObject, target: _DomainObject, confidence: int, object_markings: List[MarkingDefinition], start_time: Optional[datetime] = None, stop_time: Optional[datetime] = None, ) -> Relationship: """Create a relationship.""" return Relationship( created_by_ref=created_by, relationship_type=relationship_type, source_ref=source, target_ref=target, start_time=start_time, stop_time=stop_time, confidence=confidence, object_marking_refs=object_markings, allow_custom=True, )
5,353,406
def _find_partition(G, starting_cell): """ Find a partition of the vertices of G into cells of complete graphs Parameters ---------- G : NetworkX Graph starting_cell : tuple of vertices in G which form a cell Returns ------- List of tuples of vertices of G Raises ------ NetworkXError If a cell is not a complete subgraph then G is not a line graph """ G_partition = G.copy() P = [starting_cell] # partition set G_partition.remove_edges_from(list(combinations(starting_cell, 2))) # keep list of partitioned nodes which might have an edge in G_partition partitioned_vertices = list(starting_cell) while G_partition.number_of_edges() > 0: # there are still edges left and so more cells to be made u = partitioned_vertices[-1] deg_u = len(G_partition[u]) if deg_u == 0: # if u has no edges left in G_partition then we have found # all of its cells so we do not need to keep looking partitioned_vertices.pop() else: # if u still has edges then we need to find its other cell # this other cell must be a complete subgraph or else G is # not a line graph new_cell = [u] + list(G_partition[u]) for u in new_cell: for v in new_cell: if (u != v) and (v not in G_partition[u]): msg = ( "G is not a line graph" "(partition cell not a complete subgraph)" ) raise nx.NetworkXError(msg) P.append(tuple(new_cell)) G_partition.remove_edges_from(list(combinations(new_cell, 2))) partitioned_vertices += new_cell return P
5,353,407
def projection_from_Rt(rmat, tvec): """ Compute the projection matrix from Rotation and translation. """ assert len(rmat.shape) >= 2 and rmat.shape[-2:] == (3, 3), rmat.shape assert len(tvec.shape) >= 2 and tvec.shape[-2:] == (3, 1), tvec.shape return torch.cat([rmat, tvec], dim=-1)
5,353,408
def verify_df(df, constraints_path, epsilon=None, type_checking=None, **kwargs): """ Verify that (i.e. check whether) the Pandas DataFrame provided satisfies the constraints in the JSON .tdda file provided. Mandatory Inputs: df A Pandas DataFrame, to be checked. constraints_path The path to a JSON .tdda file (possibly generated by the discover_constraints function, below) containing constraints to be checked. Optional Inputs: epsilon When checking minimum and maximum values for numeric fields, this provides a tolerance. The tolerance is a proportion of the constraint value by which the constraint can be exceeded without causing a constraint violation to be issued. With the default value of epsilon (EPSILON_DEFAULT = 0.01, i.e. 1%), values can be up to 1% larger than a max constraint without generating constraint failure, and minimum values can be up to 1% smaller that the minimum constraint value without generating a constraint failure. (These are modified, as appropraite, for negative values.) NOTE: A consequence of the fact that these are proportionate is that min/max values of zero do not have any tolerance, i.e. the wrong sign always generates a failure. type_checking: 'strict' or 'sloppy'. Because Pandas silently, routinely and automatically "promotes" integer and boolean columns to reals and objects respectively if they contain nulls, strict type checking can be problematical in Pandas. For this reason, type_checking defaults to 'sloppy', meaning that type changes that could plausibly be attriuted to Pandas type promotion will not generate constraint values. If this is set to strict, a Pandas "float" column c will only be allowed to satisfy a an "int" type constraint if c.dropnulls().astype(int) == c.dropnulls(). Similarly, Object fields will satisfy a 'bool' constraint only if c.dropnulls().astype(bool) == c.dropnulls(). report: 'all' or 'fields' This controls the behaviour of the __str__ method on the resulting PandasVerification object (but not its content). The default is 'all', which means that all fields are shown, together with the verification status of each constraint for that field. If report is set to 'fields', only fields for which at least one constraint failed are shown. NOTE: The method also accepts 'constraints', which will be used to indicate that only failing constraints for failing fields should be shown. This behaviour is not yet implented. Returns: PandasVerification object. This object has attributes: passed # Number of passing constriants failures # Number of failing constraints It also has a .to_frame() method for converting the results of the verification to a Pandas DataFrame, and a __str__ method to print both the detailed and summary results of the verification. Example usage (see tdda/constraints/examples/simple_verification.py for slightly fuller example). import pandas as pd from tdda.constraints.pdconstraints import verify_df df = pd.DataFrame({'a': [0, 1, 2, 10, pd.np.NaN], 'b': ['one', 'one', 'two', 'three', pd.np.NaN]}) v = verify_df(df, 'example_constraints.tdda') print('Passes:', v.passes) print('Failures: %d\n' % v.failures) print(str(v)) print(v.to_frame()) """ pdv = PandasConstraintVerifier(df, epsilon=epsilon, type_checking=type_checking) constraints = DatasetConstraints(loadpath=constraints_path) return verify(constraints, pdv.verifiers(), VerificationClass=PandasVerification, **kwargs)
5,353,409
def usage(): """print the usage of the script Args: NA Returns: None Raises: NA """ print ''' [Usage] acrnalyze.py [options] [value] ... [options] -h: print this message -i, --ifile=[string]: input file -o, --ofile=[string]: output file --vm_exit: to generate vm_exit report '''
5,353,410
def _getallstages_pm(pmstr): """pmstr: a pipelinemodel name in quote return a df: of all leaf stages of transformer. to print return in a cell , use print_return(df) """ pm=eval(pmstr) output=[] for i,s in enumerate(pm.stages): if str(type(s))=="<class 'pyspark.ml.pipeline.PipelineModel'>": pmstr2=f"{pmstr}.stages[{i}]" output.append(_getallstages_pm(pmstr2)) else: tn=re.sub(r"^.*\.(\w+)\b.*",r"\1",str(type(s))) pmstr2=f"{pmstr}.stages[{i}]" temp=pd.DataFrame([[pmstr2,tn,None,None,None]],columns=['stage','transformer_name','inputcol','outputcol','other_parameters']) if temp.transformer_name.iloc[0]=="SQLTransformer": st='"statement=\n'+re.sub('\t',' ',eval(pmstr2).getStatement())+'"' if len(st)>=32767: idx1=st.rfind('\n',0,10000) idx2=st.find('\n',len(st)-10000,len(st)) newst=st[:idx1]+"\n\n..........\n"+st[idx2:] st=newst.replace("statement=","TRUNCATED !!!\n\nstatement=") temp["other_parameters"]=st elif temp.transformer_name.iloc[0]=="CountVectorizerModel": temp["other_parameters"]="vocabulary="+str(eval(pmstr2).vocabulary) elif temp.transformer_name.iloc[0]=="RFormulaModel": temp["outputcol"]=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='featuresCol'] form="formular: "+[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='formula'][0] temp["other_parameters"]=f"number of inputCol in formula: {form.count('+')+1}" elif temp.transformer_name.iloc[0]=='LogisticRegressionModel': label=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='labelCol'][0] elasticNetParam=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='elasticNetParam'][0] regParam=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='regParam'][0] temp["other_parameters"]=f"labelCol : {label}, elasticNetParam : {elasticNetParam}, regParam : {regParam}" else: ip=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='inputCol'] if len(ip)>0: temp["inputcol"]=ip op=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='outputCol'] if len(op)>0: temp["outputcol"]=op output.append(temp) outputdf=pd.concat(output) outputdf=outputdf.reset_index(drop=True) return outputdf
5,353,411
def notify_completed_spec(spec_id): """ Spec processing has finished, now we need to record the result """ spec = Spec.objects.get(pk=spec_id) spec.finish() logger.info('✓ %s finished' % spec)
5,353,412
def merge_config(log_conf: LogConf, conf: Config) -> Config: """ Create combined config object from system wide logger setting and current logger config """ #pylint: disable=too-many-locals name = conf.name # take individual conf value, ignore common log_conf value filename = _ITEM_OR_DEFAULT(log_conf.filename, conf.filename) logger_level = _ITEM_OR_DEFAULT(log_conf.logger_level, conf.logger_level) log_fmt = _ITEM_OR_DEFAULT(log_conf.log_fmt, conf.log_fmt) log_datefmt = _ITEM_OR_DEFAULT(log_conf.log_datefmt, conf.log_datefmt) log_level = _ITEM_OR_DEFAULT(log_conf.log_level, conf.log_level) log_enabled = _ITEM_OR_DEFAULT(log_conf.log_enabled, conf.log_enabled) cout_fmt = _ITEM_OR_DEFAULT(log_conf.cout_fmt, conf.cout_fmt) cout_datefmt = _ITEM_OR_DEFAULT(log_conf.cout_datefmt, conf.cout_datefmt) cout_level = _ITEM_OR_DEFAULT(log_conf.cout_level, conf.cout_level) cout_enabled = _ITEM_OR_DEFAULT(log_conf.cout_enabled, conf.cout_enabled) propagate = _ITEM_OR_DEFAULT(log_conf.propagate, conf.propagate) log_dir = _ITEM_OR_DEFAULT(log_conf.log_dir, conf.log_dir) sub_dir = _ITEM_OR_DEFAULT(log_conf.sub_dir, conf.sub_dir) override_allowed = conf.override_allowed # take individual conf value, ignore common log_conf value n_conf: Config = Config(name, filename, logger_level, log_fmt, log_datefmt, log_level, log_enabled, cout_fmt, cout_datefmt, cout_level, cout_enabled, propagate, log_dir, sub_dir, override_allowed) return n_conf
5,353,413
def function(argument1, argument2): """ Description of function. Parameters: argument1 (type): description of argument1 argument2 (type): description of argument2 Output: output1 (type): description of output1 """ return
5,353,414
def wfa_measurement_system_repositories(): """Imports all direct dependencies for wfa_measurement_system.""" wfa_repo_archive( name = "wfa_common_jvm", repo = "common-jvm", sha256 = "b162457bcc6724f77454042e2acaf6a806dd53a2ac7423a79b48ab1cc521a3df", version = "0.25.1", ) wfa_repo_archive( name = "wfa_common_cpp", repo = "common-cpp", sha256 = "e8efc0c9f5950aff13a59f21f40ccc31c26fe40c800743f824f92df3a05588b2", version = "0.5.0", ) wfa_repo_archive( name = "wfa_measurement_proto", repo = "cross-media-measurement-api", sha256 = "da217b6100423ff81ec670e60a272a96897f8f3ed26bb14fe5e54b7a4d454222", version = "0.15.1", ) wfa_repo_archive( name = "wfa_rules_swig", commit = "653d1bdcec85a9373df69920f35961150cf4b1b6", repo = "rules_swig", sha256 = "34c15134d7293fc38df6ed254b55ee912c7479c396178b7f6499b7e5351aeeec", ) wfa_repo_archive( name = "any_sketch", version = "0.1.0", repo = "any-sketch", sha256 = "904a3dd0b48bccbbd0b84830c85e47aa56fe1257211514bfad99a88595ce6325", ) wfa_repo_archive( name = "any_sketch_java", version = "0.2.0", repo = "any-sketch-java", sha256 = "55f20dfe98c71b4fdd5068f44ea5df5d88bac51c1d24061438a8aa5ed4b853b7", ) wfa_repo_archive( name = "wfa_consent_signaling_client", repo = "consent-signaling-client", version = "0.12.0", sha256 = "b907c0dd4f6efbe4f6db3f34efeca0f1763d3cc674c37cbfebac1ee2a80c86f5", )
5,353,415
def write_csv_file(file_name, encoding, header_list, file_data): """Write a comma separated values (CSV) file to disk using the given Unicode encoding. Arguments: file_name Name of the file to write. encoding The name of a Unicode encoding to be used when reading the file. If set to None then the standard 'ascii' encoding will be used. header_list A list containing the attribute (field) names to be written at the beginning of the file. If no header line is to be written then this argument needs to be set to None. file_data A list containing the records to be written into the CSV file. Each record must be a list of values, and these values will be concatenated with commas and written into the file. It is assumed the values given do not contain comas. """ check_is_string('file_name', file_name) check_is_list('file_data', file_data) if (encoding == None): # Use default ASCII encoding encoding = 'ascii' check_is_string('encoding', encoding) check_unicode_encoding_exists(encoding) try: out_file = codecs.open(file_name, 'w', encoding=encoding) except: raise IOError( 'Cannot write CSV file "%s"' % (file_name)) if (header_list != None): check_is_list('header_list',header_list) header_str = ','.join(header_list) #print 'Header line:', header_str out_file.write(header_str+os.linesep) i = 0 for rec_list in file_data: check_is_list('rec_list %d' % (i), rec_list) line_str = ','.join(rec_list) out_file.write(line_str+os.linesep) i += 1 out_file.close()
5,353,416
def midpVector(x): """ return midpoint value (=average) in each direction """ if type(x) != list: raise Exception("must be list") dim = len(x) #nx = x[0].shape for i in range(1,dim): if type(x[i]) != np.ndarray: raise Exception("must be numpy array") #if x[i].shape != nx: # raise Exception("dimensions mismatch") avgx = [] for ifield in range(dim): avgx.append([]) avgx[ifield] = midpScalar(x[ifield]) return avgx
5,353,417
def buy_ticket(email, name, quantity):
    """
    Attempt to buy a ticket in the database
    :param email: the email of the ticket buyer
    :param name: the name of the ticket being bought
    :param quantity: the quantity of tickets being bought
    :return: an error message if there is any, or None if the purchase succeeds
    """
    user = User.query.filter_by(email=email).first()
    tik = Ticket.query.filter_by(name=name).first()
    user.balance = user.balance - (tik.price * quantity * 1.40)
    if tik.quantity == quantity:
        db.session.delete(tik)
    else:
        tik.quantity = tik.quantity - quantity
    db.session.commit()
    return None
5,353,418
def update_bounds( sig: float, eps: float, target_eps: float, bounds: np.ndarray, bound_eps: np.ndarray, consecutive_updates: int ) -> Tuple[np.ndarray, np.ndarray, int]: # noqa:E121,E125 """ Updates bounds for sigma around a target privacy epsilon. Updates the lower bound for sigma if `eps` is larger than `target_eps` and the upper bound otherwise. :param sig: A new value for sigma. :param eps: The corresponding value for epsilon. :param target_eps: The target value for epsilon. :param bounds: Tuple containing a lower and upper bound for the sigma corresponding to target_eps. :param bound_eps: The corresponding epsilon values for the bounds. :param consecutive_updates: Tuple counting the number of consecutive updates for lower and upper bound. :return: updated bounds, bound_eps and consecutive_updates """ assert(eps <= bound_eps[0]) assert(eps >= bound_eps[1]) if eps > target_eps: bounds[0] = sig bound_eps[0] = eps consecutive_updates = [consecutive_updates[0] + 1, 0] else: bounds[1] = sig bound_eps[1] = eps consecutive_updates = [0, consecutive_updates[1] + 1] return bounds, bound_eps, consecutive_updates
5,353,419
def get_timeseries(rics, fields='*', start_date=None, end_date=None, interval='daily', count=None, calendar=None, corax=None, normalize=False, raw_output=False, debug=False): """ Returns historical data on one or several RICs Parameters ---------- rics: string or list of strings Single RIC or List of RICs to retrieve historical data for start_date: string or datetime.datetime or datetime.timedelta Starting date and time of the historical range. string format is: '%Y-%m-%dT%H:%M:%S'. e.g. '2016-01-20T15:04:05'. datetime.timedelta is negative number of day relative to datetime.now(). Default: datetime.now() + timedelta(-100) You can use the helper function get_date_from_today, please see the usage in the examples section end_date: string or datetime.datetime or datetime.timedelta End date and time of the historical range. string format could be - '%Y-%m-%d' (e.g. '2017-01-20') - '%Y-%m-%dT%H:%M:%S' (e.g. '2017-01-20T15:04:05') datetime.timedelta is negative number of day relative to datetime.now(). Default: datetime.now() You can use the helper function get_date_from_today, please see the usage in the examples section interval: string Data interval. Possible values: 'tick', 'minute', 'hour', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly' (Default 'daily') Default: 'daily' fields: string or list of strings Use this parameter to filter the returned fields set. Available fields: 'TIMESTAMP', 'VALUE', 'VOLUME', 'HIGH', 'LOW', 'OPEN', 'CLOSE', 'COUNT' By default all fields are returned. count: int, optional Max number of data points retrieved. calendar: string, optional Possible values: 'native', 'tradingdays', 'calendardays'. corax: string, optional Possible values: 'adjusted', 'unadjusted' normalize: boolean, optional If set to True, the function will return a normalized data frame with the following columns 'Date','Security','Field'. If the value of this parameter is False the returned data frame shape will depend on the number of rics and the number of fields in the response. There are three different shapes: - One ric and many fields - Many rics and one field - Many rics and many fields Default: False Remark: This parameter has a less precedence than the parameter rawOutput i.e. if rawOutput is set to True, the returned data will be the raw data and this parameter will be ignored raw_output: boolean, optional Set this parameter to True to get the data in json format if set to False, the function will return a data frame which shape is defined by the parameter normalize Default: False debug: boolean, optional When set to True, the json request and response are printed. Default: False Raises ------ Exception If request fails or if server returns an error. ValueError If a parameter type or value is wrong. 
Examples -------- >>> import eikon as ek >>> ek.set_app_key('set your app key here') >>> req = ek.get_timeseries(["MSFT.O"], start_date = "2017-02-01T15:04:05", >>> end_date = "2017-02-05T15:04:05", interval="tick") >>> req = ek.get_timeseries(["MSFT.O"], start_date = "2017-03-01", >>> end_date = "2017-03-10", interval="daily") >>> req = ek.get_timeseries(["MSFT.O"], start_date = get_date_from_today(150), >>> end_date = get_date_from_today(100), interval="daily") """ logger = eikon.Profile.get_profile().logger # set the ric(s) in the payload check_for_string_or_list_of_strings(rics, 'rics') if is_string_type(rics): rics = [rics.strip()] if type(rics) == list: rics = [ric.upper() if ric.islower() else ric for ric in rics] # set the field(s) in the payload if fields is None or fields == '*': fields = ['*'] else: check_for_string_or_list_of_strings(fields, 'fields') if is_string_type(fields): fields = fields.strip().upper().split() else: fields = [x.upper() for x in fields] if '*' in fields: fields = ['*'] elif 'TIMESTAMP' not in fields: fields.append('TIMESTAMP') # check the interval in the payload check_for_string(interval, 'interval') if start_date is None: start_date = get_date_from_today(100) if end_date is None: end_date = get_date_from_today(0) start_date = to_datetime(start_date).isoformat() end_date = to_datetime(end_date).isoformat() if start_date > end_date: with 'end date ({})should be after than start date ({})'.format(end_date, start_date) as error_msg: logger.error(error_msg) raise ValueError(error_msg) payload = {'rics': rics, 'fields': fields, 'interval': interval, 'startdate': start_date, 'enddate': end_date} # Add optional parameters # set the count in the payload if count is not None: check_for_int(count, 'count') payload.update({'count': count}) # set the calendar in the payload if calendar is not None: if is_string_type(calendar): payload.update({'calendar': calendar}) else: with 'calendar must has string type' as error_msg: logger.error(error_msg) raise ValueError(error_msg) # set the corax in the payload if corax is not None: if is_string_type(corax): payload.update({'corax': corax}) else: with 'corax must be a string' as error_msg: logger.error(error_msg) raise ValueError(error_msg) ts_result = eikon.json_requests.send_json_request(TimeSeries_UDF_endpoint, payload, debug=debug) # Catch all errors to raise a warning ts_timeserie_data = ts_result['timeseriesData'] ts_status_errors = [ts_data for ts_data in ts_timeserie_data if get_json_value(ts_data, 'statusCode') == 'Error'] ts_error_messages = '' for ts_status in ts_status_errors: ts_error_message = get_json_value(ts_status, 'errorMessage') ts_error_message = ts_error_message[ts_error_message.find("Description"):] ts_instrument = get_json_value(ts_status, 'ric') ts_error_message = ts_error_message.replace('Description', ts_instrument) ts_error_messages += ts_error_message ts_error_messages += ' | ' warning_message = 'Error with {}'.format(ts_error_message) logger.warning(warning_message) # if all timeseries are in error, then raise EikonError with all error messages if len(ts_status_errors) == len(ts_timeserie_data): logger.error(ts_error_messages) raise EikonError(-1, message=ts_error_messages) if raw_output: return ts_result data_frame = None if normalize: data_frame = NormalizedDataFrame_Formatter(ts_result).get_data_frame() else: data_frame = NiceDataFrame_Formatter(ts_result).get_data_frame() if len(data_frame) > 0: data_frame = data_frame.fillna(pd.np.nan) return data_frame
5,353,420
def usd(value): """Format value as USD.""" return f"${value:,.2f}"
5,353,421
def random_mindist(N, mindist, width, height): """Create random 2D points with a minimal distance to each other. Args: N(int): number of points to generate mindist(float): Minimal distance between each point width(float): Specifies [0, width) for the x-coordinate height(float): Specifies [0, height) for the y-coordinate Returns: np.array(shape=[N, 2]): matrix of coordinates """ Pts = np.empty(shape=[0, 2]) n = 0 while n < N: X = random_uniform(1, width, height) # rejection sampling if closest_euclidean(X, Pts) > mindist: Pts = np.vstack((Pts, X)) n = n+1 return Pts
5,353,422
def test_ada12_adb(style_checker): """Style check test against ada12.adb """ style_checker.set_year(2006) p = style_checker.run_style_checker('trunk/toto', 'ada12.adb') style_checker.assertEqual(p.status, 0, p.image) style_checker.assertRunOutputEmpty(p)
5,353,423
def deactivate_spotting(ID): """ Function to deactivate a spotting document in Elasticsearch Params: ID::str id of the document to deactivate Returns: bool If the changes have been applied or not """ if not ID: return False try: global INDEX body = get_document(INDEX, ID)['_source'] body['is_active'] = False create_or_update_document(INDEX, ID, body) return True except NotFoundError: print("No documents found at deactivate_spotting") return False except Exception as e: print("Exception @ deactivate_spotting\n{}".format(e)) return None
5,353,424
def Fraction_Based(nc_outname, Startdate, Enddate): """ This functions calculated monthly total supply based ETblue and fractions that are given in the get dictionary script Parameters ---------- nc_outname : str Path to the NetCDF containing the data Startdate : str Contains the start date of the model 'yyyy-mm-dd' Enddate : str Contains the end date of the model 'yyyy-mm-dd' Returns ------- DataCube_Tot_Sup : Array Array containing the total supply [time,lat,lon] DataCube_Non_Consumed : Array Array containing the amount of non consumed water [time,lat,lon] """ # import water accounting plus modules import watools.General.raster_conversions as RC import watools.Functions.Start as Start # import general modules import numpy as np # Open Arrays DataCube_LU = RC.Open_nc_array(nc_outname, "Landuse") DataCube_ETblue = RC.Open_nc_array(nc_outname, "Blue_Evapotranspiration", Startdate, Enddate) # Get Classes LU_Classes = Start.Get_Dictionaries.get_sheet5_classes() LU_Classes_Keys = list(LU_Classes.keys()) # Get fractions consumed_fractions_dict = Start.Get_Dictionaries.consumed_fractions() # Create Array for consumed fractions DataCube_Consumed_Fractions = np.ones(DataCube_LU.shape) * np.nan # Create array with consumed_fractions for Classes_LULC in LU_Classes_Keys: Values_LULC = LU_Classes[Classes_LULC] for Value_LULC in Values_LULC: DataCube_Consumed_Fractions[DataCube_LU == Value_LULC] = consumed_fractions_dict[Classes_LULC] # Calculated Total Supply DataCube_Tot_Sup = DataCube_ETblue[:,:,:] / DataCube_Consumed_Fractions[None,:,:] # Calculated Non consumed DataCube_Non_Consumed = DataCube_Tot_Sup - DataCube_ETblue return(DataCube_Tot_Sup, DataCube_Non_Consumed)
5,353,425
def _qrd_solve(r, pmut, ddiag, bqt, sdiag): """Solve an equation given a QR factored matrix and a diagonal. Parameters: r - **input-output** n-by-n array. The full lower triangle contains the full lower triangle of R. On output, the strict upper triangle contains the transpose of the strict lower triangle of S. pmut - n-vector describing the permutation matrix P. ddiag - n-vector containing the diagonal of the matrix D in the base problem (see below). bqt - n-vector containing the first n elements of B Q^T. sdiag - output n-vector. It is filled with the diagonal of S. Should be preallocated by the caller -- can result in somewhat greater efficiency if the vector is reused from one call to the next. Returns: x - n-vector solving the equation. Compute the n-vector x such that A^T x = B, D x = 0 where A is an n-by-m matrix, B is an m-vector, and D is an n-by-n diagonal matrix. We are given information about pivoted QR factorization of A with permutation, such that A P = R Q where P is a permutation matrix, Q has orthogonal rows, and R is lower triangular with nonincreasing diagonal elements. Q is m-by-m, R is n-by-m, and P is n-by-n. If x = P z, then we need to solve R z = B Q^T, P^T D P z = 0 (why the P^T? and do these need to be updated for the transposition?) If the system is rank-deficient, these equations are solved as well as possible in a least-squares sense. For the purposes of the LM algorithm we also compute the lower triangular n-by-n matrix S such that P^T (A^T A + D D) P = S^T S. (transpose?) """ n, m = r.shape # "Copy r and bqt to preserve input and initialize s. In # particular, save the diagonal elements of r in x." Recall that # on input only the full lower triangle of R is meaningful, so we # can mirror that into the upper triangle without issues. for i in range(n): r[i,i:] = r[i:,i] x = r.diagonal().copy() zwork = bqt.copy() # "Eliminate the diagonal matrix d using a Givens rotation." for i in range(n): # "Prepare the row of D to be eliminated, locating the # diagonal element using P from the QR factorization." li = pmut[i] if ddiag[li] == 0: sdiag[i] = r[i,i] r[i,i] = x[i] continue sdiag[i:] = 0 sdiag[i] = ddiag[li] # "The transformations to eliminate the row of d modify only a # single element of (q transpose)*b beyond the first n, which # is initially zero." bqtpi = 0. for j in range(i, n): # "Determine a Givens rotation which eliminates the # appropriate element in the current row of D." if sdiag[j] == 0: continue if abs(r[j,j]) < abs(sdiag[j]): cot = r[j,j] / sdiag[j] sin = 0.5 / np.sqrt(0.25 + 0.25 * cot**2) cos = sin * cot else: tan = sdiag[j] / r[j,j] cos = 0.5 / np.sqrt(0.25 + 0.25 * tan**2) sin = cos * tan # "Compute the modified diagonal element of r and the # modified element of ((q transpose)*b,0)." r[j,j] = cos * r[j,j] + sin * sdiag[j] temp = cos * zwork[j] + sin * bqtpi bqtpi = -sin * zwork[j] + cos * bqtpi zwork[j] = temp # "Accumulate the transformation in the row of s." if j + 1 < n: temp = cos * r[j,j+1:] + sin * sdiag[j+1:] sdiag[j+1:] = -sin * r[j,j+1:] + cos * sdiag[j+1:] r[j,j+1:] = temp # Save the diagonal of S and restore the diagonal of R # from its saved location in x. sdiag[i] = r[i,i] r[i,i] = x[i] # "Solve the triangular system for z. If the system is singular # then obtain a least squares solution." 
nsing = n for i in range(n): if sdiag[i] == 0.: nsing = i zwork[i:] = 0 break if nsing > 0: zwork[nsing-1] /= sdiag[nsing-1] # Degenerate case # "Reverse loop" for i in range(nsing - 2, -1, -1): s = np.dot(zwork[i+1:nsing], r[i,i+1:nsing]) zwork[i] = (zwork[i] - s) / sdiag[i] # "Permute the components of z back to components of x." x[pmut] = zwork return x
5,353,426
def find_version(): """Extract the version number from the CLI source file.""" with open('pyweek.py') as f: for l in f: mo = re.match('__version__ = *(.*)?\s*', l) if mo: return eval(mo.group(1)) else: raise Exception("No version information found.")
5,353,427
def remove_unused_levels(self): """ create a new MultiIndex from the current that removing unused levels, meaning that they are not expressed in the labels The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. .. versionadded:: 0.20.0 Returns ------- MultiIndex Examples -------- >>> i = pd.MultiIndex.from_product([range(2), list('ab')]) MultiIndex(levels=[[0, 1], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i[2:] MultiIndex(levels=[[0, 1], ['a', 'b']], codes=[[1, 1], [0, 1]]) The 0 from the first level is not represented and can be removed >>> i[2:].remove_unused_levels() MultiIndex(levels=[[1], ['a', 'b']], codes=[[0, 0], [0, 1]]) """ import pandas.core.algorithms as algos new_levels = [] new_labels = [] changed = False for lev, lab in zip(self.levels, self.labels): # Since few levels are typically unused, bincount() is more # efficient than unique() - however it only accepts positive values # (and drops order): uniques = np.where(np.bincount(lab + 1) > 0)[0] - 1 has_na = int(len(uniques) and (uniques[0] == -1)) if len(uniques) != len(lev) + has_na: # We have unused levels changed = True # Recalculate uniques, now preserving order. # Can easily be cythonized by exploiting the already existing # "uniques" and stop parsing "lab" when all items are found: uniques = algos.unique(lab) if has_na: na_idx = np.where(uniques == -1)[0] # Just ensure that -1 is in first position: uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]] # labels get mapped from uniques to 0:len(uniques) # -1 (if present) is mapped to last position label_mapping = np.zeros(len(lev) + has_na) # ... and reassigned value -1: label_mapping[uniques] = np.arange(len(uniques)) - has_na lab = label_mapping[lab] # new levels are simple lev = lev.take(uniques[has_na:]) new_levels.append(lev) new_labels.append(lab) result = self._shallow_copy() if changed: result._reset_identity() result._set_levels(new_levels, validate=False) result._set_labels(new_labels, validate=False) return result
5,353,428
def add_bands(show_id, bands): """Insert the bands to the given Show.""" for band in bands: show_band = ShowsOtherBands( ShowID=show_id, BandName=band["bandName"], BandWebsite=band["bandWebsite"], ) db.session.add(show_band) db.session.commit()
5,353,429
def _transform_playlist(playlist): """Transform result into a format that more closely matches our unified API. """ transformed_playlist = dict([ ('source_type', 'spotify'), ('source_id', playlist['id']), ('name', playlist['name']), ('tracks', playlist['tracks']['total']), ]) return transformed_playlist
5,353,430
def decode_map_states(beliefs: Dict[Hashable, Any]) -> Any: """Function to decode MAP states given the calculated beliefs. Args: beliefs: An array or a PyTree container containing beliefs for different variables. Returns: An array or a PyTree container containing the MAP states for different variables. """ return jax.tree_util.tree_map(lambda x: jnp.argmax(x, axis=-1), beliefs)
5,353,431
def main(project_dir, deploy_dir):
    """
    :param project_dir: root directory of the project
    :param deploy_dir: directory where the deployed binaries are placed
    :return:
    """
    if not os.path.exists(deploy_dir):
        os.makedirs(deploy_dir)
    builddir = os.path.join(project_dir, "build")
    if not os.path.exists(builddir):
        os.makedirs(builddir)
    # Format: source file path, destination path
    files = [
        ("skynet/skynet", "skynet"),
        ("skynet/3rd/lua/lua", "skynet"),
        ("skynet/3rd/lua/luac", "skynet"),
        ("run.sh", ""),
    ]
    for path, destdir in files:
        src_path = os.path.join(project_dir, path)
        name = os.path.basename(path)
        dest_path = os.path.join(deploy_dir, os.path.join(destdir, name))
        copyfile(src_path, dest_path)
    # Format: source path, destination path, file extension, subdirectories to skip; recursion is supported
    dirs = [
        ("skynet/lualib", "skynet/lualib", ".lua", None),
        ("skynet/service", "skynet/service", ".lua", None),
        ("skynet/luaclib", "skynet/luaclib", ".so", [".dSYM"]),
        ("skynet/cservice", "skynet/cservice", ".so", [".dSYM"]),
        ("luaclib", "luaclib", ".so", "*"),  # asterisk: do not descend into subdirectories
        ("service", "service", ".lua", None),
        ("lualib", "lualib", ".lua", None),
        ("etc", "etc", '*', None),  # an asterisk extension ignores the file suffix and copies every file
    ]
    for srcdir, destdir, extension, exclude in dirs:
        copydir(project_dir, srcdir, deploy_dir, destdir, extension, exclude)
5,353,432
def plot_stretch_Q(datas, stretches=[0.01,0.1,0.5,1], Qs=[1,10,5,100]): """ Plots different normalizations of your image using the stretch, Q parameters. Parameters ---------- stretches : array List of stretch params you want to permutate through to find optimal image normalization. Default is [0.01, 0.1, 0.5, 1] Qs : array List of Q params you want to permutate through to find optimal image normalization. Default is [1, 10, 5, 100] Code adapted from: https://pmelchior.github.io/scarlet/tutorials/display.html Returns ------- fig : Figure object """ fig, ax = plt.subplots(len(stretches), len(Qs), figsize=(9,9)) for i, stretch in enumerate(stretches): for j, Q in enumerate(Qs): asinh = scarlet.display.AsinhMapping(minimum=0, stretch=stretch, Q=Q) # Scale the RGB channels for the image img_rgb = scarlet.display.img_to_rgb(datas, norm=asinh) ax[i][j].imshow(img_rgb) ax[i][j].set_title("Stretch {}, Q {}".format(stretch, Q)) ax[i][j].axis('off') return fig
5,353,433
def rad_plot(df, r_lo=0., r_hi=25., weighted=0): """Plot radial distribution.""" plt.figure(figsize=(16, 9)) if weighted == 1: plt.hist(df.r, bins=100, range=[r_lo, r_hi], weights=df.weight/df.r**2, histtype='step', color='black', linewidth=2) plt.ylabel(r'COUNTS/CM$^2$') else: plt.hist(df.r, bins=100, range=[r_lo, r_hi], histtype='step', color='black', linewidth=2) plt.ylabel('COUNTS') plt.xlim([r_lo, r_hi]) plt.ylim(bottom=0) plt.xlabel('RADIUS (CM)') plt.tight_layout()
5,353,434
def printClasses(theDictionary):
    """Prints a table displaying the student's class information.

    List classes alphabetically in the left column, their credit-worth in the right column.
    The last row displays the total number of credits.

    :param dict[str, int] theDictionary: The student's class information with the class
        as the key and the number of credits as the value
    :rtype: None
    """
    classes = [i for i in theDictionary]
    listSortAscending(classes)
    print("CLASS NAME | CREDITS")
    print("=" * 23)
    for className in classes:
        print(f"{className:<13s}", "|", f"{theDictionary[className]:>7d}")
    print("=" * 23)
    print(f"TOTAL CREDITS | {totalCredits(theDictionary):>7d}")
5,353,435
def _isSpecialGenerateOption(target, optName): """ Returns ``True`` if the given option has a special generation function, ``False`` otherwise. """ return _getSpecialFunction(target, optName, '_generateSpecial') is not None
5,353,436
def count_active_days(enable_date, disable_date): """Return the number of days the segment has been active. :param enable_date: The date the segment was enabled :type enable_date: timezone.datetime :param disable_date: The date the segment was disabled :type disable_date: timezone.datetime :returns: The amount of days a segment is/has been active :rtype: int """ if enable_date is not None: if disable_date is None or disable_date <= enable_date: # There is no disable date, or it is not relevant. delta = timezone.now() - enable_date return delta.days if disable_date > enable_date: # There is a disable date and it is relevant. delta = disable_date - enable_date return delta.days return 0
5,353,437
def test_empty_constructor_constructs_empty_weight_graph(): """Test that a new graph is empty.""" from weight_graph import Graph g = Graph() assert len(g.graph) == 0
5,353,438
def numpy_to_python_type(value): """ Convert to Python type from numpy with .item(). """ try: return value.item() except AttributeError: return value
5,353,439
def bigEI_numerical(Ey, t, P=1): """Return the column kp=0 of the matrix E_I, computed numerically.""" lmax = int(np.sqrt(Ey.shape[0]) - 1) K = len(t) map = starry.Map(ydeg=lmax, lazy=False) theta = 360 / P * t bigEI = np.zeros(K) kp = 0 for k in tqdm(range(K), disable=bool(int(os.getenv("NOTQDM", "0")))): def integrand(I): map.inc = I * 180 / np.pi A = map.design_matrix(theta=theta) return (A @ Ey @ A.T * np.sin(I))[k, kp] bigEI[k] = quad(integrand, 0, 0.5 * np.pi)[0] return bigEI
5,353,440
def db_select_all(db, query, data=None): """Select all rows""" logger_instance.debug("query = <<%s>>"% (query[:100],)) cursor = db.cursor() try: cursor.execute(query, data) except pymysql.MySQLError: exc_type, exc_value, exc_traceback = sys.exc_info() err_string = "Error from MySQL:\n" + query sys.stderr.write(err_string + "\n") logger_instance.debug(err_string) #sys.stderr.write(repr(data) + "\n") logger_instance.debug(repr(data)) traceback.print_exc() logger_instance.debug(traceback.format_exception()) cursor.close() sys.exit(1) #return False else: result = cursor.fetchall() cursor.close() return result
5,353,441
def threshold_abs(image, threshold): """Return thresholded image from an absolute cutoff.""" return image > threshold
5,353,442
def warp_images(img1_loc, img2_loc, h_loc):
    """Warp the second image onto the plane of the first using the homography
    h_loc, then paste the first image on top and return the combined canvas."""
    rows1, cols1 = img1_loc.shape[:2]
    rows2, cols2 = img2_loc.shape[:2]
    print("0")
    list_of_points_1 = np.array(
        [[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]], np.float32).reshape(-1, 1, 2)
    temp_points = np.array(
        [[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]], np.float32).reshape(-1, 1, 2)
    print("1")
    list_of_points_2 = cv2.perspectiveTransform(temp_points, h_loc)
    list_of_points = np.concatenate(
        (list_of_points_1, list_of_points_2), axis=0)
    print(list_of_points)
    [x_min, y_min] = np.int32(list_of_points.min(axis=0).ravel() - 0.5)
    [x_max, y_max] = np.int32(list_of_points.max(axis=0).ravel() + 0.5)
    print("3")
    translation_dist = [-x_min, -y_min]
    h_translation = np.array(
        [[1, 0, translation_dist[0]], [0, 1, translation_dist[1]], [0, 0, 1]])
    print(((x_max - x_min, x_max, x_min), (y_max - y_min, y_max, y_min)))
    output_img = cv2.warpPerspective(
        img2_loc, h_translation.dot(h_loc), (x_max - x_min, y_max - y_min))
    output_img[translation_dist[1]:rows1+translation_dist[1],
               translation_dist[0]:cols1+translation_dist[0]] = img1_loc
    print("5")
    return output_img
5,353,443
def ValidaCpf(msg='Cadastro de Pessoa Física (CPF): ', pont=True):
    """
    -> Function to validate a CPF (Brazilian taxpayer ID)
    :param msg: Message shown to the user before reading the CPF.
    :param pont: If True, returns the CPF with punctuation (e.g. xxx.xxx.xxx-xx).
                 If False, returns the CPF without punctuation (e.g. xxxxxxxxxxx).
    :return: Returns a valid CPF.
    """
    while True:
        cpf = str(input(f'{msg}'))
        if pont == False:
            # Strip any punctuation when an unformatted CPF was requested
            cpf = cpf.replace('.', '').replace('-', '')
        contDig = 0
        for dig in cpf:
            if dig.isnumeric():
                contDig += 1  # Count the number of digits in the CPF
        if contDig != 11:  # If the CPF does not have exactly 11 digits, show an error message
            print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
            continue  # Go back to the top of the loop
        if '.' in cpf:  # Check that the dots exist and that their count is correct (2)
            if cpf.count('.') != 2:
                print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
                continue
        else:  # If there are no dots and pont=True, add the punctuation
            if pont:
                cpf = list(cpf)
                cpf.insert(3, '.')
                cpf.insert(7, '.')
        if '-' in cpf:  # Check that the hyphen exists and that its count is correct (1)
            if cpf.count('-') != 1:
                print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
                continue
        else:  # If there is no hyphen and pont=True, add the punctuation
            if pont:
                if not isinstance(cpf, list):
                    cpf = list(cpf)
                cpf.insert(11, '-')
        result = [''.join(cpf)]  # Join the list back into a string
        cpf = result[0]
        break
    return cpf
5,353,444
def get_xml_path(xml, path=None, func=None): """ Return the content from the passed xml xpath, or return the result of a passed function (receives xpathContext as its only arg) """ #doc = None #ctx = None #result = None #try: doc = etree.fromstring(xml) #ctx = doc.xpathNewContext() if path: #ret = ctx.xpathEval(path) ret = doc.xpath(path) if ret is not None: if type(ret) == list: if len(ret) >= 1: result = ret[0].text else: result = ret elif func: result = func(doc) else: raise ValueError("'path' or 'func' is required.") #finally: # if doc: # doc.freeDoc() # if ctx: # ctx.xpathFreeContext() return result
5,353,445
def _raise_error_if_not_drawing_classifier_input_sframe( dataset, feature, target): """ Performs some sanity checks on the SFrame provided as input to `turicreate.drawing_classifier.create` and raises a ToolkitError if something in the dataset is missing or wrong. """ from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe _raise_error_if_not_sframe(dataset) if feature not in dataset.column_names(): raise _ToolkitError("Feature column '%s' does not exist" % feature) if target not in dataset.column_names(): raise _ToolkitError("Target column '%s' does not exist" % target) if (dataset[feature].dtype != _tc.Image and dataset[feature].dtype != list): raise _ToolkitError("Feature column must contain images" + " or stroke-based drawings encoded as lists of strokes" + " where each stroke is a list of points and" + " each point is stored as a dictionary") if dataset[target].dtype != int and dataset[target].dtype != str: raise _ToolkitError("Target column contains " + str(dataset[target].dtype) + " but it must contain strings or integers to represent" + " labels for drawings.") if len(dataset) == 0: raise _ToolkitError("Input Dataset is empty!")
5,353,446
def readXYdYData(filename, comment_character='#'): """ Read in a file containing 3 columns of x, y, dy Lines beginning with commentCharacter are ignored """ return read_columnar_data(filename, number_columns=3, comment_character=comment_character)
5,353,447
def hexlen(x): """ Returns the string length of 'x' in hex format. """ return len(hex(x))+2
5,353,448
def get_db(): """ connectionを取得します """ if not hasattr(g, 'sqlite_db'): g.sqlite_db = connect_db() return g.sqlite_db
5,353,449
def test_load_course( mock_upsert_tasks, course_exists, is_published, is_run_published, blocklisted ): """Test that load_course loads the course""" course = ( CourseFactory.create(runs=None, published=is_published) if course_exists else CourseFactory.build() ) assert Course.objects.count() == (1 if course_exists else 0) assert LearningResourceRun.objects.count() == 0 props = model_to_dict( CourseFactory.build( course_id=course.course_id, platform=course.platform, published=is_published ) ) del props["id"] if is_run_published: run = model_to_dict(LearningResourceRunFactory.build(platform=course.platform)) del run["content_type"] del run["object_id"] del run["id"] props["runs"] = [run] else: props["runs"] = [] blocklist = [course.course_id] if blocklisted else [] result = load_course(props, blocklist, []) if course_exists and (not is_published or not is_run_published) and not blocklisted: mock_upsert_tasks.delete_course.assert_called_with(result) elif is_published and is_run_published and not blocklisted: mock_upsert_tasks.upsert_course.assert_called_with(result.id) else: mock_upsert_tasks.delete_program.assert_not_called() mock_upsert_tasks.upsert_course.assert_not_called() assert Course.objects.count() == 1 assert LearningResourceRun.objects.count() == (1 if is_run_published else 0) # assert we got a course back assert isinstance(result, Course) for key, value in props.items(): assert getattr(result, key) == value, f"Property {key} should equal {value}"
5,353,450
def list_manipulation(lst, command, location, value=None): """Mutate lst to add/remove from beginning or end. - lst: list of values - command: command, either "remove" or "add" - location: location to remove/add, either "beginning" or "end" - value: when adding, value to add remove: remove item at beginning or end, and return item removed >>> lst = [1, 2, 3] >>> list_manipulation(lst, 'remove', 'end') 3 >>> list_manipulation(lst, 'remove', 'beginning') 1 >>> lst [2] add: add item at beginning/end, and return list >>> lst = [1, 2, 3] >>> list_manipulation(lst, 'add', 'beginning', 20) [20, 1, 2, 3] >>> list_manipulation(lst, 'add', 'end', 30) [20, 1, 2, 3, 30] >>> lst [20, 1, 2, 3, 30] Invalid commands or locations should return None: >>> list_manipulation(lst, 'foo', 'end') is None True >>> list_manipulation(lst, 'add', 'dunno') is None True """ if command == "remove": if location == "end": return lst.pop() elif location == "beginning": return lst.pop(0) elif command == "add": if location == "beginning": lst.insert(0,value) return lst elif location == "end": lst.append(value) return lst
5,353,451
def test_jones_num_funcs_x_orientation(): """Test functions to convert jones pol strings and numbers with x_orientation.""" jnums = [-8, -7, -6, -5, -4, -3, -2, -1] x_orient1 = "east" jstr = ["Jne", "Jen", "Jnn", "Jee", "Jlr", "Jrl", "Jll", "Jrr"] assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient1) assert jstr == uvutils.jnum2str(jnums, x_orientation=x_orient1) # Check shorthands jstr = ["ne", "en", "nn", "n", "ee", "e", "lr", "rl", "ll", "l", "rr", "r"] jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1] assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient1) # Check individuals assert -6 == uvutils.jstr2num("jnn", x_orientation=x_orient1) assert "Jen" == uvutils.jnum2str(-7, x_orientation=x_orient1) # Check errors pytest.raises(KeyError, uvutils.jstr2num, "foo", x_orientation=x_orient1) pytest.raises(ValueError, uvutils.jstr2num, 1, x_orientation=x_orient1) pytest.raises(ValueError, uvutils.jnum2str, 7.3, x_orientation=x_orient1) # check parse method assert uvutils.parse_jpolstr("e", x_orientation=x_orient1) == "Jee" assert uvutils.parse_jpolstr("x", x_orientation=x_orient1) == "Jee" assert uvutils.parse_jpolstr("y", x_orientation=x_orient1) == "Jnn" assert uvutils.parse_jpolstr("en", x_orientation=x_orient1) == "Jen" assert uvutils.parse_jpolstr("NE", x_orientation=x_orient1) == "Jne" jnums = [-8, -7, -6, -5, -4, -3, -2, -1] x_orient2 = "north" jstr = ["Jen", "Jne", "Jee", "Jnn", "Jlr", "Jrl", "Jll", "Jrr"] assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient2) assert jstr == uvutils.jnum2str(jnums, x_orientation=x_orient2) # Check shorthands jstr = ["en", "ne", "ee", "e", "nn", "n", "lr", "rl", "ll", "l", "rr", "r"] jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1] assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient2) # Check individuals assert -6 == uvutils.jstr2num("jee", x_orientation=x_orient2) assert "Jne" == uvutils.jnum2str(-7, x_orientation=x_orient2) # Check errors pytest.raises(KeyError, uvutils.jstr2num, "foo", x_orientation=x_orient2) pytest.raises(ValueError, uvutils.jstr2num, 1, x_orientation=x_orient2) pytest.raises(ValueError, uvutils.jnum2str, 7.3, x_orientation=x_orient2) # check parse method assert uvutils.parse_jpolstr("e", x_orientation=x_orient2) == "Jee" assert uvutils.parse_jpolstr("x", x_orientation=x_orient2) == "Jnn" assert uvutils.parse_jpolstr("y", x_orientation=x_orient2) == "Jee" assert uvutils.parse_jpolstr("en", x_orientation=x_orient2) == "Jen" assert uvutils.parse_jpolstr("NE", x_orientation=x_orient2) == "Jne" # check warnings for non-recognized x_orientation with uvtest.check_warnings(UserWarning, "x_orientation not recognized"): assert uvutils.jstr2num("x", x_orientation="foo") == -5 with uvtest.check_warnings(UserWarning, "x_orientation not recognized"): assert uvutils.jnum2str(-6, x_orientation="foo") == "Jyy"
5,353,452
def image_comparison(src_file, dest_file): """Compare the images listed in input csv file and outputs the score,elapsed time along with it into the output csv file""" try: with open(str(src_file), mode='r') as f, open(str(dest_file), mode="w") as csv_file: fieldnames = ["Image1", "Image2", "Similar", "Elapsed"] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() reader = csv.reader(f) image_list = list(reader) for item in range(1, len(image_list)): src = Image.open(image_list[item][0]) dest = Image.open(image_list[item][1]) start_time = time.time() score = calculate_rms(src, dest) end_time = time.time() elapsed_time = round(end_time - start_time, 4) writer.writerow({"Image1": image_list[item][0], "Image2": image_list[item][1], "Similar": score, "Elapsed": elapsed_time}) except IOError as exception: if exception.strerror == "No such file or directory": print(f"Please check the file exists or not - {exception.strerror}") else: print(f"Here is the Error - {exception.strerror}")
5,353,453
def solve( problem, comm=_NoArgumentGiven, dispatcher_rank=0, log_filename=None, results_filename=None, **kwds ): """Solves a branch-and-bound problem and returns the solution. Note ---- This function also collects and summarizes runtime workload statistics, which may introduce additional overhead. This overhead can be avoided by directly instantiating a :class:`Solver` object and calling the :func:`Solver.solve` method. Parameters ---------- problem : :class:`pybnb.Problem <pybnb.problem.Problem>` An object that defines a branch-and-bound problem comm : ``mpi4py.MPI.Comm``, optional The MPI communicator to use. If unset, the mpi4py.MPI.COMM_WORLD communicator will be used. Setting this keyword to None will disable the use of MPI and avoid an attempted import of mpi4py.MPI (which avoids triggering a call to `MPI_Init()`). dispatcher_rank : int, optional The process with this rank will be designated the dispatcher process. If MPI functionality is disabled (by setting comm=None, or when comm.size==1), this keyword must be left at 0. (default: 0) log_filename : string, optional A filename where solver output should be sent in addition to console. This keyword will be ignored if the `log` keyword is set. (default: None) results_filename : string, optional Saves the solver results into a YAML-formatted file with the given name. (default: None) **kwds Additional keywords to be passed to :func:`Solver.solve`. See that method for additional keyword documentation. Returns ------- results : :class:`SolverResults <pybnb.solver_results.SolverResults>` An object storing information about the solve. """ opt = Solver(comm=comm, dispatcher_rank=dispatcher_rank) if (opt.is_dispatcher) and ("log" not in kwds) and (log_filename is not None): kwds["log"] = get_simple_logger(filename=log_filename) results = opt.solve(problem, **kwds) stats = opt.collect_worker_statistics() if opt.is_dispatcher: tmp = six.StringIO() summarize_worker_statistics(stats, stream=tmp) opt._disp.log_info(tmp.getvalue()) if opt.is_dispatcher and (results_filename is not None): results.write(results_filename) return results
5,353,454
def solution(lst):
    """Given a non-empty list of integers, return the sum of all of the odd elements that are in even positions.

    Examples
    solution([5, 8, 7, 1]) ==> 12
    solution([3, 3, 3, 3, 3]) ==> 9
    solution([30, 13, 24, 321]) ==> 0
    """
    #[SOLUTION]
    return sum([x for idx, x in enumerate(lst) if idx % 2 == 0 and x % 2 == 1])
5,353,455
def summarize_traffic_mix(l_d_flow_records, d_filters={}): """ Filter the traffic flow data and execute the processing analysis logic for network behavior metrics. """ o_tcp_src_analysis = TopProtocolAnalysis() o_tcp_dst_analysis = TopProtocolAnalysis() o_upd_src_analysis = TopProtocolAnalysis() o_upd_dst_analysis = TopProtocolAnalysis() for flow in l_d_flow_records: # print "Flow:", str(flow) if matches_desired_flows(op_src_asn_to_filter, op_dst_asn_to_filter, op_ingress_asn_to_filter, flow, d_filters): # get srcIP and dstIP int_flow_sa = flow['sa'] # get bytes and packets flow_bytes = fputil.record_to_numeric(flow['ibyt']) flow_packets = fputil.record_to_numeric(flow['ipkt']) # get ports and protocol flow_sp = fputil.record_to_numeric(flow['sp']) flow_dp = fputil.record_to_numeric(flow['dp']) str_flow_pr = fputil.proto_int_to_str(flow['pr']) # process and save traffic information per selected L7 protocols and group other using -1 port number if str_flow_pr == "TCP": if flow_sp in cons.d_proto_l7_int_str.keys(): o_tcp_src_analysis.update_port_sum(flow_sp, flow_bytes, flow_packets) o_tcp_src_analysis.update_port_ips_sum(flow_sp, int_flow_sa) else: o_tcp_src_analysis.update_port_sum(-1, flow_bytes, flow_packets) o_tcp_src_analysis.update_port_ips_sum(-1, int_flow_sa) if flow_dp in cons.d_proto_l7_int_str.keys(): o_tcp_dst_analysis.update_port_sum(flow_dp, flow_bytes, flow_packets) else: o_tcp_dst_analysis.update_port_sum(-1, flow_bytes, flow_packets) if str_flow_pr == "UDP": if flow_sp in cons.d_proto_l7_int_str.keys(): o_upd_src_analysis.update_port_sum(flow_sp, flow_bytes, flow_packets) o_upd_src_analysis.update_port_ips_sum(flow_sp, int_flow_sa) else: o_upd_src_analysis.update_port_sum(-1, flow_bytes, flow_packets) o_upd_src_analysis.update_port_ips_sum(-1, int_flow_sa) if flow_dp in cons.d_proto_l7_int_str.keys(): o_upd_dst_analysis.update_port_sum(flow_dp, flow_bytes, flow_packets) else: o_upd_dst_analysis.update_port_sum(-1, flow_bytes, flow_packets) return [o_tcp_src_analysis, o_tcp_dst_analysis, o_upd_src_analysis, o_upd_dst_analysis]
5,353,456
def segment(X, upscale=1.0, denoise=False): """ :param X: :param upscale: :param denoise: :return: """ if upscale > 1.0: X = rescale(X, upscale) if denoise: X = denoise_wavelet(X) thresh = filters.threshold_otsu(X) bw = closing(X > thresh, square(3)) cleared = clear_border(bw) cleared = rescale(cleared, 1.0 / upscale) return label(cleared)
5,353,457
def describe_user_pool_client(UserPoolId=None, ClientId=None): """ Client method for returning the configuration information and metadata of the specified user pool app client. See also: AWS API Documentation Exceptions :example: response = client.describe_user_pool_client( UserPoolId='string', ClientId='string' ) :type UserPoolId: string :param UserPoolId: [REQUIRED]\nThe user pool ID for the user pool you want to describe.\n :type ClientId: string :param ClientId: [REQUIRED]\nThe app client ID of the app associated with the user pool.\n :rtype: dict ReturnsResponse Syntax { 'UserPoolClient': { 'UserPoolId': 'string', 'ClientName': 'string', 'ClientId': 'string', 'ClientSecret': 'string', 'LastModifiedDate': datetime(2015, 1, 1), 'CreationDate': datetime(2015, 1, 1), 'RefreshTokenValidity': 123, 'ReadAttributes': [ 'string', ], 'WriteAttributes': [ 'string', ], 'ExplicitAuthFlows': [ 'ADMIN_NO_SRP_AUTH'|'CUSTOM_AUTH_FLOW_ONLY'|'USER_PASSWORD_AUTH'|'ALLOW_ADMIN_USER_PASSWORD_AUTH'|'ALLOW_CUSTOM_AUTH'|'ALLOW_USER_PASSWORD_AUTH'|'ALLOW_USER_SRP_AUTH'|'ALLOW_REFRESH_TOKEN_AUTH', ], 'SupportedIdentityProviders': [ 'string', ], 'CallbackURLs': [ 'string', ], 'LogoutURLs': [ 'string', ], 'DefaultRedirectURI': 'string', 'AllowedOAuthFlows': [ 'code'|'implicit'|'client_credentials', ], 'AllowedOAuthScopes': [ 'string', ], 'AllowedOAuthFlowsUserPoolClient': True|False, 'AnalyticsConfiguration': { 'ApplicationId': 'string', 'RoleArn': 'string', 'ExternalId': 'string', 'UserDataShared': True|False }, 'PreventUserExistenceErrors': 'LEGACY'|'ENABLED' } } Response Structure (dict) -- Represents the response from the server from a request to describe the user pool client. UserPoolClient (dict) -- The user pool client from a server response to describe the user pool client. UserPoolId (string) -- The user pool ID for the user pool client. ClientName (string) -- The client name from the user pool request of the client type. ClientId (string) -- The ID of the client associated with the user pool. ClientSecret (string) -- The client secret from the user pool request of the client type. LastModifiedDate (datetime) -- The date the user pool client was last modified. CreationDate (datetime) -- The date the user pool client was created. RefreshTokenValidity (integer) -- The time limit, in days, after which the refresh token is no longer valid and cannot be used. ReadAttributes (list) -- The Read-only attributes. (string) -- WriteAttributes (list) -- The writeable attributes. (string) -- ExplicitAuthFlows (list) -- The authentication flows that are supported by the user pool clients. Flow names without the ALLOW_ prefix are deprecated in favor of new names with the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along with values without ALLOW_ prefix. Valid values include: ALLOW_ADMIN_USER_PASSWORD_AUTH : Enable admin based user password authentication flow ADMIN_USER_PASSWORD_AUTH . This setting replaces the ADMIN_NO_SRP_AUTH setting. With this authentication flow, Cognito receives the password in the request instead of using the SRP (Secure Remote Password protocol) protocol to verify passwords. ALLOW_CUSTOM_AUTH : Enable Lambda trigger based authentication. ALLOW_USER_PASSWORD_AUTH : Enable user password-based authentication. In this flow, Cognito receives the password in the request instead of using the SRP protocol to verify passwords. ALLOW_USER_SRP_AUTH : Enable SRP based authentication. ALLOW_REFRESH_TOKEN_AUTH : Enable authflow to refresh tokens. 
(string) -- SupportedIdentityProviders (list) -- A list of provider names for the identity providers that are supported on this client. (string) -- CallbackURLs (list) -- A list of allowed redirect (callback) URLs for the identity providers. A redirect URI must: Be an absolute URI. Be registered with the authorization server. Not include a fragment component. See OAuth 2.0 - Redirection Endpoint . Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only. App callback URLs such as myapp://example are also supported. (string) -- LogoutURLs (list) -- A list of allowed logout URLs for the identity providers. (string) -- DefaultRedirectURI (string) -- The default redirect URI. Must be in the CallbackURLs list. A redirect URI must: Be an absolute URI. Be registered with the authorization server. Not include a fragment component. See OAuth 2.0 - Redirection Endpoint . Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only. App callback URLs such as myapp://example are also supported. AllowedOAuthFlows (list) -- The allowed OAuth flows. Set to code to initiate a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the token endpoint. Set to implicit to specify that the client should get the access token (and, optionally, ID token, based on scopes) directly. Set to client_credentials to specify that the client should get the access token (and, optionally, ID token, based on scopes) from the token endpoint using a combination of client and client_secret. (string) -- AllowedOAuthScopes (list) -- The allowed OAuth scopes. Possible values provided by OAuth are: phone , email , openid , and profile . Possible values provided by AWS are: aws.cognito.signin.user.admin . Custom scopes created in Resource Servers are also supported. (string) -- AllowedOAuthFlowsUserPoolClient (boolean) -- Set to true if the client is allowed to follow the OAuth protocol when interacting with Cognito user pools. AnalyticsConfiguration (dict) -- The Amazon Pinpoint analytics configuration for the user pool client. Note Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides. ApplicationId (string) -- The application ID for an Amazon Pinpoint application. RoleArn (string) -- The ARN of an IAM role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics. ExternalId (string) -- The external ID. UserDataShared (boolean) -- If UserDataShared is true , Amazon Cognito will include user data in the events it publishes to Amazon Pinpoint analytics. PreventUserExistenceErrors (string) -- Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to ENABLED and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY , those APIs will return a UserNotFoundException exception if the user does not exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the old behavior of Cognito where user existence related errors are not prevented. 
This setting affects the behavior of following APIs: AdminInitiateAuth AdminRespondToAuthChallenge InitiateAuth RespondToAuthChallenge ForgotPassword ConfirmForgotPassword ConfirmSignUp ResendConfirmationCode Note After February 15th 2020, the value of PreventUserExistenceErrors will default to ENABLED for newly created user pool clients if no value is provided. Exceptions CognitoIdentityProvider.Client.exceptions.ResourceNotFoundException CognitoIdentityProvider.Client.exceptions.InvalidParameterException CognitoIdentityProvider.Client.exceptions.TooManyRequestsException CognitoIdentityProvider.Client.exceptions.NotAuthorizedException CognitoIdentityProvider.Client.exceptions.InternalErrorException :return: { 'UserPoolClient': { 'UserPoolId': 'string', 'ClientName': 'string', 'ClientId': 'string', 'ClientSecret': 'string', 'LastModifiedDate': datetime(2015, 1, 1), 'CreationDate': datetime(2015, 1, 1), 'RefreshTokenValidity': 123, 'ReadAttributes': [ 'string', ], 'WriteAttributes': [ 'string', ], 'ExplicitAuthFlows': [ 'ADMIN_NO_SRP_AUTH'|'CUSTOM_AUTH_FLOW_ONLY'|'USER_PASSWORD_AUTH'|'ALLOW_ADMIN_USER_PASSWORD_AUTH'|'ALLOW_CUSTOM_AUTH'|'ALLOW_USER_PASSWORD_AUTH'|'ALLOW_USER_SRP_AUTH'|'ALLOW_REFRESH_TOKEN_AUTH', ], 'SupportedIdentityProviders': [ 'string', ], 'CallbackURLs': [ 'string', ], 'LogoutURLs': [ 'string', ], 'DefaultRedirectURI': 'string', 'AllowedOAuthFlows': [ 'code'|'implicit'|'client_credentials', ], 'AllowedOAuthScopes': [ 'string', ], 'AllowedOAuthFlowsUserPoolClient': True|False, 'AnalyticsConfiguration': { 'ApplicationId': 'string', 'RoleArn': 'string', 'ExternalId': 'string', 'UserDataShared': True|False }, 'PreventUserExistenceErrors': 'LEGACY'|'ENABLED' } } :returns: (string) -- """ pass
5,353,458
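The stub above documents the Amazon Cognito describe_user_pool_client call as exposed by boto3. A minimal usage sketch follows, assuming valid AWS credentials are configured; the user pool and app client IDs below are placeholders, not real resources.

import boto3

client = boto3.client("cognito-idp", region_name="us-east-1")

# Placeholder IDs -- substitute your own user pool and app client.
response = client.describe_user_pool_client(
    UserPoolId="us-east-1_EXAMPLE",
    ClientId="1example23456789",
)

app_client = response["UserPoolClient"]
print(app_client["ClientName"], app_client.get("ExplicitAuthFlows"))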
def get_image_info(doc):
    """Create a dictionary mapping image id -> [file_name, width, height]."""
    id_img = dict()
    # add image information
    for img_infor in doc['images']:
        filename = img_infor['file_name']
        width = img_infor['width']
        height = img_infor['height']
        id_img[img_infor['id']] = [filename, width, height]
    return id_img
5,353,459
def common_values(series1, series2): """ Shows the differences, intersections and union of two sets. """ values1 = set(series1) values2 = set(series2) intersection = set.intersection(values1, values2) no_values2 = values1 - values2 no_values1 = values2 - values1 total = set.union(values1, values2) print('Intersection: {}'.format(len(intersection))) print('Total set 1: {}'.format(len(values1))) print('Not in set 2: {}'.format(len(no_values2))) print('Total set 2: {}'.format(len(values2))) print('Not in set 1: {}'.format(len(no_values1))) print('Total: {}'.format(len(total)))
5,353,460
def __check_partial(detected,approx, width, height): """ Check if it's a partial shape It's a partial shape if the shape's contours is on the image's edges. Parameters ---------- detected : Shape The detected shape approx : numpy.ndarray Approximates a polygonal curves. width : int Image's width height : int Image's height Returns ------- detected : Shape The detected shape """ # Checks in the x,y positions of the contours. # The shape is on the image's edges if a point is less than 1 or more than width-1. result = np.where((approx <= 1) | (approx >= width-1)) if(len(result[0]) > 0): #result[0] contain the positions found by np.where. detected = Shape.Shape.PARTIAL.value else: #check if there is a point(X or Y) equals to height or height-1. result = np.where((approx == height) | (approx == height-1)) result = np.where(result[2] == 1) #check if this point is Y. if(len(result[0])>0): detected = Shape.Shape.PARTIAL.value else: detected = None return detected
5,353,461
def get_svg_size(filename): """return width and height of a svg""" with open(filename) as f: lines = f.read().split('\n') width, height = None, None for l in lines: res = re.findall('<svg.*width="(\d+)pt".*height="(\d+)pt"', l) if len(res) > 0: # need to scale up, maybe due to omni-graffle scale = 2 width = round(scale*float(res[0][0])) height = round(scale*float(res[0][1])) res = re.findall('width="([.\d]+)', l) if len(res) > 0: width = round(float(res[0])) res = re.findall('height="([.\d]+)', l) if len(res) > 0: height = round(float(res[0])) if width is not None and height is not None: return width, height assert False, 'cannot find height and width for ' + filename
5,353,462
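get_svg_size scrapes the width/height attributes out of the <svg> tag with regular expressions (the "pt" branch with the x2 scale is an OmniGraffle-specific workaround). A quick sketch of the plain-pixel case on an SVG header string:

import re

svg_header = '<svg xmlns="http://www.w3.org/2000/svg" width="320.5" height="240">'

width = round(float(re.findall(r'width="([.\d]+)', svg_header)[0]))
height = round(float(re.findall(r'height="([.\d]+)', svg_header)[0]))
print(width, height)   # 320 240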
def bin_mgf(mgf_files,output_file = None, min_bin = 50, max_bin = 850, bin_size = 0.01, max_parent_mass = 850, verbose = False, remove_zero_sum_rows = True, remove_zero_sum_cols = True, window_filter = True, filter_window_size = 50, filter_window_retain = 3, filter_parent_peak = True): """ Bins an mgf file Bins an mgf of ms2 spectra and returns a sparse CSR matrix. Operates on either a single or a list of mgf files. The CSR matrix has bins on the rows and spectra as the columns Args: mgf_files: The path of an mgf file, or a list of multiple mgf files. Can be a directory path containing mgf files output_file: Name of output file in pickle format. min_bin: smallest m/z value to be binned. max_bin: largest m/z value to be binned. bin_size: m/z range in one bin. max_parent_mass: Remove ions larger than this. verbose: Print debug info. remove_zero_sum_rows: Explicitly remove empty rows (bins). remove_zero_sum_cols: Explicitly remove spectra where all values were filtered away (columns) filter_parent_peak: Remove all ms2 peaks larger than the parent mass Returns: A sparse CSR matrix X, a list of bin names, and a list of spectra names """ start = time.time() # Creates a list of bins based on the parameters inputted bins = np.arange(min_bin, max_bin, bin_size) # If the path passed in is a directory then loop through it if type(mgf_files) != list and os.path.isdir(mgf_files): dir = mgf_files mgf_files = [] directory = os.fsencode(dir) for file in os.listdir(directory): filename = os.fsdecode(file) # only save filenames of .mgf files in the directory if filename.endswith(".mgf"): mgf_files.append(os.path.join(dir, filename)) # If only one mgf file is passed in, make it a list so that it's iterable elif type(mgf_files) != list: mgf_files = glob.glob(mgf_files) n_scans = 0 # Go through all the mgf files and see how many spectra are there in total # for construction of the intensity matrix X for file in mgf_files: reader0 = mgf.MGF(file) n_scans += len([x for x in reader0]) scan_names = [] # Create an empty sparse matrix with bins as the rows and spectra as the columns X = dok_matrix((len(bins), n_scans), dtype=np.float32) # Go through each file and bin each MGF file for file in mgf_files: X,scan_names = bin_sparse(X, file, scan_names, bins, max_parent_mass, verbose, window_filter, filter_window_size, filter_window_retain) # Convert from DOK to CSR for easier processing/handling X = X.tocsr() X_orig_shape = X.shape # Filter out rows summing to zero if specified print("\nSummary:") if verbose else None if remove_zero_sum_rows: X, row_names_filter = filter_zero_rows(X) # Adjust the bins accordingly based on row_names_filter which says which rows to keep bins = [x for (x, v) in zip(bins, row_names_filter) if v] print("Removed %s rows" % (X_orig_shape[0] - X.shape[0] )) if verbose else None # Filter out columns summing to zero if specified if remove_zero_sum_cols: X, col_names_filter = filter_zero_cols(X) # Adjust the scan names accordingly based on col_names_filter which says which columns to keep scan_names = [x for (x, v) in zip(scan_names, col_names_filter) if v] print("Removed %s cols" % (X_orig_shape[1] - X.shape[1] )) if verbose else None if verbose: print("Binned in %s seconds with dimensions %sx%s, %s nonzero entries (%s)\n" % (time.time()-start, X.shape[0], X.shape[1], X.count_nonzero(), X.count_nonzero()/(n_scans*len(bins)))) # If an output file is specified, write to it if output_file is not None: # Use pickle to create a binary file that holds the intensity matrix, bins, and spectra names 
pkl.dump((X, bins, scan_names),open( output_file, "wb")) print("Wrote data to " + output_file) if verbose else None return(X, bins, scan_names)
5,353,463
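At its core, bin_mgf maps each fragment m/z value onto a fixed grid and accumulates intensities into a sparse bins-by-spectra matrix. The following is a stripped-down sketch of that binning step for a single toy spectrum, assuming NumPy and SciPy are available; the real MGF parsing via pyteomics and all filtering options are left out.

import numpy as np
from scipy.sparse import dok_matrix

min_bin, max_bin, bin_size = 50.0, 850.0, 0.01
bins = np.arange(min_bin, max_bin, bin_size)

# One toy MS2 spectrum: fragment m/z values and their intensities.
mz = np.array([120.081, 303.175, 449.108])
intensity = np.array([1500.0, 320.0, 87.0])

X = dok_matrix((len(bins), 1), dtype=np.float32)
rows = np.digitize(mz, bins) - 1          # index of the bin each peak falls into
for r, i in zip(rows, intensity):
    X[r, 0] += i                          # accumulate intensity per bin

print(X.nnz, "non-empty bins")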
def index(): """Video streaming home page which makes use of /mjpeg.""" return render_template('index.html')
5,353,464
def tex_quoted_no_underscore (s) : """Same as tex_quoted but does NOT quote underscores. """ if isinstance (s, pyk.string_types) : s = _tex_pi_symbols.sub (_tex_subs_pi_symbols, s) s = _tex_to_quote.sub (_tex_subs_to_quote, s) s = _tex_tt_symbols.sub (_tex_subs_tt_symbols, s) s = _tex_diacritics.sub (_tex_subs_diacritics, s) return s
5,353,465
def load_from_json_file(filename):
    """ Create a Python object from a JSON file """
    with open(filename, 'r') as f:
        return json.loads(f.read())
5,353,466
def _variable_to_field(v): """Transform a FuzzyVariable into a restx field""" if isinstance(v.domain, FloatDomain): a, b = v.domain.min, v.domain.max f = fields.Float(description=v.name, required=True, min=a, max=b, example=(a + b) / 2) elif isinstance(v.domain, CategoricalDomain): raise NotImplementedError else: raise ValueError("Unknown domain for variable %s" % v) return v.name, f
5,353,467
def abs_ang_mom(u, lat=None, radius=RAD_EARTH, rot_rate=ROT_RATE_EARTH, lat_str=LAT_STR): """Absolute angular momentum.""" if lat is None: lat = u[lat_str] coslat = cosdeg(lat) return radius*coslat*(rot_rate*radius*coslat + u)
5,353,468
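Per unit mass, the absolute angular momentum about the rotation axis is M = a*cos(lat) * (rot_rate*a*cos(lat) + u). A quick numerical check with NumPy, using assumed Earth-like constants in place of RAD_EARTH and ROT_RATE_EARTH:

import numpy as np

RAD_EARTH = 6.371e6          # m, assumed Earth radius
ROT_RATE_EARTH = 7.292e-5    # 1/s, assumed rotation rate

lat = np.array([0.0, 30.0, 60.0])   # degrees
u = np.array([0.0, 10.0, 25.0])     # zonal wind, m/s

coslat = np.cos(np.deg2rad(lat))
M = RAD_EARTH * coslat * (ROT_RATE_EARTH * RAD_EARTH * coslat + u)
print(M)   # ~2.96e9 m^2/s at the equator for u = 0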
def main(): """ Simple pyvmomi (vSphere SDK for Python) script that generates ESXi support bundles running from VCSA using vCenter Alarm """ # Logger for storing vCenter Alarm logs vcAlarmLog = logging.getLogger('vcenter_alarms') vcAlarmLog.setLevel(logging.INFO) vcAlarmLogFile = os.path.join('/var/log', 'vcenter_alarms.log') formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s","%Y-%m-%d %H:%M:%S") vcAlarmLogHandler = logging.FileHandler(vcAlarmLogFile) vcAlarmLogHandler.setFormatter(formatter) vcAlarmLog.addHandler(vcAlarmLogHandler) vcAlarmLog.propagate = False args = get_args() try: si = None try: si = connect.SmartConnect(host=args.host, user=args.user, pwd=args.password, port=int(args.port)) except IOError, e: pass if not si: vcAlarmLog.info("Could not connect to the specified host using specified username and password") print "Could not connect to the specified host using specified username and password" return -1 atexit.register(connect.Disconnect, si) content = si.RetrieveContent() # Get Diag Manager which is used to generate support bundles in VC diagManager = content.diagnosticManager # Extract the vSphere Cluster generated from vCenter Server Alarm cluster = os.environ['VMWARE_ALARM_EVENT_COMPUTERESOURCE'] #cluster = "Non-VSAN-Cluster" if cluster == None: vcAlarmLog.info("Unable to extract vSphere Cluster from VMWARE_ALARM_EVENT_COMPUTERESOURCE") print "Unable to extract vSphere Cluster from VMWARE_ALARM_EVENT_COMPUTERESOURCE" return -1 vcAlarmLog.info("Cluster passed from VC Alarm: " + cluster) # Retrieve all vSphere Clusters container = content.viewManager.CreateContainerView(content.rootFolder, [vim.ClusterComputeResource], True) # Return vSphere Cluster that matches name specified for c in container.view: if c.name == cluster: cluster_view = c break container.Destroy() # Retrieve all ESXi hosts in the vSphere Cluster # to generate log bundles for hosts_to_generate_logs = [] hosts = cluster_view.host for h in hosts: hosts_to_generate_logs.append(h) # Generate log bundle excluding VC logs vcAlarmLog.info("Generating support bundle") print "Generating support bundle" task = diagManager.GenerateLogBundles_Task(includeDefault=False,host=hosts_to_generate_logs) task_done = False result = None while not task_done: if task.info.state == "success": result = task.info.result task_done = True if task.info.state == "error": vcAlarmLog.error("An error occured while generating support logs") print "An error occured while generating support logs" vcAlarmLog.error(task.info) print task.info return -1 task_done = True if task.info.state == "running": time.sleep(60) # Path to which logs will be stored (automatically creating /esxi-support-logs dir) dir = args.filepath + "/esxi-support-logs" try: os.stat(dir) except: vcAlarmLog.info("Creating directory " + dir + " to store support bundle") os.mkdir(dir) # Loop through the result to get the download URL for each # ESXi support bundle and save it to VCSA filesystem for file in result: download_url = file.url download_file = dir + "/vmsupport-" + file.system.name + ".tgz" vcAlarmLog.info("Downloading " + download_url + " to " + download_file) print "Downloading " + download_url + " to " + download_file urllib.urlretrieve(download_url,download_file) except vmodl.MethodFault, e: vcAlarmLog.error("Caught vmodl fault : " + e.msg) print "Caught vmodl fault : " + e.msg return -1 except Exception, e: vcAlarmLog.error("Caught exception : " + str(e)) print "Caught exception : " + str(e) return -1 return 0
5,353,469
def accelerate_backward(env, time=100): """ Accelerates forward for designated timesteps """ print("Accelerating forward for {} timesteps".format(time)) for i in range(time): ensure_orientation(env) env.step(BACKWARD)
5,353,470
def _get_ngrams(segment, max_order):
    """Extracts all n-grams up to a given maximum order from an input segment.

    Args:
      segment: text segment from which n-grams will be extracted.
      max_order: maximum length in tokens of the n-grams returned by this
          method.

    Returns:
      The Counter containing all n-grams up to max_order in segment
      with a count of how many times each n-gram occurred.
    """
    ngram_counts = collections.Counter()
    for order in range(1, max_order + 1):
        for i in range(0, len(segment) - order + 1):
            ngram = tuple(segment[i:i + order])
            ngram_counts[ngram] += 1
    return ngram_counts
5,353,471
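This is the standard n-gram counter used in BLEU-style scoring. Below is a self-contained copy (renamed so it runs without the surrounding module) together with a small usage check:

import collections

def get_ngrams(segment, max_order):
    # Count every n-gram of length 1..max_order in the token list.
    ngram_counts = collections.Counter()
    for order in range(1, max_order + 1):
        for i in range(len(segment) - order + 1):
            ngram_counts[tuple(segment[i:i + order])] += 1
    return ngram_counts

tokens = "the cat sat on the mat".split()
counts = get_ngrams(tokens, 2)
print(counts[("the",)])        # 2
print(counts[("the", "cat")])  # 1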
def print_donors_list(): """ print a list of existing donors """ print(mr.list_donors()) return False
5,353,472
def log_cef(name, severity, env, *args, **kwargs): """Simply wraps the cef_log function so we don't need to pass in the config dictionary every time. See bug 707060. env can be either a request object or just the request.META dictionary""" c = {'cef.product': getattr(settings, 'CEF_PRODUCT', 'AMO'), 'cef.vendor': getattr(settings, 'CEF_VENDOR', 'Mozilla'), 'cef.version': getattr(settings, 'CEF_VERSION', '0'), 'cef.device_version': getattr(settings, 'CEF_DEVICE_VERSION', '0'), 'cef.file': getattr(settings, 'CEF_FILE', 'syslog'), } # The CEF library looks for some things in the env object like # REQUEST_METHOD and any REMOTE_ADDR stuff. Django not only doesn't send # half the stuff you'd expect, but it specifically doesn't implement # readline on its FakePayload object so these things fail. I have no idea # if that's outdated code in Django or not, but andym made this # <strike>awesome</strike> less crappy so the tests will actually pass. # In theory, the last part of this if() will never be hit except in the # test runner. Good luck with that. if isinstance(env, HttpRequest): r = env.META.copy() if 'PATH_INFO' in r: r['PATH_INFO'] = env.build_absolute_uri(r['PATH_INFO']) elif isinstance(env, dict): r = env else: r = {} if settings.USE_HEKA_FOR_CEF: return settings.HEKA.cef(name, severity, r, *args, config=c, **kwargs) else: return _log_cef(name, severity, r, *args, config=c, **kwargs)
5,353,473
def collimate(S, r, phasevec, print_values = False): """Collimate r phase vectors into a new phase vector on [S]. Output: the collimated phase vector ([b(0),b(1),...,b(L'-1)], L') on [S]. Parameters: S: output phase vectors has all multipliers on [S] r: arity, the number of phase vectors that is collimated phasevec: list of phasevectors to be collimated To be improved: -add scaled interval collimation with modulo measurement """ [b, L] = summate(r, phasevec) # calculate the values of b'(j^vec) in b q = np.floor_divide(b,S) # calculate values of q = floor(b'(j^vec)/S) q_meas = choice(q) # measured value is q_meas # take values of b with q equals the measured value q_meas b_new = np.ma.masked_where(q != q_meas, b).compressed() L_new = len(b_new) b_new = (b_new-b_new[0]) % S # modulo and substract first value to ignore global phase # another equivalent option: b_new = b_new - S*q if print_values: #print("b =", b) #print("q =", q) #print("Measured value q =", q_meas) print(phasevec[0][0], " and ", phasevec[1][0], " collimated into ", b_new) return [b_new, L_new]
5,353,474
def parse_and_load(gw, subj, primitive, cgexpr, g): """ Parse the conceptual grammar expression for the supplied subject and, if successful, add it to graph g. :param gw: parser gateway :param subj: subject of expression :param primitive: true means subClassOf, false means equivalentClass :param cgexpr: expression to parse :param g: graph to add the result to :return: true means success, false error """ ttlresult = gw.parse(subj, primitive, cgexpr) if ttlresult: ttlresult = owlbasere.sub(r'\1>', ttlresult) g.parse(io.StringIO(ttlresult), format='n3') return bool(ttlresult)
5,353,475
def pushed(property_name, **kwargs) -> Signal: """ Returns the `pushed` Signal for the given property. This signal is emitted, when a new child property is added to it. From the perspective of a state, this can be achieved with the `ContextWrapper.push(...)` function.<br> __Hint:__ All key-word arguments of #constraint.s(...) (`min_age`, `max_age`, `detached`) are supported. """ return s(f"{property_name}:pushed", **kwargs)
5,353,476
def upgrade(): """Migrations for the upgrade.""" connection = op.get_bind() # Clean data export_workflow_data(connection) op.drop_table('db_dbworkflowstep_sub_workflows') op.drop_table('db_dbworkflowstep_calculations') op.drop_table('db_dbworkflowstep') op.drop_index('ix_db_dbworkflowdata_aiida_obj_id', table_name='db_dbworkflowdata') op.drop_index('ix_db_dbworkflowdata_parent_id', table_name='db_dbworkflowdata') op.drop_table('db_dbworkflowdata') op.drop_index('ix_db_dbworkflow_label', table_name='db_dbworkflow') op.drop_table('db_dbworkflow')
5,353,477
async def delete_data(table_name: str, filter: str = Header(None), filterparam: str = Header(None), current_user_role: bool = Depends(security.get_write_permission)): """ Parameters - **table_name** (path): **Required** - Name of the table to perform operations on. - **filter** (header): Optional - SQL-like filter to limit the records to retrieve. ex: 'id=:qid and name=:qname' - **filterparam** (header): Optional - SQL-like parameter of *filter. ex: {'qid':3,'qname':'jack'} """ log.logger.debug( 'Access \'/_table/{table_name}\' : run in delete_data(), input data table_name: [%s]' % table_name) log.logger.debug('filter: [%s]' % filter) log.logger.debug('filterparam: [%s]' % filterparam) if not meta.DBMeta().check_table_schema(table_name): raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail='Table [ %s ] not found' % table_name ) ptable = tablemodel.TableModel(table_name) return ptable.delete(filter, filterparam)
5,353,478
def test_memoryview_supports_int(valid_bytes_128): """ Assert that the `int` representation of a :class:`~ulid.ulid.MemoryView` is equal to the result of the :meth:`~ulid.ulid.MemoryView.int` method. """ mv = ulid.MemoryView(valid_bytes_128) assert int(mv) == mv.int
5,353,479
def do_endpoint(method, handler, endpoint, parameters): """Parse url parameters and ready up the search of the endpoint""" if method != "get": token = handler.session.query(Token).where(Token.session_token == handler.session_token).first() if token: if token.expiry_date < int(time.time()): handler.session.delete(token) logging.info("Token expired") handler.send_error(403, "Not connected") return elif token.access_token: handler.refresh_token(token) elif endpoint != "/api/v1/discord/tokens": logging.info("No token") handler.send_error(403, "Not connected") return parsed_url = parse.urlparse(endpoint.strip("/")) if not find_endpoint( method, handler, parameters, parse.parse_qs(parsed_url.query), [], "server/routes", parsed_url.path ): handler.send_error(404, "The resource at the location specified doesn't exist") print("404 error")
5,353,480
def sort(suffixes: tuple, src_path: str, dst_path: str, verbose: bool = False): """ :param suffixes: tuple of file suffixes (mp4, mov) :param src_path: path that contains files needed be sorted :param dst_path: destination path of sorted files :param verbose: prints paths to which were files sorted to :return: None """ assert suffixes, "List is empty" assert os.path.exists(dst_path) and os.path.exists(src_path), "Path does not exist" for file in os.listdir(src_path): src_p = src_path + file if os.path.isfile(src_p): dst_p = dst_path + file name, suffix = file.rsplit(".", maxsplit=1) if suffix in suffixes: try: os.rename(src_p, dst_p) if verbose: print("Moved: ", dst_p) except FileExistsError: duplicants = len([x for x in os.listdir(dst_path) if name in x]) os.rename(src_p, dst_path + name + " (%s)." % duplicants + suffix) if verbose: print("Duplicant: ", dst_path + name + "(%s)" % duplicants + suffix)
5,353,481
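The same move-by-suffix idea, written as an independent sketch with pathlib and shutil on throwaway temporary directories (the function above additionally handles name collisions and expects its path arguments to end in a separator):

from pathlib import Path
import shutil
import tempfile

# Throwaway source/destination directories with a couple of dummy files.
src = Path(tempfile.mkdtemp())
dst = Path(tempfile.mkdtemp())
(src / "clip.mp4").touch()
(src / "notes.txt").touch()

suffixes = ("mp4", "mov")
for f in src.iterdir():
    if f.is_file() and f.suffix.lstrip(".") in suffixes:
        shutil.move(str(f), str(dst / f.name))

print([p.name for p in dst.iterdir()])   # ['clip.mp4']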
def _is_dask_series(ddf):
    """
    Will determine if the given arg is a dask Series.
    Returns False if dask is not installed.
    """
    try:
        import dask.dataframe as dd
        return isinstance(ddf, dd.Series)
    except:
        return False
5,353,482
def square(t, A=1, f=1, D=0): """ t: time A: the amplitude, the peak deviation of the function from zero. f: the ordinary frequency, the number of oscillations (cycles) that occur each second of time. D: non-zero center amplitude """ square_ = A*scipy.signal.square( 2 * np.pi * f * t ) + D return square_
5,353,483
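A short usage check of the wrapper's formula A*square(2*pi*f*t) + D, assuming SciPy and NumPy are available:

import numpy as np
import scipy.signal

t = np.linspace(0, 1, 500, endpoint=False)
y = 2.0 * scipy.signal.square(2 * np.pi * 5 * t) + 0.5   # A=2, f=5 Hz, D=0.5
print(y.min(), y.max())   # -1.5 2.5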
def self_distance_array(reference, box=None, result=None, backend="serial"): """Calculate all possible distances within a configuration `reference`. If the optional argument `box` is supplied, the minimum image convention is applied when calculating distances. Either orthogonal or triclinic boxes are supported. If a 1D numpy array of dtype ``numpy.float64`` with the shape ``(n*(n-1)/2,)`` is provided in `result`, then this preallocated array is filled. This can speed up calculations. Parameters ---------- reference : numpy.ndarray Reference coordinate array of shape ``(3,)`` or ``(n, 3)`` (dtype is arbitrary, will be converted to ``numpy.float32`` internally). box : array_like, optional The unitcell dimensions of the system, which can be orthogonal or triclinic and must be provided in the same format as returned by :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:\n ``[lx, ly, lz, alpha, beta, gamma]``. result : numpy.ndarray, optional Preallocated result array which must have the shape ``(n*(n-1)/2,)`` and dtype ``numpy.float64``. Avoids creating the array which saves time when the function is called repeatedly. backend : {'serial', 'OpenMP'}, optional Keyword selecting the type of acceleration. Returns ------- d : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n*(n-1)/2,)``) Array containing the distances ``dist[i,j]`` between reference coordinates ``i`` and ``j`` at position ``d[k]``. Loop through ``d``: .. code-block:: python for i in range(n): for j in range(i + 1, n): k += 1 dist[i, j] = d[k] .. versionchanged:: 0.13.0 Added *backend* keyword. .. versionchanged:: 0.19.0 Internal dtype conversion of input coordinates to ``numpy.float32``. """ refnum = reference.shape[0] distnum = refnum * (refnum - 1) // 2 distances = _check_result_array(result, (distnum,)) if len(distances) == 0: return distances if box is not None: boxtype, box = check_box(box) if boxtype == 'ortho': _run("calc_self_distance_array_ortho", args=(reference, box, distances), backend=backend) else: _run("calc_self_distance_array_triclinic", args=(reference, box, distances), backend=backend) else: _run("calc_self_distance_array", args=(reference, distances), backend=backend) return distances
5,353,484
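Without a periodic box, the condensed i < j ordering described in the docstring is the same one scipy.spatial.distance.pdist produces, which makes for an easy cross-check; the MDAnalysis routine additionally handles minimum-image conventions, preallocated output arrays and the OpenMP backend.

import numpy as np
from scipy.spatial.distance import pdist

coords = np.random.default_rng(0).random((5, 3)).astype(np.float32)

d = pdist(coords)        # condensed distances, shape (5*4/2,) = (10,)
print(d.shape, d[0])     # d[0] is the distance between points 0 and 1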
def mainRecursivePartitioningLoop(A, B, n_cutoff): """ """ # Initialize storage objects n = A.shape[0] groups = numpy.zeros((n,), dtype=int) groups_history = [] counts = {'twoway-single' : 0, 'twoway-pair' : 0, 'threeway-pair' : 0} to_split = {0 : True} # Recursively partition network while numpy.any([v for v in to_split.values()]): for gn in [g for g,v in to_split.items() if v]: # Initialize group info indx = numpy.where(groups==gn)[0] ni = len(indx) #c = numpy.zeros((1,3)) if ni > n_cutoff: # Calc and sort eigenvecs, eigenvalues BtoEigs = LinearOperator((ni, ni), matvec = lambda x: B(x, A, indx), dtype=float) try: if ni > 2: vals, vecs = eigsh(BtoEigs, k=3, which='BE') sort_inds = numpy.argsort(-vals) vals = vals[sort_inds] vecs = vecs[:,sort_inds] else: vals, vecs = eigsh(BtoEigs, k=2, which='LA') sort_inds = numpy.argsort(-vals) vals = vals[sort_inds] vecs = vecs[:,sort_inds] vals = numpy.array([vals[0], vals[1], min(0, vals[1] - 1)]) except ArpackNoConvergence: to_split[gn] = False # Initialize temporary score and groups holders temp_Q = {} temp_C = {} # Leading eignevec 2-way temp_C['twoway-single'] = twoway1(vecs, B, A, indx) temp_Q['twoway-single'] = modularity(temp_C['twoway-single'], B, A, indx) # Convert eigenvecs to vertex vectors mod_factor = numpy.sqrt(vals[:2] - vals[2]) vecs = vecs[:,0:2] * mod_factor # Leading two eigenvec 2-way temp_C['twoway-pair'] = twoway2(vecs, B, A, indx) temp_Q['twoway-pair'] = modularity(temp_C['twoway-pair'], B, A, indx) # # Leading two eigenvec 3-way # temp_C['threeway-pair'] = threewayCoarse(vecs, B, A, indx, 24) # temp_Q['threeway-pair'] = modularity(temp_C['threeway-pair'], # B, A, indx) # # Determine best Score, Grouping best_split_ind = [k for k in temp_Q.keys()]\ [numpy.where(list(temp_Q.values())==max(temp_Q.values()))[0][0]] best_Q = temp_Q[best_split_ind] best_C = temp_C[best_split_ind] # Update master group store, info regarding availalbe splitting if (best_Q > 0) and (max(best_C) - min(best_C) > 0): counts[best_split_ind] += 1 g0 = numpy.array(best_C)==0 g1 = numpy.array(best_C)==1 g2 = numpy.array(best_C)==2 max_gn = max(groups) groups[indx[g1]] = max_gn + 1 groups[indx[g2]] = max_gn + 2 to_split[gn] = sum(g0) > 2 to_split[max_gn + 1] = sum(g1) > 2 to_split[max_gn + 2] = sum(g2) > 2 groups_history.append(groups.copy()) else: to_split[gn] = False else: to_split[gn] = False groups_history = numpy.array(groups_history).T return(groups, counts, groups_history)
5,353,485
def corrSmatFunc(df, metric='pearson-signed', simFunc=None, minN=None): """Compute a pairwise correlation matrix and return as a similarity matrix. Parameters ---------- df : pd.DataFrame (n_instances, n_features) metric : str Method for correlation similarity: pearson or spearman, optionally "signed" (e.g. pearson-signed) A "signed" similarity means that anti-correlated instances will have low similarity. simFunc : function Optionally supply an arbitrary distance function. Function takes two instances and returns their distance. minN : int Minimum number of non-NA values in order for correlation to be non-NA. Returns ------- smatDf : pd.DataFrame (n_instances, n_instances)""" if minN is None: minN = df.shape[0] if simFunc is None: if metric in ['spearman', 'pearson']: """Anti-correlations are also considered as high similarity and will cluster together""" smat = df.corr(method=metric, min_periods=minN).values**2 smat[np.isnan(smat)] = 0 elif metric in ['spearman-signed', 'pearson-signed']: """Anti-correlations are considered as dissimilar and will NOT cluster together""" smat = df.corr(method=metric.replace('-signed', ''), min_periods=minN).values smat = (smat**2 * np.sign(smat) + 1)/2 smat[np.isnan(smat)] = 0 else: raise NameError('metric name not recognized') else: ncols = df.shape[1] smat = np.zeros((ncols, ncols)) for i in range(ncols): for j in range(ncols): """Assume distance is symetric""" if i <= j: tmpdf = df.iloc[:, [i, j]] tmpdf = tmpdf.dropna() if tmpdf.shape[0] >= minN: d = simFunc(df.iloc[:, i], df.iloc[:, j]) else: d = np.nan smat[i, j] = d smat[j, i] = d return pd.DataFrame(smat, columns=df.columns, index=df.columns)
5,353,486
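The "signed" metrics map Pearson r from [-1, 1] onto [0, 1] via (r^2 * sign(r) + 1) / 2, so anti-correlated columns come out dissimilar rather than similar. A compact illustration with pandas and NumPy:

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
x = rng.normal(size=100)
df = pd.DataFrame({"a": x,
                   "b": -x + 0.1 * rng.normal(size=100),   # anti-correlated with a
                   "c": rng.normal(size=100)})              # unrelated

r = df.corr(method="pearson")
signed_sim = (r**2 * np.sign(r) + 1) / 2
print(signed_sim.round(2))   # a-b near 0, a-a exactly 1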
def update(): """Updates graph elements and source.data based on filtering""" filtered_df = select_beers() x_name = axis_map[x_axis.value] y_name = axis_map[y_axis.value] p.xaxis.axis_label = x_axis.value p.yaxis.axis_label = y_axis.value p.title.text = "%d beers selected" % len(filtered_df) color_select = color_axis_map[circle_color.value] source.data = dict( x=filtered_df[x_name], y=filtered_df[y_name], name=filtered_df["name"], brand=filtered_df["brand"], price=filtered_df["price"], container_type=filtered_df["container_type"], hierarchy_type=filtered_df["hierarchy_type"], hierarchy_subtype=filtered_df["hierarchy_subtype"], short_pack_size=filtered_df["short_pack_size"], alcohol_pct=filtered_df["alcohol_pct"], short_volume=filtered_df["short_volume"], supplier_id=filtered_df["supplier_id"], type=filtered_df["type"], oz_of_alcohol_per_dollar=filtered_df["oz_of_alcohol_per_dollar"], cost_per_oz=filtered_df["cost_per_oz"]) table_source.data = source.data transform_scale = cat_linear_color_toggle(color_select, filtered_df) c.glyph.fill_color = {"field": color_select, "transform": transform_scale} p.legend.items[0].label = {'field': color_select} if filtered_df[color_select].dtype in (float, int): bar.color_mapper = rescale_color(cmap, filtered_df) c.glyph.x = jitter('x', width=jitter_amt.value, range=p.x_range) c.glyph.y = jitter('y', width=jitter_amt.value, range=p.y_range)
5,353,487
def rank(values, axis=0, method='average', na_option='keep', ascending=True,
         pct=False):
    """
    Rank the values along the given axis.

    Ties are resolved according to `method` ('average', 'min', 'max', 'first'
    or 'dense'), missing values according to `na_option`, and `pct=True`
    returns percentile ranks instead of ordinal ranks.
    """
    if values.ndim == 1:
        f, values = _get_data_algo(values, _rank1d_functions)
        ranks = f(values, ties_method=method, ascending=ascending,
                  na_option=na_option, pct=pct)
    elif values.ndim == 2:
        f, values = _get_data_algo(values, _rank2d_functions)
        ranks = f(values, axis=axis, ties_method=method, ascending=ascending,
                  na_option=na_option, pct=pct)
    return ranks
5,353,488
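The same tie-handling semantics are exposed publicly through pandas.Series.rank (and scipy.stats.rankdata); a small example of the different methods:

import pandas as pd

s = pd.Series([7, 2, 2, 10])
print(s.rank(method="average").tolist())   # [3.0, 1.5, 1.5, 4.0]
print(s.rank(method="min").tolist())       # [3.0, 1.0, 1.0, 4.0]
print(s.rank(pct=True).tolist())           # percentile ranks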
def recover_exception_table(): """ Recover the CIE and FDE entries from the segment .eh_frame """ seg_eas = [ea for ea in idautils.Segments() if not is_invalid_ea(ea)] for seg_ea in seg_eas: seg_name = idc.get_segm_name(seg_ea) if seg_name in [".eh_frame", "__eh_frame"]: recover_frame_entries(seg_ea) break
5,353,489
def _cmpopts(x, y):
    """Compare two option names.

    The options can be of 2 forms: option_name or group/option_name. Options
    without a group always come first. Options are sorted alphabetically
    inside a group.
    """
    if '/' in x and '/' in y:
        prex = x[:x.find('/')]
        prey = y[:y.find('/')]
        if prex != prey:
            return cmp(prex, prey)
        return cmp(x, y)
    elif '/' in x:
        return 1
    elif '/' in y:
        return -1
    else:
        return cmp(x, y)
5,353,490
def ask(question, choices): """Prompt user for a choice from a list. Return the choice.""" choices_lc = [x.lower() for x in choices] user_choice = "" match = False while not match: print question user_choice = raw_input("[" + "/".join(choices) + "] ? ").strip().lower() for choice in choices_lc: if user_choice.startswith(choice): match = True break return user_choice
5,353,491
def test_config_inheritance(): """Inheritance of config and schemas.""" item = Second() assert (6.0, 2, 9.0) == (item.a, item.b, item.c) assert item._options['a'].block_propagation == False assert item._options['c'].block_propagation == False item = Second(a=5.0, b=4) assert (5.0, 4, 9.0) == (item.a, item.b, item.c) assert item._options['c'].block_propagation == False item = Third() assert (0.5, 2, 2, -1) == (item.a.a, item.a.b, item.b, item.c) assert isinstance(item.a, LambdaConfig) assert item._options['a'].block_propagation == False assert item._options['c'].block_propagation == True item = Third(a=LambdaConfig(a=0.66, b=13), d=5) assert (0.66, 13, 2, 5) == (item.a.a, item.a.b, item.b, item.d) assert item._options['c'].block_propagation == True
5,353,492
def get_and_validate_user(username, password):
    """
    Check that a user with the given username/email exists and that the
    specified password matches that user's password.

    If the user is valid, the user is returned; otherwise the corresponding
    exception is raised.
    """
    user_model = apps.get_model("users", "User")
    qs = user_model.objects.filter(Q(username=username) | Q(email=username))
    if len(qs) == 0:
        raise WrongArguments("Username or password does not matches user.")

    user = qs[0]
    if not user.check_password(password):
        raise WrongArguments("Username or password does not matches user.")

    return user
5,353,493
def process_tweet(tweet): """Process tweet function. Input: tweet: a string containing a tweet Output: tweets_clean: a list of words containing the processed tweet""" stemmer = PorterStemmer() stopwords_english = stopwords.words('english') # Remove stock market tickers like $GE tweet = re.sub(r'\$\w*', '', tweet) # Remove old style retweet text "RT" tweet = re.sub(r'^RT[\s]+', '', tweet) # Remove hyperlinks tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet) # Remove hashtags # Only removing the hash # sign from the word tweet = re.sub(r'#', '', tweet) # Tokenize tweets tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True) tweet_tokens = tokenizer.tokenize(tweet) tweets_clean = [] for word in tweet_tokens: # 1 Remove stopwords # 2 Remove punctuation if (word not in stopwords_english and word not in string.punctuation): # 3 Stemming word stem_word = stemmer.stem(word) # 4 Add it to tweets_clean tweets_clean.append(stem_word) return tweets_clean
5,353,494
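A self-contained run of the same cleaning steps on one example tweet, assuming NLTK is installed (the stopword corpus is downloaded on first use); the tweet text is made up for illustration:

import re
import string

import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer

nltk.download("stopwords", quiet=True)   # one-off corpus download

tweet = "RT @nlp_fan: Loving the new #NLP course!! https://example.com $GE"

tweet = re.sub(r"\$\w*", "", tweet)                  # stock tickers
tweet = re.sub(r"^RT[\s]+", "", tweet)               # retweet marker
tweet = re.sub(r"https?:\/\/.*[\r\n]*", "", tweet)   # hyperlinks
tweet = re.sub(r"#", "", tweet)                      # hash sign only

tokens = TweetTokenizer(preserve_case=False, strip_handles=True,
                        reduce_len=True).tokenize(tweet)
stemmer = PorterStemmer()
clean = [stemmer.stem(w) for w in tokens
         if w not in stopwords.words("english") and w not in string.punctuation]
print(clean)   # stemmed, stopword- and punctuation-free tokens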
def oracle_query_id(sender_id, nonce, oracle_id): """ Compute the query id for a sender and an oracle :param sender_id: the account making the query :param nonce: the nonce of the query transaction :param oracle_id: the oracle id """ def _int32(val): return val.to_bytes(32, byteorder='big') return hash_encode("oq", decode(sender_id) + _int32(nonce) + decode(oracle_id))
5,353,495
def knapsack_with_budget(vals: List[float], weights: List[int], budget: int,
                         cap: int) -> Set[int]:
    """
    Solves the knapsack problem (with budget) of the items with the given
    values and weights, with the given budget and capacity, in a bottom-up
    way.
    :param vals: list[float]
    :param weights: list[int]
    :param budget: int
    :param cap: int
    :return: set{int}
    """
    # Check whether the input arrays are None or empty
    if not vals:
        return set()
    # Check whether the input budget is non-negative
    if budget < 0:
        return set()
    # Check whether the input capacity is non-negative
    if cap < 0:
        return set()

    n = len(vals)
    # Initialization
    subproblems = [
        [[0.0] * (cap + 1) for _ in range(budget + 1)] for _ in range(n)
    ]
    for b in range(budget + 1):
        for x in range(cap + 1):
            if b >= 1 and weights[0] <= x:
                subproblems[0][b][x] = vals[0]
    # Bottom-up calculation
    for item in range(1, n):
        for b in range(budget + 1):
            for x in range(cap + 1):
                if b <= 0 or weights[item] > x:
                    subproblems[item][b][x] = subproblems[item - 1][b][x]
                else:
                    result_without_curr = subproblems[item - 1][b][x]
                    result_with_curr = \
                        subproblems[item - 1][b - 1][x - weights[item]] + \
                        vals[item]
                    subproblems[item][b][x] = max(result_without_curr,
                                                  result_with_curr)
    return _reconstruct(vals, weights, budget, cap, subproblems)
    # Overall running time complexity: O(n*k*W), where k is the budget and W is
    # the knapsack capacity
5,353,496
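The recurrence only takes item i when at least one unit of budget and enough capacity remain; otherwise it inherits the row above. Below is a value-only, space-reduced sketch of the same dynamic program (no reconstruction of the chosen item set), with a tiny check where the budget is the binding constraint:

def knapsack_budget_value(vals, weights, budget, cap):
    n = len(vals)
    # dp[b][x] = best value with at most b items chosen and capacity x left
    dp = [[0.0] * (cap + 1) for _ in range(budget + 1)]
    for i in range(n):
        for b in range(budget, 0, -1):               # iterate backwards: 0/1 items
            for x in range(cap, weights[i] - 1, -1):
                dp[b][x] = max(dp[b][x], dp[b - 1][x - weights[i]] + vals[i])
    return dp[budget][cap]

# 3 items, at most 2 may be taken, capacity 60: best pair is 100 + 120.
print(knapsack_budget_value([60.0, 100.0, 120.0], [10, 20, 30],
                            budget=2, cap=60))   # 220.0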
def log_mlflow(run_params: Dict, df:pd.DataFrame, df_sim:pd.DataFrame, units:dict) -> None: """ Logs result of model training and validation to mlflow Args: run_params: Dictionary containing parameters of run. Expects keys for 'experiment', 'artifact_dir', 'iteration', and 'index. df : true model test df_sim : simulation Returns: None """ mlflow.set_experiment(run_params['experiment']) #auc, recall, precision, f1 = evaluate_binary(y_true, y_pred) #roc_path = plot_roc(y_true, y_pred, '{} (auc = {:.2f})'.format(model_name, auc), run_params['artifact_dir']) #pr_path = plot_precision_recall(y_true, y_pred, # '{} (prec: {:.2f}, recall: {:.2f})'.format(model_name, precision, recall), # run_params['artifact_dir']) #model_path = save_model(model, model_name, run_params['artifact_dir']) plot_dir = os.path.join('figures','twin_sim') if not os.path.exists(plot_dir): os.mkdir(plot_dir) plot_keys = ['psi', 'V', 'beta', 'delta'] with mlflow.start_run(run_name=run_params['iteration']): for key,value in run_params.items(): mlflow.log_param(key, value) mlflow.log_artifact(track_plot(df=df, df_sim=df_sim, run_name=run_params['id'], plot_dir=plot_dir)) for key in plot_keys: mlflow.log_artifact(plot(key=key, df=df, df_sim=df_sim, run_name=run_params['id'], plot_dir=plot_dir, units=units)) metrics = evaluate(df=df, df_sim=df_sim) for key,value in metrics.items(): mlflow.log_metric(key, value) #mlflow.log_param('index', run_params['index']) #mlflow.log_param('model', model_name) #mlflow.log_metric('auc', auc) #mlflow.log_metric('recall', recall) #mlflow.log_metric('precision', precision) #mlflow.log_metric('f1', f1) #mlflow.log_artifact(model_path) #mlflow.log_artifact(roc_path) #mlflow.log_artifact(pr_path)
5,353,497
def checkvalid_main(args): """Function to quickly check if a certificate update is needed Only runs a small part of the checkgen_main script in order to test for certificate validity. This is used to ensure state in configuration management tooling. """ fqdn = platform.node() if not fqdn: raise SetupError('Missing FQDN!') try: group_info = grp.getgrnam(ACCESS_GROUP) group_gid = group_info.gr_gid except KeyError: raise SetupError('Missing group: {}'.format(ACCESS_GROUP)) format_settings = {'base': BASE_DIR, 'fqdn': fqdn} live_dir = LIVE_DIR.format(**format_settings) cert_path = os.path.join(live_dir, CERT_FILENAME) if new_cert_needed(cert_path): sys.exit(1) else: sys.exit(0)
5,353,498
def _basemap_redirect(func):
    """
    Decorator that calls the basemap version of the function of the same
    name. This must be applied as the innermost decorator.
    """
    name = func.__name__
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if getattr(self, 'name', '') == 'basemap':
            return getattr(self.projection, name)(*args, ax=self, **kwargs)
        else:
            return func(self, *args, **kwargs)
    wrapper.__doc__ = None
    return wrapper
5,353,499