content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def dnu_sspec_model(params, xdata, ydata, weights): """ Fit 1D function to cut through ACF for decorrelation bandwidth. Default function is exponential with dnu measured at half power amp = Amplitude dnu = bandwidth at 1/2 power wn = white noise spike in ACF cut """ # # if weights is None: # weights = np.ones(np.shape(ydata)) # # y = dnu_acf_model(xdata, ydata, weights, params) # # From ACF model, construct Fourier-domain model # y_flipped = y[::-1] # y = list(y) + list(y_flipped) # concatenate # y = y[0:2*len(f)-1] # # Get Fourier model # yf = np.fft(y) # yf = np.real(yf) # yf = yf[0:len(f)] # return (data - model) * weights return
5,352,200
def _write_cnv(catalog, filename, phase_mapping=None, ifx_list=None, weight_mapping=None, default_weight=0): """ Write a :class:`~obspy.core.event.Catalog` object to CNV event summary format (used as event/pick input by VELEST program). .. warning:: This function should NOT be called directly, it registers via the :meth:`~obspy.core.event.Catalog.write` method of an ObsPy :class:`~obspy.core.event.Catalog` object, call this instead. :type catalog: :class:`~obspy.core.event.Catalog` :param catalog: Input catalog for CNV output. :type filename: str or file :param filename: Filename to write or open file-like object. :type phase_mapping: dict :param phase_mapping: Mapping of phase hints to "P" or "S". CNV format only uses a single letter phase code (either "P" or "S"). If not specified the following default mapping is used: 'p', 'P', 'Pg', 'Pn', 'Pm' will be mapped to "P" and 's', 'S', 'Sg', 'Sn', 'Sm' will be mapped to "S". :type ifx_list: list of :class:`~obspy.core.event.ResourceIdentifier` :param ifx_list: List of events for which the 'IFX' flag should be set (used in VELEST to fix the y coordinate of the hypocenter). :type weight_mapping: list of float :param weight_mapping: Mapping of pick uncertainties to integer weights. (Sorted) list of floats of boundary uncertainties. If uncertainty of pick is lower than the first entry of the list then a weight of 0 is assigned. If it is larger than the first entry, but smaller than the second entry a weight of 1 is assigned, and so on. The list of uncertainty boundaries should not contain more than 9 entries because the integer weight is restricted to a single digit. If not specified all picks will be output with weight `default_weight`. :type default_weight: int :param default_weight: Default weight to use when pick has no timing uncertainty and thus can not be mapped using `weight_mapping` parameter. Default weight should not be larger than 9, as the weight is represented as a single digit.
""" # Check phase mapping or use default one if phase_mapping is None: phase_mapping = {'p': "P", 'P': "P", 'Pg': "P", 'Pn': "P", 'Pm': "P", 's': "S", 'S': "S", 'Sg': "S", 'Sn': "S", 'Sm': "S"} else: values = set(phase_mapping.values()) values.update(("P", "S")) if values != set(("P", "S")): msg = ("Values of phase mapping should only be 'P' or 'S'") raise ValueError(msg) if ifx_list is None: ifx_list = [] if weight_mapping is None: weight_mapping = [] else: if list(weight_mapping) != sorted(weight_mapping): msg = ("List of floats in weight mapping must be sorted in " "ascending order.") raise ValueError(msg) out = [] for event in catalog: o = event.preferred_origin() or event.origins[0] m = event.preferred_magnitude() or event.magnitudes[0] out_ = "%s %5.2f %7.4f%1s %8.4f%1s%7.2f%7.2f%2i\n" cns = o.latitude >= 0 and "N" or "S" cew = o.longitude >= 0 and "E" or "W" if event.resource_id in ifx_list: ifx = 1 else: ifx = 0 out_ = out_ % (o.time.strftime("%y%m%d %H%M"), o.time.second + o.time.microsecond / 1e6, abs(o.latitude), cns, abs(o.longitude), cew, o.depth / 1e3, m.mag, ifx) # assemble phase info picks = [] for p in event.picks: # map uncertainty to integer weight if p.time_errors.upper_uncertainty is not None and \ p.time_errors.lower_uncertainty is not None: uncertainty = p.time_errors.upper_uncertainty + \ p.time_errors.lower_uncertainty else: uncertainty = p.time_errors.uncertainty if uncertainty is None: msg = ("No pick time uncertainty, pick will be mapped to " "default integer weight (%s).") % default_weight warnings.warn(msg) weight = default_weight else: weight = bisect_right(weight_mapping, uncertainty) if weight > 9: msg = ("Integer weight for pick is greater than 9. " "This is not compatible with the single-character " "field for pick weight in CNV format." "Using 9 as integer weight.") warnings.warn(msg) weight = 9 # map phase hint phase = phase_mapping.get(p.phase_hint, None) if phase is None: msg = "Skipping pick (%s) with unmapped phase hint: %s" msg = msg % (p.waveform_id.get_seed_string(), p.phase_hint) warnings.warn(msg) continue station = p.waveform_id.station_code if len(station) > 4: msg = ("Station code with more than 4 characters detected. " "Only the first 4 characters will be used in output.") warnings.warn(msg) station = station[:4] dt = "%6.2f" % (p.time - o.time) if len(dt) != 6: msg = ("Problem with pick (%s): Calculated travel time '%s' " "does not fit in the '%%6.2f' fixed format field. " "Skipping this pick.") msg = msg % (p.waveform_id.get_seed_string(), dt) warnings.warn(msg) continue picks.append("".join([station.ljust(4), phase, str(weight), dt])) while len(picks) > 6: next_picks, picks = picks[:6], picks[6:] out_ += "".join(next_picks) + "\n" if picks: out_ += "".join(picks) + "\n" out.append(out_) if out: out = "\n".join(out + [""]) else: msg = "No event/pick information, writing empty CNV file." warnings.warn(msg) # Open filehandler or use an existing file like object. if not hasattr(filename, "write"): file_opened = True fh = open(filename, "wb") else: file_opened = False fh = filename fh.write(out.encode()) # Close if a file has been opened by this function. if file_opened is True: fh.close()
5,352,201
def _check_draw_graph(graph_file): """Check whether the specified graph file should be redrawn. Currently we use the following heuristic: (1) if graph is older than N minutes, redraw it; (2) if admin has active session(s), redraw on every cron run (we detect this by ajax active timestamp). We could also redraw if an interesting parameter has changed (user or s2s count, license limits, timezones, etc). But because these entail RRD accesses, we just use the simpler heuristic above. """ now = datetime.datetime.utcnow() # consider ajax "off-line" if timestamp older than this (or negative) ajax_limit = datetime.timedelta(0, 5*60, 0) # XXX: constants? # redraw graphs if graph age below zero or over limit below graph_zero = datetime.timedelta(0, 0, 0) graph_maxage = datetime.timedelta(0, 15*60, 0) # XXX: constants? if helpers.check_marker_file(constants.WEBUI_ADMIN_ACTIVE_TIMESTAMP): dt = helpers.read_datetime_marker_file(constants.WEBUI_ADMIN_ACTIVE_TIMESTAMP) diff = now - dt if diff < ajax_limit: # ajax active, draw _log.info('ajax active, redraw graph %s' % graph_file) return True else: # fall through, check graph file pass if os.path.exists(graph_file): mtime = datetime.datetime.utcfromtimestamp(os.stat(graph_file).st_mtime) diff = now - mtime if (diff < graph_zero) or (diff > graph_maxage): # bogus or too old, redraw _log.info('graph too old, redraw graph %s' % graph_file) return True else: _log.info('graph not too old, skipping redraw for %s' % graph_file) return False # no graph file, redraw always _log.info('graph does not exist, redraw graph %s' % graph_file) return True
5,352,202
def random_shadow(image): """ Add a shadow to the image at a random location. Random shadows are meant to help the convolution model learn lanes and lane-curvature patterns effectively in dissimilar places. """ if np.random.rand() < 0.5: # (x1, y1) and (x2, y2) forms a line # xm, ym gives all the locations of the image x1, y1 = image.shape[1] * np.random.rand(), 0 x2, y2 = image.shape[1] * np.random.rand(), image.shape[0] xm, ym = np.mgrid[0:image.shape[0], 0:image.shape[1]] mask = np.zeros_like(image[:, :, 1]) mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1 # choose which side should have the shadow and darken it cond = mask == np.random.randint(2) s_ratio = np.random.uniform(low=0.2, high=0.5) # scale the Lightness channel in HLS (Hue, Lightness, Saturation) hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS) hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB) else: return image
5,352,203
def build_channel_header(type, tx_id, channel_id, timestamp, epoch=0, extension=None, tls_cert_hash=None): """Build channel header. Args: type (common_pb2.HeaderType): type tx_id (str): transaction id channel_id (str): channel id timestamp (grpc.timestamp): timestamp epoch (int): epoch extension: extension Returns: common_proto.Header instance """ channel_header = common_pb2.ChannelHeader() channel_header.type = type channel_header.version = 1 channel_header.channel_id = proto_str(channel_id) channel_header.tx_id = proto_str(tx_id) channel_header.epoch = epoch channel_header.timestamp.CopyFrom(timestamp) if tls_cert_hash: channel_header.tls_cert_hash = tls_cert_hash if extension: channel_header.extension = extension return channel_header
5,352,204
def get_character(data, index): """Return one byte from data as a signed char. Args: data (list): raw data from sensor index (int): index entry from which to read data Returns: int: extracted signed char value """ result = data[index] if result > 127: result -= 256 return result
5,352,205
def df_to_embed(df, img_folder): """ Extract image embeddings, sentence embeddings and concatenated embeddings from dataset and image folders :param df: dataset file to use :param img_folder: folder where the corresponding images are stored :return: tuple containing sentence embeddings, image embeddings, concatenated embeddings """ sent_embed = extract_all_sentences(df) img_embed = extract_all_images("xception", img_folder) concat = np.concatenate((sent_embed, img_embed), axis=1) return sent_embed, img_embed, concat
5,352,206
def load_release_data(): """ Load the release data. This prints a warning if the release data file already contains data. :return: """ filen = path.join(PATH_ROOT, PATH_RELEASE_DATA) try: with open(filen, "rb") as in_file: data = pickle.load(in_file) if data: print_warning("You are continuing an existing release. If this is " "an error, delete the release data file and try " "again. " "Filename = %s" % filen) return data except: return {}
5,352,207
def generate_player_attributes(): """ Return a list of 53 dicts with player attributes that map to Player model fields. """ # Get player position distribution position_dist = get_position_distribution() # Get player attribute distribution attr_dist = get_attribute_distribution() # Get player names from CSV player_names = read_player_names_from_csv() player_list = [] # Generate 53 players per team for roster_spot in range(0, 53): player = {} # Set player names from parsed CSV data player['first_name'] = player_names[roster_spot][0] player['last_name'] = player_names[roster_spot][1] # Only assign player a position that isn't filled on the roster for pos, dist in position_dist.items(): if dist[0] < dist[1]: player['position'] = pos # Pick a random prototype based on position player['prototype'] = random.choice(list(attr_dist[pos])) dist[0] += 1 break else: continue # Assign player ages based on normal distribution player['age'] = int(random.gauss(1, 0.1) * random.randint(25, 35)) default_rookie_age = 22 player['experience'] = player['age'] - default_rookie_age if player['age'] < 22: player['experience'] = 0 # Generate ratings based on weights and normal distribution base_rating = int(random.gauss(70, 20)) position, prototype = player['position'], player['prototype'] pos_weights = attr_dist[position][prototype] # Apply position and prototype weights after_pos_weights = [] for pw in range(len(pos_weights)): after_pos_weights.append(pos_weights[pw] + base_rating) # Sigmas for standard deviation sigmas = [20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] final_ratings = list(map(random.gauss, after_pos_weights, sigmas)) i = 0 calc_overall = [] # Assign final ratings to player key for attribute in ('potential', 'confidence', 'iq', 'speed', 'strength', 'agility', 'awareness', 'stamina', 'injury', 'run_off', 'pass_off', 'special_off', 'run_def', 'pass_def', 'special_def'): rating = int(final_ratings[i]) if rating > 99: rating = 99 elif rating < 0: rating = 0 player[attribute] = rating calc_overall.append(rating) i += 1 # Calculate overall rating and add player to list player['overall_rating'] = int(sum(calc_overall) / len(calc_overall)) player_list.append(player) return player_list
5,352,208
def colour_from_loadings(loadings, maxLoading=None, baseColor="#FF0000"): """Computes colors given loading values. Given an array of loading values (loadings), returns an array of colors that graphviz can understand that can be used to colour the nodes. The node with the greatest loading uses baseColor, and a node with zero loading uses white (#FFFFFF). This is achieved through clever sneaky use of the alpha channel.""" if maxLoading is None: maxLoading = max(loadings) return [baseColor + "{:02x}".format(int(loading / maxLoading * 255)) for loading in loadings]
5,352,209
def mock_dao(monkeypatch): """Create a mock database table.""" _hazard_1 = RAMSTKHazardRecord() _hazard_1.revision_id = 1 _hazard_1.function_id = 1 _hazard_1.hazard_id = 1 _hazard_1.assembly_effect = "" _hazard_1.assembly_hri = 20 _hazard_1.assembly_hri_f = 4 _hazard_1.assembly_mitigation = "" _hazard_1.assembly_probability = TEST_PROBS["A"] _hazard_1.assembly_probability_f = TEST_PROBS["B"] _hazard_1.assembly_severity = "Major" _hazard_1.assembly_severity_f = "Medium" _hazard_1.function_1 = "uf1*uf2" _hazard_1.function_2 = "res1/ui1" _hazard_1.function_3 = "" _hazard_1.function_4 = "" _hazard_1.function_5 = "" _hazard_1.potential_cause = "" _hazard_1.potential_hazard = "" _hazard_1.remarks = "" _hazard_1.result_1 = 0.0 _hazard_1.result_2 = 0.0 _hazard_1.result_3 = 0.0 _hazard_1.result_4 = 0.0 _hazard_1.result_5 = 0.0 _hazard_1.system_effect = "" _hazard_1.system_hri = 20 _hazard_1.system_hri_f = 20 _hazard_1.system_mitigation = "" _hazard_1.system_probability = TEST_PROBS["A"] _hazard_1.system_probability_f = TEST_PROBS["C"] _hazard_1.system_severity = "Medium" _hazard_1.system_severity_f = "Medium" _hazard_1.user_blob_1 = "" _hazard_1.user_blob_2 = "" _hazard_1.user_blob_3 = "" _hazard_1.user_float_1 = 1.5 _hazard_1.user_float_2 = 0.8 _hazard_1.user_float_3 = 0.0 _hazard_1.user_int_1 = 2 _hazard_1.user_int_2 = 0 _hazard_1.user_int_3 = 0 _hazard_2 = RAMSTKHazardRecord() _hazard_2.revision_id = 1 _hazard_2.function_id = 1 _hazard_2.hazard_id = 2 _hazard_2.assembly_effect = "" _hazard_2.assembly_hri = 20 _hazard_2.assembly_hri_f = 4 _hazard_2.assembly_mitigation = "" _hazard_2.assembly_probability = TEST_PROBS["A"] _hazard_2.assembly_probability_f = TEST_PROBS["B"] _hazard_2.assembly_severity = "Major" _hazard_2.assembly_severity_f = "Medium" _hazard_2.function_1 = "uf1*uf2" _hazard_2.function_2 = "res1/ui1" _hazard_2.function_3 = "" _hazard_2.function_4 = "" _hazard_2.function_5 = "" _hazard_2.potential_cause = "" _hazard_2.potential_hazard = "" _hazard_2.remarks = "" _hazard_2.result_1 = 0.0 _hazard_2.result_2 = 0.0 _hazard_2.result_3 = 0.0 _hazard_2.result_4 = 0.0 _hazard_2.result_5 = 0.0 _hazard_2.system_effect = "" _hazard_2.system_hri = 20 _hazard_2.system_hri_f = 20 _hazard_2.system_mitigation = "" _hazard_2.system_probability = TEST_PROBS["A"] _hazard_2.system_probability_f = TEST_PROBS["C"] _hazard_2.system_severity = "Medium" _hazard_2.system_severity_f = "Medium" _hazard_2.user_blob_1 = "" _hazard_2.user_blob_2 = "" _hazard_2.user_blob_3 = "" _hazard_2.user_float_1 = 1.5 _hazard_2.user_float_2 = 0.8 _hazard_2.user_float_3 = 0.0 _hazard_2.user_int_1 = 2 _hazard_2.user_int_2 = 0 _hazard_2.user_int_3 = 0 dao = MockDAO() dao.table = [ _hazard_1, _hazard_2, ] yield dao
5,352,210
def determineDocument(pdf): """ Scans the pdf document for certain text lines and determines the type of investment vehicle traded""" if 'turbop' in pdf or 'turboc' in pdf: return 'certificate' elif 'minil' in pdf: return 'certificate' elif 'call' in pdf or 'put' in pdf: return 'warrant' else: return 'stock'
5,352,211
def setup_parser() -> argparse.ArgumentParser: """Set default values and handle arg parser""" parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="wlanpi-core provides backend services for the WLAN Pi. Read the manual with: man wlanpi-core", ) parser.add_argument( "--reload", dest="livereload", action="store_true", default=False ) parser.add_argument( "--version", "-V", "-v", action="version", version=f"{__version__}" ) return parser
5,352,212
def deserialize_api_types(class_name: str, d: dict) -> Any: """ Deserializes an API type. Allowed classes are defined in: * :mod:`maestral.core` * :mod:`maestral.model` * :mod:`maestral.exceptions` :param class_name: Name of class to deserialize. :param d: Dictionary of serialized class. :returns: Deserialized object. """ bytes_message = serpent.tobytes(d["object"]) check_signature(d["signature"], bytes_message) return pickle.loads(bytes_message)
5,352,213
def runner() -> CliRunner: """Fixture for invoking command-line interfaces.""" return testing.CliRunner()
5,352,214
def cal_quant_model_accuracy(model, gpu_index, val_loader, args, config_file, record_file): """Save the quantized model and infer the accuracy of the quantized model.""" torch.save({'state_dict': model.state_dict()}, os.path.join(TMP, 'model_best.pth.tar')) print('==> AMCT step3: save_quant_retrain_model..') quantized_pb_path = os.path.join(OUTPUTS, 'ResNet101') amct.save_quant_retrain_model( config_file, model, record_file, quantized_pb_path, get_input_data([(1, 3, SIZE, SIZE)], model), input_names=['input'], output_names=['output'], dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}}) print("=> validating fake quant model") quant_top1, quant_top5 = validate_onnx( val_loader, ''.join([quantized_pb_path, '_fake_quant_model.onnx']), args.print_freq) return quant_top1, quant_top5
5,352,215
def give_color_to_direction_dynamic(dir): """ Assigns a color to the direction (dynamic-defined colors) Parameters -------------- dir Direction Returns -------------- col Color """ dir = 0.5 + 0.5 * dir norm = mpl.colors.Normalize(vmin=0, vmax=1) nodes = [0.0, 0.01, 0.25, 0.4, 0.45, 0.55, 0.75, 0.99, 1.0] colors = ["deepskyblue", "skyblue", "lightcyan", "lightgray", "gray", "lightgray", "mistyrose", "salmon", "tomato"] cmap = mpl.colors.LinearSegmentedColormap.from_list("mycmap2", list(zip(nodes, colors))) #cmap = cm.plasma m = cm.ScalarMappable(norm=norm, cmap=cmap) rgba = m.to_rgba(dir) r = get_string_from_int_below_255(math.ceil(rgba[0] * 255.0)) g = get_string_from_int_below_255(math.ceil(rgba[1] * 255.0)) b = get_string_from_int_below_255(math.ceil(rgba[2] * 255.0)) return "#" + r + g + b
5,352,216
def pfilter(plugins, plugin_type=Analyser, **kwargs): """ Filter plugins by different criteria """ if isinstance(plugins, models.Plugins): plugins = plugins.plugins elif isinstance(plugins, dict): plugins = plugins.values() logger.debug('#' * 100) logger.debug('plugin_type {}'.format(plugin_type)) if plugin_type: if isinstance(plugin_type, PluginMeta): plugin_type = plugin_type.__name__ try: plugin_type = plugin_type[0].upper() + plugin_type[1:] pclass = globals()[plugin_type] logger.debug('Class: {}'.format(pclass)) candidates = filter(lambda x: isinstance(x, pclass), plugins) except KeyError: raise models.Error('{} is not a valid type'.format(plugin_type)) else: candidates = plugins if 'name' in kwargs: kwargs['name'] = kwargs['name'].lower() logger.debug(candidates) def matches(plug): res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items()) logger.debug("matching {} with {}: {}".format(plug.name, kwargs, res)) return res if kwargs: candidates = filter(matches, candidates) return candidates
5,352,217
def tmux_session_detection(session_name: str) -> bool: """ Function checks if session already exists. """ cmd = ['tmux', 'has-session', '-t', session_name] result = subprocess.call(cmd, stderr=subprocess.DEVNULL) if result == 0: return True else: return False
5,352,218
def prune_arms(active_arms, sample_arms, verbose=False): """Remove all arms from ``active_arms`` that have an allocation less than two standard deviations below the current best arm. :param active_arms: list of coordinate-tuples corresponding to arms/cohorts currently being sampled :type active_arms: list of tuple :param sample_arms: all arms from prev and current cohorts, keyed by coordinate-tuples Arm refers specifically to a :class:`moe.bandit.data_containers.SampleArm` :type sample_arms: dict :param verbose: whether to print status messages to stdout :type verbose: bool :return: list of coordinate-tuples that are the *well-performing* members of ``active_arms`` length is at least 1 and at most ``len(active_arms)`` :rtype: list of tuple """ # Find all active sample arms active_sample_arms = {} for active_arm in active_arms: active_sample_arms[active_arm] = sample_arms[active_arm] # Find the best arm # Our objective is a relative CTR, so status_quo is 0.0; we # know that the best arm cannot be worse than status_quo best_arm_val = 0.0 for sample_arm_point, sample_arm in active_sample_arms.iteritems(): arm_value, arm_variance = objective_function( sample_arm, sample_arms[tuple(STATUS_QUO_PARAMETER)], ) if arm_value > best_arm_val: best_arm_val = arm_value # Remove all arms that are more than two standard deviations worse than the best arm pruned_arms = copy.copy(active_arms) for sample_arm_point, sample_arm in active_sample_arms.iteritems(): arm_value, arm_variance = objective_function( sample_arm, sample_arms[tuple(STATUS_QUO_PARAMETER)], ) if sample_arm.total > 0 and arm_value + 2.0 * numpy.sqrt(arm_variance) < best_arm_val: if verbose: print "Removing underperforming arm: {0}".format(sample_arm_point) pruned_arms.remove(sample_arm_point) return pruned_arms
5,352,219
def prepare_string(dist, digits=None, exact=False, tol=1e-9, show_mask=False, str_outcomes=False): """ Prepares a distribution for a string representation. Parameters ---------- dist : distribution The distribution to be stringified. digits : int or None The probabilities will be rounded to the specified number of digits, using NumPy's around function. If `None`, then no rounding is performed. Note, if the number of digits is greater than the precision of the floats, then the resultant number of digits will match that smaller precision. exact : bool If `True`, then linear probabilities will be displayed, even if the underlying pmf contains log probabilities. The closest rational fraction within a tolerance specified by `tol` is used as the display value. tol : float If `exact` is `True`, then the probabilities will be displayed as the closest rational fraction within `tol`. show_mask : bool If `True`, show the mask for marginal distributions. str_outcomes : bool If `True`, then attempt to convert outcomes which are tuples to just strings. This is only a dislplay technique. Returns ------- pmf : sequence The formatted pmf. This could be a NumPy array (possibly rounded) or a list of Fraction instances. outcomes : sequence The formated outcomes. base : str or float The base of the formatted pmf. colsep : str The column separation for printing. max_length : int The length of the largest outcome, as a string. pstr : str A informative string representing the probability of an outcome. This will be 'p(x)' xor 'log p(x)'. """ colsep = ' ' # Create outcomes with wildcards, if desired and possible. if show_mask: if not dist.is_joint(): msg = '`show_mask` can be `True` only for joint distributions' raise ditException(msg) if show_mask not in [True, False]: # The user is specifying what the mask should look like. wc = show_mask else: wc = '*' ctor = dist._outcome_ctor def outcome_wc(outcome): """ Builds the wildcarded outcome. """ i = 0 e = [] for is_masked in dist._mask: if is_masked: symbol = wc else: symbol = outcome[i] i += 1 e.append(symbol) e = ctor(e) return e outcomes = map(outcome_wc, dist.outcomes) else: outcomes = dist.outcomes # Convert outcomes to strings, if desired and possible. if str_outcomes: if not dist.is_joint(): msg = '`str_outcomes` can be `True` only for joint distributions' raise ditException(msg) try: # First, convert the elements of the outcome to strings. outcomes_ = [map(str, outcome) for outcome in outcomes] # Now convert the entire outcome to a string outcomes_ = map(lambda o: ''.join(o), outcomes_) # Force the iterators to expand in case there are exceptions. outcomes = list(outcomes_) except: outcomes = map(str, outcomes) else: outcomes = map(str, outcomes) outcomes = list(outcomes) if len(outcomes): max_length = max(map(len, outcomes)) else: max_length = 0 # 1) Convert to linear probabilities, if necessary. if exact: # Copy to avoid precision loss d = dist.copy(base='linear') else: d = dist # 2) Round, if necessary, possibly after converting to linear probabilities. if digits is not None and digits is not False: pmf = d.pmf.round(digits) else: pmf = d.pmf # 3) Construct fractions, if necessary. if exact: pmf = [approximate_fraction(x, tol) for x in pmf] if d.is_log(): pstr = 'log p(x)' else: pstr = 'p(x)' base = d.get_base() return pmf, outcomes, base, colsep, max_length, pstr
5,352,220
def verifier(func): """ Creates a `Verifier` by given specifier. Parameters ---------- func: callable, [callable], (str, callable), [(str, callable)] The specifier of `Verifier` which can take various forms and determines the attributes and behaviors of `Verifier`. When it is declared as a list having a specifier, the `Verifier` deals with an input as iterable object and tries to apply inner verifying function to each value. If a tuple of string and callable is given, the string is used as the name of the `Verifier`. Otherwise, its name is determined by `__name__` attribute of the callable object. The callable should be a function taking an input and returns boolean value representing the result of the verification. Returns ------- Verifier Created `Verifier`. """ func, is_iter = (func[0], True) if isinstance(func, list) else (func, False) if isinstance(func, Verifier): return func elif isinstance(func, Variable): return func._verifier elif isinstance(func, partial): ff, n, t_in, t_out, args, kwargs = analyze_specifier(func, (), {}) return Verifier(n, func, is_iter, *args, **kwargs) elif callable(func): return Verifier(func.__name__, func, is_iter) elif isinstance(func, tuple): ff, n, t_in, t_out, args, kwargs = analyze_specifier(func[1], (), {}) return Verifier(func[0], func[1], is_iter, *args, **kwargs) else: raise TypeError("Given value is not valid Verifier specifier.")
5,352,221
def obtain_celeba_images(n_people:int) -> pd.DataFrame: """ Unique labels: 10,177 It is expected for the structure to be as following: <CELEBA_PATH>/ ├─ identity_CelebA.txt ├─ img_align_celeba/ ├─<images> * 'identity_CelebA.txt' is the downloaded identity text annotations without change from the dataset. * 'img_align_celeba' is the folder with all the downloaded images. @returns a pandas DataFrame of a n size sample with the following cols: - path: path to the location of the image - label: name of the person within the image """ df = pd.read_csv( os.path.join(CELEBA_PATH, "identity_CelebA.txt"), names = ["path", "label"], sep=' ' ) # Extract according to unique number of people df = extract_people_images(df, n_people) root = os.path.join(CELEBA_PATH, "img_align_celeba/") df["path"] = root + df["path"] return df
5,352,222
def assert_equal( actual: List[Literal["y4", "y3", "y2", "y1"]], desired: List[Literal["y4", "y3", "y2", "y1"]], ): """ usage.statsmodels: 1 """ ...
5,352,223
def get_add_diff_file_list(git_folder): """List of new files. """ repo = Repo(str(git_folder)) repo.git.add("sdk") output = repo.git.diff("HEAD", "--name-only") return output.splitlines()
5,352,224
def _save_qr_code(qr_code: str, filepath: str = qr_path, filename: str = qr_name) -> str: """Use it for save QrCode from web.whatsapp.com (copied as string) to PNG file to your path and your filename. :param qr_code: QrCode string from web.whatsapp.com. :param filepath: Your path for saving file. :param filename: Your name for file. :return: Absolute path to saved file. """ path = os.path.join(filepath, filename) from PIL import Image background = Image.new('RGB', (background_width, background_height), color='white') img = qrcode.make(qr_code) img_w, img_h = img.size bg_w, bg_h = background.size offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2) background.paste(img, offset) background.save(path) return path
5,352,225
def has_read_perm(user, group, is_member, is_private): """ Return True if the user has permission to *read* Articles, False otherwise. """ if (group is None) or (is_member is None) or is_member(user, group): return True if (is_private is not None) and is_private(group): return False return True
5,352,226
def _create_docker_build_ctx( launch_project: LaunchProject, dockerfile_contents: str, ) -> str: """Creates build context temp dir containing Dockerfile and project code, returning path to temp dir.""" directory = tempfile.mkdtemp() dst_path = os.path.join(directory, "src") assert launch_project.project_dir is not None shutil.copytree( src=launch_project.project_dir, dst=dst_path, symlinks=True, ) shutil.copy( os.path.join(os.path.dirname(__file__), "templates", "_wandb_bootstrap.py"), os.path.join(directory), ) if launch_project.python_version: runtime_path = os.path.join(dst_path, "runtime.txt") with open(runtime_path, "w") as fp: fp.write(f"python-{launch_project.python_version}") # TODO: we likely don't need to pass the whole git repo into the container # with open(os.path.join(directory, ".dockerignore"), "w") as f: # f.write("**/.git") with open(os.path.join(directory, _GENERATED_DOCKERFILE_NAME), "w") as handle: handle.write(dockerfile_contents) return directory
5,352,227
def normalize_df(dataframe, columns): """ Normalizes all passed columns to zero mean and unit variance, returns the full data set :param dataframe: the dataframe to normalize :param columns: all columns in the df that should be normalized :return: the data, centered around 0 and divided by its standard deviation """ for column in columns: data = dataframe.loc[:, column].values sd = np.std(data) mean = np.mean(data) dataframe.loc[:, column] = (data - mean) / sd return dataframe
5,352,228
def PeekTrybotImage(chromeos_root, buildbucket_id): """Get the artifact URL of a given tryjob. Args: buildbucket_id: buildbucket-id chromeos_root: root dir of chrome os checkout Returns: (status, url) where status can be 'pass', 'fail', 'running', and url looks like: gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789 """ command = ( 'cros buildresult --report json --buildbucket-id %s' % buildbucket_id) rc, out, _ = RunCommandInPath(chromeos_root, command) # Current implementation of cros buildresult returns fail when a job is still # running. if rc != 0: return ('running', None) results = json.loads(out)[buildbucket_id] return (results['status'], results['artifacts_url'].rstrip('/'))
5,352,229
def frozenset_code_repr(value: frozenset) -> CodeRepresentation: """ Gets the code representation for a frozenset. :param value: The frozenset. :return: It's code representation. """ return container_code_repr("frozenset({", "})", ((el,) for el in value), lambda el: el)
5,352,230
def SRMI(df, n): """ Modified MI indicator (SRMI) Args: df (pandas.DataFrame): K-line (candlestick) series in DataFrame format n (int): parameter n Returns: pandas.DataFrame: the returned DataFrame contains 2 columns, "a" and "mi", representing the A value and the MI value respectively Example:: # Get the modified MI indicator for the CFFEX.IF1903 contract from tqsdk import TqApi, TqSim from tqsdk.ta import SRMI api = TqApi(TqSim()) klines = api.get_kline_serial("CFFEX.IF1903", 24 * 60 * 60) srmi = SRMI(klines, 9) print(list(srmi["a"])) print(list(srmi["mi"])) # The expected output looks like this: [..., 0.10362397961836425, 0.07062591892459567, -0.03341929372138309, ...] [..., 0.07583104758041452, 0.0752526999519902, 0.06317803398828206, ...] """ new_df = pd.DataFrame() new_df["a"] = np.where(df["close"] < df["close"].shift(n), (df["close"] - df["close"].shift(n)) / df["close"].shift(n), np.where(df["close"] == df["close"].shift(n), 0, (df["close"] - df["close"].shift(n)) / df["close"])) new_df["mi"] = tafunc.sma(new_df["a"], n, 1) return new_df
5,352,231
def init_db(database_url: PostgresDsn) -> None: """ Runs the migrations and creates all of the database objects. """ alembic_config = get_alembic_config(database_url) upgrade_db(alembic_config)
5,352,232
def get_purchase_rows(*args, **kwargs): """ Get a list of purchase rows :param args: :param kwargs: :return: """ return db_instance.get_rows(Purchase, *args, **kwargs)
5,352,233
def depart_delete(request): """ Delete a department """ nid = request.GET.get('nid') models.Department.objects.filter(id=nid).delete() return redirect("/depart/list/")
5,352,234
def create_condor_scheduler(name, host, username=None, password=None, private_key_path=None, private_key_pass=None): """ Creates a new condor scheduler Args: name (str): The name of the scheduler host (str): The hostname or IP address of the scheduler username (str, optional): The username to use when connecting to the scheduler password (str, optional): The password for the username private_key_path (str, optional): The path to the location of the SSH private key file private_key_pass (str, optional): The passphrase for the private key Returns: The newly created condor scheduler Note: The newly created condor scheduler object is not committed to the database. """ condor_scheduler = CondorScheduler(name, host, username=username, password=password, private_key_path=private_key_path, private_key_pass=private_key_pass) return condor_scheduler
5,352,235
def edit_stack( action : Optional[List[str]] = None, debug : bool = False, **kw ): """ Open docker-compose.yaml or .env for editing """ from meerschaum.config._edit import general_edit_config if action is None: action = [] files = { 'compose' : STACK_COMPOSE_PATH, 'docker-compose' : STACK_COMPOSE_PATH, 'docker-compose.yaml' : STACK_COMPOSE_PATH, } return general_edit_config(action=action, files=files, default='compose', debug=debug)
5,352,236
def check_compare_list_val(list_comp, name=None): """ Compares list values. Yields warning, if list values are different Parameters ---------- list_comp : list (of floats) List of float values for comparison name : str (optional) Name of list / parameters """ for i in range(len(list_comp) - 1): val_1 = list_comp[i] val_2 = list_comp[i + 1] if val_1 is not None and val_2 is not None: if val_1 != val_2: msg = 'Value %d at index %d is different from value ' \ '%d at index %d for list %s' % (val_1, i, val_2, i + 1, name) warnings.warn(msg) else: if (val_1 is None and val_2 is not None or (val_1 is not None and val_2 is None)): msg = 'At least one value in %s is None, while at least ' \ 'one other value is not None' % (name) warnings.warn(msg)
5,352,237
def setenv(): """ Set some environment variables for basic operation. """ # Set version string os.environ['RQ_VERSION'] = "0.2.0" # Set vendor string os.environ['RQ_VENDOR'] = "UNIT" # Standardise some environment variables across systems. # Usernames will always be stored as lowercase for compatibility. if platform.system() == "Windows": # Windows #os.environ['RQ_RUNNING_OS'] = "Windows" if not 'RQ_USERNAME' in os.environ: os.environ['RQ_USERNAME'] = os.environ['USERNAME'].lower() userHome = os.environ['USERPROFILE'] elif platform.system() == "Darwin": # Mac OS #os.environ['RQ_RUNNING_OS'] = "MacOS" if not 'RQ_USERNAME' in os.environ: os.environ['RQ_USERNAME'] = os.environ['USER'].lower() userHome = os.environ['HOME'] else: # Linux #os.environ['RQ_RUNNING_OS'] = "Linux" if not 'RQ_USERNAME' in os.environ: os.environ['RQ_USERNAME'] = os.environ['USER'].lower() userHome = os.environ['HOME'] # Check for environment awareness try: os.environ['RQ_ENV'] except KeyError: os.environ['RQ_ENV'] = "STANDALONE" # Set up basic paths os.environ['RQ_DATABASE'] = 'J:/rq_database' #os.environ['RQ_BASEDIR'] = os.getcwd() os.environ['RQ_CONFIGDIR'] = os.path.join(os.environ['RQ_DATABASE'], 'config') os.environ['RQ_USERPREFS'] = os.path.join(os.environ['RQ_CONFIGDIR'], 'users', os.environ['RQ_USERNAME']) # User prefs stored on server #os.environ['RQ_USERPREFS'] = os.path.join(userHome, '.renderqueue') # User prefs stored in user home folder os.environ['RQ_HISTORY'] = os.path.join(os.environ['RQ_USERPREFS'], 'history') appendSysPaths()
5,352,238
def dot_to_underscore(instring): """Replace dots with underscores""" return instring.replace(".", "_")
5,352,239
def get_birthday_weekday(current_weekday: int, current_day: int, birthday_day: int) -> int: """Return the day of the week it will be on birthday_day, given that the day of the week is current_weekday and the day of the year is current_day. current_weekday is the current day of the week and is in the range 1-7, indicating whether today is Sunday (1), Monday (2), ..., Saturday (7). current_day and birthday_day are both in the range 1-365. >>> get_birthday_weekday(5, 3, 4) 6 >>> get_birthday_weekday(5, 3, 116) 6 >>> get_birthday_weekday(6, 116, 3) 5 """ days_diff = days_difference(current_day, birthday_day) return get_weekday(current_weekday, days_diff)
5,352,240
def matlab_kcit(X: np.ndarray, Y: np.ndarray, Z: np.ndarray, seed: int = None, matlab_engine_instance=None, installed_at=None): """Python-wrapper for original implementation of KCIT by Zhang et al. (2011) References ---------- Zhang, K., Peters, J., Janzing, D., & Schölkopf, B. (2011). Kernel-based Conditional Independence Test and Application in Causal Discovery. In Proceedings of the 27th Conference on Uncertainty in Artificial Intelligence (pp. 804–813). Corvallis, Oregon: AUAI Press. """ import matlab.engine not_given = matlab_engine_instance is None try: if not_given: matlab_engine_instance = matlab.engine.start_matlab() dir_at = os.path.expanduser(installed_at) matlab_engine_instance.addpath(matlab_engine_instance.genpath(dir_at)) if seed is not None: matlab_engine_instance.RandStream.setGlobalStream(matlab_engine_instance.RandStream('mcg16807', 'Seed', seed)) statistic, v2, boot_p_value, v3, appr_p_value = matlab_engine_instance.CInd_test_new_withGP(np2matlab(X), np2matlab(Y), np2matlab(Z), 0.01, 0, nargout=5) return statistic, v2, boot_p_value, v3, appr_p_value finally: if not_given and matlab_engine_instance is not None: matlab_engine_instance.quit()
5,352,241
def _check(err, msg=""): """Raise error for non-zero error codes.""" if err < 0: msg += ': ' if msg else '' if err == _lib.paUnanticipatedHostError: info = _lib.Pa_GetLastHostErrorInfo() hostapi = _lib.Pa_HostApiTypeIdToHostApiIndex(info.hostApiType) msg += 'Unanticipated host API {0} error {1}: {2!r}'.format( hostapi, info.errorCode, _ffi.string(info.errorText).decode()) else: msg += _ffi.string(_lib.Pa_GetErrorText(err)).decode() raise PortAudioError(msg) return err
5,352,242
def RunPackage(output_dir, target, package_path, package_name, package_deps, package_args, args): """Copies the Fuchsia package at |package_path| to the target, executes it with |package_args|, and symbolizes its output. output_dir: The path containing the build output files. target: The deployment Target object that will run the package. package_path: The path to the .far package file. package_name: The name of app specified by package metadata. package_args: The arguments which will be passed to the Fuchsia process. args: Structure of arguments to configure how the package will be run. Returns the exit code of the remote package process.""" system_logger = ( _AttachKernelLogReader(target) if args.system_logging else None) try: if system_logger: # Spin up a thread to asynchronously dump the system log to stdout # for easier diagnoses of early, pre-execution failures. log_output_quit_event = multiprocessing.Event() log_output_thread = threading.Thread( target=lambda: _DrainStreamToStdout(system_logger.stdout, log_output_quit_event)) log_output_thread.daemon = True log_output_thread.start() tuf_root = tempfile.mkdtemp() pm_serve_task = None # Publish all packages to the serving TUF repository under |tuf_root|. subprocess.check_call([PM, 'newrepo', '-repo', tuf_root]) all_packages = [package_path] + package_deps for next_package_path in all_packages: PublishPackage(tuf_root, next_package_path) # Serve the |tuf_root| using 'pm serve' and configure the target to pull # from it. # TODO(kmarshall): Use -q to suppress pm serve output once blob push # is confirmed to be running stably on bots. serve_port = common.GetAvailableTcpPort() pm_serve_task = subprocess.Popen( [PM, 'serve', '-d', os.path.join(tuf_root, 'repository'), '-l', ':%d' % serve_port, '-q']) remote_port = common.ConnectPortForwardingTask(target, serve_port, 0) _RegisterAmberRepository(target, tuf_root, remote_port) # Install all packages. for next_package_path in all_packages: install_package_name, package_version = GetPackageInfo(next_package_path) logging.info('Installing %s version %s.' % (install_package_name, package_version)) return_code = target.RunCommand(['amber_ctl', 'get_up', '-n', install_package_name, '-v', package_version], timeout_secs=_INSTALL_TIMEOUT_SECS) if return_code != 0: raise Exception('Error while installing %s.' % install_package_name) if system_logger: log_output_quit_event.set() log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS) if args.install_only: logging.info('Installation complete.') return logging.info('Running application.') command = ['run', _GetComponentUri(package_name)] + package_args process = target.RunCommandPiped(command, stdin=open(os.devnull, 'r'), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) if system_logger: output_fd = MergedInputStream([process.stdout, system_logger.stdout]).Start() else: output_fd = process.stdout.fileno() # Run the log data through the symbolizer process. build_ids_path = os.path.join(os.path.dirname(package_path), 'ids.txt') output_stream = SymbolizerFilter(output_fd, build_ids_path) for next_line in output_stream: print next_line.rstrip() process.wait() if process.returncode == 0: logging.info('Process exited normally with status code 0.') else: # The test runner returns an error status code if *any* tests fail, # so we should proceed anyway. logging.warning('Process exited with status code %d.' 
% process.returncode) finally: if system_logger: logging.info('Terminating kernel log reader.') log_output_quit_event.set() log_output_thread.join() system_logger.kill() _UnregisterAmberRepository(target) if pm_serve_task: pm_serve_task.kill() shutil.rmtree(tuf_root) return process.returncode
5,352,243
def make_flood_fill_unet(input_fov_shape, output_fov_shape, network_config): """Construct a U-net flood filling network. """ image_input = Input(shape=tuple(input_fov_shape) + (1,), dtype='float32', name='image_input') if network_config.rescale_image: ffn = Lambda(lambda x: (x - 0.5) * 2.0)(image_input) else: ffn = image_input mask_input = Input(shape=tuple(input_fov_shape) + (1,), dtype='float32', name='mask_input') ffn = concatenate([ffn, mask_input]) # Note that since the Keras 2 upgrade strangely models with depth > 3 are # rejected by TF. ffn = add_unet_layer(ffn, network_config, network_config.unet_depth - 1, output_fov_shape, n_channels=network_config.convolution_filters) mask_output = Conv3D( 1, (1, 1, 1), kernel_initializer=network_config.initialization, padding=network_config.convolution_padding, name='mask_output', activation=network_config.output_activation)(ffn) ffn = Model(inputs=[image_input, mask_input], outputs=[mask_output]) return ffn
5,352,244
def FRAC(total): """Returns a function that shows the average percentage of the values from the total given.""" def realFrac(values, unit): r = toString(sum(values) / len(values) / total * 100) r += '%' if max(values) > min(values): r += ' avg' return [r] return realFrac
5,352,245
def get_min_max(ints): """ Return a tuple(min, max) out of list of unsorted integers. Args: ints(list): list of integers containing one or more integers """ if len(ints) == 0: return (None, None) low = ints[0] high = ints[0] for i in ints: if i < low: low = i elif i > high: high = i return (low, high)
5,352,246
def SearchOldMessages(p_webSocketSession, p_requestMessage, p_responseMessage): """Handles search old messages message requested by a client Args: p_webSocketSession (WSHandler): the websocket client that requested this. p_requestMessage (dict): the request message. p_responseMessage (dict): the response message. """ try: v_database = p_webSocketSession.session.v_omnidb_database.v_connection v_database.Open() v_table = v_database.Query(''' select * from ( select 2 as type, meg.gro_in_code as code, mes.mes_in_code, mes.mes_dt_creation, mes.mes_dt_update, use.user_id as use_in_code, use.user_name as use_st_login, mes.met_in_code, coalesce(mes.mes_st_content, '') as mes_st_content, coalesce(mes.mes_st_title, '') as mes_st_title, coalesce(mes.mes_st_attachmentname, '') as mes_st_attachmentname, meg.meg_bo_viewed as visualizada, coalesce(mes.mes_st_snippetmode, '') as mes_st_snippetmode, (case instr(coalesce(mes.mes_st_originalcontent, ''), '#start_mentioned_message#') when 0 then coalesce(mes.mes_st_originalcontent, '') else substr(coalesce(mes.mes_st_originalcontent, ''), 1, instr(coalesce(mes.mes_st_originalcontent, ''), '#start_mentioned_message#')) end ) as mes_st_originalcontent from messages_groups meg inner join messages mes on meg.mes_in_code = mes.mes_in_code inner join users use on mes.use_in_code = use.user_id where meg.use_in_code = {0} union select 1 as type, mec.cha_in_code as code, mes.mes_in_code, mes.mes_dt_creation, mes.mes_dt_update, use.user_id as use_in_code, use.user_name as use_st_login, mes.met_in_code, coalesce(mes.mes_st_content, '') as mes_st_content, coalesce(mes.mes_st_title, '') as mes_st_title, coalesce(mes.mes_st_attachmentname, '') as mes_st_attachmentname, mec.mec_bo_viewed as visualizada, coalesce(mes.mes_st_snippetmode, '') as mes_st_snippetmode, (case instr(coalesce(mes.mes_st_originalcontent, ''), '#start_mentioned_message#') when 0 then coalesce(mes.mes_st_originalcontent, '') else substr(coalesce(mes.mes_st_originalcontent, ''), 1, instr(coalesce(mes.mes_st_originalcontent, ''), '#start_mentioned_message#')) end ) as mes_st_originalcontent from messages_channels mec inner join messages mes on mec.mes_in_code = mes.mes_in_code inner join users use on mes.use_in_code = use.user_id where mec.use_in_code = {0} ) x where x.mes_st_originalcontent like '%{1}%' order by x.mes_dt_creation desc'''.format( int(p_webSocketSession.cookies['user_id'].value), p_requestMessage['v_data']['textPattern'].replace("'", "''") ) ) v_database.Close() v_data = { 'textPattern': p_requestMessage['v_data']['textPattern'], 'messageList': [] } for v_row in v_table.Rows: v_user = classes.User(v_row['use_in_code'], '', v_row['use_st_login'], None) v_message = classes.Message( int(v_row['mes_in_code']), v_row['mes_dt_creation'], v_row['mes_dt_update'], v_user, int(v_row['met_in_code']), v_row['mes_st_content'], v_row['mes_st_title'], v_row['mes_st_attachmentname'], (True if v_row['visualizada'] else False), v_row['mes_st_snippetmode'], v_row['mes_st_originalcontent'] ) v_data['messageList'].append({ 'message': v_message, 'type': v_row['type'], 'code': v_row['code'] }) p_responseMessage['v_code'] = response.SearchedOldMessages.value p_responseMessage['v_data'] = v_data SendToClient(p_webSocketSession, p_responseMessage, False) except Spartacus.Database.Exception as exc: LogException(p_webSocketSession, '', 'Database Exception', 'SearchOldMessages', traceback.format_exc()) p_responseMessage['v_error'] = True p_responseMessage['v_data'] = 'Error while executing the static method 
"SearchOldMessages".' SendToClient(p_webSocketSession, p_responseMessage, True) return except Exception as exc: LogException(p_webSocketSession, '', 'Exceção de Sistema', 'SearchOldMessages', traceback.format_exc()) p_responseMessage['v_error'] = True p_responseMessage['v_data'] = 'Error while executing the static method "SearchOldMessages".' SendToClient(p_webSocketSession, p_responseMessage, True) return
5,352,247
def gen_sweep_pts(start: float=None, stop: float=None, center: float=0, span: float=None, num: int=None, step: float=None, endpoint=True): """ Generates an array of sweep points based on different types of input arguments. Boundaries of the array can be specified using either start/stop or using center/span. The points can be specified using either num or step. Args: start (float) : start of the array stop (float) : end of the array center (float) : center of the array N.B. 0 is chosen as a sensible default for the span. it is argued that no such sensible default exists for the other types of input. span (float) : span the total range of values to span num (int) : number of points in the array step (float) : the stepsize between points in the array endpoint (bool): whether to include the endpoint """ if (start is not None) and (stop is not None): if num is not None: return np.linspace(start, stop, num, endpoint=endpoint) elif step is not None: # numpy arange does not natively support endpoint return np.arange(start, stop + endpoint*step/100, step) else: raise ValueError('Either "num" or "step" must be specified') elif (center is not None) and (span is not None): if num is not None: return span_num(center, span, num, endpoint=endpoint) elif step is not None: return span_step(center, span, step, endpoint=endpoint) else: raise ValueError('Either "num" or "step" must be specified') else: raise ValueError('Either ("start" and "stop") or ' '("center" and "span") must be specified')
5,352,248
def set_lang_owner(cursor, lang, owner): """Set language owner. Args: cursor (cursor): psycopg2 cursor object. lang (str): language name. owner (str): name of new owner. """ query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner) executed_queries.append(query) cursor.execute(query) return True
5,352,249
def rstrip_tuple(t: tuple): """Remove trailing zeroes in `t`.""" if not t or t[-1]: return t right = len(t) - 1 while right > 0 and t[right - 1] == 0: right -= 1 return t[:right]
5,352,250
def _calc_active_face_flux_divergence_at_node(grid, unit_flux_at_faces, out=None): """Calculate divergence of face-based fluxes at nodes (active faces only). Given a flux per unit width across each face in the grid, calculate the net outflux (or influx, if negative) divided by cell area, at each node that lies within a cell. Construction:: _calc_active_face_flux_divergence_at_node(grid, unit_flux_at_faces, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_faces : ndarray or field name (x number of faces) Flux per unit width associated with faces. out : ndarray (x number of nodes), optional Buffer to hold the result. Returns ------- ndarray (x number of nodes) Flux divergence at nodes. Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> fg = rg.calc_grad_at_link(z)[rg.link_at_face] # there are 7 faces >>> fg array([ 5. , 3.6, 5. , -1.4, -3.6, -5. , -3.6]) >>> _calc_active_face_flux_divergence_at_node(rg, -fg) array([ 0. , 0. , 0. , 0. , 0. , 1.64, 0.94, 0. , 0. , 0. , 0. , 0. ]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> _calc_active_face_flux_divergence_at_node(rg, -fg) array([ 0. , 0. , 0. , 0. , 0. , 1.14, 0.22, 0. , 0. , 0. , 0. , 0. ]) Notes ----- Performs a numerical flux divergence operation on cells, and returns the result in an array of length equal to the number of nodes. Nodes without cells (those on the grid perimeter) are not affected (i.e., their value is either zero, or if `out` is given, whatever the prior value in `out` was). """ if out is None: out = grid.zeros(at='node') out[grid.node_at_cell] = \ _calc_net_active_face_flux_at_cell(grid, unit_flux_at_faces) \ / grid.area_of_cell return out
5,352,251
def get_EL(overlaps): """ a) 1 +++++++++|---|--- 2 --|---|++++++++++ b) 1 ---|---|+++++++++++++ 2 ++++++++++|---|--- """ EL1a = overlaps['query_start'] EL2a = overlaps['target_len'] - overlaps['target_end'] - 1 EL1b = overlaps['query_len'] - overlaps['query_end'] - 1 EL2b = overlaps['target_start'] final_EL = [] for i in range(overlaps.shape[0]): if extend_right(overlaps['query_end'][i], overlaps['query_len'][i], overlaps['target_end'][i], overlaps['target_len'][i]): final_EL.append([EL1b[i], EL2b[i]]) elif extend_left(overlaps['query_start'][i], overlaps['target_start'][i]): final_EL.append([EL1a[i], EL2a[i]]) else: # TODO filter out overlaps that do not extend anything on either side continue final_EL = np.array(final_EL).reshape(-1,2) return np.split(final_EL, 2, axis=1)
5,352,252
def get_line(prompt: str = '') -> Effect[HasConsole, NoReturn, str]: """ Get an `Effect` that reads a `str` from stdin Example: >>> class Env: ... console = Console() >>> greeting = lambda name: f'Hello {name}!' >>> get_line('What is your name? ').map(greeting).run(Env()) name? # input e.g 'John Doe' 'Hello John Doe!' Args: prompt: prompt to display in console Return: an `Effect` that produces a `str` read from stdin """ return depend(HasConsole).and_then(lambda env: env.console.input(prompt))
5,352,253
def reconstruct_wave(*args: ndarray, kwargs_istft, n_sample=-1) -> ndarray: """ construct time-domain wave from complex spectrogram Args: *args: the complex spectrogram. kwargs_istft: arguments of Inverse STFT. n_sample: expected audio length. Returns: audio (numpy) """ if len(args) == 1: spec = args[0].squeeze() mag = None phase = None assert np.iscomplexobj(spec) elif len(args) == 2: spec = None mag = args[0].squeeze() phase = args[1].squeeze() assert np.isrealobj(mag) and np.isrealobj(phase) else: raise ValueError kwarg_len = dict(length=n_sample) if n_sample != -1 else dict() if spec is None: spec = mag * np.exp(1j * phase) wave = librosa.istft(spec, **kwargs_istft, **kwarg_len) return wave
5,352,254
def test_inc(): """Verify that 'inc' actually works!""" assert inc(3) == 4
5,352,255
def perfect_score(student_info): """ :param student_info: list of [<student name>, <score>] lists :return: first `[<student name>, 100]` or `[]` if no student score of 100 is found. """ first = [] student_names = [] score = [] print (student_info) for name in student_info: print('1', 'name', name[0]) print ('2','score',name[1]) print(type(name[1])) score = int(name[1]) print(type(score)) if (score == 100 ): print('3', score) print(name) return name return first
5,352,256
def parse_args(): """Parses command line arguments.""" parser = argparse.ArgumentParser() parser.add_argument( "--cl_kernel_dir", type=str, default="./mace/ops/opencl/cl/", help="The cl kernels directory.") parser.add_argument( "--output_path", type=str, default="./mace/examples/codegen/opencl/opencl_encrypted_program.cc", help="The path of encrypted opencl kernels.") return parser.parse_known_args()
5,352,257
def get_composite_component(current_example_row, cache, model_config): """ maps component_id to dict of {cpu_id: False, ...} :param current_example_row: :param cache: :param model_config: :return: nested mapping_dict = { #there can be multiple components component_id = { #components can be deployed on multiple servers cpu_id: False, ... }, ... } """ mapping_dict = defaultdict(lambda: {}) # for context in for column_name in model_config["components"]: allocation_name = column_name.replace("AllocationDegreeImpl:", "") context = get_element_by_identifier(element_tree=cache.get_xml_tree("allocation"), search_string=allocation_name, attribute="entityName") system_id = get_linkage_id(identifier="assemblyContext_AllocationContext", element_tree=context) assembly_context = get_by_id(element=cache.get_xml_tree("system"), element_id=system_id) component = assembly_context.find("./encapsulatedComponent__AssemblyContext") if component.get(get_xml_schema_type()) == "repository:CompositeComponent": repo_id = get_linkage_id(element_tree=assembly_context, identifier="encapsulatedComponent__AssemblyContext") composite_component = get_by_id(element=cache.get_xml_tree("repository"), element_id=repo_id) for composed_structure in composite_component.findall("./assemblyContexts__ComposedStructure"): component_id = composed_structure.get("encapsulatedComponent__AssemblyContext") # check if column (with name of component) of current test data is allocated to existing server if current_example_row[column_name] in model_config["server"].keys(): # if component is allocated to existing server append allocation to list for server_id in model_config["server"]: # if component is part of composite if current_example_row[column_name] == server_id: temp_server_id = model_config["server"][current_example_row[column_name]] mapping_dict[component_id].update({temp_server_id: False}) return mapping_dict
5,352,258
def presigned_url_both(filename, email): """ Return presigned urls both original image url and thumbnail image url :param filename: :param email: :return: """ prefix = "photos/{0}/".format(email_normalize(email)) prefix_thumb = "photos/{0}/thumbnails/".format(email_normalize(email)) key_thumb = "{0}{1}".format(prefix_thumb, filename) key_origin = "{0}{1}".format(prefix, filename) try: s3_client = boto3.client('s3') thumb_url = s3_client.generate_presigned_url( 'get_object', Params={'Bucket': conf['S3_PHOTO_BUCKET'], 'Key': key_thumb}, ExpiresIn=conf['S3_PRESIGNED_EXP']) origin_url = s3_client.generate_presigned_url( 'get_object', Params={'Bucket': conf['S3_PHOTO_BUCKET'], 'Key': key_origin}, ExpiresIn=conf['S3_PRESIGNED_EXP']) except Exception as e: raise ChaliceViewError(e) return thumb_url, origin_url
5,352,259
def check_if_event_exists(service, new_summary):
    """
    Description: checks if an event whose summary contains new_summary
                 exists, using a naive substring match over all result pages
    """
    event_exists = False
    page_token = None
    calendarId = gcalendarId
    while True:
        events = (
            service.events().list(calendarId=calendarId, pageToken=page_token).execute()
        )
        for event in events["items"]:
            if new_summary in event["summary"]:
                event_exists = True
                break
        page_token = events.get("nextPageToken")
        if event_exists or not page_token:
            break
    return event_exists
5,352,260
def retry(func, *args, **kwargs):
    """
    You can use the kwargs to override the 'retries' (default: 5) and
    'use_account' (default: 1).
    """
    global url, token, parsed, conn
    retries = kwargs.get('retries', 5)
    use_account = 1
    if 'use_account' in kwargs:
        use_account = kwargs['use_account']
        del kwargs['use_account']
    use_account -= 1
    attempts = 0
    backoff = 1
    while attempts <= retries:
        attempts += 1
        try:
            if not url[use_account] or not token[use_account]:
                url[use_account], token[use_account] = \
                    get_auth(swift_test_auth, swift_test_user[use_account],
                             swift_test_key[use_account])
                parsed[use_account] = conn[use_account] = None
            if not parsed[use_account] or not conn[use_account]:
                parsed[use_account], conn[use_account] = \
                    http_connection(url[use_account])
            return func(url[use_account], token[use_account],
                        parsed[use_account], conn[use_account], *args, **kwargs)
        except (socket.error, HTTPException):
            if attempts > retries:
                raise
            parsed[use_account] = conn[use_account] = None
        except AuthError:
            url[use_account] = token[use_account] = None
            continue
        except InternalServerError:
            pass
        if attempts <= retries:
            sleep(backoff)
            backoff *= 2
    raise Exception('No result after %s retries.' % retries)
5,352,261
def eval_f(angles, data=None): """ function to minimize """ x1, x2, d, zt, z, alpha, beta, mask, b1, b2 = data thetaxm, thetaym, thetazm, thetaxp, thetayp, thetazp = angles rm = rotation(thetaxm, thetaym, thetazm) rp = rotation(thetaxp, thetayp, thetazp) x1r = rm.dot(x1.T).T x2r = rp.dot(x2.T).T + d obj = poisson_complete_ll(x1r, x2r, zt, z, alpha, beta, mask, b1, b2) return obj
5,352,262
def clamp(min_v, max_v, value): """ Clamps a value between a min and max value Args: min_v: Minimum value max_v: Maximum value value: Value to be clamped Returns: Returns the clamped value """ return min_v if value < min_v else max_v if value > max_v else value
5,352,263
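Illustrative boundary checks for `clamp`:

assert clamp(0, 10, -5) == 0    # below the range -> min
assert clamp(0, 10, 15) == 10   # above the range -> max
assert clamp(0, 10, 7) == 7     # inside the range -> unchanged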
def collatz(n):
    """Return the Collatz sequence starting at n, down to and including 1."""
    l = []
    while n > 1:
        l.append(n)
        if n % 2 == 0:
            n = n // 2
        else:
            n = (3 * n) + 1
    l.append(n)
    return l
5,352,264
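A couple of illustrative checks against the function above:

assert collatz(6) == [6, 3, 10, 5, 16, 8, 4, 2, 1]
assert collatz(1) == [1]   # loop body never runs; only the final value is appended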
def print_configs(configs) -> None: """Print available configurations.""" print('Available configurations:') for i, config in enumerate(configs): domain = find_by_repr(hook._domains, config.domain) print(f'{i}: {config.name}, using domain "{domain.name}"')
5,352,265
def print_stype_pre(st,fh): """ Print header and paragraph starts """ if st == 'p': print ("<p>", file = fh) elif st == 'h1': print ("<h1>", file = fh)
5,352,266
def _load_tokenizer(path, **kwargs):
    """Load a ``transformers`` tokenizer from a local model directory."""
    if not os.path.isdir(path):
        raise ValueError(
            "transformers.AutoTokenizer.from_pretrained"
            " should be called with a path to a model directory."
        )
    return transformers.AutoTokenizer.from_pretrained(path, **kwargs)
5,352,267
def generate_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2 ** np.arange(3, 6), stride=16): """ Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, 15, 15) window. """ base_anchor = np.array([1, 1, base_size, base_size]) - 1 ratio_anchors = _ratio_enum(base_anchor, ratios) anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales) for i in range(ratio_anchors.shape[0])]) return anchors
5,352,268
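A minimal shape check for `generate_anchors`, assuming the `_ratio_enum` and `_scale_enum` helpers from the same module (the classic Faster R-CNN anchor code) are available:

anchors = generate_anchors()
# 3 aspect ratios x 3 scales -> 9 anchors, each row is [x1, y1, x2, y2]
assert anchors.shape == (9, 4)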
def number_format(number_string, fill=2):
    """
    add padding zeros to make aligned numbers
    ex.
        >>> number_format('2')
        '02'
        >>> number_format('1-2')
        '01-02'
    """
    output = []
    digits_spliter = r'(?P<digit>\d+)|(?P<nondigit>.)'
    for token in [m.groups() for m in re.finditer(digits_spliter, number_string)]:
        if token[0] is None:
            output.append(token[1])
        else:
            output.append(token[0].zfill(fill))
    return ''.join(output)
5,352,269
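With the `fill` parameter now honoured, a few illustrative checks:

assert number_format('2') == '02'
assert number_format('a1-2') == 'a01-02'          # non-digit runs pass through
assert number_format('1-2', fill=3) == '001-002'  # custom padding width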
def box_postp2use(pred_boxes, nms_iou_thr=0.7, conf_thr=0.5): """Postprocess prediction boxes to use * Non-Maximum Suppression * Filter boxes with Confidence Score Args: pred_boxes (np.ndarray dtype=np.float32): pred boxes postprocessed by yolo_output2boxes. shape: [cfg.cell_size * cfg.cell_size *cfg.boxes_per_cell, 6] nms_iou_thr (float): Non-Maximum Suppression IoU Threshold conf_thr (float): Confidence Score Threshold Returns: np.ndarray (dtype=np.float32) """ boxes_nms = nms(pred_boxes=pred_boxes, iou_thr=nms_iou_thr) boxes_conf_filtered = boxes_nms[boxes_nms[:, 4] >= conf_thr] return boxes_conf_filtered
5,352,270
def plotPolarPoint(axe, colorspace, target1, target2, color=None, marker="+"): """Plots polar points which is read from auxiliary_line.json. Args: axe (matplotlib.axes.Axes): Target axe for plotting. colorspace (str): Group key defined in json. target1 (str): Array data key defined under "colorspace". Plots this key-value on x-axis. target2 (str): Array data key defined under "colorspace". Plots this key-value on y-axis. color (type, optional): Color parameter defined by matplotlib. When specified "None", set color defined by json or "#000000". Defaults to None. marker (str, optional): Marker style parameter defined by matplotlib. Defaults to "+". """ POLAR_KEY = 'polar' PLOTCOLOR_KEY = 'plot_color' with open('auxiliary_line.json') as auxline_file: polor_point = json.load(auxline_file) if not colorspace in polor_point: return if not POLAR_KEY in polor_point[colorspace]: return for key, value in polor_point[colorspace][POLAR_KEY].items(): if target1 in value and target2 in value: if color is None: if PLOTCOLOR_KEY in value: plot_color = value[PLOTCOLOR_KEY] else: plot_color = '#000000' else: plot_color = color axe.plot( value[target1], value[target2], color=plot_color, marker=marker, linestyle="", alpha=1.0, zorder=-5.0 )
5,352,271
def many_hsvs_to_rgb(hsvs):
    """Combine list of hsvs of the form [[(h, s, v), ...], ...] and return RGB list."""
    num_strips = len(hsvs[0])
    num_leds = len(hsvs[0][0])
    res = [[[0, 0, 0] for ll in range(num_leds)] for ss in range(num_strips)]
    for strip in range(num_strips):
        for led in range(num_leds):
            # for some reason the conversion screws this up?
            #
            #   import bibliopixel as bp
            #   c1 = bp.colors.conversions.hsv2rgb((0, 0, 0))
            #   c2 = bp.colors.conversions.hsv2rgb((0, 0, 0))
            #   c3 = bp.colors.conversions.hsv2rgb((0, 0, 0))
            #   bp.colors.arithmetic.color_blend(
            #       bp.colors.arithmetic.color_blend(c1, c2),
            #       c3)
            #
            #   = (2, 2, 2)
            if all(hsv[strip][led][2] == 0 for hsv in hsvs):
                rgb = (0, 0, 0)
            else:
                rgbs = [bp.colors.conversions.hsv2rgb(hsv[strip][led])
                        for hsv in hsvs]
                rgb = reduce(bp.colors.arithmetic.color_blend, rgbs)
            res[strip][led] = rgb

    return res
5,352,272
def deduplicate(input_file, output_file, columns):
    """De-duplicate rows in CSV based on columns specified (comma separated)"""
    df = pd.read_csv(input_file)
    columns = [x.strip() for x in columns.split(",")]
    if len(columns) < 1:
        print("No columns specified")
    all_ok = True
    for column in columns:
        if column not in df.columns:
            print(f"Could not find specified column {column} in table")
            all_ok = False
    if not all_ok:
        print(f"One or more columns not found in table. Options are {list(df.columns)}")
        sys.exit(1)
    print(f"Dropping duplicate entries based on column subset {columns}")
    df.drop_duplicates(subset=columns, inplace=True)
    df.to_csv(output_file)
5,352,273
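A minimal invocation sketch for `deduplicate`, assuming `pd` (pandas) and `sys` are imported at module level as the function body implies; the file names and data are illustrative.

import pandas as pd

# Two rows share the same (name, city) pair; only the first is kept.
pd.DataFrame(
    {"name": ["a", "a", "b"], "city": ["x", "x", "y"], "score": [1, 2, 3]}
).to_csv("people.csv", index=False)

deduplicate("people.csv", "people_dedup.csv", "name, city")
assert len(pd.read_csv("people_dedup.csv")) == 2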
def _convert_paths_to_flask(transmute_paths):
    """Flask has its own route syntax, so we convert it."""
    paths = []
    for p in transmute_paths:
        paths.append(p.replace("{", "<").replace("}", ">"))
    return paths
5,352,274
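For example, a transmute-style path becomes a Flask-style path:

assert _convert_paths_to_flask(["/users/{user_id}/posts/{post_id}"]) == [
    "/users/<user_id>/posts/<post_id>"
]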
def check_isup(k, return_client=None): """ Checks ping and returns status Used with concurrent decorator for parallel checks :param k: name to ping :param return_client: to change return format as '{k: {'comments': comments}}' :return(str): ping ok / - """ if is_up(k): comments = 'ping ok' else: comments = ' - ' if return_client: comments = {k: {'comments': comments}} return comments
5,352,275
def histogram(measurements, dataset_name: str, plt=pyplot, show: bool=True):
    """
    Shows a histogram with a fitted normal distribution.

    Example:
        histogram(np.array([1, 2, 3, 4, 4, 5, 5, 6, 7]), "X")

    :param measurements: The measurements to create a histogram for.
    :param dataset_name: The name of the dataset to show in the header.
    :param plt: The matplotlib instance to use (Either pyplot or an Axes instance)
    :param show: Whether to call the show method on plt (if it exists).
    """
    mu, std = stats.norm.fit(measurements)

    plt.hist(measurements, bins='auto', density=True)
    xmin, xmax = 0, 0

    if hasattr(plt, 'get_xlim'):
        xmin, xmax = plt.get_xlim()
    elif hasattr(plt, 'xlim'):
        xmin, xmax = plt.xlim()

    x = np.linspace(xmin, xmax, 100)
    p = stats.norm.pdf(x, mu, std)
    plt.plot(x, p, 'k', linewidth=2)

    if hasattr(plt, 'set_title'):
        plt.set_title("Histogram of {}".format(dataset_name))
    elif hasattr(plt, 'title'):
        plt.title("Histogram of {}".format(dataset_name))

    if hasattr(plt, 'show') and show:
        plt.show()
5,352,276
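A usage sketch for `histogram`, assuming its module imports `numpy as np`, `scipy.stats as stats`, and `matplotlib.pyplot as pyplot` as the function body implies; the data are synthetic.

import numpy as np
from matplotlib import pyplot

samples = np.random.default_rng(0).normal(loc=5.0, scale=2.0, size=500)

# Default: draw on pyplot and pop up the figure immediately.
histogram(samples, "Simulated X")

# Or draw into a specific Axes and defer showing.
fig, ax = pyplot.subplots()
histogram(samples, "Simulated X", plt=ax, show=False)
pyplot.show()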
def percent_uppercase(text): """Calculates percentage of alphabetical characters that are uppercase, out of total alphabetical characters. Based on findings from spam.csv that spam texts have higher uppercase alphabetical characters (see: avg_uppercase_letters())""" alpha_count = 0 uppercase_count = 0 for char in text: if char.isalpha(): alpha_count += 1 if char.isupper(): uppercase_count += 1 # calculate percentage - make sure not to divide by 0 try: perc_uppercase = float(uppercase_count) / float(alpha_count) return str(perc_uppercase) except ZeroDivisionError: return "0"
5,352,277
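Two illustrative checks for `percent_uppercase` (note the string return type and the zero-division guard):

assert percent_uppercase("FREE Money now!!!") == str(5 / 12)  # 5 uppercase of 12 letters
assert percent_uppercase("12345") == "0"                      # no letters at all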
def get_move() -> tuple: """ Utility function to get the player's move. :return: tuple of the move """ return get_tuple('What move to make?')
5,352,278
def database():
    """Yield a Database instance and close it automatically afterwards."""
    db = Database()
    yield db
    db.close()
5,352,279
def _delete_file_if_exists(filepath): """Delete the file if it exists. :param filepath: The file path. """ if os.path.exists(filepath): os.remove(filepath)
5,352,280
def list_supported_parset_settings(): """List the ``YandaSoft`` parset settings that are currently supported by ``dstack``. This function uses logger level INFO to return the supported settings Parameters ========== Returns ======= Prints out the supported settings: log """ print_support = lambda functionality, flist: log.info('Supported {0:s}: '.format(functionality) + ' '.join(map(str, flist))) log.info('Settings for YandaSoft parsets supported by the dstack wrapper:') print_support('Imagers',_SUPPORTED_IMAGERS) print_support('Solvers',_SUPPORTED_SOLVERS) print_support('Gridders',_SUPPORTED_GRIDDER_NAMES) print_support('Preconditioners',_SUPPORTED_PRECONDITIONERS)
5,352,281
def set_value(parent, type, name, value):
    """ Sets a value in the format Mitsuba Renderer expects """
    curr_elem = etree.SubElement(parent, type)
    curr_elem.set("name", name)
    curr_elem.set("id" if type in ["ref", "shapegroup"] else "value", value) # The value can be an id for refs/shapegroups
    return curr_elem
5,352,282
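A minimal sketch of `set_value`, assuming `etree` here is `xml.etree.ElementTree` (lxml.etree behaves the same for this usage); the scene fragment is illustrative, not a complete Mitsuba scene.

import xml.etree.ElementTree as etree

scene = etree.Element("scene", version="0.6.0")
bsdf = etree.SubElement(scene, "bsdf", type="diffuse")

set_value(bsdf, "rgb", "reflectance", "0.5, 0.2, 0.2")  # gets value="0.5, 0.2, 0.2"
set_value(scene, "ref", "bsdf_ref", "my_material")      # "ref" gets id="my_material"

print(etree.tostring(scene, encoding="unicode"))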
def create_bst(nodes) -> BST: """Creates a BST from a specified nodes.""" root = BST(nodes[0]) for i in range(1, len(nodes)): root.insert(nodes[i]) return root
5,352,283
def get_dec_arch(gen: Generator) -> nn.Sequential: """ Get decoder architecture associated with given generator. Args: gen (Generator): Generator associated with the decoder. Returns: nn.Sequential: Decoder architecture. """ # As defined in the paper. len_z = len(gen.latent_space_mean()) h_size = math.floor(len_z / 2) decoder = nn.Sequential( nn.Linear(len_z, h_size), nn.ReLU(inplace=True), nn.Linear(h_size, len_z), ) return decoder
5,352,284
def add_page_to_xml(alto_xml, alto_xml_page, page_number=0): """ Add new page to end of alto_xml or replace old page. """ # If book empty if (alto_xml == None): page_dom = xml.dom.minidom.parseString(alto_xml_page) page_dom.getElementsByTagName("Page")[0].setAttribute("ID", 'page_1') alto_xml_page = page_dom.toxml(encoding="utf-8") return(alto_xml_page) # If not book_dom = xml.dom.minidom.parseString(alto_xml) page_dom = xml.dom.minidom.parseString(alto_xml_page) page = page_dom.getElementsByTagName("Page")[0] if(page_number==0): # Find last page page_number = book_dom.getElementsByTagName("Page").length # and add page to end book_dom.getElementsByTagName("Layout")[0].appendChild(page) page.setAttribute("ID", 'page_%d' % (page_number+1)) # If new page is not last page else: old_page = book_dom.getElementsByTagName("Page")[page_number-1] book_dom.getElementsByTagName("Layout")[0].replaceChild(page, old_page) page.setAttribute("ID", 'page_%d' % page_number) return(book_dom.toxml(encoding="utf-8"))
5,352,285
def calculate_performance(all_data): """ Calculates the performance metrics as found in "benchmarks" folder of scikit-optimize and prints them in console. Parameters ---------- * `all_data`: dict Traces data collected during run of algorithms. For more details, see 'evaluate_optimizer' function. """ sorted_traces = defaultdict(list) for model in all_data: for dataset in all_data[model]: for algorithm in all_data[model][dataset]: data = all_data[model][dataset][algorithm] # leave only best objective values at particular iteration best = [[v[-1] for v in d] for d in data] supervised_learning_type = "Regression" if ("Regressor" in model) else "Classification" # for every item in sorted_traces it is 2d array, where first dimension corresponds to # particular repeat of experiment, and second dimension corresponds to index # of optimization step during optimization key = (algorithm, supervised_learning_type) sorted_traces[key].append(best) # calculate averages for key in sorted_traces: # the meta objective: average over multiple tasks mean_obj_vals = np.mean(sorted_traces[key], axis=0) minimums = np.min(mean_obj_vals, axis=1) f_calls = np.argmin(mean_obj_vals, axis=1) min_mean = np.mean(minimums) min_stdd = np.std(minimums) min_best = np.min(minimums) f_mean = np.mean(f_calls) f_stdd = np.std(f_calls) f_best = np.min(f_calls) def fmt(float_value): return ("%.3f" % float_value) output = str(key[0]) + " | " + " | ".join( [fmt(min_mean) + " +/- " + fmt(min_stdd)] + [fmt(v) for v in [min_best, f_mean, f_stdd, f_best]]) result = table_template + output print("") print(key[1]) print(result)
5,352,286
def connected_components(num_nodes, Ap, Aj, components): """connected_components(int const num_nodes, int const [] Ap, int const [] Aj, int [] components) -> int""" return _amg_core.connected_components(num_nodes, Ap, Aj, components)
5,352,287
def test_ft_ovr_counters(): """ Author: Ramprakash Reddy ([email protected]) Verify tx_ovr and rx_ovr counters should not increment. Verify rx_err counters should increment, when framesize is more than MTU. """ flag = 1 properties = ['rx_ovr','tx_ovr'] intf_data.port_list = [vars.D1T1P1, vars.D1T1P2] intfapi.clear_interface_counters(vars.D1) intf_data.tg.tg_traffic_control(action='clear_stats', port_handle=[intf_data.tg_ph_1, intf_data.tg_ph_2]) intf_data.tg.tg_traffic_control(action='run', stream_handle=[intf_data.streams['traffic_tg1'], intf_data.streams['traffic_tg2']]) st.wait(intf_data.wait_sec) intf_data.tg.tg_traffic_control(action='stop', stream_handle=[intf_data.streams['traffic_tg1'], intf_data.streams['traffic_tg2']]) counters = intfapi.get_interface_counter_value(vars.D1, intf_data.port_list, properties) for each_port in intf_data.port_list: for each_property in properties: value = counters[each_port][each_property] if value: flag = 0 st.error("{} counters value expected 0, but found {} for port {}".format(each_property,value,each_port)) if flag == 1: st.log("rx_ovr and tx_ovr counters is not increasing as expected") intfapi.clear_interface_counters(vars.D1) intfapi.interface_properties_set(vars.D1, vars.D1T1P1, 'mtu', intf_data.mtu) intf_data.tg.tg_traffic_control(action='clear_stats', port_handle=[intf_data.tg_ph_1]) intf_data.tg.tg_traffic_control(action='run', stream_handle=intf_data.streams['traffic_tg1']) st.wait(intf_data.wait_sec) intf_data.tg.tg_traffic_control(action='stop', stream_handle=intf_data.streams['traffic_tg1']) rx_err = intfapi.get_interface_counter_value(vars.D1, vars.D1T1P1, properties="rx_err")[vars.D1T1P1]['rx_err'] if not rx_err: st.report_fail("interface_rx_err_counters_fail", vars.D1T1P1) if flag == 1: st.log("rx_err counters is increasing as expected") if flag == 0: st.report_fail("test_case_failed") st.report_pass("test_case_passed")
5,352,288
def main() -> NoReturn: """Run intersect.""" result = intersect( linked_list_1=[13, 4, 12, 27, ], linked_list_2=[29, 23, 82, 11, 12, 27, ] ) print(result)
5,352,289
def get_drm_version(): """ Return DRM library version. Returns: str: DRM library version. """ path = _join(PROJECT_DIR, "CMakeLists.txt") with open(path, "rt") as cmakelists: for line in cmakelists: if line.startswith("set(ACCELIZEDRM_VERSION "): version = f"v{line.split(' ')[1].strip().strip(')')}" print(f"Detected DRM library version: {version}") return version raise ValueError(f'ACCELIZEDRM_VERSION not found in "{path}"')
5,352,290
def pmx(p1, p2): """Perform Partially Mapped Crossover on p1 and p2.""" return pmx_1(p1, p2), pmx_1(p2, p1)
5,352,291
def get_met_rxn_names(raw_data_dir: str, model_name: str) -> tuple: """ Gets the names of metabolites and reactions in the model. Args: raw_data_dir: path to folder with the raw data. model_name: named of the model. Returns: A list with the metabolite names and another with the reaction names. """ file_met_names = os.path.join(raw_data_dir, f'{model_name}_metsActive.dat') met_names = pd.read_csv(file_met_names, sep='\n').values met_names = list(met_names.transpose()[0]) met_names = [met_name.replace('m_m_', '') for met_name in met_names] # get reaction names file_rxn_names = os.path.join(raw_data_dir, f'{model_name}_rxnsActive.dat') rxn_names = pd.read_csv(file_rxn_names, sep='\n').values rxn_names = list(rxn_names.transpose()[0]) rxn_names = [rxn_name.replace('r_', '') for rxn_name in rxn_names] return met_names, rxn_names
5,352,292
def main(): """ The program will match the DNA with the DNA-substring and find out the most similar DNA-substring in the DNA """ long = input('Please give me a DNA sequence to search: ') short = input('What DNA sequence would you like to match? ') result = match(long, short) print ('The best match is: ' + str(result))
5,352,293
def test_convert_from_pip_fail_if_no_egg(): """Parsing should fail without `#egg=`. """ dep = 'git+https://github.com/kennethreitz/requests.git' with pytest.raises(ValueError) as e: dep = Requirement.from_line(dep).as_pipfile() assert 'pipenv requires an #egg fragment for vcs' in str(e)
5,352,294
def meshparameterspace(shape=(20, 20), psi_limits=(None, None), eta_limits=(None, None), psi_spacing="linear", eta_spacing="linear", user_spacing=(None, None)): """Builds curvilinear mesh inside parameter space. :param psi_spacing and eta_spacing: - 'linear': uniform spacing on interior of the surface - 'cosine': cosine spacing - 'uniform': spacing matches the spacing along edge - 'user': user spacing that is passed in through user_spacing :param psi_limits and eta_limits: only define if 'uniform'. Should be points where intersection is located. """ if psi_spacing == "cosine": x_spacing = cosine_spacing() elif psi_spacing == "linear": x_spacing = np.linspace elif psi_spacing == "uniform": x_spacing = _uniform_spacing(eta_limits, 0) elif psi_spacing == "user": if user_spacing[0] is not None: x_spacing = user_spacing[0] else: raise RuntimeError("must provide user_spacing w/ psi_spacing=user") else: raise RuntimeError("specified spacing not recognized") if eta_spacing == "cosine": y_spacing = cosine_spacing() elif eta_spacing == "linear": y_spacing = np.linspace elif eta_spacing == "uniform": y_spacing = _uniform_spacing(psi_limits, 1) elif eta_spacing == "user": if user_spacing[1] is not None: y_spacing = user_spacing[1] else: raise RuntimeError("must provide user_spacing w/ psi_spacing=user") else: raise RuntimeError("specified spacing not recognized") n_psi, n_eta = shape psi_lower, psi_upper = psi_limits eta_lower, eta_upper = eta_limits # if limits aren't specified, set lower to 0 and upper to 1 if psi_lower is None: psi_lower = np.full((n_eta, 2), 0.) eta_min = eta_lower[0, 1] if eta_lower is not None else 0. eta_max = eta_upper[0, 1] if eta_upper is not None else 1. psi_lower[:, 1] = y_spacing(eta_min, eta_max, n_eta) if psi_upper is None: psi_upper = np.full((n_eta, 2), 1.) eta_min = eta_lower[-1, 1] if eta_lower is not None else 0. eta_max = eta_upper[-1, 1] if eta_upper is not None else 1. psi_upper[:, 1] = y_spacing(eta_min, eta_max, n_eta) if eta_lower is None: eta_lower = np.full((n_psi, 2), 0.) psi_min = psi_lower[0, 0] if psi_lower is not None else 0. psi_max = psi_upper[0, 0] if psi_upper is not None else 1. eta_lower[:, 0] = x_spacing(psi_min, psi_max, n_psi) if eta_upper is None: eta_upper = np.full((n_psi, 2), 1.) psi_min = psi_lower[-1, 0] if psi_lower is not None else 0. psi_max = psi_upper[-1, 0] if psi_upper is not None else 1. eta_upper[:, 0] = x_spacing(psi_min, psi_max, n_psi) grid = mesh_curvilinear(psi_lower, psi_upper, eta_lower, eta_upper, x_spacing, y_spacing) # TODO: the following probably belongs outside the scope of this class # if flip: # grid = np.flipud(grid) return grid[:, :, 0], grid[:, :, 1]
5,352,295
def write_file(file_path='', data=''): """write a file from a string.""" fid = codecs.open(file_path, 'w', 'utf-8') try: fid.write(data) except (UnicodeEncodeError, UnicodeDecodeError): fid.write('error: could not write file') fid.close()
5,352,296
def get_key_information(index, harness_result: HarnessResult, testbed_parser, esapi_instance: ESAPI):
    """
    1. key_exception_dic is a dict keyed by engine name. If an error message can be
       extracted, the value is that engine's key error message; if no engine produced
       an error message, the value is the engine's full output.
    Returns [double_output_id, engine_name, key_exception_dic, api_name, filter_type].
    The filter type has three kinds: type 1 means the suspicious output contains an
    error message, type 2 means the suspicious output contains no error message, and
    type 3 means none of the engines reported an error (i.e. the inconsistency comes
    from differing execution results). Its value is one of [1, 2, 3], standing for
    [type 1, type 2, type 3].
    """
    suspicious_output = None
    for output in harness_result.outputs:
        if output.id == index:
            suspicious_output = output
    if suspicious_output is None:
        raise Exception("Harness result does not contain the specified index")
    key_exception = list_normalized_essential_exception_message(suspicious_output.stderr + suspicious_output.stdout)
    key_exception_dic = {}
    double_output_id = index
    engine_name = testbed_parser.parse_engine_name(suspicious_output.testbed)
    no_exception_info_engine_counter = 0
    es_api_node_ast_in_testcase = None
    # The inconsistent differential-testing result contains an error message: type 1
    if key_exception != "":
        filter_type = FilerType.TYPE1.value
        [api_name, es_api_node_ast_in_testcase] = getExecptionStatementApi.get_exception_statement_api(
            esapi_instance, harness_result.testcase, suspicious_output.stderr + suspicious_output.stdout,
            es_api_node_ast_in_testcase)
        if api_name is None:
            api_name = "NoApi"
        key_exception_dic = {engine_name: key_exception}
    # The inconsistent differential-testing result contains no error message: type 2
    else:
        filter_type = FilerType.TYPE2.value
        no_exception_info_engine_counter += 1  # no error message could be extracted from this result
        api_list = []
        for output in harness_result.outputs:
            if output.id != index:
                exception_engine_name = testbed_parser.parse_engine_name(output.testbed)
                exception_info = list_normalized_essential_exception_message(output.stderr + output.stdout)
                if exception_info == "":
                    no_exception_info_engine_counter += 1
                key_exception_dic.update({exception_engine_name: exception_info})
                [api, es_api_node_ast_in_testcase] = getExecptionStatementApi.get_exception_statement_api(
                    esapi_instance, harness_result.testcase, output.stderr + output.stdout,
                    es_api_node_ast_in_testcase)
                api = "NoApi" if api is None else api
                api_list.append(api)
        most_frequent_api, most_frequent_count = get_highest_frequency(api_list)
        if most_frequent_count < len(api_list) * 1 / 2:
            api_name = "NoApi"
        else:
            api_name = most_frequent_api
        # None of the engines reported an error, only the outputs differ: type 3
        if no_exception_info_engine_counter == len(harness_result.outputs):
            filter_type = FilerType.TYPE3.value
            for output in harness_result.outputs:
                exception_engine_name = testbed_parser.parse_engine_name(output.testbed)
                output = output.stderr + output.stdout
                key_exception_dic.update({exception_engine_name: output})
            api_name = "NoApi"
    return [double_output_id, engine_name, key_exception_dic, api_name, filter_type]
5,352,297
def prepare_data(): """Merge tables generated from simulated data, where columns 'fac1', 'fac2', 'fac3' from **table_2** contain the factor ids in **table_1** and columns 'x1', 'x2' from **table_2** contain control ids in **table_3**. The output are one pandas dataframe (saved as pickle) per factor, named 'meas_facX' (X as 1, 2, 3), that contains the multiindex (caseid, period), the two controls, and three measurements. """ # Read in dataframes from Stata files. factor = pd.read_stata( ppj("OUT_DATA","tables", "data_table_1.dta"), index_col = 'factor_id', columns = ['factor_id', 'meas1', 'meas2', 'meas3'] ) case = pd.read_stata( ppj("OUT_DATA", "tables", "data_table_2.dta") ) case.set_index(['caseid', 't'], inplace = True) control = pd.read_stata( ppj("OUT_DATA", "tables", "data_table_3.dta"), index_col = 'cont_id' ) # Join data at indices, generate one dataframe per factor # and save as pickle. c_nr = ['x1', 'x2'] for nr, c in enumerate(c_nr): case = case.join(control, on = c, rsuffix = '_'+str(nr+1)) case.drop(c_nr, axis = 1, inplace = True) f_nr=['fac1', 'fac2', 'fac3'] dataframes = [] for nr, f in enumerate(f_nr): dataframes.append(case.join(factor, on = f)) dataframes[nr].drop(f_nr, axis = 1, inplace = True) dataframes[nr].drop_duplicates(inplace = True) dataframes[nr].to_pickle(ppj("OUT_ANALYSIS", 'meas_'+f+'.pkl'))
5,352,298
def open_image(asset): """Opens the image represented by the given asset.""" try: asset_path = asset.get_path() except NotImplementedError: return Image.open(StringIO(asset.get_contents())) else: return Image.open(asset_path)
5,352,299