content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def clean_data(df): """ Clean Data : 1. Clean and Transform Category Columns from categories csv 2.Drop Duplicates 3.Remove any missing values Args: INPUT - df - merged Dataframe from load_data function OUTPUT - Returns df - cleaned Dataframe """ # Split categories into separate category columns categories = df['categories'].str.split(';', expand=True) row = categories.iloc[0] # Get new column names from category columns category_colnames = row.apply(lambda x: x.rstrip('- 0 1')) categories.columns = category_colnames # Convert category values to 0 or 1 categories = categories.applymap(lambda s: int(s[-1])) # Drop the original categories column from Dataframe df.drop('categories', axis=1, inplace=True) # Concatenate the original dataframe with the new `categories` dataframe df_final = pd.concat([df, categories], axis=1) #Drop missing values and duplicates from the dataframe df_final.drop_duplicates(subset='message', inplace=True) df_final.dropna(subset=category_colnames, inplace=True) #Refer ETL Pipeline preparation Notebook to understand why these columns are dropped df_final = df_final[df_final.related != 2] df_final = df_final.drop('child_alone', axis=1) return df_final
5,355,300
def test_determine_cum_stored_energy_series_simple_up_down(): """ /\ :return: """ gamma = np.array([0., 1., 0.5]) tau = np.array([0., 1., 0]) expected_delta_e = 0.75 # two triangles (1x1x0.5 + 1x0.5x0.5) et = ShearTest(tau, gamma) energy = assess.calc_case_et(et) assert energy[-1] == expected_delta_e, energy
5,355,301
def commitFile(file: str = None, message: str = None, debug: bool = False) -> bool: """Commit a file when it is changed. :param file: The name of the file we want to commit. :type file: str :param message: The commit message we want to use. :type message: str :param debug: If we want debug logging enabled. :type debug: Bool :rtype: bool :return: When committed (True), or no commit has been made (False) """ changelogdUpdated = ["git", "status", "|", "grep", file, "|", "wc", "-l"] changelogdUpdatedOutput = int(generic.executeCommand(command=changelogdUpdated)) if changelogdUpdatedOutput >= 1: # gitCommitCommand = ["git", "commit", "-m", {m}, {f}.format(m=message, f=file)] gitCommitCommand = ["git", "commit", "-m", message, file] generic.executeCommand(command=gitCommitCommand, shell=False, debug=debug) return True return False
5,355,302
def all_pairs_shortest_path_length(G,cutoff=None): """ Compute the shortest path lengths between all nodes in G. Parameters ---------- G : NetworkX graph cutoff : integer, optional depth to stop the search. Only paths of length <= cutoff are returned. Returns ------- lengths : dictionary Dictionary of shortest path lengths keyed by source and target. Notes ----- The dictionary returned only has keys for reachable node pairs. Examples -------- >>> G=nx.path_graph(5) >>> length=nx.all_pairs_shortest_path_length(G) >>> print(length[1][4]) 3 >>> length[1] {0: 1, 1: 0, 2: 1, 3: 2, 4: 3} """ paths={} for n in G: paths[n]=single_source_shortest_path_length(G,n,cutoff=cutoff) return paths
5,355,303
def get_cols_to_keep(gctoo, cid=None, col_bool=None, cidx=None, exclude_cid=None): """ Figure out based on the possible columns inputs which columns to keep. Args: gctoo (GCToo object): cid (list of strings): col_bool (boolean array): cidx (list of integers): exclude_cid (list of strings): Returns: cols_to_keep (list of strings): col ids to be kept """ # Use cid if provided if cid is not None: assert type(cid) == list, "cid must be a list. cid: {}".format(cid) cols_to_keep = [gctoo_col for gctoo_col in gctoo.meth_df.columns if gctoo_col in cid] # Tell user if some cids not found num_missing_cids = len(cid) - len(cols_to_keep) if num_missing_cids != 0: logger.info("{} cids were not found in the GCT.".format(num_missing_cids)) # Use col_bool if provided elif col_bool is not None: assert len(col_bool) == gctoo.meth_df.shape[1], ( "col_bool must have length equal to gctoo.meth_df.shape[1]. " + "len(col_bool): {}, gctoo.meth_df.shape[1]: {}".format( len(col_bool), gctoo.meth_df.shape[1])) cols_to_keep = gctoo.meth_df.columns[col_bool].values # Use cidx if provided elif cidx is not None: assert type(cidx[0]) is int, ( "cidx must be a list of integers. cidx[0]: {}, " + "type(cidx[0]): {}").format(cidx[0], type(cidx[0])) assert max(cidx) <= gctoo.meth_df.shape[1], ( "cidx contains an integer larger than the number of columns in " + "the GCToo. max(cidx): {}, gctoo.meth_df.shape[1]: {}").format( max(cidx), gctoo.meth_df.shape[1]) cols_to_keep = gctoo.meth_df.columns[cidx].values # If cid, col_bool, and cidx are all None, return all columns else: cols_to_keep = gctoo.meth_df.columns.values # Use exclude_cid if provided if exclude_cid is not None: # Keep only those columns that are not in exclude_cid cols_to_keep = [col_to_keep for col_to_keep in cols_to_keep if col_to_keep not in exclude_cid] return cols_to_keep
5,355,304
def check_zenity(): """ Check if zenity is installed """ warning = '''zenity was not found in your $PATH Installation is recommended because zenity is used to indicate that protonfixes is doing work while waiting for a game to launch. To install zenity use your system's package manager. ''' if not shutil.which('zenity'): log.warn(warning) return False return True
5,355,305
def get_auth_token(): """ Return the zerotier auth token for accessing its API. """ with open("/var/snap/zerotier-one/common/authtoken.secret", "r") as source: return source.read().strip()
5,355,306
def sph_harm_transform(f, mode='DH', harmonics=None): """ Project spherical function into the spherical harmonics basis. """ assert f.shape[0] == f.shape[1] if isinstance(f, tf.Tensor): sumfun = tf.reduce_sum def conjfun(x): return tf.conj(x) n = f.shape[0].value else: sumfun = np.sum conjfun = np.conj n = f.shape[0] assert np.log2(n).is_integer() if harmonics is None: harmonics = sph_harm_all(n) a = DHaj(n, mode) f = f*np.array(a)[np.newaxis, :] real = is_real_sft(harmonics) coeffs = [] for l in range(n // 2): row = [] minl = 0 if real else -l for m in range(minl, l+1): # WARNING: results are off by this factor, when using driscoll1994computing formulas factor = 2*np.sqrt(np.pi) row.append(sumfun(factor * np.sqrt(2*np.pi)/n * f * conjfun(harmonics[l][m-minl]))) coeffs.append(row) return coeffs
5,355,307
def create_model(experiment_settings: ExperimentSettings) -> OuterModel: """ Create an OuterModel with the provided settings. Args: experiment_settings: an instance of ExperimentSettings whose outer_settings supply the loss, optimizer, and metrics. """ model = OuterModel(experiment_settings.outer_settings) model.compile( loss=experiment_settings.outer_settings.loss, optimizer=experiment_settings.outer_settings.optimizer, metrics=experiment_settings.outer_settings.metrics, ) return model
5,355,308
def LoadTrainingTime(stateNum): """ Load the number of seconds spent training """ filename = 'time_' + str(stateNum) + '.pth' try: timeVals = pickle.load( open(GetModelPath() + filename, "rb")) return timeVals["trainingTime"] except: print("ERROR: Failed to load training times! Returning 0") return 0
5,355,309
def adjust_learning_rate(optimizer, epoch): """Set the learning rate to the initial LR decayed by a factor of 10 every args.lr_epochs epochs.""" lr = args.lr * (0.1 ** (epoch // args.lr_epochs)) print('Learning rate:', lr) for param_group in optimizer.param_groups: if args.retrain and ('mask' in param_group['key']): # retraining param_group['lr'] = 0.0 elif args.prune_target and ('mask' in param_group['key']): if args.prune_target in param_group['key']: param_group['lr'] = lr else: param_group['lr'] = 0.0 else: param_group['lr'] = lr return lr
5,355,310
def check_one_file(test_info, ref_file, output_file, eq_args): """Check a single output file produced by a test run against a reference file Parameters ---------- test_info : dict Dictionary containing miscellaneous test information ref_file : str Name of reference file, excluding path, against which test output will be checked output_file : str Name of output file, excluding path eq_args : str Equality arguments determining how the reference and output file should be compared by :py:func:`table_test` """ retval, failures = table_test([ref_file, output_file] + shlex.split(eq_args), verbose=True) msg = "\n ".join(failures) err_message = "Unequal output in files %s vs %s, module %s:\n %s" % ( ref_file, output_file, test_info["module_name"], msg ) nose.tools.assert_equal(retval, 0, msg=err_message)
5,355,311
def disk_status(hardware, disk, dgtype): """ Status disk """ value = int(float(disk['used']) / float(disk['total']) * 100.0) if value >= 90: level = DiagnosticStatus.ERROR elif value >= 70: level = DiagnosticStatus.WARN else: level = DiagnosticStatus.OK # Make board diagnostic status d_board = DiagnosticStatus( level=level, name='jetson_stats {type} disk'.format(type=dgtype), message="{0:2.1f}GB/{1:2.1f}GB".format(disk['used'], disk['total']), hardware_id=hardware, values=[ KeyValue(key="Used", value=str(disk['used'])), KeyValue(key="Total", value=str(disk['total'])), KeyValue(key="Unit", value="GB")]) return d_board
5,355,312
def hr_admin(request): """ Views for HR2 Admin page """ user = request.user # extra_info = ExtraInfo.objects.select_related().get(user=user) designat = HoldsDesignation.objects.select_related().get(user=user) if designat.designation.name =='hradmin': template = 'hr2Module/hradmin.html' # searched employee query = request.GET.get('search') if(request.method == "GET"): if(query != None): emp = ExtraInfo.objects.filter( Q(user__first_name__icontains=query) | Q(user__last_name__icontains=query)| Q(id__icontains=query) ).distinct() emp = emp.filter(user_type="faculty") else: emp = ExtraInfo.objects.all() emp = emp.filter(user_type="faculty") else: emp = ExtraInfo.objects.all() emp = emp.filter(user_type="faculty") context = {'emps': emp} return render(request, template, context) else: return HttpResponse('Unauthorized', status=401)
5,355,313
def test_extra(): """Returns dict of extrapolation testing modules.""" return {name: module.test_extra() for name, module in six.iteritems(all_)}
5,355,314
def add_copy_elf_task(self): """creates a task to copy the elf file into the output root (task :py:class:`f_ti_arm_cgt.copy_elf`)""" # Guard against a missing link task before dereferencing it if not hasattr(self, "link_task"): return if self.bld.variant_dir == self.link_task.outputs[0].parent.abspath(): return if self.bld.variant_dir: out_dir = self.bld.variant_dir else: out_dir = self.bld.path.get_bld() self.copy_elf_task = self.create_task( "copy_elf", src=self.link_task.outputs[0], tgt=[ self.bld.path.find_or_declare( os.path.join(out_dir, self.link_task.outputs[0].name) ) ], )
5,355,315
def determine_family(reaction: 'ARCReaction', db: Optional[RMGDatabase] = None, ): """ Determine the RMG reaction family for an ARC reaction. A wrapper for ARCReaction.determine_family(). This wrapper is useful because it makes a new instance of the rmgdb if needed. Args: reaction ('ARCReaction'): An ARCReaction object instance. db (RMGDatabase, optional): The RMG database instance. """ if reaction.family is None: if db is None: db = make_rmg_database_object() load_families_only(db) if reaction.rmg_reaction is None: reaction.rmg_reaction_from_arc_species() reaction.determine_family(db)
5,355,316
def _switch_component( x: torch.Tensor, ones: torch.Tensor, zeros: torch.Tensor ) -> torch.Tensor: """ Basic component of switching functions. Args: x (torch.Tensor): Switch functions. ones (torch.Tensor): Tensor with ones. zeros (torch.Tensor): Zero tensor Returns: torch.Tensor: Output tensor. """ x_ = torch.where(x <= 0, ones, x) return torch.where(x <= 0, zeros, torch.exp(-ones / x_))
5,355,317
def get_vector(x_array, y_array, pair): """Calculate the vector of a bone from the OpenPose skeleton.""" x = x_array[:,pair[0]]-x_array[:,pair[1]] y = y_array[:,pair[0]]-y_array[:,pair[1]] return [x, y]
5,355,318
def update_bar(tweets_json, handle): """ Pull data from signal and updates aggregate bar graph This is using thresholds that combine toxicity and severe toxicity models suggested by Lucas. """ if not tweets_json: raise PreventUpdate('no data yet!') tweets_df = pd.read_json(tweets_json, orient='split') low_count = tweets_df['LOW_LEVEL'].value_counts().get(True, 0) med_count = tweets_df['MED_LEVEL'].value_counts().get(True, 0) hi_count = tweets_df['HI_LEVEL'].value_counts().get(True, 0) begin_date = tweets_df['display_time'].iloc[-1] end_date = tweets_df['display_time'].iloc[0] title = f"tweets at {handle}: {begin_date} – {end_date} (UTC)" data = dict( type='bar', x=['Low', 'Medium', 'High'], y=[low_count, med_count, hi_count], marker=dict( color=[colors['low'], colors['medium'], colors['high']]) ) return { 'data': [data], 'layout': dict( type='layout', title=title, xaxis={'title': 'toxicity level'}, yaxis={'title': 'count'}, ) }
5,355,319
def defineConsole(): """ defines the program console line commands """ parser = argparse.ArgumentParser(description="SBML to BNGL translator") parser.add_argument( "-f1", "--file1", type=str, help="reference file", required=True ) parser.add_argument( "-f2", "--file2", type=str, help="comparison file", required=True ) # parser.add_argument('-o', '--output', type=str, help='output file', required=True) return parser
5,355,320
def query_command(): """ Interactive querying of code snippets """ keyword = input("What do you want to query: ").lower().strip() query = StackoverflowQuery(keyword, ['python', 'js', 'ruby']) for snippet in query.code_snippets(): print("=======================") print("Language: " + snippet.snippet_language) print("Author: " + snippet.author) print("Question URL: " + snippet.question_url) print("Answer URL: " + snippet.answer_url) print("License: " + snippet.license) print("=======================") print(snippet.code) command = input("Do you want to show the next result or execute a new query? (n/q)") if command == 'n': continue elif command == 'q': break
5,355,321
def device_boot(): """ Starts timer to activate pump every PUMP_ACTIVATION_DELTA hours. If for some reason the device gets rebooted during a water release it should turn off the pump after being restarted. """ logger.info('Turning pump off after device start') pump = WaterPumpControl() pump.off() while True: data = _read_data() now = datetime.now() last_watering = data.get('last_watering') if last_watering: last_watering = parse_date(last_watering) if not last_watering or \ (now - last_watering).seconds > PUMP_ACTIVATION_DELTA: # if it was never watered or if last watering is > then max delta, # activate pump pump.release() # updates local data file _write_data({ 'last_watering': now.isoformat(), }) logger.info('Sleeping for 30s') time.sleep(30)
5,355,322
def render_foreign_derivation(tpl: str, parts: List[str], data: Dict[str, str]) -> str: """ >>> render_foreign_derivation("bor", ["en", "ar", "الْعِرَاق", "", "Iraq"], defaultdict(str)) 'Arabic <i>الْعِرَاق</i> (<i>ālʿrāq</i>, “Iraq”)' >>> render_foreign_derivation("der", ["en", "fro", "-"], defaultdict(str)) 'Old French' >>> render_foreign_derivation("etyl", ["enm", "en"], defaultdict(str)) 'Middle English' >>> render_foreign_derivation("etyl", ["grc"], defaultdict(str)) 'Ancient Greek' >>> render_foreign_derivation("inh", ["en", "enm", "water"], defaultdict(str)) 'Middle English <i>water</i>' >>> render_foreign_derivation("inh", ["en", "ang", "wæter", "", "water"], defaultdict(str)) 'Old English <i>wæter</i> (“water”)' >>> render_foreign_derivation("inh", ["en", "ang", "etan"], defaultdict(str, {"t":"to eat"})) 'Old English <i>etan</i> (“to eat”)' >>> render_foreign_derivation("inh", ["en", "ine-pro", "*werdʰh₁om", "*wr̥dʰh₁om"], defaultdict(str)) 'Proto-Indo-European <i>*wr̥dʰh₁om</i>' >>> render_foreign_derivation("noncog", ["fro", "livret"], defaultdict(str, {"t":"book, booklet"})) 'Old French <i>livret</i> (“book, booklet”)' >>> render_foreign_derivation("noncog", ["xta", "I̱ta Ita"], defaultdict(str, {"lit":"flower river"})) #xochopa 'Alcozauca Mixtec <i>I̱ta Ita</i> (literally “flower river”)' >>> render_foreign_derivation("noncog", ["egy", "ḫt n ꜥnḫ", "", "grain, food"], defaultdict(str, {"lit":"wood/stick of life"})) 'Egyptian <i>ḫt n ꜥnḫ</i> (“grain, food”, literally “wood/stick of life”)' >>> render_foreign_derivation("cal", ["fr" , "en", "light year"], defaultdict(str, {"alt":"alt", "tr":"tr", "t":"t", "g":"m", "pos":"pos", "lit":"lit"})) 'Calque of English <i>alt</i> <i>m</i> (<i>tr</i>, “t”, pos, literally “lit”)' >>> render_foreign_derivation("pcal", ["en" , "de", "Leberwurst"], defaultdict(str, {"nocap":"1"})) 'partial calque of German <i>Leberwurst</i>' >>> render_foreign_derivation("sl", ["en", "ru", "пле́нум", "", "plenary session"], defaultdict(str, {"nocap":"1"})) 'semantic loan of Russian <i>пле́нум</i> (<i>plenum</i>, “plenary session”)' >>> render_foreign_derivation("learned borrowing", ["en", "la", "consanguineus"], defaultdict(str)) 'Learned borrowing from Latin <i>consanguineus</i>' >>> render_foreign_derivation("learned borrowing", ["en", "LL.", "trapezium"], defaultdict(str, {"notext":"1"})) 'Late Latin <i>trapezium</i>' >>> render_foreign_derivation("slbor", ["en", "fr", "mauvaise foi"], defaultdict(str, {"nocap":"1"})) 'semi-learned borrowing from French <i>mauvaise foi</i>' >>> render_foreign_derivation("obor", ["en", "ru", "СССР"], defaultdict(str)) 'Orthographic borrowing from Russian <i>СССР</i> (<i>SSSR</i>)' >>> render_foreign_derivation("unadapted borrowing", ["en", "ar", "قِيَاس", "", "measurement, analogy"], defaultdict(str)) 'Unadapted borrowing from Arabic <i>قِيَاس</i> (<i>qīās</i>, “measurement, analogy”)' >>> render_foreign_derivation("psm", ["en", "yue", "-"], defaultdict(str)) 'Phono-semantic matching of Cantonese' >>> render_foreign_derivation("translit", ["en", "ar", "عَالِيَة"], defaultdict(str)) 'Transliteration of Arabic <i>عَالِيَة</i> (<i>ʿālī</i>)' >>> render_foreign_derivation("back-form", ["en", "zero derivation"], defaultdict(str, {"nocap":"1"})) 'back-formation from <i>zero derivation</i>' >>> render_foreign_derivation("bf", ["en"], defaultdict(str)) 'Back-formation' >>> render_foreign_derivation("l", ["cs", "háček"], defaultdict(str)) 'háček' >>> render_foreign_derivation("l", ["en", "go", "went"], defaultdict(str)) 'went' >>> 
render_foreign_derivation("l", ["en", "God be with you"], defaultdict(str)) 'God be with you' >>> render_foreign_derivation("l", ["la", "similis"], defaultdict(str, {"t":"like"})) 'similis (“like”)' >>> render_foreign_derivation("l", ["la", "similis", "", "like"], defaultdict(str)) 'similis (“like”)' >>> render_foreign_derivation("l", ["mul", "☧", ""], defaultdict(str)) '☧' >>> render_foreign_derivation("l", ["ru", "ру́сский", "", "Russian"], defaultdict(str, {"g":"m"})) 'ру́сский <i>m</i> (<i>russkij</i>, “Russian”)' >>> render_foreign_derivation("link", ["en", "water vapour"], defaultdict(str)) 'water vapour' >>> render_foreign_derivation("ll", ["en", "cod"], defaultdict(str)) 'cod' >>> render_foreign_derivation("m", ["en", "more"], defaultdict(str)) '<b>more</b>' >>> render_foreign_derivation("m", ["enm", "us"], defaultdict(str)) '<i>us</i>' >>> render_foreign_derivation("m", ["ine-pro", "*h₁ed-"], defaultdict(str, {"t":"to eat"})) '<i>*h₁ed-</i> (“to eat”)' >>> render_foreign_derivation("m", ["ar", "عِرْق", "", "root"], defaultdict(str)) '<i>عِرْق</i> (<i>ʿrq</i>, “root”)' >>> render_foreign_derivation("m", ["pal"], defaultdict(str, {"tr":"ˀl'k'", "ts":"erāg", "t":"lowlands"})) "(<i>ˀl'k'</i> /erāg/, “lowlands”)" >>> render_foreign_derivation("m", ["ar", "عَرِيق", "", "deep-rooted"], defaultdict(str)) '<i>عَرِيق</i> (<i>ʿrīq</i>, “deep-rooted”)' >>> render_foreign_derivation("langname-mention", ["en", "-"], defaultdict(str)) 'English' >>> render_foreign_derivation("m+", ["en", "-"], defaultdict(str)) 'English' >>> render_foreign_derivation("m+", ["ja", "力車"], defaultdict(str, {"tr":"rikisha"})) 'Japanese <i>力車</i> (<i>rikisha</i>)' """ # noqa # Short path for the {{m|en|WORD}} template if tpl == "m" and len(parts) == 2 and parts[0] == "en" and not data: return strong(parts[1]) mentions = ( "back-formation", "back-form", "bf", "l", "link", "ll", "mention", "m", ) dest_lang_ignore = ( "cog", "cognate", "etyl", "langname-mention", "m+", "nc", "ncog", "noncog", "noncognate", *mentions, ) if tpl not in dest_lang_ignore: parts.pop(0) # Remove the destination language dst_locale = parts.pop(0) if tpl == "etyl" and parts: parts.pop(0) phrase = "" starter = "" word = "" if data["notext"] != "1": if tpl in ("calque", "cal", "clq"): starter = "calque of " elif tpl in ("partial calque", "pcal"): starter = "partial calque of " elif tpl in ("semantic loan", "sl"): starter = "semantic loan of " elif tpl in ("learned borrowing", "lbor"): starter = "learned borrowing from " elif tpl in ("semi-learned borrowing", "slbor"): starter = "semi-learned borrowing from " elif tpl in ("orthographic borrowing", "obor"): starter = "orthographic borrowing from " elif tpl in ("unadapted borrowing", "ubor"): starter = "unadapted borrowing from " elif tpl in ("phono-semantic matching", "psm"): starter = "phono-semantic matching of " elif tpl in ("transliteration", "translit"): starter = "transliteration of " elif tpl in ("back-formation", "back-form", "bf"): starter = "back-formation" if parts: starter += " from" phrase = starter if data["nocap"] == "1" else starter.capitalize() lang = langs.get(dst_locale, "") phrase += lang if tpl not in mentions else "" if parts: word = parts.pop(0) if word == "-": return phrase word = data["alt"] or word gloss = data["t"] or data["gloss"] if parts: word = parts.pop(0) or word # 4, alt= if tpl in ("l", "link", "ll"): phrase += f" {word}" elif word: phrase += f" {italic(word)}" if data["g"]: phrase += f' {italic(data["g"])}' trans = "" if not data["tr"]: trans = 
transliterate(dst_locale, word) if parts: gloss = parts.pop(0) # 5, t=, gloss= phrase += gloss_tr_poss(data, gloss, trans) return phrase.lstrip()
5,355,323
def create_from_ray(ray): """Converts a ray to a line. The line will extend from 'ray origin -> ray origin + ray direction'. :param numpy.array ray: The ray to convert. :rtype: numpy.array :return: A line beginning at the ray start and extending for 1 unit in the direction of the ray. """ # convert ray relative direction to absolute # position return np.array([ray[0], ray[0] + ray[1]], dtype=ray.dtype)
5,355,324
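A minimal usage sketch for create_from_ray above; the 2x3 ray layout (row 0 = origin, row 1 = direction) is an assumption for illustration.

import numpy as np

# Hypothetical ray: origin at the world origin, unit direction along +z.
ray = np.array([[0.0, 0.0, 0.0],
                [0.0, 0.0, 1.0]])
line = create_from_ray(ray)
# line[0] is the ray origin, line[1] is origin + direction:
# [[0. 0. 0.]
#  [0. 0. 1.]]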
def cluster_sampling(sents: List[Sentence], tag_type: str, **kwargs) -> List[int]: """Cluster sampling. We create cluster sampling as a kind of diversity sampling method. Different with most of sampling methods that are based on sentence level, Cluster sampling method is implemented on entity level. Cluster sampling classify all entity into cluster, and find the centen in each cluster. We calculate the similarity between center and entity in the same cluster, the low similarity pair means high diversity. Args: sents (List[Sentence]): [description] tag_type (str): [description] Returns: List[int]: [description] """ label_names = kwargs["label_names"] if "O" in label_names: label_names.remove("O") embeddings = kwargs["embeddings"] embedding_dim = None # Get entities in each class, each entity has {sent_idx, token_idx, token_text, token_embedding} label_entity_list = [] for sent_idx, sent in enumerate(sents): if len(sent.get_spans("ner")) != 0: embeddings.embed(sent) for token_idx, token in enumerate(sent): tag = token.get_tag("ner") if ( tag.value == "O" ): # Skip if the "O" label. tag.value is the label name continue tag_info = { "sent_idx": sent_idx, "token_idx": token_idx, "token_text": token.text, "token_embedding": token.embedding, } if embedding_dim is None: embedding_dim = len(token.embedding.shape) - 1 label_entity_list.append(tag_info) # Get all entity embedding matrix entity_embedding_matrix = [tag["token_embedding"] for tag in label_entity_list] if entity_embedding_matrix == []: return random_sampling(sents) else: entity_embedding_matrix = stack(entity_embedding_matrix) # Clustering kmeans = KMeans(n_clusters=len(label_names)) kmeans.fit(entity_embedding_matrix) cluster_centers_matrix = kmeans.cluster_centers_ entity_labels = kmeans.labels_ # Find the center in matrix center_cluster_num = {} # {center_num_in_cluster: center_index_in_matrix} for i, token_matrix in enumerate(entity_embedding_matrix): for center_matrix in cluster_centers_matrix: if center_matrix == token_matrix: center_num_in_cluster = entity_labels[i] center_cluster_num[center_num_in_cluster] = i # Find the entity in each cluster label_entity_cluster = { cluster_num: {"cluster_center_idx": 0, "cluster_member_idx": []} for cluster_num in center_cluster_num.keys() } for cluster_num in label_entity_cluster.keys(): label_entity_cluster[cluster_num]["cluster_center"] = center_cluster_num[ cluster_num ] for i, entity_cluster_num in enumerate(entity_labels): if entity_cluster_num == cluster_num: label_entity_cluster[cluster_num]["cluster_member_idx"].append(i) # Calculate each the similarity between center and entities for cluster_num, cluster_info in label_entity_cluster.items(): center_idx = cluster_info["cluster_center_idx"] scores = [] for member_idx in cluster_info["cluster_member_idx"]: cos = nn.CosineSimilarity(dim=embedding_dim) cosine_score = cos( entity_embedding_matrix[center_idx], entity_embedding_matrix[member_idx] ) scores.append(cosine_score) label_entity_cluster["sim_scores"] = scores # Used for debug the order for cluster_num, cluster_info in label_entity_cluster.items(): cluster_member_idx = cluster_info["cluster_member_idx"] sim_scores = cluster_info["sim_scores"] cluster_info["sim_scores"] = [ x for _, x in sorted(zip(sim_scores, cluster_member_idx)) ] cluster_info["cluster_member_idx"] = sorted(sim_scores) # Flat the entity score entity_scores = [0] * len(label_entity_list) for cluster_num, cluster_info in label_entity_cluster.items(): for i, member_idx in enumerate(cluster_info["cluster_member_idx"]): 
entity_scores[member_idx] += cluster_info["sim_scores"][i] # Reorder the sentence index sentence_scores = [99] * len(sents) for entity_idx, entity_info in enumerate(label_entity_list): sent_idx = entity_info["sent_idx"] sentence_scores[sent_idx] += entity_scores[entity_idx] ascend_indices = np.argsort(sentence_scores) return ascend_indices
5,355,325
def paths_to_dirs(paths): # type: (t.List[str]) -> t.List[str] """Returns a list of directories extracted from the given list of paths.""" dir_names = set() for path in paths: while True: path = os.path.dirname(path) if not path or path == os.path.sep: break dir_names.add(path + os.path.sep) return sorted(dir_names)
5,355,326
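A short illustrative call to paths_to_dirs above, assuming POSIX-style paths:

# Every ancestor directory of each path is collected (with a trailing separator),
# excluding the filesystem root itself.
paths = ['/usr/lib/python3/os.py', '/usr/bin/python3']
print(paths_to_dirs(paths))
# ['/usr/', '/usr/bin/', '/usr/lib/', '/usr/lib/python3/']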
def opts2dict(opts): """Converts options returned from an OptionParser into a dict""" ret = {} for k in dir(opts): if callable(getattr(opts, k)): continue if k.startswith('_'): continue ret[k] = getattr(opts, k) return ret
5,355,327
def alignment_scan_timing_system_start_images(image_numbers): """Configure timing system image_numbers: list of 1-based integers e.g. image_numbers = alignment_pass(1)""" nimages = len(image_numbers) # The detector trigger pulse at the beginning of the first image is to # dump zingers that may have accumuated on the CCD. This image is discarded. # An extra detector trigger is required after the last image, waitt = [align.waitt]*nimages+[align.waitt] burst_waitt = [0.012]*nimages+[0.012] burst_delay = [0]*nimages+[0] npulses = [align.npulses]*nimages+[align.npulses] laser_on = [0]*nimages+[0] ms_on = [1]*nimages+[0] xatt_on = [align.attenuate_xray]*nimages+[align.attenuate_xray] trans_on = [1]*nimages+[0] xdet_on = [1]*nimages+[1] xosct_on = [1]*nimages+[0] image_numbers = image_numbers+[image_numbers[-1]+1] ##timing_sequencer.inton_sync = 0 timing_system.image_number.count = 0 timing_system.pulses.count = 0 ##timing_sequencer.running = False timing_sequencer.acquire( waitt=waitt, burst_waitt=burst_waitt, burst_delay=burst_delay, npulses=npulses, laser_on=laser_on, ms_on=ms_on, xatt_on=xatt_on, trans_on=trans_on, xdet_on=xdet_on, xosct_on=xosct_on, image_numbers=image_numbers, )
5,355,328
def combined_directions(a_list, b_list): """ Takes two NoteList objects. Returns a list of (3)tuples each of the form: ( int: a dir, int: b dir, (int: bar #, float: beat #) ) """ onsets = note_onsets(a_list, b_list) a_dirs = directions(a_list) b_dirs = directions(b_list) dirs = {} for time in onsets: dirs[time] = (0, 0) for dir, time in a_dirs: dirs[time] = (dir, dirs[time][1]) for dir, time in b_dirs: dirs[time] = (dirs[time][0], dir) return [ (dirs[time][0], dirs[time][1], time) for time in onsets ]
5,355,329
def _naive_csh_seismology(l, m, theta, phi): """ Compute the spherical harmonics according to the seismology convention, in a naive way. This appears to be equal to the sph_harm function in scipy.special. """ return (lpmv(m, l, np.cos(theta)) * np.exp(1j * m * phi) * np.sqrt(((2 * l + 1) * factorial(l - m)) / (4 * np.pi * factorial(l + m))))
5,355,330
def is_room_valid(room): """Check if room is valid.""" _, names, checksum = room letters = defaultdict(int) complete_name = ''.join(names) for letter in complete_name: letters[letter] += 1 sorted_alphabetic = sorted(letters) sorted_by_occurrences = sorted( sorted_alphabetic, key=letters.__getitem__, reverse=True) return ''.join(sorted_by_occurrences).startswith(checksum)
5,355,331
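A brief sketch of how is_room_valid above behaves, using rooms in the (sector, name parts, checksum) shape the function unpacks; the sample rooms are made up for illustration:

# The checksum must match the most common letters in order,
# ties broken alphabetically.
print(is_room_valid((123, ['aaaaa', 'bbb', 'z', 'y', 'x'], 'abxyz')))  # True
print(is_room_valid((200, ['totally', 'real', 'room'], 'decoy')))      # False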
def _get_attributes_entropy(dataset: FingerprintDataset, attributes: AttributeSet ) -> Dict[Attribute, float]: """Give a dictionary with the entropy of each attribute. Args: dataset: The fingerprint dataset used to compute the entropy. attributes: The attributes for which we compute the entropy. Raises: ValueError: There are attributes and the fingerprint dataset is empty. KeyError: An attribute is not in the fingerprint dataset. Returns: A dictionary with each attribute (Attribute) and its entropy. """ # Some checks before starting the exploration if attributes and dataset.dataframe.empty: raise ValueError('Cannot compute the entropy on an empty dataset.') for attribute in attributes: if attribute not in dataset.candidate_attributes: raise KeyError(f'The attribute {attribute} is not in the dataset.') # We will work on a dataset with only a fingerprint per browser to avoid # overcounting effects df_one_fp_per_browser = dataset.get_df_w_one_fp_per_browser() # If we execute on a single process if not params.getboolean('Multiprocessing', 'explorations'): logger.debug('Measuring the attributes entropy on a single process...') return _compute_attribute_entropy(df_one_fp_per_browser, attributes) # The dictionary to update when using multiprocessing logger.debug('Measuring the attributes entropy using multiprocessing...') attributes_entropy = {} # Infer the number of cores to use free_cores = params.getint('Multiprocessing', 'free_cores') nb_cores = max(cpu_count() - free_cores, 1) attributes_per_core = int(ceil(len(attributes)/nb_cores)) logger.debug(f'Sharing {len(attributes)} attributes over ' f'{nb_cores}(+{free_cores}) cores, hence ' f'{attributes_per_core} attributes per core.') def update_attributes_entropy(attrs_entropy: Dict[Attribute, float]): """Update the complete dictionary attributes_entropy. Args: attrs_size: The dictionary containing the subset of the results computed by a process. Note: This is executed by the main thread and does not pose any concurrency or synchronization problem. """ for attribute, attribute_entropy in attrs_entropy.items(): attributes_entropy[attribute] = attribute_entropy # Spawn a number of processes equal to the number of cores attributes_list = list(attributes) async_results = [] with Pool(processes=nb_cores) as pool: for process_id in range(nb_cores): # Generate the candidate attributes for this process start_id = process_id * attributes_per_core end_id = (process_id + 1) * attributes_per_core attributes_subset = AttributeSet(attributes_list[start_id:end_id]) async_result = pool.apply_async( _compute_attribute_entropy, args=(df_one_fp_per_browser, attributes_subset), callback=update_attributes_entropy) async_results.append(async_result) # Wait for all the processes to finish (otherwise we would exit # before collecting their result) for async_result in async_results: async_result.wait() return attributes_entropy
5,355,332
def _mag_shrink_hard(x, r, t): """ x is the input, r is the magnitude and t is the threshold """ gain = (r >= t).float() return x * gain
5,355,333
def plot_along(a, title=''): """Plot infos from a DataFrame created by run.along Arguments --------- a : Pandas DataFrame run.along output """ f, ax = plt.subplots(2, figsize=(16, 16), dpi= 80, )#wspace=0, hspace=0) x = a['xo'].values pc = a['pc'].values pn = a['pn'].values crl = a['crl'].values chisqr = a['chisqr'].values for i in ax[1:2]: i.grid() i.xaxis.label.set_size(15) i.yaxis.label.set_size(15) i.tick_params(labelsize=15) i.title.set_size(20) ax[0].plot(x, pc, 'k', lw=3, label='$P_c$') ax[0].plot(x, pn, '.6', lw=3, label='$P_n$') ax[0].fill_between(x, pc, pn, where=pc >= pn, alpha=.2, label='Dominantly Specular') ax[0].fill_between(x, pc, pn, where=pc <= pn, alpha=.2, label='Dominatly Diffuse') ax[0].set_title('RSR-derived Coherent and Incoherent Energies', fontweight="bold", fontsize=20) ax[0].set_ylabel('$[dB]$') ax[0].set_xlim(0, x.max()) ax[0].legend(loc=3, ncol=2, fontsize='large') ax_chisqr = ax[1].twinx() ax_chisqr.plot(x, chisqr, '.6', lw=3) ax_chisqr.set_ylabel('Chi-square', color='.6') ax_chisqr.yaxis.label.set_size(15) ax_chisqr.tick_params(labelsize=15) ax[1].plot(x, crl, 'k', lw=3) ax[1].set_title('Quality Metrics', fontweight="bold", fontsize=20) ax[1].set_ylabel('Correlation Coefficient') ax[1].set_xlim(0, x.max()) ax[1].set_ylim(0, 1.1) ax[1].legend(loc=3, ncol=2, fontsize='large') ax[1].set_xlabel('Bin #')
5,355,334
def is_macports_env(): """ Check if Python interpreter was installed via Macports command 'port'. :return: True if Macports else otherwise. """ # Python path prefix should start with Macports prefix. env_prefix = get_macports_prefix() if env_prefix and base_prefix.startswith(env_prefix): return True return False
5,355,335
async def test_async_remove_no_platform(hass): """Test async_remove method when no platform set.""" ent = entity.Entity() ent.hass = hass ent.entity_id = "test.test" await ent.async_update_ha_state() assert len(hass.states.async_entity_ids()) == 1 await ent.async_remove() assert len(hass.states.async_entity_ids()) == 0
5,355,336
def wiki_data(request, pro_id): """ Display article titles """ data = models.Wiki.objects.filter(project_id=pro_id).values('id', 'title', 'parent_id').order_by('deepth') return JsonResponse({'status': True, 'data': list(data)})
5,355,337
def chain(*tasks): """ Given a number of tasks, builds a dependency chain. chain(task_1, task_2, task_3, task_4) is equivalent to task_1.set_downstream(task_2) task_2.set_downstream(task_3) task_3.set_downstream(task_4) """ for up_task, down_task in zip(tasks[:-1], tasks[1:]): up_task.set_downstream(down_task)
5,355,338
def print_epoch_progress(train_loss, val_loss, time_duration, train_metric, val_metric): """Print all the information after each epoch. :train_loss: average training loss :val_loss: average validation loss :time_duration: time duration for current epoch :train_metric_collects: a performance dictionary for training :val_metric_collects: a performance dictionary for validation :returns: None """ train_acc, val_acc = train_metric['accuracy'], val_metric['accuracy'] train_prec, val_prec = train_metric['precisions'], val_metric['precisions'] train_recalls, val_recalls = train_metric['recalls'], val_metric['recalls'] log_str = 'Train/Val| Loss: {:.4f}/{:.4f}|'.format(train_loss, val_loss) log_str += 'Acc: {:.4f}/{:.4f}|'.format(train_acc, val_acc) n_classes = len(train_prec) templ = 'Pr: ' + ', '.join(['{:.4f}'] * (n_classes-1)) + '/' log_str += templ.format(*(train_prec[1:].tolist())) templ = ', '.join(['{:.4f}'] * (n_classes-1)) + '|' log_str += templ.format(*(val_prec[1:].tolist())) templ = 'Re: ' + ', '.join(['{:.4f}'] * (n_classes - 1)) + '/' log_str += templ.format(*(train_recalls[1:].tolist())) templ = ', '.join(['{:.4f}'] * (n_classes - 1)) + '|' log_str += templ.format(*(val_recalls[1:].tolist())) log_str += 'T(s) {:.2f}'.format(time_duration) print(log_str)
5,355,339
def create_task_spec_def(): """Returns a :class:`TaskSpecDef` based on the environment variables for distributed training. References ---------- - `ML-engine trainer considerations <https://cloud.google.com/ml-engine/docs/trainer-considerations#use_tf_config>`__ - `TensorPort Distributed Computing <https://www.tensorport.com/documentation/code-details/>`__ """ if 'TF_CONFIG' in os.environ: # TF_CONFIG is used in ML-engine env = json.loads(os.environ.get('TF_CONFIG', '{}')) task_data = env.get('task', None) or {'type': 'master', 'index': 0} cluster_data = env.get('cluster', None) or {'ps': None, 'worker': None, 'master': None} return TaskSpecDef( task_type=task_data['type'], index=task_data['index'], trial=task_data['trial'] if 'trial' in task_data else None, ps_hosts=cluster_data['ps'], worker_hosts=cluster_data['worker'], master=cluster_data['master'] if 'master' in cluster_data else None) elif 'JOB_NAME' in os.environ: # JOB_NAME, TASK_INDEX, PS_HOSTS, WORKER_HOSTS and MASTER_HOST are used in TensorPort return TaskSpecDef( task_type=os.environ['JOB_NAME'], index=os.environ['TASK_INDEX'], ps_hosts=os.environ.get('PS_HOSTS', None), worker_hosts=os.environ.get('WORKER_HOSTS', None), master=os.environ.get('MASTER_HOST', None)) else: raise Exception('You need to set up TF_CONFIG or JOB_NAME to define the task.')
5,355,340
def clkdirpwm_main(): """ Main routine for the clkdirpwm command-line function. """ parser = optparse.OptionParser(usage=CLKDIRPWM_USAGE_STR) parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='verbose mode - print additional information', default=CMDLINE_DEFAULT_VERBOSE) parser.add_option('-w', '--wait', action='store_true', dest='wait', help='return only after sinewave outscan complete', default=CMDLINE_DEFAULT_WAIT) options, args = parser.parse_args() try: command = args[0].lower() except IndexError: print('E: no command argument') sys.exit(1) if command=='read': read(options) elif command=='test': test(options) elif command=='reset': reset(options) elif command=='dfu-mode': dfu_mode(options) elif command=='pwm-to-default': set_pwm_to_default(options) else: print('E: unknown command %s'%(command,)) sys.exit(1)
5,355,341
def test_baked_query(n): """test a baked query of the full entity.""" bakery = baked.bakery() s = Session(bind=engine) for id_ in random.sample(ids, n): q = bakery(lambda s: s.query(Customer)) q += lambda q: q.filter(Customer.id == bindparam("id")) q(s).params(id=id_).one()
5,355,342
def many_capitalized_words(s): """Check the percentage of capitalized words. Returns 1 if the percentage is greater than 66% and 0 otherwise. """ return 1 if capitalized_words_percent(s) > 66 else 0
5,355,343
def cmd_te_solution_build(abs_filename,wait=False,print_output=False,clear_output=False): """Build the solution (for text editors). Finds the Visual Studio instance that contains the file and builds the solution. Same behavior as Visual Studio's "Menu -> Build -> Build Solution". abs_filename - absolute path of the file (Ex.) c:/project/my_app/src/main.cpp wait - True: wait until the build finishes (return on completion) False: return immediately print_output - True: print the compile results to the console False: do nothing clear_output - True: clear the Visual Studio output window False: do nothing """ return _te_main(cmd_solution_build, abs_filename,wait,print_output,clear_output)
5,355,344
def main(paths, verbose, dry_run): """Rename filesystem entries to ASCII equivalent transliterations.""" for start_path in (os.path.expanduser(decode_filesystem_name(p)) for p in paths): if os.path.isdir(start_path): for root, dirs, files in os.walk(start_path): files = [decode_filesystem_name(f) for f in files] dirs = [decode_filesystem_name(d) for d in dirs] root = decode_filesystem_name(root) for relpath in sorted(dirs): transrename(os.path.join(root, relpath), verbose, dry_run) for relpath in sorted(files): transrename(os.path.join(root, relpath), verbose, dry_run) elif os.path.isfile(start_path): transrename(start_path, verbose, dry_run) else: raise ValueError('Invalid argument %r' % start_path)
5,355,345
def log_ratio_measure( segmented_topics, accumulator, normalize=False, with_std=False, with_support=False): """ If normalize=False: Popularly known as PMI. This function calculates the log-ratio-measure which is used by coherence measures such as c_v. This is defined as: m_lr(S_i) = log[(P(W', W*) + e) / (P(W') * P(W*))] If normalize=True: This function calculates the normalized-log-ratio-measure, popularly knowns as NPMI which is used by coherence measures such as c_v. This is defined as: m_nlr(S_i) = m_lr(S_i) / -log[P(W', W*) + e] Args: segmented_topics (list): Output from the segmentation module of the segmented topics. Is a list of list of tuples. accumulator: word occurrence accumulator from probability_estimation. with_std (bool): True to also include standard deviation across topic segment sets in addition to the mean coherence for each topic; default is False. with_support (bool): True to also include support across topic segments. The support is defined as the number of pairwise similarity comparisons were used to compute the overall topic coherence. Returns: list : of log ratio measure for each topic. """ topic_coherences = [] num_docs = float(accumulator.num_docs) for s_i in segmented_topics: segment_sims = [] for w_prime, w_star in s_i: w_prime_count = accumulator[w_prime] w_star_count = accumulator[w_star] co_occur_count = accumulator[w_prime, w_star] if normalize: # For normalized log ratio measure numerator = log_ratio_measure([[(w_prime, w_star)]], accumulator)[0] co_doc_prob = co_occur_count / num_docs m_lr_i = numerator / (-np.log(co_doc_prob + EPSILON)) else: # For log ratio measure without normalization numerator = (co_occur_count / num_docs) + EPSILON denominator = (w_prime_count / num_docs) * (w_star_count / num_docs) m_lr_i = np.log(numerator / denominator) segment_sims.append(m_lr_i) topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support)) return topic_coherences
5,355,346
def _calculate_monthly_anomaly(data, apply_filter=False, base_period=None, lat_name=None, lon_name=None, time_name=None): """Calculate monthly anomalies at each grid point.""" # Ensure that the data provided is a data array data = rdu.ensure_data_array(data) # Get coordinate names lat_name = lat_name if lat_name is not None else rdu.get_lat_name(data) lon_name = lon_name if lon_name is not None else rdu.get_lon_name(data) time_name = time_name if time_name is not None else rdu.get_time_name(data) # Get subset of data to use for computing anomalies base_period = rdu.check_base_period( data, base_period=base_period, time_name=time_name) input_frequency = rdu.detect_frequency(data, time_name=time_name) if input_frequency not in ('daily', 'monthly'): raise RuntimeError( 'Can only calculate anomalies for daily or monthly data') if input_frequency == 'daily': data = data.resample({time_name: '1MS'}).mean() base_period_data = data.where( (data[time_name] >= base_period[0]) & (data[time_name] <= base_period[1]), drop=True) monthly_clim = base_period_data.groupby( base_period_data[time_name].dt.month).mean(time_name) monthly_anom = data.groupby(data[time_name].dt.month) - monthly_clim if apply_filter: monthly_anom = monthly_anom.rolling( {time_name: 3}).mean().dropna(time_name, how='all') # Approximate sampling frequency seconds_per_day = 60 * 60 * 24.0 fs = 1.0 / (seconds_per_day * 30) # Remove all modes with period greater than 7 years fmin = 1.0 / (seconds_per_day * 365.25 * 7) monthly_anom = _apply_fft_high_pass_filter( monthly_anom, fmin=fmin, fs=fs, detrend=True, time_name=time_name) return monthly_anom
5,355,347
def process_song(song_id): """ Process song information: song id, song name, singer id, album id, lyrics, and comment count. :param song_id: song id :return: processing status (True or False) """ log("Processing song: {}".format(song_id)) if db.hexists("song:" + song_id, "id"): log("Cached (already processed), song id: {}".format(song_id)) return True else: song_url = url_prefix + "song?id={}".format(song_id) song_html = process_url(song_url) song_content = pq(song_html) head_data = song_content(".cnt") song_name = head_data(".tit").text() # TODO: handle element selection for multiple singers sid = head_data("p:nth-child(2) a").attr("href").split("=")[1] album_id = head_data("p:nth-child(3) a").attr("href").split("=")[1] lyric = process_lyric_from_html(song_content) comment_count = head_data("#cnt_comment_count").text() data = { "id": song_id, "name": song_name, "singer_id": sid, "album_id": album_id, "lyric": lyric, "comment_count": comment_count } try: db.hmset("song:" + song_id, data) except Exception as e: log("Error while saving song to Redis: {}".format(e)) return False log("Finished processing song {} ({})".format(song_id, song_name)) return True
5,355,348
def pip(requirements_file='requirements.txt'): """Run pip install.""" require('site_path') with cd(env.site_path): if exists(requirements_file): run('./bin/pip install -r {0}'.format(requirements_file))
5,355,349
def expand_path(path): """ Convert a path to an absolute path. This does home directory expansion, meaning a leading ~ or ~user is translated to the current or given user's home directory. Relative paths are relative to the current working directory. :param path: Relative or absolute path of file. :return: Absolute path """ return os.path.abspath(os.path.expanduser(path))
5,355,350
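A quick usage note for expand_path above; the exact output depends on the current user's home directory and working directory:

print(expand_path('~/notes.txt'))      # e.g. /home/alice/notes.txt
print(expand_path('data/../cfg.yml'))  # e.g. /current/working/dir/cfg.yml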
def oxe_system_alaw_to_mulaw(host, token, mode): """Set the T0_Mu_Law system parameter through the OXE management API. Args: host (str): OXE host address. token (str): API authentication token. mode: value to assign to the T0_Mu_Law parameter. Returns: int: HTTP status code of the modification request, or None if the request failed. """ payload = { 'T0_Mu_Law': mode } packages.urllib3.disable_warnings(packages.urllib3.exceptions.InsecureRequestWarning) try: modification = put( 'https://' + host + '/api/mgt/1.0/Node/1/System_Parameters/1/System_Parameters_2/1/System_/T0_Mu_Law', json=payload, headers=oxe_set_headers(token, 'PUT'), verify=False) except exceptions.RequestException as e: pprint(e) return None return modification.status_code
5,355,351
def test_makepipebranch(): """py.test for makepipebranch""" tdata = ( ( "p_branch", [ "BRANCH", "p_branch", 0.0, "", "Pipe:Adiabatic", "p_branch_pipe", "p_branch_pipe_inlet", "p_branch_pipe_outlet", "Bypass", ], [ "PIPE:ADIABATIC", "p_branch_pipe", "p_branch_pipe_inlet", "p_branch_pipe_outlet", ], ), # pb_name, branch_obj, pipe_obj ) for pb_name, branch_obj, pipe_obj in tdata: fhandle = StringIO("") idf = IDF(fhandle) result = hvacbuilder.makepipebranch(idf, pb_name) assert result.obj == branch_obj thepipe = idf.getobject("PIPE:ADIABATIC", result.Component_1_Name) assert thepipe.obj == pipe_obj
5,355,352
def update_weights(comment_weights, comment_usage): """Updates the weights used to upvote comments so that the actual voting power usage is equal to the estimated usage. """ desired_usage = 1.0 - VP_COMMENTS / 100.0 actual_usage = 1.0 - comment_usage / 100.0 scaler = np.log(desired_usage) / np.log(actual_usage) for category in comment_weights.keys(): comment_weights[category] *= scaler return comment_weights
5,355,353
def smaller2k(n): """ Returns power of 2 which is smaller than n. Handles negative numbers. """ if n == 0: return 0 if n < 0: return -2**math.ceil(math.log2(-n)) else: return 2**math.floor(math.log2(n))
5,355,354
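A few illustrative values for smaller2k above (it relies on the math module being imported):

print(smaller2k(100))   # 64
print(smaller2k(64))    # 64
print(smaller2k(0))     # 0
print(smaller2k(-5))    # -8  (for negative n, rounds away from zero)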
def create_anime_image_data(anime): """Create (or load) a dict for each anime that has a high level CNN representation of the associated MAL image. Parameters: ----------- anime : Pandas dataframe the dataframe corresponding to the list of all anime in the dataset. Returns: -------- image_data : dict A dict where each title is a key and the CNN representation of its MAL image is the value. """ from PIL import Image from lxml import etree import sys import os.path import pickle import time import urllib.request import io import requests dir_path = os.path.dirname(os.path.realpath(__file__)) fname = dir_path + '/../data/image_data.p' if os.path.isfile(fname): print('Using cached image data.') return pickle.load(open(fname, 'rb')) # To import mynet from a directory below, I must add that directory to path sys.path.insert(0, dir_path + '/../') import tensorflow as tf from mynet import CaffeNet #MAL credentials username = 'username'; password = 'password' #Get the tensorflow model started images = tf.placeholder(tf.float32, [None, 224, 224, 3]) net = CaffeNet({'data':images}) sesh = tf.Session() sesh.run(tf.global_variables_initializer()) # Load the data net.load('mynet.npy', sesh) image_data = {} width, height = (225, 350) #all MAL images are this size new_width, new_height = (224, 224) left = int((width - new_width)/2) top = int((height - new_height)/2) right = (left+new_width) bottom = (top + new_height) # Now to actually construct the dataset for name in anime.name: #First, get the full anime XML from MAL's search query title = "+".join(name.split() ) query = 'https://%s:%[email protected]/api/anime/search.xml?q=%s' \ % (username, password, title) r = requests.get(query) #Make sure that the request goes through while r.status_code != requests.codes.ok: r = requests.get(query) time.sleep(1.0) # don't overload their server... #From the XML file, pull all images that fit the query doc = etree.fromstring(r.content) image = doc.xpath('.//image/text()') ''' For sake of simplicity, I assume that the first image, corresponding to the first matching response to the query, is what we want. This isn't strictly correct, but for my goals here it's good enough.''' URL = image[0] with urllib.request.urlopen(URL) as url: f = io.BytesIO(url.read()) img = Image.open(f, 'r') #Center crop image so it's 225x225x3, and convert to numpy. img = np.array(img.crop((left, top, right, bottom))) #Now use the Illustration2Vec pre-trained model to extract features. output = sesh.run(net.get_output(), feed_dict={images: img[None,:]}) image_data[name] = output print('Finished with ' + anime.name) pickle.dump(image_data, open(fname, 'wb')) sesh.close()
5,355,355
def add_template_to_graph(graph, template): """Add a template object and its edges to the graph Iterates through any tiles on the template and adds the relevant link as an edge. Also looks for additional references in the dependencies """ def analyse_images(): """Find all the image references and create edges """ for image in xml_parser.iterfind("control", "SIM"): if "image" in image: image["type"] = "link" image["link_type"] = "static image" graph.add_edge(template.guid, image["image"].lower(), attr_dict=image) for image in xml_parser.iterfind("form"): if "image" in image: image["type"] = "link" image["link_type"] = "background image" graph.add_edge(template.guid, image["image"].lower(), attr_dict=image) def analyse_tiles(): """Find all the tiles and creates edges to the objects they reference """ for tile in xml_parser.iterfind("control", "TIL"): tile["type"] = "tile" if not "entity" in tile and template.entity: tile["entity"] = template.entity if "template" in tile: graph.add_edge(template.guid, tile["template"].lower(), attr_dict=tile) if "formflow" in tile: graph.add_edge(template.guid, tile["formflow"].lower(), attr_dict=tile) if "command" in tile: entity = get_command_entity(tile["command"], tile["entity"]) command = "{}-{}".format(tile["command"], entity) graph.add_edge(template.guid, command, attr_dict=tile) if "image" in tile: graph.add_edge(template.guid, tile["image"].lower(), attr_dict=tile) if "property" in tile: reference = "{}-{}".format(tile["property"], template.entity) add_property_edge_if_exists(graph, template.guid, reference, tile) def analyse_captions(): """Find all the caption over-rides on the form """ cap_link = { "type": "link", "link_type": "caption override" } for caption, cap_dict in xml_parser.properties_by_name("caption").iteritems(): cap_dict.update({ "name": caption, "type": "caption" }) prop = cap_dict.get("property") cap_ref = "{}-{}".format(caption, prop) graph.add_node(cap_ref, cap_dict) graph.add_edge(template.guid, cap_ref, attr_dict=cap_link) if prop: prop_ref = "{}-{}".format(prop, template.entity) add_property_edge_if_exists(graph, template.guid, prop_ref, cap_link) add_property_edge_if_exists(graph, cap_ref, prop_ref, cap_link) def analyse_components(): """Find all the component references to other templates """ for component, comp_dict in xml_parser.properties_by_name("component").iteritems(): comp_dict.update({ "type": "link", "link_type": "component template" }) graph.add_edge(template.guid, component.lower(), attr_dict=comp_dict) def analyse_bindings(): """Find references to properties in various forms """ for prop, prop_dict in xml_parser.properties_by_name("property").iteritems(): reference = "{}-{}".format(prop, template.entity) prop_dict.update({ "type": "link", "link_type": "bound property" }) add_property_edge_if_exists(graph, template.guid, reference, prop_dict) prop_dict = { "type": "link", "link_type": "bound property" } for prop in xml_parser.control_properties("binding"): reference = "{}-{}".format(prop, template.entity) add_property_edge_if_exists(graph, template.guid, reference, prop_dict) cd_dict = { "type": "link", "link_type": "column definition" } for search_list, prop in xml_parser.search_list_properties("columns", "FieldName"): if search_list == "Global": index_name = prop.upper() graph.add_node(index_name) graph.add_edge(template.guid, index_name, attr_dict=cd_dict) else: reference = "{}-{}".format(prop, template.entity) add_property_edge_if_exists(graph, template.guid, reference, cd_dict) def analyse_formflows(): """Find 
references to formflows from various controls """ ff_dict = { "type": "link", "link_type": "formflow reference" } for formflow, _ in xml_parser.properties_by_name("formflow").iteritems(): graph.add_edge(template.guid, formflow, attr_dict=ff_dict) # main template processing FORMSTEP_LOOKUP[template.name] = template.guid graph.add_node(template.guid, template.map()) if template.data: xml_parser = XMLParser(template.data) analyse_images() analyse_tiles() analyse_captions() analyse_components() analyse_bindings() analyse_formflows()
5,355,356
def deferred_bots_for_alias(alias): """Returns a dict where the keys are bot names whose commands have an alias that conflicts with the provided alias, and the values are a list of prefixes that would cause that conflict.""" return { # TODO Support more prefixes than one config['name']: [config['prefix']] for config in CONFIG['deferral'] if alias.lower() in config['commands'] }
5,355,357
def test(): """ This is not usually called, other than for testing the API. """ head = {"request": "test/api/{}".format(API_KEY), "api_key": API_KEY} ret=requests.get(url,params=head) print(ret.text) print(ret.json()) print(ret.json()['message'])
5,355,358
def del_rel_path(paths: Set[str]) -> None: """Delete all relative :param:`paths` from current root/docs directory.""" for path in paths: log.debug("Deleting file in the path %s", path) root_dir.joinpath(path.lstrip("/")).unlink()
5,355,359
def modeify(intcode, i): """Apply each parameter mode to its parameter and return the resolved values.""" j = i + 1 _opcode = opcode(intcode[i]) params = intcode[j: j + _opcode['param_count']] modes = _opcode['modes'] mode_convert = { 0: lambda x: intcode[x], # position mode 1: lambda x: x # immediate mode } output = [mode_convert[mode](param) for mode, param in zip(modes, params)] return output
5,355,360
def unwind(g, num): """Return <num> first elements from iterator <g> as array.""" return [next(g) for _ in range(num)]
5,355,361
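A small usage sketch for unwind above; note it raises StopIteration if the iterator yields fewer than num items:

import itertools

gen = itertools.count(start=10)
print(unwind(gen, 3))   # [10, 11, 12]
print(unwind(gen, 2))   # [13, 14]  (consumption continues where it left off)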
def find_best_control(db, input_features, max_distance=200.0, debug=False, control_cache=None): """ Search all controls with AST vector magnitudes within max_distance and find the best hit (lowest product of AST*call distance) against suitable controls. Does not currently use literal distance for the calculation. Could be improved.... returns up to two hits representing the best and next best hits (although the latter may be None). """ assert db is not None origin_url = input_features.get('url', input_features.get('id')) # LEGACY: url field used to be named id field cited_on = input_features.get('origin', None) # report owning HTML page also if possible (useful for data analysis) origin_js_id = input_features.get("js_id", None) # ensure we can find the script directly without URL lookup if isinstance(origin_js_id, tuple) or isinstance(origin_js_id, list): # BUG FIXME: should not be a tuple but is... where is that coming from??? so... origin_js_id = origin_js_id[0] assert isinstance(origin_js_id, str) and len(origin_js_id) == 24 best_distance = float('Inf') input_ast_vector, ast_sum = calculate_ast_vector(input_features['statements_by_count']) # NB: UNweighted vector fcall_sum = sum(input_features['calls_by_count'].values()) best_control = BestControl(control_url='', origin_url=origin_url, cited_on=cited_on, sha256_matched=False, ast_dist=float('Inf'), function_dist=float('Inf'), literal_dist=0.0, diff_functions='', origin_js_id=origin_js_id) second_best_control = None # we open the distance to explore "near by" a little bit... but the scoring for these hits is unchanged if debug: print("find_best_control({})".format(origin_url)) plausible_controls = find_plausible_controls(db, ast_sum, fcall_sum, max_distance=max_distance) feasible_controls = find_feasible_controls(db, plausible_controls, debug=debug, control_cache=control_cache) for fc_tuple in feasible_controls: control, control_ast_sum, control_ast_vector, control_call_vector = fc_tuple # NB: unweighted ast vector assert isinstance(control, dict) assert control_ast_sum > 0 assert isinstance(control_ast_vector, list) control_url = control.get('origin') # compute what we can for now and if we can update it later we will. 
        # Otherwise the second_best control may have some fields not-computed
        new_distance, ast_dist, call_dist, diff_functions = distance(input_ast_vector, control_ast_vector, input_features['calls_by_count'], control_call_vector, debug=debug)
        if call_dist < 5.0 and new_distance > max_distance:
            print("WARNING: rejecting possibly feasible control due to bad total distance: {} {} {} {} {}".format(new_distance, ast_dist, call_dist, control_url, origin_url))
        if new_distance < best_distance and new_distance <= max_distance:
            if debug:
                print("Got good distance {} for {} (was {}, max={})".format(new_distance, control_url, best_distance, max_distance))
            new_control = BestControl(control_url=control_url,  # control artefact from CDN (ground truth)
                                      origin_url=origin_url,    # JS at spidered site
                                      origin_js_id=origin_js_id,
                                      cited_on=cited_on,
                                      sha256_matched=False,
                                      ast_dist=ast_dist,
                                      function_dist=call_dist,
                                      literal_dist=0.0,
                                      diff_functions=' '.join(diff_functions))
            # NB: look at product of two distances before deciding to update best_* - hopefully this results in a lower false positive rate
            # (with accidental ast hits) as the number of controls in the database increases
            if best_control.is_better(new_control, max_distance=max_distance):
                second_dist = second_best_control.distance() if second_best_control is not None else 0.0
                if second_best_control is None or second_dist > new_control.distance():
                    if debug:
                        print("NOTE: improved second_best control was {} now is {}".format(second_best_control, new_control))
                    second_best_control = new_control
                    # NB: dont update best_* since we dont consider this hit a replacement for current best_control
            else:
                best_distance = new_distance
                second_best_control = best_control
                best_control = new_control
                if best_distance < 0.00001:  # small distance means we can try for a hash match against control?
                    assert control_url == best_control.control_url
                    hash_match = (control['sha256'] == input_features['sha256'])
                    best_control.sha256_matched = hash_match
                    break  # save time since we've likely found the best control but this may mean next_best_control is not second best in rare cases
        else:
            if debug:
                print("Rejecting control {} ast_dist={} fcall_dist={} total={}".format(control['origin'], ast_dist, call_dist, new_distance))

    # NB: literal fields in best_control/next_best_control are updated elsewhere... not here
    return (best_control, second_best_control)
5,355,362
def test_delete_multiple_objects():
    """Batch delete files"""
    file_id = str(random.randint(0, 1000)) + str(random.randint(0, 1000))
    file_name1 = "tmp" + file_id + "_delete1"
    file_name2 = "tmp" + file_id + "_delete2"
    response1 = client.put_object(
        Bucket=test_bucket,
        Key=file_name1,
        Body='A' * 1024 * 1024
    )
    assert response1
    response2 = client.put_object(
        Bucket=test_bucket,
        Key=file_name2,
        Body='B' * 1024 * 1024 * 2
    )
    assert response2
    objects = {
        "Quiet": "true",
        "Object": [
            {
                "Key": file_name1
            },
            {
                "Key": file_name2
            }
        ]
    }
    response = client.delete_objects(
        Bucket=test_bucket,
        Delete=objects
    )
5,355,363
def CPPComments(text): """Remove all C-comments and replace with C++ comments.""" # Keep the copyright header style. line_list = text.splitlines(True) copyright_list = line_list[0:10] code_list = line_list[10:] copy_text = ''.join(copyright_list) code_text = ''.join(code_list) # Remove */ for C-comments, don't care about trailing blanks. comment_end = re.compile(r'\n[ ]*\*/[ ]*') code_text = re.sub(comment_end, '', code_text) comment_end = re.compile(r'\*/') code_text = re.sub(comment_end, '', code_text) # Remove comment lines in the middle of comments, replace with C++ comments. comment_star = re.compile(r'(?<=\n)[ ]*(?!\*\w)\*[ ]*') code_text = re.sub(comment_star, r'// ', code_text) # Remove start of C comment and replace with C++ comment. comment_start = re.compile(r'/\*[ ]*\n') code_text = re.sub(comment_start, '', code_text) comment_start = re.compile(r'/\*[ ]*(.)') code_text = re.sub(comment_start, r'// \1', code_text) # Add copyright info. return copy_text + code_text
5,355,364
def get_bulk_statement(
    stmt_type, table_name, column_names, dicts=True, value_string="%s", odku=False
):
    """Get a SQL statement suitable for use with bulk execute functions

    Parameters
    ----------
    stmt_type : str
        One of REPLACE, INSERT, or INSERT IGNORE. **Note:** Backend support for
        this varies.
    table_name : str
        Name of SQL table to use in statement
    column_names : list
        A list of column names to load
    dicts : bool, optional
        If true, assume the data will be a list of dict rows
    value_string : str, optional
        The parameter replacement string used by the underlying DB API
    odku : bool or list, optional
        If true, add ON DUPLICATE KEY UPDATE clause for all columns. If a list
        then only add it for the specified columns. **Note:** Backend support
        for this varies.

    Returns
    -------
    sql : str
        The sql query string to use with bulk execute functions

    """
    if not stmt_type.lower() in ("replace", "insert", "insert ignore"):
        raise AssertionError("Invalid statement type: %s" % stmt_type)
    columns_clause = ", ".join(["`%s`" % c for c in column_names])
    if dicts:
        values_clause = ", ".join(["%%(%s)s" % c for c in column_names])
    else:
        values_clause = ", ".join(["%s" % value_string for c in column_names])
    sql = "%s INTO %s (%s) VALUES (%s)" % (
        stmt_type,
        table_name,
        columns_clause,
        values_clause,
    )
    if odku:
        odku_cols = column_names
        if isinstance(odku, (list, tuple)):
            odku_cols = odku
        odku_clause = ", ".join(["%s=VALUES(%s)" % (col, col) for col in odku_cols])
        sql = sql + " ON DUPLICATE KEY UPDATE %s" % odku_clause
    return escape_string(sql)
5,355,365
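# Hedged usage sketch for get_bulk_statement above; escape_string is assumed to be the
# helper imported alongside the original function, so only the pre-escaped SQL shape is
# shown in the comment.
sql = get_bulk_statement(
    "INSERT",
    "users",
    ["id", "name"],
    dicts=True,
    odku=["name"],
)
# Roughly: INSERT INTO users (`id`, `name`) VALUES (%(id)s, %(name)s)
#          ON DUPLICATE KEY UPDATE name=VALUES(name)
print(sql)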
def test_interfacegroup_construction_item(): """Check that we construct address groups when sub-props are not a list.""" interface = Interface(hostname="h1", interface="i1") interface_group = InterfaceGroup("g1", interfaces=[interface]) assert InterfaceGroup("g1", interfaces=interface) == interface_group
5,355,366
def map_remove_by_value_range(bin_name, value_start, value_end, return_type, inverted=False): """Creates a map_remove_by_value_range operation to be used with operate or operate_ordered The operation removes items, with values between value_start(inclusive) and value_end(exclusive) from the map Args: bin_name (str): The name of the bin containing the map. value_start: The start of the range of values to be removed. (Inclusive) value_end: The end of the range of values to be removed. (Exclusive) return_type (int): Value specifying what should be returned from the operation. This should be one of the aerospike.MAP_RETURN_* values. inverted (bool): If True, values outside of the specified range will be removed, and values inside of the range will be kept. Default: False Returns: A dictionary usable in operate or operate_ordered. The format of the dictionary should be considered an internal detail, and subject to change. """ op_dict = { OP_KEY: aerospike.OP_MAP_REMOVE_BY_VALUE_RANGE, BIN_KEY: bin_name, VALUE_KEY: value_start, RANGE_KEY: value_end, RETURN_TYPE_KEY: return_type, INVERTED_KEY: inverted } return op_dict
5,355,367
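# Hedged usage sketch for map_remove_by_value_range above, assuming the standard
# aerospike Python client; the key, bin name and value range are illustrative only.
import aerospike

ops = [
    map_remove_by_value_range(
        "scores",                    # bin holding the map
        10,                          # remove values >= 10 ...
        100,                         # ... and < 100
        aerospike.MAP_RETURN_COUNT,
    )
]
# Assuming `client` is an already-connected aerospike client:
# key = ("test", "demo", "user1")
# _, _, bins = client.operate(key, ops)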
def insertions_sort(A): """Sort list of comparable elements into nondecreasing order""" for i in range(1, len(A)): # from 1 to n-1 curr = A[i] # current element to be possibly moved j = i # variable used to find correct index for current while ( j > 0 and A[j - 1] > curr ): # goes back and checks previous elements that are less than curr A[j] = A[j - 1] # moves the previous item up j -= 1 # decrements the index A[j] = curr
5,355,368
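# Quick usage sketch for insertions_sort above: the sort happens in place.
data = [5, 2, 4, 6, 1, 3]
insertions_sort(data)
print(data)  # -> [1, 2, 3, 4, 5, 6]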
def make_access_shp(access_shp_path): """Create a 100x100 accessibility polygon shapefile with two access values. Args: access_shp_path (str): the path for the shapefile. Returns: None. """ srs = osr.SpatialReference() srs.ImportFromEPSG(26910) projection_wkt = srs.ExportToWkt() origin = (1180000, 690000) pos_x = origin[0] pos_y = origin[1] # Setup parameters for creating point shapefile fields = {'FID': ogr.OFTInteger64, 'ACCESS': ogr.OFTReal} attrs = [{'FID': 0, 'ACCESS': 0.2}, {'FID': 1, 'ACCESS': 1.0}] poly_geoms = { 'poly_1': [(pos_x, pos_y), (pos_x + 100, pos_y), (pos_x + 100, pos_y - 100 / 2.0), (pos_x, pos_y - 100 / 2.0), (pos_x, pos_y)], 'poly_2': [(pos_x, pos_y - 50.0), (pos_x + 100, pos_y - 50.0), (pos_x + 100, (pos_y - 50.0) - (100 / 2.0)), (pos_x, (pos_y - 50.0) - (100 / 2.0)), (pos_x, pos_y - 50.0)]} poly_geometries = [ Polygon(poly_geoms['poly_1']), Polygon(poly_geoms['poly_2'])] # Create point shapefile to use for testing input pygeoprocessing.shapely_geometry_to_vector( poly_geometries, access_shp_path, projection_wkt, 'ESRI Shapefile', fields=fields, attribute_list=attrs, ogr_geom_type=ogr.wkbPolygon)
5,355,369
async def async_setup_racelandshop_websockt_api(): """Set up WS API handlers.""" racelandshop = get_racelandshop() racelandshop.log.info("Setup task %s", RacelandshopSetupTask.WEBSOCKET) websocket_api.async_register_command(racelandshop.hass, racelandshop_settings) websocket_api.async_register_command(racelandshop.hass, racelandshop_config) websocket_api.async_register_command(racelandshop.hass, racelandshop_repositories) websocket_api.async_register_command(racelandshop.hass, racelandshop_repository) websocket_api.async_register_command(racelandshop.hass, racelandshop_repository_data) websocket_api.async_register_command(racelandshop.hass, check_local_path) websocket_api.async_register_command(racelandshop.hass, racelandshop_status) websocket_api.async_register_command(racelandshop.hass, racelandshop_removed) websocket_api.async_register_command(racelandshop.hass, acknowledge_critical_repository) websocket_api.async_register_command(racelandshop.hass, get_critical_repositories)
5,355,370
def rationalApproximation(points, N, tol=1e-3, lowest_order_only=True): """ Return rational approximations for a set of 2D points. For a set of points :math:`(x,y)` where :math:`0 < x,y \\leq1`, return all possible rational approximations :math:`(a,b,c) \\; a,b,c \\in \\mathbb{Z}` such that :math:`(x,y) \\approx (a/c, b/c)`. Arguments: points: 2D (L x 2) points to approximate N: max order Returns: ``dict``: Dictionary with ``points`` as *keys* and the corresponding ``set`` of tuples ``(a,b,c)`` as values. """ L,_ = points.shape # since this solutions assumes a>0, a 'quick' hack to also obtain solutions # with a < 0 is to flip the dimensions of the points and explore those # solutions as well points = np.vstack((points, np.fliplr(points))) solutions = defaultdict(set) sequences = {1: set(fareySequence(1))} for n in range(2, N+1): sequences[n] = set(fareySequence(n)) - sequences[n-1] for h,k in fareySequence(N,1): if 0 in (h,k): continue # print h,k for x,y in resonanceSequence(N, k): # avoid 0-solutions if 0 in (x,y): continue norm = np.sqrt(x**2+y**2) n = np.array([ y/norm, x/norm]) * np.ones_like(points) n[points[:,0] < h/k, 0] *= -1 # points approaching from the left # nomenclature inspired in http://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line#Vector_formulation ap = np.array([h/k, 0]) - points apn = np.zeros((1,L)) d = np.zeros_like(points) apn = np.sum(n*ap, 1, keepdims=True) d = ap - apn*n ## DON'T RETURN IMMEDIATELY; THERE MIGHT BE OTHER SOLUTIONS OF THE SAME ORDER indices, = np.nonzero(np.sqrt(np.sum(d*d,1)) <= tol) for i in indices: # print "h/k:", h , "/", k # print "point:", points[i,:] if points[i,0] >= h/k: if i<L: # print "non-flipped >= h/k" solutions[i].add((x,-y, h*x/k)) # print i, (x,-y, h*x/k) elif x*(-y)<0: # only consider solutions where (a,b) have different sign for the "flipped" points (the other solutions should have already been found for the non-flipped points) # print "flipped >= h/k" solutions[i-L].add((-y, x, h*x/k)) # print i-L, (-y, x, h*x/k) else: if i<L: # print "non-flipped < h/k" solutions[i].add((x, y, h*x/k)) # print i, (x, y, h*x/k) elif x*y>0: # only consider solutions where (a,b) have different sign for the "flipped" points (the other solutions should have already been found for the non-flipped points) # print "flipped < h/k" solutions[i-L].add((y, x, h*x/k)) # print i-L, (y, x, h*x/k) if lowest_order_only: # removed = 0 for k in solutions: # keep lowest order solutions only lowest_order = 2*N s = set([]) for sol in solutions[k]: K = abs(sol[0])+abs(sol[1])+abs(sol[2]) if K == lowest_order: s.add(sol) elif K < lowest_order: lowest_order = K # if len(s) > 0: # print("point: ({},{}) -> removing {} for {}".format(points[k,0], points[k,1], s, sol)) # removed += len(s) s = set([sol]) solutions[k] = s # print("Removed {} solutions".format(removed)) return solutions
5,355,371
def delete_group(group_id: int) -> None:
    """
    Remove all subscriptions of a group

    :param group_id: group number
    """
    sub_list = get_group_sub(group_id)
    for lid in sub_list.keys():
        delete_sub(lid, group_id)
5,355,372
def get_notebook_server_instance(try_use_existing=False): """Create a notebook server instance to use. Optionally attempting to re-use existing instances. """ pid = get_cache_pid() servers = list_running_servers() # If we already have a server, use that for server in servers: if server["pid"] == pid: return (server, None) # Otherwise, if we are allowed, try to piggyback on another session if try_use_existing and servers: return (servers[0], None) # Fine, I'll make my own server, with blackjack, and userhooks! try: server_process = subprocess.Popen(["jupyter", "notebook", "--no-browser"]) except OSError as err: raise RuntimeError("Failed to start server: {}".format(err)) print("Started Jupyter Notebook server pid {}".format(server_process.pid)) # wait for 1 second for server to come up sleep(1) server = None for retry in range(5): try: server = {s["pid"]: s for s in list_running_servers()}[server_process.pid] break except KeyError: # Sleep for increasing times to give server a chance to come up sleep(5) if server: return (server, server_process) # Don't leave orphans! server_process.kill() raise RuntimeError("Failed to acquire server instance after 25s")
5,355,373
def to_dict(doc, fields):
    """Warning: using this convenience fn is probably less efficient than
    manually building up a dict.
    """
    def map_field(prop):
        val = getattr(doc, prop)
        if isinstance(val, list):
            return [(e.to_dict() if hasattr(e, 'to_dict') else e) for e in val]
        else:
            return val.to_dict() if hasattr(val, 'to_dict') else val

    return {f: map_field(f) for f in fields}
5,355,374
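# Hedged usage sketch for to_dict above; the Point and Shape classes are made up here
# purely to show how nested to_dict() calls and plain attributes are handled.
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def to_dict(self):
        return {"x": self.x, "y": self.y}

class Shape:
    def __init__(self):
        self.name = "triangle"
        self.vertices = [Point(0, 0), Point(1, 0), Point(0, 1)]

print(to_dict(Shape(), ["name", "vertices"]))
# -> {'name': 'triangle', 'vertices': [{'x': 0, 'y': 0}, {'x': 1, 'y': 0}, {'x': 0, 'y': 1}]}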
def _auto_wrap_external(real_env_creator): """Wrap an environment in the ExternalEnv interface if needed. Args: real_env_creator (fn): Create an env given the env_config. """ def wrapped_creator(env_config): real_env = real_env_creator(env_config) if not isinstance(real_env, (ExternalEnv, ExternalMultiAgentEnv)): logger.info( "The env you specified is not a supported (sub-)type of " "ExternalEnv. Attempting to convert it automatically to " "ExternalEnv." ) if isinstance(real_env, MultiAgentEnv): external_cls = ExternalMultiAgentEnv else: external_cls = ExternalEnv class ExternalEnvWrapper(external_cls): def __init__(self, real_env): super().__init__( observation_space=real_env.observation_space, action_space=real_env.action_space, ) def run(self): # Since we are calling methods on this class in the # client, run doesn't need to do anything. time.sleep(999999) return ExternalEnvWrapper(real_env) return real_env return wrapped_creator
5,355,375
def _get_binary_link_deps(
        base_path,
        name,
        linker_flags = (),
        allocator = "malloc",
        default_deps = True):
    """
    Return a list of dependencies that should apply to *all* binary rules
    that link C/C++ code.

    This also creates a sanitizer configuration rule if necessary, so this
    function should not be called more than once for a given rule.

    Args:
        base_path: The package path
        name: The name of the rule
        linker_flags: If provided, flags to pass to allocator/coverage/sanitizers
                      to make sure proper dependent rules are generated.
        allocator: The allocator to use. This is generally set by a configuration
                   option and retrieved in allocators.bzl
        default_deps: If set, add in a list of "default deps", dependencies that
                      should generally be added to make sure binaries work
                      consistently. e.g. common/init

    Returns:
        A list of `RuleTarget` structs that should be added as dependencies.
    """

    deps = []

    # If we're not using a sanitizer add allocator deps.
    if sanitizers.get_sanitizer() == None:
        deps.extend(allocators.get_allocator_deps(allocator))

    # Add in any dependencies required for sanitizers.
    deps.extend(sanitizers.get_sanitizer_binary_deps())
    deps.append(
        _create_sanitizer_configuration(
            base_path,
            name,
            linker_flags,
        ),
    )

    # Add in any dependencies required for code coverage
    if coverage.get_coverage():
        deps.extend(coverage.get_coverage_binary_deps())

    # We link in our own implementation of `kill` to binaries (S110576).
    if default_deps:
        deps.append(_COMMON_INIT_KILL)

    return deps
5,355,376
def create_dummy_ligand(ligand, cut_idx=None):
    """
    Takes a mol object and splits it at a primary amine such that the
    fragments can connect to the tertiary amine on the Mo core.
    Args:
        ligand (mol): the ligand to split
        cut_idx (int): index of the primary amine nitrogen to cut at
    Returns:
        ligands List(mol) :
    """
    # TODO AllChem.ReplaceCore() could be used here instead
    # Initialize dummy mol
    dummy = Chem.MolFromSmiles("*")

    # Create explicit hydrogens
    ligand = Chem.AddHs(ligand)

    # Get the neighbouring bonds to the amine given by cut_idx
    atom = ligand.GetAtomWithIdx(cut_idx)

    # Create list of tuples that contain the amine idx and idx of neighbor.
    indices = [
        (cut_idx, x.GetIdx()) for x in atom.GetNeighbors() if x.GetAtomicNum() != 1
    ][0]

    # Get the bonds to the neighbors.
    bond = []
    bond.append(ligand.GetBondBetweenAtoms(indices[0], indices[1]).GetIdx())

    # Get the two fragments, the ligand and the NH2
    frag = Chem.FragmentOnBonds(ligand, bond, addDummies=True, dummyLabels=[(1, 1)])
    frags = Chem.GetMolFrags(frag, asMols=True, sanitizeFrags=False)

    # Pattern for NH2+dummy
    smart = "[1*][N]([H])([H])"
    patt = Chem.MolFromSmarts(smart)

    # Get the ligand that is not NH2
    ligands = [struct for struct in frags if len(struct.GetSubstructMatches(patt)) == 0]

    return ligands[0]
5,355,377
def double2pointerToArray(ptr, n, m_sizes):
    """ Converts a ctypes double** array into a list of 1D numpy arrays.

    Arguments:
        ptr: [ctypes double pointer]

        n: [int] number of cameras
        m_sizes: [list] number of measurements for each camera

    Return:
        arr_list: [list of ndarrays] list of numpy arrays, each list entry
            containing data for individual cameras

    """

    arr_list = []

    # Go through every camera
    for i in range(n):

        # Init a new empty data array
        arr = np.zeros(shape=(m_sizes[i]))

        # Go through ctypes array and extract data for this camera
        for j in range(m_sizes[i]):
            arr[j] = ptr[i][j]

        # Add the data for this camera to the final list
        arr_list.append(arr)

    return arr_list
5,355,378
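# Hedged usage sketch for double2pointerToArray above: building a double** with ctypes
# purely to show the expected memory layout (2 cameras, with 3 and 2 measurements).
import ctypes
import numpy as np

m_sizes = [3, 2]
rows = [(ctypes.c_double * m)(*[float(j) for j in range(m)]) for m in m_sizes]
ptr = (ctypes.POINTER(ctypes.c_double) * len(rows))(
    *[ctypes.cast(r, ctypes.POINTER(ctypes.c_double)) for r in rows]
)

arrs = double2pointerToArray(ptr, 2, m_sizes)
print([a.tolist() for a in arrs])  # -> [[0.0, 1.0, 2.0], [0.0, 1.0]]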
def test_loading_cached_properties(): """Load cached properties.""" loader = Loader(new_path_syntax=True) obj = loader.get_object_documentation("tests.fixtures.cached_properties:C") assert len(obj.children) == 1 assert obj.children[0].name == obj.children[0].docstring == "aaa" assert "cached" in obj.children[0].properties
5,355,379
def read_varint(stream: bytes):
    """
    Read a varint.

    Args:
        stream (bytes): the byte stream.

    Returns:
        tuple[int, int]: the decoded value and the number of bytes consumed.
    """
    value = 0
    position = 0
    shift = 0
    while True:
        if position >= len(stream):
            break
        byte = stream[position]
        value += (byte & 0b01111111) << shift
        if byte & 0b10000000 == 0:
            break
        position += 1
        shift += 7
    return value, position + 1
5,355,380
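# Quick usage sketch for read_varint above, using the protobuf-style encoding of 300
# (0xAC 0x02): low 7 bits come first and the high bit marks continuation.
value, consumed = read_varint(b"\xac\x02\xff")
print(value, consumed)  # -> 300 2  (trailing bytes are ignored)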
def plot_mass_hist(data, popup=False): """ Plots a histogram of the Natural Log value for mass of the Meteorite Landings dataset. Shows the figure if popup = true, and saves it if popup is false """ plt.close() strikes = data['strikes'] # Plots histogram with ln_mass column strikes['ln_mass'].hist(bins=MASS_BINS) # Labels plot plt.xlabel('Mass (ln grams)') plt.ylabel('Count') plt.title('Histogram of Mass (Natural Log Scale)') if popup: plt.show() else: plt.savefig('mass_hist.png')
5,355,381
def check_fragment_count(blob_list, fragmentId, log_path, _fragment_count_flag):
    """Count fragment images

    Args:
        blob_list(list): list of file names registered in the azure.storage.blob container
        fragmentId (list): list of GUIDs assigned to the fragment images
        log_path(str): output destination of the log file
        _fragment_count_flag(bool): error flag
    """
    # If the number of fragment images differs from the number of files saved to the azure.storage.blob container
    if len(fragmentId) != len(blob_list):
        for i in range(len(fragmentId)):
            for j in blob_list:
                if (fragmentId[i]) == j:
                    break
            else:
                # Output error log
                genarate_logger(fragmentId[i] + ".jpg : Not being saved in azure.storage.blob. (check_fragment_count)", log_path)
                _fragment_count_flag = False
5,355,382
def get_arguments(): """ get commandline arguments """ # Parse command line arguments parser = argparse.ArgumentParser(description="P1 reader interface") parser.add_argument("--config-file", default=__file__.replace('.py', '.yml').replace('/bin/', '/etc/'), help="P1 config file, default %(default)s", metavar='FILE' ) parser.add_argument("--log", help="Set log level (default info)", choices=['debug', 'info', 'warning', 'error', 'critical'], default="info" ) parser.add_argument("--debug", action='store_true', help="debug mode" ) parser.add_argument('--version', action='version', version=__version__ ) arguments = parser.parse_args() return arguments
5,355,383
def any(array, mapFunc):
    """
    Checks whether mapFunc returns True for at least one element of array.

    :param array: The array whose elements will be checked against the function. \t
    :type array: [mixed] \n
    :param mapFunc: The function that returns a boolean value when applied to an element of the array. \t
    :type mapFunc: function \n
    :returns: Whether any element of the array returned True. \t
    :rtype: bool \n
    """
    for elem in array:
        if mapFunc(elem):
            return True
    return False
5,355,384
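# Quick usage sketch for the any() helper above (note that it shadows the builtin of
# the same name within its module).
print(any([1, 3, 5, 8], lambda x: x % 2 == 0))  # -> True
print(any([1, 3, 5], lambda x: x % 2 == 0))     # -> False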
def qt_point_to_point(qt_point, unit=None): """Create a Point from a QPoint or QPointF Args: qt_point (QPoint or QPointF): The source point unit (Unit): An optional unit to convert values to in the output `Point`. If omitted, values in the output `Point` will be plain `int` or `float` values. Returns: Point """ if unit: return Point(qt_point.x(), qt_point.y()).to_unit(unit) else: return Point(qt_point.x(), qt_point.y())
5,355,385
def test_s3_bucket_policy(): """ To test that bucket policy is applied if passed in """ template = Template() policy = { "Version": "2012-10-17", "Statement": [ { "Sid": "AWSCloudTrailAclCheck20150319", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:GetBucketAcl", "Resource": "arn:aws:s3:::mycloudtrailamzs3" }, { "Sid": "AWSCloudTrailWrite20150319", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:PutObject", "Resource": Join('', ["arn:aws:s3:::", 'WithPolicy', "/cloudtrail/AWSLogs/", {'Ref': 'AWS::AccountId'}, "/*"]), "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } } } ] } s3_bucket = S3('WithPolicy', template, 'Private', policy) # Test policy exists assert_is(type(s3_bucket.s3_b_policy), s3.BucketPolicy) # Test that s3 bucket used for policy is right assert_equals(s3_bucket.s3_b_policy.Bucket, s3_bucket.s3_b.title)
5,355,386
def apiRequest(method, payload=None):
    """
    Send a request to the vk server

    :param method: method for vkApi
    :param payload: parameters for vkApi
    :return: answer from vkApi
    """
    if payload is None:
        payload = {}
    if not ('access_token' in payload):
        payload.update({'access_token': GROUP_TOKEN, 'v': V})
    response = requests.post(BASE_URL + method, payload)
    data = json.loads(response.text)
    return data
5,355,387
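# Hedged usage sketch for apiRequest above, assuming BASE_URL, GROUP_TOKEN and V are
# configured for the VK API; the method name and parameters are illustrative only.
data = apiRequest("users.get", {"user_ids": "1"})
if "error" in data:
    print("VK error:", data["error"].get("error_msg"))
else:
    print(data.get("response"))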
def page_required_no_auth(f): """Full page, requires user to be logged out to access, otherwise redirects to main page.""" @wraps(f) def wrapper(*args, **kwargs): if "username" in session: return redirect("/") else: return f(*args, **kwargs) return wrapper
5,355,388
def parse_arguments(args): """ Parse all given arguments. :param args: list :return: argparse.Namespace """ parser = argparse.ArgumentParser( description=__description__, epilog="Example-usage in apache-config:\n" 'CustomLog "| /path/to/anonip.py ' '[OPTIONS] --output /path/to/log" ' "combined\n ", formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( "-4", "--ipv4mask", metavar="INTEGER", help="truncate the last n bits (default: %(default)s)", type=lambda x: _validate_ipmask(x, 32), ) parser.set_defaults(ipv4mask=12) parser.add_argument( "-6", "--ipv6mask", type=lambda x: _validate_ipmask(x, 128), metavar="INTEGER", help="truncate the last n bits (default: %(default)s)", ) parser.set_defaults(ipv6mask=84) parser.add_argument( "-i", "--increment", metavar="INTEGER", type=lambda x: _validate_integer_ht_0(x), help="increment the IP address by n (default: %(default)s)", ) parser.set_defaults(increment=0) parser.add_argument("-o", "--output", metavar="FILE", help="file to write to") parser.add_argument( "--input", metavar="FILE", help="File or FIFO to read from (default: stdin)" ) parser.add_argument( "-c", "--column", metavar="INTEGER", dest="columns", nargs="+", type=lambda x: _validate_integer_ht_0(x), help="assume IP address is in column n (1-based indexed; default: 1)", ) parser.add_argument( "-l", "--delimiter", metavar="STRING", type=str, help='log delimiter (default: " ")', ) parser.add_argument( "--regex", metavar="STRING", nargs="+", help="regex for detecting IP addresses (use optionally instead of -c)", type=regex_arg_type, ) parser.add_argument( "-r", "--replace", metavar="STRING", help="replacement string in case address parsing fails (Example: 0.0.0.0)", ) parser.add_argument( "-p", "--skip-private", dest="skip_private", action="store_true", help="do not mask addresses in private ranges. " "See IANA Special-Purpose Address Registry.", ) parser.add_argument( "-d", "--debug", action="store_true", help="print debug messages" ) parser.add_argument("-v", "--version", action="version", version=__version__) args = parser.parse_args(args) if args.regex and (args.columns is not None or args.delimiter is not None): raise parser.error( 'Ambiguous arguments: When using "--regex", "-c" and "-l" can\'t be used.' ) if not args.regex and args.columns is None: args.columns = [1] if not args.regex and args.delimiter is None: args.delimiter = " " if args.regex: try: args.regex = re.compile(r"|".join(args.regex)) except re.error: # pragma: no cover raise argparse.ArgumentTypeError("Failed to compile concatenated regex!") return args
5,355,389
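# Quick usage sketch for parse_arguments above: anonymize column 2 of a semicolon-delimited
# log, truncating 16 bits of IPv4 addresses (assumes the _validate_* helpers accept these
# values and return plain ints).
args = parse_arguments(["-4", "16", "-c", "2", "-l", ";"])
print(args.ipv4mask, args.ipv6mask, args.columns, args.delimiter)
# -> 16 84 [2] ;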
def p_xmlkv_opt_list(p): """wc_stringlist : wc_string""" p[0] = ParseTreeNode('EQ', raw='assign') p[0].add_child(p[1])
5,355,390
def generate_forward():
    """
    Generate dataset with forward method

    It tries to integrate a random function.
    The integral may not be symbolically possible, or may contain invalid operators.
    In those cases, it returns None.
    """
    formula = symbolic.fixed_init(15)
    integrated = sympy.integrate(formula, symbolic.x, meijerg=False)
    if symbolic.is_integral_valid(integrated):
        return (formula, integrated)
    else:
        return None
5,355,391
def exit_prompt(message=''):
    """Function to exit the program after prompting the user to press Enter"""
    if message != '':
        print(str(message))
    input('\nPress [Enter] to exit...')
    sys.exit()
5,355,392
def load_agent(agent_args, domain_settings, experiment_settings): """ This function loads the agent from the results directory results/env_name/method_name/filename Args: experiment_settings Return: sarsa_lambda agent """ with open('results/' + experiment_settings['env'] + '/sarsa_lambda/agents/' + experiment_settings['filename'] + '.pkl', 'rb') as input: my_agent = pickle.load(input) return my_agent, None
5,355,393
def unpickle_robust(bytestr): """ robust unpickle of one byte string """ fin = BytesIO(bytestr) unpickler = robust_unpickler(fin) return unpickler.load()
5,355,394
def test_POST_log_entry_admin_user(test_client,test_login,): """ Ensure that when the user issues a POST request to the new_request page a log entry is inserted into the user action log table with the correct log event """ # Simulate pressing the "Setup samples" button data = dict( labname="Wang",correspondence_email="[email protected]", request_name="Demo request", description="This is a demo request", species="mouse",number_of_samples=2, sample_submit_button=True ) response = test_client.post(url_for('requests.new_request'), data=data, follow_redirects=True, ) log_event = db_admin.UserActionLog().fetch()[-1]['event'] assert log_event == '''ahoag POST request to route: "new_request()" in lightserv.requests.routes''' # assert b'Background Info' in response.data and b"Clearing setup" not in response.data
5,355,395
def build_ins_embed_branch(cfg, input_shape):
    """
    Build an instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.
    """
    name = cfg.MODEL.INS_EMBED_HEAD.NAME
    return INS_EMBED_BRANCHES_REGISTRY.get(name)(cfg, input_shape)
5,355,396
def atl03sp(ipx_region, parm, asset=icesat2.DEFAULT_ASSET): """ Performs ATL03 subsetting in parallel on ATL03 data and returns photon segment data. See the `atl03sp <../api_reference/icesat2.html#atl03sp>`_ function for more details. Parameters ---------- ipx_region: Query icepyx region object defining the query of granules to be processed parms: dict parameters used to configure ATL03 subsetting (see `Parameters <../user_guide/ICESat-2.html#parameters>`_) asset: str data source asset (see `Assets <../user_guide/ICESat-2.html#assets>`_) Returns ------- list ATL03 segments (see `Photon Segments <../user_guide/ICESat-2.html#photon-segments>`_) """ try: version = ipx_region.product_version resources = ipx_region.avail_granules(ids=True)[0] except: logger.critical("must supply an icepyx query as region") return icesat2.__emptyframe() # try to get the subsetting region if ipx_region.extent_type in ('bbox','polygon'): parm.update({'poly': to_region(ipx_region)}) return icesat2.atl03sp(parm, asset, version=version, resources=resources)
5,355,397
def get_sample_activity_from_batch(activity_batch, idx=0): """Return layer activity for sample ``idx`` of an ``activity_batch``. """ return [(layer_act[0][idx], layer_act[1]) for layer_act in activity_batch]
5,355,398
def dump(location): """Run pg_dump.""" os.environ['PGPASSWORD'] = current_app.config['PG_PASSWORD'] pg_dump = current_app.config.get('PG_BIN_DIR') + 'pg_dump' subprocess.call(( pg_dump, '--host={}'.format(current_app.config['PG_HOST']), '--username={}'.format(current_app.config['PG_USERNAME']), '--format=c', current_app.config['PG_DB_NAME'], '--file=%s' % location, ))
5,355,399