Columns: text_prompt (string, lengths 100–17.7k), code_prompt (string, lengths 7–9.86k)
<SYSTEM_TASK:> Create a new model from raw data, like word frequencies, Brown clusters <END_TASK> <USER_TASK:> Description: def init_model( lang, output_dir, freqs_loc=None, clusters_loc=None, jsonl_loc=None, vectors_loc=None, prune_vectors=-1, ): """ Create a new model from raw data, like word frequencies, Brown clusters and word vectors. If vectors are provided in Word2Vec format, they can be either a .txt or zipped as a .zip or .tar.gz. """
    if jsonl_loc is not None:
        if freqs_loc is not None or clusters_loc is not None:
            settings = ["-j"]
            if freqs_loc:
                settings.append("-f")
            if clusters_loc:
                settings.append("-c")
            msg.warn(
                "Incompatible arguments",
                "The -f and -c arguments are deprecated, and not compatible "
                "with the -j argument, which should specify the same "
                "information. Either merge the frequencies and clusters data "
                "into the JSONL-formatted file (recommended), or use only the "
                "-f and -c files, without the other lexical attributes.",
            )
        jsonl_loc = ensure_path(jsonl_loc)
        lex_attrs = srsly.read_jsonl(jsonl_loc)
    else:
        clusters_loc = ensure_path(clusters_loc)
        freqs_loc = ensure_path(freqs_loc)
        if freqs_loc is not None and not freqs_loc.exists():
            msg.fail("Can't find word frequencies file", freqs_loc, exits=1)
        lex_attrs = read_attrs_from_deprecated(freqs_loc, clusters_loc)
    with msg.loading("Creating model..."):
        nlp = create_model(lang, lex_attrs)
    msg.good("Successfully created model")
    if vectors_loc is not None:
        add_vectors(nlp, vectors_loc, prune_vectors)
    vec_added = len(nlp.vocab.vectors)
    lex_added = len(nlp.vocab)
    msg.good(
        "Successfully compiled vocab",
        "{} entries, {} vectors".format(lex_added, vec_added),
    )
    if not output_dir.exists():
        output_dir.mkdir()
    nlp.to_disk(output_dir)
    return nlp
<SYSTEM_TASK:> Load the model, set up the pipeline and train the entity recognizer. <END_TASK> <USER_TASK:> Description: def main(model=None, output_dir=None, n_iter=100): """Load the model, set up the pipeline and train the entity recognizer."""
if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # create the built-in pipeline components and add them to the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy if "ner" not in nlp.pipe_names: ner = nlp.create_pipe("ner") nlp.add_pipe(ner, last=True) # otherwise, get it so we can add labels else: ner = nlp.get_pipe("ner") # add labels for _, annotations in TRAIN_DATA: for ent in annotations.get("entities"): ner.add_label(ent[2]) # get names of other pipes to disable them during training other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"] with nlp.disable_pipes(*other_pipes): # only train NER # reset and initialize the weights randomly – but only if we're # training a new model if model is None: nlp.begin_training() for itn in range(n_iter): random.shuffle(TRAIN_DATA) losses = {} # batch up the examples using spaCy's minibatch batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: texts, annotations = zip(*batch) nlp.update( texts, # batch of texts annotations, # batch of annotations drop=0.5, # dropout - make it harder to memorise data losses=losses, ) print("Losses", losses) # test the trained model for text, _ in TRAIN_DATA: doc = nlp(text) print("Entities", [(ent.text, ent.label_) for ent in doc.ents]) print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc]) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) for text, _ in TRAIN_DATA: doc = nlp2(text) print("Entities", [(ent.text, ent.label_) for ent in doc.ents]) print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
<SYSTEM_TASK:> Perform an update over a single batch of documents. <END_TASK> <USER_TASK:> Description: def make_update(model, docs, optimizer, drop=0.0, objective="L2"): """Perform an update over a single batch of documents. docs (iterable): A batch of `Doc` objects. drop (float): The dropout rate. optimizer (callable): An optimizer. RETURNS loss: A float for the loss. """
    predictions, backprop = model.begin_update(docs, drop=drop)
    loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective)
    backprop(gradients, sgd=optimizer)
    # Don't want to return a cupy object here
    # The gradients are modified in-place by the BERT MLM,
    # so we get an accurate loss
    return float(loss)
<SYSTEM_TASK:> Compute a mean-squared error loss between the documents' vectors and <END_TASK> <USER_TASK:> Description: def get_vectors_loss(ops, docs, prediction, objective="L2"): """Compute a mean-squared error loss between the documents' vectors and the prediction. Note that this is ripe for customization! We could compute the vectors in some other way, e.g. with an LSTM language model, or use some other type of objective. """
    # The simplest way to implement this would be to vstack the
    # token.vector values, but that's a bit inefficient, especially on GPU.
    # Instead we fetch the index into the vectors table for each of our tokens,
    # and look them up all at once. This prevents data copying.
    ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
    target = docs[0].vocab.vectors.data[ids]
    if objective == "L2":
        d_target = prediction - target
        loss = (d_target ** 2).sum()
    elif objective == "cosine":
        loss, d_target = get_cossim_loss(prediction, target)
    return loss, d_target
<SYSTEM_TASK:> Round large numbers as integers, smaller numbers as decimals. <END_TASK> <USER_TASK:> Description: def _smart_round(figure, width=10, max_decimal=4): """Round large numbers as integers, smaller numbers as decimals."""
    n_digits = len(str(int(figure)))
    n_decimal = width - (n_digits + 1)
    if n_decimal <= 1:
        return str(int(figure))
    else:
        n_decimal = min(n_decimal, max_decimal)
        format_str = "%." + str(n_decimal) + "f"
        return format_str % figure
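For illustration (not part of the original dataset row), a brief doctest-style sketch of how _smart_round behaves, assuming the function above is in scope:
>>> _smart_round(3.14159)
'3.1416'
>>> _smart_round(123456789.6)
'123456789'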
<SYSTEM_TASK:> Detect base noun phrases. Works on both Doc and Span. <END_TASK> <USER_TASK:> Description: def noun_chunks(obj): """ Detect base noun phrases. Works on both Doc and Span. """
# It follows the logic of the noun chunks finder of English language, # adjusted to some Greek language special characteristics. # obj tag corrects some DEP tagger mistakes. # Further improvement of the models will eliminate the need for this tag. labels = ["nsubj", "obj", "iobj", "appos", "ROOT", "obl"] doc = obj.doc # Ensure works on both Doc and Span. np_deps = [doc.vocab.strings.add(label) for label in labels] conj = doc.vocab.strings.add("conj") nmod = doc.vocab.strings.add("nmod") np_label = doc.vocab.strings.add("NP") seen = set() for i, word in enumerate(obj): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced if word.i in seen: continue if word.dep in np_deps: if any(w.i in seen for w in word.subtree): continue flag = False if word.pos == NOUN: # check for patterns such as γραμμή παραγωγής for potential_nmod in word.rights: if potential_nmod.dep == nmod: seen.update( j for j in range(word.left_edge.i, potential_nmod.i + 1) ) yield word.left_edge.i, potential_nmod.i + 1, np_label flag = True break if flag is False: seen.update(j for j in range(word.left_edge.i, word.i + 1)) yield word.left_edge.i, word.i + 1, np_label elif word.dep == conj: # covers the case: έχει όμορφα και έξυπνα παιδιά head = word.head while head.dep == conj and head.head.i < head.i: head = head.head # If the head is an NP, and we're coordinated to it, we're an NP if head.dep in np_deps: if any(w.i in seen for w in word.subtree): continue seen.update(j for j in range(word.left_edge.i, word.i + 1)) yield word.left_edge.i, word.i + 1, np_label
<SYSTEM_TASK:> Validate and convert arguments. Reused in Doc, Token and Span. <END_TASK> <USER_TASK:> Description: def get_ext_args(**kwargs): """Validate and convert arguments. Reused in Doc, Token and Span."""
    default = kwargs.get("default")
    getter = kwargs.get("getter")
    setter = kwargs.get("setter")
    method = kwargs.get("method")
    if getter is None and setter is not None:
        raise ValueError(Errors.E089)
    valid_opts = ("default" in kwargs, method is not None, getter is not None)
    nr_defined = sum(t is True for t in valid_opts)
    if nr_defined != 1:
        raise ValueError(Errors.E083.format(nr_defined=nr_defined))
    if setter is not None and not hasattr(setter, "__call__"):
        raise ValueError(Errors.E091.format(name="setter", value=repr(setter)))
    if getter is not None and not hasattr(getter, "__call__"):
        raise ValueError(Errors.E091.format(name="getter", value=repr(getter)))
    if method is not None and not hasattr(method, "__call__"):
        raise ValueError(Errors.E091.format(name="method", value=repr(method)))
    return (default, method, getter, setter)
<SYSTEM_TASK:> Return labels indicating the position of the word in the document. <END_TASK> <USER_TASK:> Description: def get_position_label(i, words, tags, heads, labels, ents): """Return labels indicating the position of the word in the document. """
    if len(words) < 20:
        return "short-doc"
    elif i == 0:
        return "first-word"
    elif i < 10:
        return "early-word"
    elif i < 20:
        return "mid-word"
    elif i == len(words) - 1:
        return "last-word"
    else:
        return "late-word"
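A quick illustrative sketch with hypothetical inputs (only i and words are actually inspected, so the other arguments can be None):
>>> words = ["token"] * 25
>>> get_position_label(0, words, None, None, None, None)
'first-word'
>>> get_position_label(15, words, None, None, None, None)
'mid-word'
>>> get_position_label(3, ["token"] * 5, None, None, None, None)
'short-doc'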
<SYSTEM_TASK:> Load a generic spaCy model and add the sentencizer for sentence tokenization <END_TASK> <USER_TASK:> Description: def load_default_model_sentencizer(lang): """ Load a generic spaCy model and add the sentencizer for sentence tokenization"""
    loading_start = time.time()
    lang_class = get_lang_class(lang)
    nlp = lang_class()
    nlp.add_pipe(nlp.create_pipe('sentencizer'))
    loading_end = time.time()
    loading_time = loading_end - loading_start
    return nlp, loading_time, lang + "_default_" + 'sentencizer'
<SYSTEM_TASK:> Turn a list of errors into frequency-sorted tuples thresholded by a certain total number <END_TASK> <USER_TASK:> Description: def get_freq_tuples(my_list, print_total_threshold): """ Turn a list of errors into frequency-sorted tuples thresholded by a certain total number """
    d = {}
    for token in my_list:
        d.setdefault(token, 0)
        d[token] += 1
    return sorted(d.items(), key=operator.itemgetter(1), reverse=True)[:print_total_threshold]
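An illustrative call, assuming the helper above is in scope (hypothetical token list):
>>> get_freq_tuples(['the', 'the', 'the', 'of', 'of', 'a'], 2)
[('the', 3), ('of', 2)]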
<SYSTEM_TASK:> Heuristic to determine whether the treebank has blinded texts or not <END_TASK> <USER_TASK:> Description: def _contains_blinded_text(stats_xml): """ Heuristic to determine whether the treebank has blinded texts or not """
    tree = ET.parse(stats_xml)
    root = tree.getroot()
    total_tokens = int(root.find('size/total/tokens').text)
    unique_lemmas = int(root.find('lemmas').get('unique'))
    # assume the corpus is largely blinded when unique lemmas make up
    # less than 1% of the total tokens
    return (unique_lemmas / total_tokens) < 0.01
<SYSTEM_TASK:> Fetch the txt files for all treebanks for a given set of languages <END_TASK> <USER_TASK:> Description: def fetch_all_treebanks(ud_dir, languages, corpus, best_per_language): """ Fetch the txt files for all treebanks for a given set of languages """
all_treebanks = dict() treebank_size = dict() for l in languages: all_treebanks[l] = [] treebank_size[l] = 0 for treebank_dir in ud_dir.iterdir(): if treebank_dir.is_dir(): for txt_path in treebank_dir.iterdir(): if txt_path.name.endswith('-ud-' + corpus + '.txt'): file_lang = txt_path.name.split('_')[0] if file_lang in languages: gold_path = treebank_dir / txt_path.name.replace('.txt', '.conllu') stats_xml = treebank_dir / "stats.xml" # ignore treebanks where the texts are not publicly available if not _contains_blinded_text(stats_xml): if not best_per_language: all_treebanks[file_lang].append(txt_path) # check the tokens in the gold annotation to keep only the biggest treebank per language else: with gold_path.open(mode='r', encoding='utf-8') as gold_file: gold_ud = conll17_ud_eval.load_conllu(gold_file) gold_tokens = len(gold_ud.tokens) if treebank_size[file_lang] < gold_tokens: all_treebanks[file_lang] = [txt_path] treebank_size[file_lang] = gold_tokens return all_treebanks
<SYSTEM_TASK:> Run an evaluation for each language with its specified models and treebanks <END_TASK> <USER_TASK:> Description: def run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks): """ Run an evaluation for each language with its specified models and treebanks """
print_header = True for tb_lang, treebank_list in treebanks.items(): print() print("Language", tb_lang) for text_path in treebank_list: print(" Evaluating on", text_path) gold_path = text_path.parent / (text_path.stem + '.conllu') print(" Gold data from ", gold_path) # nested try blocks to ensure the code can continue with the next iteration after a failure try: with gold_path.open(mode='r', encoding='utf-8') as gold_file: gold_ud = conll17_ud_eval.load_conllu(gold_file) for nlp, nlp_loading_time, nlp_name in models[tb_lang]: try: print(" Benchmarking", nlp_name) tmp_output_path = text_path.parent / str('tmp_' + nlp_name + '.conllu') run_single_eval(nlp, nlp_loading_time, nlp_name, text_path, gold_ud, tmp_output_path, out_file, print_header, check_parse, print_freq_tasks) print_header = False except Exception as e: print(" Ran into trouble: ", str(e)) except Exception as e: print(" Ran into trouble: ", str(e))
<SYSTEM_TASK:> Assemble all treebanks and models to run evaluations with. <END_TASK> <USER_TASK:> Description: def main(out_path, ud_dir, check_parse=False, langs=ALL_LANGUAGES, exclude_trained_models=False, exclude_multi=False, hide_freq=False, corpus='train', best_per_language=False): """ Assemble all treebanks and models to run evaluations with. When setting check_parse to True, the default models will not be evaluated as they don't have parsing functionality """
languages = [lang.strip() for lang in langs.split(",")] print_freq_tasks = [] if not hide_freq: print_freq_tasks = ['Tokens'] # fetching all relevant treebank from the directory treebanks = fetch_all_treebanks(ud_dir, languages, corpus, best_per_language) print() print("Loading all relevant models for", languages) models = dict() # multi-lang model multi = None if not exclude_multi and not check_parse: multi = load_model('xx_ent_wiki_sm', add_sentencizer=True) # initialize all models with the multi-lang model for lang in languages: models[lang] = [multi] if multi else [] # add default models if we don't want to evaluate parsing info if not check_parse: # Norwegian is 'nb' in spaCy but 'no' in the UD corpora if lang == 'no': models['no'].append(load_default_model_sentencizer('nb')) else: models[lang].append(load_default_model_sentencizer(lang)) # language-specific trained models if not exclude_trained_models: if 'de' in models: models['de'].append(load_model('de_core_news_sm')) if 'es' in models: models['es'].append(load_model('es_core_news_sm')) models['es'].append(load_model('es_core_news_md')) if 'pt' in models: models['pt'].append(load_model('pt_core_news_sm')) if 'it' in models: models['it'].append(load_model('it_core_news_sm')) if 'nl' in models: models['nl'].append(load_model('nl_core_news_sm')) if 'en' in models: models['en'].append(load_model('en_core_web_sm')) models['en'].append(load_model('en_core_web_md')) models['en'].append(load_model('en_core_web_lg')) if 'fr' in models: models['fr'].append(load_model('fr_core_news_sm')) models['fr'].append(load_model('fr_core_news_md')) with out_path.open(mode='w', encoding='utf-8') as out_file: run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks)
<SYSTEM_TASK:> Detect base noun phrases from a dependency parse. Works on both Doc and Span. <END_TASK> <USER_TASK:> Description: def noun_chunks(obj): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """
# this iterator extracts spans headed by NOUNs starting from the left-most # syntactic dependent until the NOUN itself for close apposition and # measurement construction, the span is sometimes extended to the right of # the NOUN. Example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee" # and not just "eine Tasse", same for "das Thema Familie". labels = [ "sb", "oa", "da", "nk", "mo", "ag", "ROOT", "root", "cj", "pd", "og", "app", ] doc = obj.doc # Ensure works on both Doc and Span. np_label = doc.vocab.strings.add("NP") np_deps = set(doc.vocab.strings.add(label) for label in labels) close_app = doc.vocab.strings.add("nk") rbracket = 0 for i, word in enumerate(obj): if i < rbracket: continue if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps: rbracket = word.i + 1 # try to extend the span to the right # to capture close apposition/measurement constructions for rdep in doc[word.i].rights: if rdep.pos in (NOUN, PROPN) and rdep.dep == close_app: rbracket = rdep.i + 1 yield word.left_edge.i, rbracket, np_label
<SYSTEM_TASK:> Wrap a model that should run on CPU, transferring inputs and outputs <END_TASK> <USER_TASK:> Description: def with_cpu(ops, model): """Wrap a model that should run on CPU, transferring inputs and outputs as necessary."""
    model.to_cpu()

    def with_cpu_forward(inputs, drop=0.0):
        cpu_outputs, backprop = model.begin_update(_to_cpu(inputs), drop=drop)
        gpu_outputs = _to_device(ops, cpu_outputs)

        def with_cpu_backprop(d_outputs, sgd=None):
            cpu_d_outputs = _to_cpu(d_outputs)
            return backprop(cpu_d_outputs, sgd=sgd)

        return gpu_outputs, with_cpu_backprop

    return wrap(with_cpu_forward, model)
<SYSTEM_TASK:> Convert a model into a BERT-style masked language model <END_TASK> <USER_TASK:> Description: def masked_language_model(vocab, model, mask_prob=0.15): """Convert a model into a BERT-style masked language model"""
    random_words = _RandomWords(vocab)

    def mlm_forward(docs, drop=0.0):
        mask, docs = _apply_mask(docs, random_words, mask_prob=mask_prob)
        mask = model.ops.asarray(mask).reshape((mask.shape[0], 1))
        output, backprop = model.begin_update(docs, drop=drop)

        def mlm_backward(d_output, sgd=None):
            d_output *= 1 - mask
            return backprop(d_output, sgd=sgd)

        return output, mlm_backward

    return wrap(mlm_forward, model)
<SYSTEM_TASK:> Allocate model, using width from tensorizer in pipeline. <END_TASK> <USER_TASK:> Description: def begin_training(self, _=tuple(), pipeline=None, sgd=None, **kwargs): """Allocate model, using width from tensorizer in pipeline. gold_tuples (iterable): Gold-standard training data. pipeline (list): The pipeline the model is part of. """
    if self.model is True:
        self.model = self.Model(pipeline[0].model.nO)
    link_vectors_to_models(self.vocab)
    if sgd is None:
        sgd = self.create_optimizer()
    return sgd
<SYSTEM_TASK:> Render SVG. <END_TASK> <USER_TASK:> Description: def render_svg(self, render_id, words, arcs): """Render SVG. render_id (int): Unique ID, typically index of document. words (list): Individual words and their tags. arcs (list): Individual arcs and their start, end, direction and label. RETURNS (unicode): Rendered SVG markup. """
self.levels = self.get_levels(arcs) self.highest_level = len(self.levels) self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke self.width = self.offset_x + len(words) * self.distance self.height = self.offset_y + 3 * self.word_spacing self.id = render_id words = [self.render_word(w["text"], w["tag"], i) for i, w in enumerate(words)] arcs = [ self.render_arrow(a["label"], a["start"], a["end"], a["dir"], i) for i, a in enumerate(arcs) ] content = "".join(words) + "".join(arcs) return TPL_DEP_SVG.format( id=self.id, width=self.width, height=self.height, color=self.color, bg=self.bg, font=self.font, content=content, dir=self.direction, lang=self.lang, )
<SYSTEM_TASK:> Render individual arrow. <END_TASK> <USER_TASK:> Description: def render_arrow(self, label, start, end, direction, i): """Render individual arrow. label (unicode): Dependency label. start (int): Index of start word. end (int): Index of end word. direction (unicode): Arrow direction, 'left' or 'right'. i (int): Unique ID, typically arrow index. RETURNS (unicode): Rendered SVG markup. """
level = self.levels.index(end - start) + 1 x_start = self.offset_x + start * self.distance + self.arrow_spacing if self.direction == "rtl": x_start = self.width - x_start y = self.offset_y x_end = ( self.offset_x + (end - start) * self.distance + start * self.distance - self.arrow_spacing * (self.highest_level - level) / 4 ) if self.direction == "rtl": x_end = self.width - x_end y_curve = self.offset_y - level * self.distance / 2 if self.compact: y_curve = self.offset_y - level * self.distance / 6 if y_curve == 0 and len(self.levels) > 5: y_curve = -self.distance arrowhead = self.get_arrowhead(direction, x_start, y, x_end) arc = self.get_arc(x_start, y, y_curve, x_end) label_side = "right" if self.direction == "rtl" else "left" return TPL_DEP_ARCS.format( id=self.id, i=i, stroke=self.arrow_stroke, head=arrowhead, label=label, label_side=label_side, arc=arc, )
<SYSTEM_TASK:> Render individual arc. <END_TASK> <USER_TASK:> Description: def get_arc(self, x_start, y, y_curve, x_end): """Render individual arc. x_start (int): X-coordinate of arrow start point. y (int): Y-coordinate of arrow start and end point. y_curve (int): Y-coordinate of Cubic Bézier y_curve point. x_end (int): X-coordinate of arrow end point. RETURNS (unicode): Definition of the arc path ('d' attribute). """
    template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"
    if self.compact:
        template = "M{x},{y} {x},{c} {e},{c} {e},{y}"
    return template.format(x=x_start, y=y, c=y_curve, e=x_end)
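For illustration, a sketch with hypothetical coordinates, assuming renderer is an instance of the renderer class with compact set to False:
>>> renderer.get_arc(10, 50, 20, 100)
'M10,50 C10,20 100,20 100,50'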
<SYSTEM_TASK:> Render individual arrow head. <END_TASK> <USER_TASK:> Description: def get_arrowhead(self, direction, x, y, end): """Render individual arrow head. direction (unicode): Arrow direction, 'left' or 'right'. x (int): X-coordinate of arrow start point. y (int): Y-coordinate of arrow start and end point. end (int): X-coordinate of arrow end point. RETURNS (unicode): Definition of the arrow head path ('d' attribute). """
    if direction == "left":
        pos1, pos2, pos3 = (x, x - self.arrow_width + 2, x + self.arrow_width - 2)
    else:
        pos1, pos2, pos3 = (
            end,
            end + self.arrow_width - 2,
            end - self.arrow_width + 2,
        )
    arrowhead = (
        pos1,
        y + 2,
        pos2,
        y - self.arrow_width,
        pos3,
        y - self.arrow_width,
    )
    return "M{},{} L{},{} {},{}".format(*arrowhead)
<SYSTEM_TASK:> Calculate available arc height "levels". <END_TASK> <USER_TASK:> Description: def get_levels(self, arcs): """Calculate available arc height "levels". Used to calculate arrow heights dynamically and without wasting space. arcs (list): Individual arcs and their start, end, direction and label. RETURNS (list): Arc levels sorted from lowest to highest. """
    levels = set(map(lambda arc: arc["end"] - arc["start"], arcs))
    return sorted(list(levels))
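A small sketch with hypothetical arcs (the method reads no instance state, so any renderer instance works):
>>> arcs = [{"start": 0, "end": 2}, {"start": 1, "end": 2}, {"start": 3, "end": 5}]
>>> renderer.get_levels(arcs)
[1, 2]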
<SYSTEM_TASK:> Render entities in text. <END_TASK> <USER_TASK:> Description: def render_ents(self, text, spans, title): """Render entities in text. text (unicode): Original text. spans (list): Individual entity spans and their start, end and label. title (unicode or None): Document title set in Doc.user_data['title']. """
markup = "" offset = 0 for span in spans: label = span["label"] start = span["start"] end = span["end"] entity = escape_html(text[start:end]) fragments = text[offset:start].split("\n") for i, fragment in enumerate(fragments): markup += escape_html(fragment) if len(fragments) > 1 and i != len(fragments) - 1: markup += "</br>" if self.ents is None or label.upper() in self.ents: color = self.colors.get(label.upper(), self.default_color) ent_settings = {"label": label, "text": entity, "bg": color} if self.direction == "rtl": markup += TPL_ENT_RTL.format(**ent_settings) else: markup += TPL_ENT.format(**ent_settings) else: markup += entity offset = end markup += escape_html(text[offset:]) markup = TPL_ENTS.format(content=markup, dir=self.direction) if title: markup = TPL_TITLE.format(title=title) + markup return markup
<SYSTEM_TASK:> Merge subtokens into a single token. <END_TASK> <USER_TASK:> Description: def merge_subtokens(doc, label="subtok"): """Merge subtokens into a single token. doc (Doc): The Doc object. label (unicode): The subtoken dependency label. RETURNS (Doc): The Doc object with merged subtokens. DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens """
    merger = Matcher(doc.vocab)
    merger.add("SUBTOK", None, [{"DEP": label, "op": "+"}])
    matches = merger(doc)
    spans = [doc[start : end + 1] for _, start, end in matches]
    with doc.retokenize() as retokenizer:
        for span in spans:
            retokenizer.merge(span)
    return doc
<SYSTEM_TASK:> Returns mean score between tasks in pipeline that can be used for early stopping. <END_TASK> <USER_TASK:> Description: def _score_for_model(meta): """ Returns mean score between tasks in pipeline that can be used for early stopping. """
    mean_acc = list()
    pipes = meta["pipeline"]
    acc = meta["accuracy"]
    if "tagger" in pipes:
        mean_acc.append(acc["tags_acc"])
    if "parser" in pipes:
        mean_acc.append((acc["uas"] + acc["las"]) / 2)
    if "ner" in pipes:
        mean_acc.append((acc["ents_p"] + acc["ents_r"] + acc["ents_f"]) / 3)
    return sum(mean_acc) / len(mean_acc)
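An illustrative call with a hand-written meta dict (hypothetical scores):
>>> meta = {"pipeline": ["tagger", "ner"],
...         "accuracy": {"tags_acc": 90.0, "ents_p": 80.0, "ents_r": 70.0, "ents_f": 75.0}}
>>> _score_for_model(meta)
82.5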
<SYSTEM_TASK:> Load pre-trained weights for the 'token-to-vector' part of the component <END_TASK> <USER_TASK:> Description: def _load_pretrained_tok2vec(nlp, loc): """Load pre-trained weights for the 'token-to-vector' part of the component models, which is typically a CNN. See 'spacy pretrain'. Experimental. """
    with loc.open("rb") as file_:
        weights_data = file_.read()
    loaded = []
    for name, component in nlp.pipeline:
        if hasattr(component, "model") and hasattr(component.model, "tok2vec"):
            component.tok2vec.from_bytes(weights_data)
            loaded.append(name)
    return loaded
<SYSTEM_TASK:> Convert conllu files into JSON format for use with train cli. <END_TASK> <USER_TASK:> Description: def conllu2json(input_data, n_sents=10, use_morphology=False, lang=None): """ Convert conllu files into JSON format for use with train cli. use_morphology parameter enables appending morphology to tags, which is useful for languages such as Spanish, where UD tags are not so rich. Extract NER tags if available and convert them so that they follow BILUO and the Wikipedia scheme """
    # by @dvsrepo, via #11 explosion/spacy-dev-resources
    # by @katarkor
    docs = []
    sentences = []
    conll_tuples = read_conllx(input_data, use_morphology=use_morphology)
    checked_for_ner = False
    has_ner_tags = False
    for i, (raw_text, tokens) in enumerate(conll_tuples):
        sentence, brackets = tokens[0]
        if not checked_for_ner:
            has_ner_tags = is_ner(sentence[5][0])
            checked_for_ner = True
        sentences.append(generate_sentence(sentence, has_ner_tags))
        # Real-sized documents could be extracted using the comments on the
        # CoNLL-U document
        if len(sentences) % n_sents == 0:
            doc = create_doc(sentences, i)
            docs.append(doc)
            sentences = []
    return docs
<SYSTEM_TASK:> Check the 10th column of the first token to determine if the file contains <END_TASK> <USER_TASK:> Description: def is_ner(tag): """ Check the 10th column of the first token to determine if the file contains NER tags """
    tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag)
    if tag_match:
        return True
    elif tag == "O":
        return True
    else:
        return False
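A quick sketch of the expected behavior, assuming is_ner is in scope:
>>> is_ner("B-PER")
True
>>> is_ner("O")
True
>>> is_ner("NNP")
False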
<SYSTEM_TASK:> Load the model, set up the pipeline and train the parser. <END_TASK> <USER_TASK:> Description: def main(model=None, output_dir=None, n_iter=15): """Load the model, set up the pipeline and train the parser."""
if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # We'll use the built-in dependency parser class, but we want to create a # fresh instance – just in case. if "parser" in nlp.pipe_names: nlp.remove_pipe("parser") parser = nlp.create_pipe("parser") nlp.add_pipe(parser, first=True) for text, annotations in TRAIN_DATA: for dep in annotations.get("deps", []): parser.add_label(dep) other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "parser"] with nlp.disable_pipes(*other_pipes): # only train parser optimizer = nlp.begin_training() for itn in range(n_iter): random.shuffle(TRAIN_DATA) losses = {} # batch up the examples using spaCy's minibatch batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: texts, annotations = zip(*batch) nlp.update(texts, annotations, sgd=optimizer, losses=losses) print("Losses", losses) # test the trained model test_model(nlp) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) test_model(nlp2)
<SYSTEM_TASK:> Get a pipeline component for a given component name. <END_TASK> <USER_TASK:> Description: def get_pipe(self, name): """Get a pipeline component for a given component name. name (unicode): Name of pipeline component to get. RETURNS (callable): The pipeline component. DOCS: https://spacy.io/api/language#get_pipe """
    for pipe_name, component in self.pipeline:
        if pipe_name == name:
            return component
    raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))
<SYSTEM_TASK:> Replace a component in the pipeline. <END_TASK> <USER_TASK:> Description: def replace_pipe(self, name, component): """Replace a component in the pipeline. name (unicode): Name of the component to replace. component (callable): Pipeline component. DOCS: https://spacy.io/api/language#replace_pipe """
    if name not in self.pipe_names:
        raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
    self.pipeline[self.pipe_names.index(name)] = (name, component)
<SYSTEM_TASK:> Rename a pipeline component. <END_TASK> <USER_TASK:> Description: def rename_pipe(self, old_name, new_name): """Rename a pipeline component. old_name (unicode): Name of the component to rename. new_name (unicode): New name of the component. DOCS: https://spacy.io/api/language#rename_pipe """
    if old_name not in self.pipe_names:
        raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names))
    if new_name in self.pipe_names:
        raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names))
    i = self.pipe_names.index(old_name)
    self.pipeline[i] = (new_name, self.pipeline[i][1])
<SYSTEM_TASK:> Remove a component from the pipeline. <END_TASK> <USER_TASK:> Description: def remove_pipe(self, name): """Remove a component from the pipeline. name (unicode): Name of the component to remove. RETURNS (tuple): A `(name, component)` tuple of the removed component. DOCS: https://spacy.io/api/language#remove_pipe """
    if name not in self.pipe_names:
        raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
    return self.pipeline.pop(self.pipe_names.index(name))
<SYSTEM_TASK:> Make a "rehearsal" update to the models in the pipeline, to prevent <END_TASK> <USER_TASK:> Description: def rehearse(self, docs, sgd=None, losses=None, config=None): """Make a "rehearsal" update to the models in the pipeline, to prevent forgetting. Rehearsal updates run an initial copy of the model over some data, and update the model so its current predictions are more like the initial ones. This is useful for keeping a pre-trained model on-track, even if you're updating it with a smaller set of examples. docs (iterable): A batch of `Doc` objects. drop (float): The dropout rate. sgd (callable): An optimizer. RETURNS (dict): Results from the update. EXAMPLE: >>> raw_text_batches = minibatch(raw_texts) >>> for labelled_batch in minibatch(zip(train_docs, train_golds)): >>> docs, golds = zip(*train_docs) >>> nlp.update(docs, golds) >>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)] >>> nlp.rehearse(raw_batch) """
# TODO: document if len(docs) == 0: return if sgd is None: if self._optimizer is None: self._optimizer = create_default_optimizer(Model.ops) sgd = self._optimizer docs = list(docs) for i, doc in enumerate(docs): if isinstance(doc, basestring_): docs[i] = self.make_doc(doc) pipes = list(self.pipeline) random.shuffle(pipes) if config is None: config = {} grads = {} def get_grads(W, dW, key=None): grads[key] = (W, dW) get_grads.alpha = sgd.alpha get_grads.b1 = sgd.b1 get_grads.b2 = sgd.b2 for name, proc in pipes: if not hasattr(proc, "rehearse"): continue grads = {} proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {})) for key, (W, dW) in grads.items(): sgd(W, dW, key=key) return losses
<SYSTEM_TASK:> Can be called before training to pre-process gold data. By default, <END_TASK> <USER_TASK:> Description: def preprocess_gold(self, docs_golds): """Can be called before training to pre-process gold data. By default, it handles nonprojectivity and adds missing tags to the tag map. docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects. YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects. """
    for name, proc in self.pipeline:
        if hasattr(proc, "preprocess_gold"):
            docs_golds = proc.preprocess_gold(docs_golds)
    for doc, gold in docs_golds:
        yield doc, gold
<SYSTEM_TASK:> Allocate models, pre-process training data and acquire a trainer and <END_TASK> <USER_TASK:> Description: def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg): """Allocate models, pre-process training data and acquire a trainer and optimizer. Used as a contextmanager. get_gold_tuples (function): Function returning gold data component_cfg (dict): Config parameters for specific components. **cfg: Config parameters. RETURNS: An optimizer. DOCS: https://spacy.io/api/language#begin_training """
if get_gold_tuples is None: get_gold_tuples = lambda: [] # Populate vocab else: for _, annots_brackets in get_gold_tuples(): for annots, _ in annots_brackets: for word in annots[1]: _ = self.vocab[word] # noqa: F841 if cfg.get("device", -1) >= 0: util.use_gpu(cfg["device"]) if self.vocab.vectors.data.shape[1] >= 1: self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data) link_vectors_to_models(self.vocab) if self.vocab.vectors.data.shape[1]: cfg["pretrained_vectors"] = self.vocab.vectors.name if sgd is None: sgd = create_default_optimizer(Model.ops) self._optimizer = sgd if component_cfg is None: component_cfg = {} for name, proc in self.pipeline: if hasattr(proc, "begin_training"): kwargs = component_cfg.get(name, {}) kwargs.update(cfg) proc.begin_training( get_gold_tuples, pipeline=self.pipeline, sgd=self._optimizer, **kwargs ) return self._optimizer
<SYSTEM_TASK:> Continue training a pre-trained model. <END_TASK> <USER_TASK:> Description: def resume_training(self, sgd=None, **cfg): """Continue training a pre-trained model. Create and return an optimizer, and initialize "rehearsal" for any pipeline component that has a .rehearse() method. Rehearsal is used to prevent models from "forgetting" their initialised "knowledge". To perform rehearsal, collect samples of text you want the models to retain performance on, and call nlp.rehearse() with a batch of Doc objects. """
if cfg.get("device", -1) >= 0: util.use_gpu(cfg["device"]) if self.vocab.vectors.data.shape[1] >= 1: self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data) link_vectors_to_models(self.vocab) if self.vocab.vectors.data.shape[1]: cfg["pretrained_vectors"] = self.vocab.vectors.name if sgd is None: sgd = create_default_optimizer(Model.ops) self._optimizer = sgd for name, proc in self.pipeline: if hasattr(proc, "_rehearsal_model"): proc._rehearsal_model = deepcopy(proc.model) return self._optimizer
<SYSTEM_TASK:> Replace weights of models in the pipeline with those provided in the <END_TASK> <USER_TASK:> Description: def use_params(self, params, **cfg): """Replace weights of models in the pipeline with those provided in the params dictionary. Can be used as a contextmanager, in which case, models go back to their original weights after the block. params (dict): A dictionary of parameters keyed by model ID. **cfg: Config parameters. EXAMPLE: >>> with nlp.use_params(optimizer.averages): >>> nlp.to_disk('/tmp/checkpoint') """
    contexts = [
        pipe.use_params(params)
        for name, pipe in self.pipeline
        if hasattr(pipe, "use_params")
    ]
    # TODO: Having trouble with contextlib
    # Workaround: these aren't actually context managers atm.
    for context in contexts:
        try:
            next(context)
        except StopIteration:
            pass
    yield
    for context in contexts:
        try:
            next(context)
        except StopIteration:
            pass
<SYSTEM_TASK:> Process texts as a stream, and yield `Doc` objects in order. <END_TASK> <USER_TASK:> Description: def pipe( self, texts, as_tuples=False, n_threads=-1, batch_size=1000, disable=[], cleanup=False, component_cfg=None, ): """Process texts as a stream, and yield `Doc` objects in order. texts (iterator): A sequence of texts to process. as_tuples (bool): If set to True, inputs should be a sequence of (text, context) tuples. Output will then be a sequence of (doc, context) tuples. Defaults to False. batch_size (int): The number of texts to buffer. disable (list): Names of the pipeline components to disable. cleanup (bool): If True, unneeded strings are freed to control memory use. Experimental. component_cfg (dict): An optional dictionary with extra keyword arguments for specific components. YIELDS (Doc): Documents in the order of the original text. DOCS: https://spacy.io/api/language#pipe """
if n_threads != -1: deprecation_warning(Warnings.W016) if as_tuples: text_context1, text_context2 = itertools.tee(texts) texts = (tc[0] for tc in text_context1) contexts = (tc[1] for tc in text_context2) docs = self.pipe( texts, batch_size=batch_size, disable=disable, component_cfg=component_cfg, ) for doc, context in izip(docs, contexts): yield (doc, context) return docs = (self.make_doc(text) for text in texts) if component_cfg is None: component_cfg = {} for name, proc in self.pipeline: if name in disable: continue kwargs = component_cfg.get(name, {}) # Allow component_cfg to overwrite the top-level kwargs. kwargs.setdefault("batch_size", batch_size) if hasattr(proc, "pipe"): docs = proc.pipe(docs, **kwargs) else: # Apply the function, but yield the doc docs = _pipe(proc, docs, kwargs) # Track weakrefs of "recent" documents, so that we can see when they # expire from memory. When they do, we know we don't need old strings. # This way, we avoid maintaining an unbounded growth in string entries # in the string store. recent_refs = weakref.WeakSet() old_refs = weakref.WeakSet() # Keep track of the original string data, so that if we flush old strings, # we can recover the original ones. However, we only want to do this if we're # really adding strings, to save up-front costs. original_strings_data = None nr_seen = 0 for doc in docs: yield doc if cleanup: recent_refs.add(doc) if nr_seen < 10000: old_refs.add(doc) nr_seen += 1 elif len(old_refs) == 0: old_refs, recent_refs = recent_refs, old_refs if original_strings_data is None: original_strings_data = list(self.vocab.strings) else: keys, strings = self.vocab.strings._cleanup_stale_strings( original_strings_data ) self.vocab._reset_cache(keys, strings) self.tokenizer._reset_cache(keys) nr_seen = 0
<SYSTEM_TASK:> Save the current state to a directory. If a model is loaded, this <END_TASK> <USER_TASK:> Description: def to_disk(self, path, exclude=tuple(), disable=None): """Save the current state to a directory. If a model is loaded, this will include the model. path (unicode or Path): Path to a directory, which will be created if it doesn't exist. exclude (list): Names of components or serialization fields to exclude. DOCS: https://spacy.io/api/language#to_disk """
if disable is not None: deprecation_warning(Warnings.W014) exclude = disable path = util.ensure_path(path) serializers = OrderedDict() serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(p, exclude=["vocab"]) serializers["meta.json"] = lambda p: p.open("w").write(srsly.json_dumps(self.meta)) for name, proc in self.pipeline: if not hasattr(proc, "name"): continue if name in exclude: continue if not hasattr(proc, "to_disk"): continue serializers[name] = lambda p, proc=proc: proc.to_disk(p, exclude=["vocab"]) serializers["vocab"] = lambda p: self.vocab.to_disk(p) util.to_disk(path, serializers, exclude)
<SYSTEM_TASK:> Loads state from a directory. Modifies the object in place and <END_TASK> <USER_TASK:> Description: def from_disk(self, path, exclude=tuple(), disable=None): """Loads state from a directory. Modifies the object in place and returns it. If the saved `Language` object contains a model, the model will be loaded. path (unicode or Path): A path to a directory. exclude (list): Names of components or serialization fields to exclude. RETURNS (Language): The modified `Language` object. DOCS: https://spacy.io/api/language#from_disk """
if disable is not None: deprecation_warning(Warnings.W014) exclude = disable path = util.ensure_path(path) deserializers = OrderedDict() deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p)) deserializers["vocab"] = lambda p: self.vocab.from_disk(p) and _fix_pretrained_vectors_name(self) deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(p, exclude=["vocab"]) for name, proc in self.pipeline: if name in exclude: continue if not hasattr(proc, "from_disk"): continue deserializers[name] = lambda p, proc=proc: proc.from_disk(p, exclude=["vocab"]) if not (path / "vocab").exists() and "vocab" not in exclude: # Convert to list here in case exclude is (default) tuple exclude = list(exclude) + ["vocab"] util.from_disk(path, deserializers, exclude) self._path = path return self
<SYSTEM_TASK:> Serialize the current state to a binary string. <END_TASK> <USER_TASK:> Description: def to_bytes(self, exclude=tuple(), disable=None, **kwargs): """Serialize the current state to a binary string. exclude (list): Names of components or serialization fields to exclude. RETURNS (bytes): The serialized form of the `Language` object. DOCS: https://spacy.io/api/language#to_bytes """
    if disable is not None:
        deprecation_warning(Warnings.W014)
        exclude = disable
    serializers = OrderedDict()
    serializers["vocab"] = lambda: self.vocab.to_bytes()
    serializers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"])
    serializers["meta.json"] = lambda: srsly.json_dumps(self.meta)
    for name, proc in self.pipeline:
        if name in exclude:
            continue
        if not hasattr(proc, "to_bytes"):
            continue
        serializers[name] = lambda proc=proc: proc.to_bytes(exclude=["vocab"])
    exclude = util.get_serialization_exclude(serializers, exclude, kwargs)
    return util.to_bytes(serializers, exclude)
<SYSTEM_TASK:> Load state from a binary string. <END_TASK> <USER_TASK:> Description: def from_bytes(self, bytes_data, exclude=tuple(), disable=None, **kwargs): """Load state from a binary string. bytes_data (bytes): The data to load from. exclude (list): Names of components or serialization fields to exclude. RETURNS (Language): The `Language` object. DOCS: https://spacy.io/api/language#from_bytes """
    if disable is not None:
        deprecation_warning(Warnings.W014)
        exclude = disable
    deserializers = OrderedDict()
    deserializers["meta.json"] = lambda b: self.meta.update(srsly.json_loads(b))
    deserializers["vocab"] = lambda b: self.vocab.from_bytes(b) and _fix_pretrained_vectors_name(self)
    deserializers["tokenizer"] = lambda b: self.tokenizer.from_bytes(b, exclude=["vocab"])
    for name, proc in self.pipeline:
        if name in exclude:
            continue
        if not hasattr(proc, "from_bytes"):
            continue
        deserializers[name] = lambda b, proc=proc: proc.from_bytes(b, exclude=["vocab"])
    exclude = util.get_serialization_exclude(deserializers, exclude, kwargs)
    util.from_bytes(bytes_data, deserializers, exclude)
    return self
<SYSTEM_TASK:> Restore the pipeline to its state when DisabledPipes was created. <END_TASK> <USER_TASK:> Description: def restore(self): """Restore the pipeline to its state when DisabledPipes was created."""
    current, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
    unexpected = [name for name, pipe in current if not self.nlp.has_pipe(name)]
    if unexpected:
        # Don't change the pipeline if we're raising an error.
        self.nlp.pipeline = current
        raise ValueError(Errors.E008.format(names=unexpected))
    self[:] = []
<SYSTEM_TASK:> Yields all rules import paths. <END_TASK> <USER_TASK:> Description: def get_rules_import_paths(): """Yields all rules import paths. :rtype: Iterable[Path] """
    # Bundled rules:
    yield Path(__file__).parent.joinpath('rules')
    # Rules defined by user:
    yield settings.user_dir.joinpath('rules')
    # Packages with third-party rules:
    for path in sys.path:
        for contrib_module in Path(path).glob('thefuck_contrib_*'):
            contrib_rules = contrib_module.joinpath('rules')
            if contrib_rules.is_dir():
                yield contrib_rules
<SYSTEM_TASK:> Returns all enabled rules. <END_TASK> <USER_TASK:> Description: def get_rules(): """Returns all enabled rules. :rtype: [Rule] """
    paths = [rule_path for path in get_rules_import_paths()
             for rule_path in sorted(path.glob('*.py'))]
    return sorted(get_loaded_rules(paths), key=lambda rule: rule.priority)
<SYSTEM_TASK:> Yields sorted commands without duplicates. <END_TASK> <USER_TASK:> Description: def organize_commands(corrected_commands): """Yields sorted commands without duplicates. :type corrected_commands: Iterable[thefuck.types.CorrectedCommand] :rtype: Iterable[thefuck.types.CorrectedCommand] """
    try:
        first_command = next(corrected_commands)
        yield first_command
    except StopIteration:
        return

    without_duplicates = {
        command for command in sorted(
            corrected_commands, key=lambda command: command.priority)
        if command != first_command}

    sorted_commands = sorted(
        without_duplicates,
        key=lambda corrected_command: corrected_command.priority)

    logs.debug('Corrected commands: {}'.format(
        ', '.join(u'{}'.format(cmd) for cmd in [first_command] + sorted_commands)))

    for command in sorted_commands:
        yield command
<SYSTEM_TASK:> Returns generator with sorted and unique corrected commands. <END_TASK> <USER_TASK:> Description: def get_corrected_commands(command): """Returns generator with sorted and unique corrected commands. :type command: thefuck.types.Command :rtype: Iterable[thefuck.types.CorrectedCommand] """
    corrected_commands = (
        corrected for rule in get_rules()
        if rule.is_match(command)
        for corrected in rule.get_corrected_commands(command))
    return organize_commands(corrected_commands)
<SYSTEM_TASK:> Fixes previous command. Used when `thefuck` called without arguments. <END_TASK> <USER_TASK:> Description: def fix_command(known_args): """Fixes previous command. Used when `thefuck` called without arguments."""
    settings.init(known_args)
    with logs.debug_time('Total'):
        logs.debug(u'Run with settings: {}'.format(pformat(settings)))
        raw_command = _get_raw_command(known_args)

        try:
            command = types.Command.from_raw_script(raw_command)
        except EmptyCommand:
            logs.debug('Empty command, nothing to do')
            return

        corrected_commands = get_corrected_commands(command)
        selected_command = select_command(corrected_commands)

        if selected_command:
            selected_command.run(command)
        else:
            sys.exit(1)
<SYSTEM_TASK:> Split the command using shell-like syntax. <END_TASK> <USER_TASK:> Description: def split_command(self, command): """Split the command using shell-like syntax."""
    encoded = self.encode_utf8(command)

    try:
        splitted = [s.replace("??", "\\ ")
                    for s in shlex.split(encoded.replace('\\ ', '??'))]
    except ValueError:
        splitted = encoded.split(' ')

    return self.decode_utf8(splitted)
<SYSTEM_TASK:> Return a shell-escaped version of the string s. <END_TASK> <USER_TASK:> Description: def quote(self, s): """Return a shell-escaped version of the string s."""
    if six.PY2:
        from pipes import quote
    else:
        from shlex import quote

    return quote(s)
<SYSTEM_TASK:> To get brew default commands on local environment <END_TASK> <USER_TASK:> Description: def _get_brew_commands(brew_path_prefix): """To get brew default commands on local environment"""
    brew_cmd_path = brew_path_prefix + BREW_CMD_PATH

    return [name[:-3] for name in os.listdir(brew_cmd_path)
            if name.endswith(('.rb', '.sh'))]
<SYSTEM_TASK:> Resolves git aliases and supports testing for both git and hub. <END_TASK> <USER_TASK:> Description: def git_support(fn, command): """Resolves git aliases and supports testing for both git and hub."""
    # supports GitHub's `hub` command
    # which is recommended to be used with `alias git=hub`
    # but at this point, shell aliases have already been resolved
    if not is_app(command, 'git', 'hub'):
        return False

    # perform git aliases expansion
    if 'trace: alias expansion:' in command.output:
        search = re.search("trace: alias expansion: ([^ ]*) => ([^\n]*)",
                           command.output)
        alias = search.group(1)

        # by default git quotes everything, for example:
        #     'commit' '--amend'
        # which is surprising and does not allow to easily test for
        # eg. 'git commit'
        expansion = ' '.join(shell.quote(part)
                             for part in shell.split_command(search.group(2)))
        new_script = command.script.replace(alias, expansion)

        command = command.update(script=new_script)

    return fn(command)
<SYSTEM_TASK:> Get output of the script. <END_TASK> <USER_TASK:> Description: def get_output(script, expanded): """Get output of the script. :param script: Console script. :type script: str :param expanded: Console script with expanded aliases. :type expanded: str :rtype: str """
    if shell_logger.is_available():
        return shell_logger.get_output(script)

    if settings.instant_mode:
        return read_log.get_output(script)
    else:
        return rerun.get_output(script, expanded)
<SYSTEM_TASK:> It's too dangerous to use `-y` and `-r` together. <END_TASK> <USER_TASK:> Description: def _add_conflicting_arguments(self): """It's too dangerous to use `-y` and `-r` together."""
    group = self._parser.add_mutually_exclusive_group()
    group.add_argument(
        '-y', '--yes', '--yeah',
        action='store_true',
        help='execute fixed command without confirmation')
    group.add_argument(
        '-r', '--repeat',
        action='store_true',
        help='repeat on failure')
<SYSTEM_TASK:> Fills `settings` with values from `settings.py` and env. <END_TASK> <USER_TASK:> Description: def init(self, args=None): """Fills `settings` with values from `settings.py` and env."""
    from .logs import exception

    self._setup_user_dir()
    self._init_settings_file()

    try:
        self.update(self._settings_from_file())
    except Exception:
        exception("Can't load settings from file", sys.exc_info())

    try:
        self.update(self._settings_from_env())
    except Exception:
        exception("Can't load settings from env", sys.exc_info())

    self.update(self._settings_from_args(args))
<SYSTEM_TASK:> Returns user config dir, create it when it doesn't exist. <END_TASK> <USER_TASK:> Description: def _setup_user_dir(self): """Returns user config dir, create it when it doesn't exist."""
    user_dir = self._get_user_dir_path()

    rules_dir = user_dir.joinpath('rules')
    if not rules_dir.is_dir():
        rules_dir.mkdir(parents=True)

    self.user_dir = user_dir
<SYSTEM_TASK:> Transforms rules list from env-string to python. <END_TASK> <USER_TASK:> Description: def _rules_from_env(self, val): """Transforms rules list from env-string to python."""
    val = val.split(':')
    if 'DEFAULT_RULES' in val:
        val = const.DEFAULT_RULES + [rule for rule in val if rule != 'DEFAULT_RULES']
    return val
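An illustrative call with hypothetical rule names (when the string contains DEFAULT_RULES, the result is const.DEFAULT_RULES plus the extra names instead):
>>> settings._rules_from_env('cd_mkdir:git_push')
['cd_mkdir', 'git_push']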
<SYSTEM_TASK:> Loads settings from env. <END_TASK> <USER_TASK:> Description: def _settings_from_env(self): """Loads settings from env."""
    return {attr: self._val_from_env(env, attr)
            for env, attr in const.ENV_TO_ATTR.items()
            if env in os.environ}
<SYSTEM_TASK:> Loads settings from args. <END_TASK> <USER_TASK:> Description: def _settings_from_args(self, args): """Loads settings from args."""
    if not args:
        return {}

    from_args = {}
    if args.yes:
        from_args['require_confirmation'] = not args.yes
    if args.debug:
        from_args['debug'] = args.debug
    if args.repeat:
        from_args['repeat'] = args.repeat
    return from_args
<SYSTEM_TASK:> When arguments order is wrong first argument will be destination. <END_TASK> <USER_TASK:> Description: def _get_destination(script_parts): """When arguments order is wrong first argument will be destination."""
    for part in script_parts:
        if part not in {'ln', '-s', '--symbolic'} and os.path.exists(part):
            return part
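A hedged sketch, assuming /tmp exists on the machine and new_link does not:
>>> _get_destination(['ln', '-s', 'new_link', '/tmp'])
'/tmp'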
<SYSTEM_TASK:> Removes sudo before calling fn and adds it after. <END_TASK> <USER_TASK:> Description: def sudo_support(fn, command): """Removes sudo before calling fn and adds it after."""
    if not command.script.startswith('sudo '):
        return fn(command)

    result = fn(command.update(script=command.script[5:]))

    if result and isinstance(result, six.string_types):
        return u'sudo {}'.format(result)
    elif isinstance(result, list):
        return [u'sudo {}'.format(x) for x in result]
    else:
        return result
<SYSTEM_TASK:> Tries to kill the process otherwise just logs a debug message, the <END_TASK> <USER_TASK:> Description: def _kill_process(proc): """Tries to kill the process; otherwise just logs a debug message (the process will be killed when thefuck terminates). :type proc: Process """
    try:
        proc.kill()
    except AccessDenied:
        logs.debug(u'Rerun: process PID {} ({}) could not be terminated'.format(
            proc.pid, proc.exe()))
<SYSTEM_TASK:> Returns `True` if we can get output of the command in the <END_TASK> <USER_TASK:> Description: def _wait_output(popen, is_slow): """Returns `True` if we can get output of the command in the `settings.wait_command` time. Command will be killed if it wasn't finished in the time. :type popen: Popen :rtype: bool """
    proc = Process(popen.pid)
    try:
        proc.wait(settings.wait_slow_command if is_slow else settings.wait_command)
        return True
    except TimeoutExpired:
        for child in proc.children(recursive=True):
            _kill_process(child)
        _kill_process(proc)
        return False
<SYSTEM_TASK:> Gets the packages that provide the given command using `pkgfile`. <END_TASK> <USER_TASK:> Description: def get_pkgfile(command): """ Gets the packages that provide the given command using `pkgfile`. If the command is of the form `sudo foo`, searches for the `foo` command instead. """
    try:
        command = command.strip()

        if command.startswith('sudo '):
            command = command[5:]

        command = command.split(" ")[0]

        packages = subprocess.check_output(
            ['pkgfile', '-b', '-v', command],
            universal_newlines=True, stderr=utils.DEVNULL
        ).splitlines()

        return [package.split()[0] for package in packages]
    except subprocess.CalledProcessError as err:
        if err.returncode == 1 and err.output == "":
            return []
        else:
            raise err
<SYSTEM_TASK:> Returns a list of the child directories of the given parent directory <END_TASK> <USER_TASK:> Description: def _get_sub_dirs(parent): """Returns a list of the child directories of the given parent directory"""
    return [child for child in os.listdir(parent)
            if os.path.isdir(os.path.join(parent, child))]
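An illustrative call; the output is system-dependent, so the value shown is only an example:
>>> _get_sub_dirs('/usr')  # e.g. ['bin', 'lib', 'local', ...]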
<SYSTEM_TASK:> Returns new command with replaced fields. <END_TASK> <USER_TASK:> Description: def update(self, **kwargs): """Returns new command with replaced fields. :rtype: Command """
    kwargs.setdefault('script', self.script)
    kwargs.setdefault('output', self.output)
    return Command(**kwargs)
<SYSTEM_TASK:> Creates instance of `Command` from a list of script parts. <END_TASK> <USER_TASK:> Description: def from_raw_script(cls, raw_script): """Creates instance of `Command` from a list of script parts. :type raw_script: [basestring] :rtype: Command :raises: EmptyCommand """
    script = format_raw_script(raw_script)
    if not script:
        raise EmptyCommand

    expanded = shell.from_shell(script)
    output = get_output(script, expanded)
    return cls(expanded, output)
<SYSTEM_TASK:> Returns `True` when rule enabled. <END_TASK> <USER_TASK:> Description: def is_enabled(self): """Returns `True` when rule enabled. :rtype: bool """
    if self.name in settings.exclude_rules:
        return False
    elif self.name in settings.rules:
        return True
    elif self.enabled_by_default and ALL_ENABLED in settings.rules:
        return True
    else:
        return False
<SYSTEM_TASK:> Returns `True` if rule matches the command. <END_TASK> <USER_TASK:> Description: def is_match(self, command): """Returns `True` if rule matches the command. :type command: Command :rtype: bool """
    if command.output is None and self.requires_output:
        return False

    try:
        with logs.debug_time(u'Trying rule: {};'.format(self.name)):
            if self.match(command):
                return True
    except Exception:
        logs.rule_failed(self, sys.exc_info())
<SYSTEM_TASK:> Returns generator with corrected commands. <END_TASK> <USER_TASK:> Description: def get_corrected_commands(self, command): """Returns generator with corrected commands. :type command: Command :rtype: Iterable[CorrectedCommand] """
    new_commands = self.get_new_command(command)
    if not isinstance(new_commands, list):
        new_commands = (new_commands,)
    for n, new_command in enumerate(new_commands):
        yield CorrectedCommand(script=new_command,
                               side_effect=self.side_effect,
                               priority=(n + 1) * self.priority)
<SYSTEM_TASK:> Returns fixed commands script. <END_TASK> <USER_TASK:> Description: def _get_script(self): """Returns fixed commands script. If `settings.repeat` is `True`, appends command with second attempt of running fuck in case fixed command fails again. """
    if settings.repeat:
        repeat_fuck = '{} --repeat {}--force-command {}'.format(
            get_alias(),
            '--debug ' if settings.debug else '',
            shell.quote(self.script))
        return shell.or_(self.script, repeat_fuck)
    else:
        return self.script
<SYSTEM_TASK:> Runs command from rule for passed command. <END_TASK> <USER_TASK:> Description: def run(self, old_cmd): """Runs command from rule for passed command. :type old_cmd: Command """
if self.side_effect:
            self.side_effect(old_cmd, self.script)
        if settings.alter_history:
            shell.put_to_history(self.script)
        # This depends on correct setting of PYTHONIOENCODING by the alias:
        logs.debug(u'PYTHONIOENCODING: {}'.format(
            os.environ.get('PYTHONIOENCODING', '!!not-set!!')))
        print(self._get_script())
<SYSTEM_TASK:> Returns `True` when we know that `fuck` was called a second time. <END_TASK> <USER_TASK:> Description: def _is_second_run(): """Returns `True` when we know that `fuck` was called a second time."""
tracker_path = _get_not_configured_usage_tracker_path()
    if not tracker_path.exists():
        return False

    current_pid = _get_shell_pid()
    with tracker_path.open('r') as tracker:
        try:
            info = json.load(tracker)
        except ValueError:
            return False

    if not (isinstance(info, dict) and info.get('pid') == current_pid):
        return False

    return (_get_previous_command() == 'fuck' or
            time.time() - info.get('time', 0) < const.CONFIGURATION_TIMEOUT)
<SYSTEM_TASK:> Returns `True` when the alias is already in the shell config. <END_TASK> <USER_TASK:> Description: def _is_already_configured(configuration_details): """Returns `True` when the alias is already in the shell config."""
path = Path(configuration_details.path).expanduser()
    with path.open('r') as shell_config:
        return configuration_details.content in shell_config.read()
<SYSTEM_TASK:> Adds alias to shell config. <END_TASK> <USER_TASK:> Description: def _configure(configuration_details): """Adds alias to shell config."""
path = Path(configuration_details.path).expanduser()
    with path.open('a') as shell_config:
        shell_config.write(u'\n')
        shell_config.write(configuration_details.content)
        shell_config.write(u'\n')
<SYSTEM_TASK:> Shows useful information about how to configure the alias on the first run <END_TASK> <USER_TASK:> Description: def main(): """Shows useful information about how to configure the alias on the first run and configures it automatically on the second. It's only visible when the user types fuck and the alias isn't configured. """
settings.init()
    configuration_details = shell.how_to_configure()

    if (
        configuration_details and
        configuration_details.can_configure_automatically
    ):
        if _is_already_configured(configuration_details):
            logs.already_configured(configuration_details)
            return
        elif _is_second_run():
            _configure(configuration_details)
            logs.configured_successfully(configuration_details)
            return
        else:
            _record_first_run()

    logs.how_to_configure_alias(configuration_details)
<SYSTEM_TASK:> Adds default values to settings if they are not present. <END_TASK> <USER_TASK:> Description: def default_settings(params): """Adds default values to settings if they are not present. Usage: @default_settings({'apt': '/usr/bin/apt'}) def match(command): print(settings.apt) """
def _default_settings(fn, command):
        for k, w in params.items():
            settings.setdefault(k, w)
        return fn(command)
    return decorator(_default_settings)
<SYSTEM_TASK:> Returns closest match or just first from possibilities. <END_TASK> <USER_TASK:> Description: def get_closest(word, possibilities, cutoff=0.6, fallback_to_first=True): """Returns closest match or just first from possibilities."""
possibilities = list(possibilities)
    try:
        return difflib_get_close_matches(word, possibilities, 1, cutoff)[0]
    except IndexError:
        if fallback_to_first:
            return possibilities[0]
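Illustrative behavior sketch (not part of the original source), assuming `difflib_get_close_matches` is `difflib.get_close_matches` imported under that alias:

    get_closest('brnch', ['branch', 'checkout'])                 # 'branch' (above the 0.6 cutoff)
    get_closest('qwerty', ['branch', 'checkout'])                # 'branch' (no close match, falls back to first)
    get_closest('qwerty', ['branch'], fallback_to_first=False)   # None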
<SYSTEM_TASK:> Overrides `difflib.get_close_matches` to control the argument `n`. <END_TASK> <USER_TASK:> Description: def get_close_matches(word, possibilities, n=None, cutoff=0.6): """Overrides `difflib.get_close_matches` to control the argument `n`."""
if n is None:
        n = settings.num_close_matches
    return difflib_get_close_matches(word, possibilities, n, cutoff)
<SYSTEM_TASK:> Replaces command line argument. <END_TASK> <USER_TASK:> Description: def replace_argument(script, from_, to): """Replaces command line argument."""
replaced_in_the_end = re.sub(u' {}$'.format(re.escape(from_)),
                                 u' {}'.format(to), script, count=1)
    if replaced_in_the_end != script:
        return replaced_in_the_end
    else:
        return script.replace(
            u' {} '.format(from_), u' {} '.format(to), 1)
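Illustrative behavior sketch (not part of the original source): a trailing argument is replaced first, otherwise the first space-delimited occurrence is replaced:

    replace_argument('git brnch', 'brnch', 'branch')       # 'git branch'  (matched at the end)
    replace_argument('git brnch -v', 'brnch', 'branch')    # 'git branch -v'  (matched in the middle)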
<SYSTEM_TASK:> Returns `True` if command is call to one of passed app names. <END_TASK> <USER_TASK:> Description: def is_app(command, *app_names, **kwargs): """Returns `True` if command is call to one of passed app names."""
at_least = kwargs.pop('at_least', 0)
    if kwargs:
        raise TypeError("got an unexpected keyword argument '{}'".format(kwargs.keys()))

    if len(command.script_parts) > at_least:
        return command.script_parts[0] in app_names

    return False
<SYSTEM_TASK:> Specifies that the matching script is for one of the app names. <END_TASK> <USER_TASK:> Description: def for_app(*app_names, **kwargs): """Specifies that the matching script is for one of the app names."""
def _for_app(fn, command):
        if is_app(command, *app_names, **kwargs):
            return fn(command)
        else:
            return False

    return decorator(_for_app)
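Illustrative usage sketch (not part of the original source); the rule body below is hypothetical and assumes `for_app` is imported from the utils module:

    @for_app('git', 'hub')
    def match(command):
        # only evaluated when command.script_parts[0] is 'git' or 'hub'
        return 'usage:' in command.output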
<SYSTEM_TASK:> Caches function result in temporary file. <END_TASK> <USER_TASK:> Description: def cache(*depends_on): """Caches function result in temporary file. Cache will be expired when modification date of files from `depends_on` will be changed. Only functions should be wrapped in `cache`, not methods. """
def cache_decorator(fn):
        @memoize
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if cache.disabled:
                return fn(*args, **kwargs)
            else:
                return _cache.get_value(fn, depends_on, args, kwargs)

        return wrapper

    return cache_decorator
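Illustrative usage sketch (not part of the original source); the decorated function and the watched file path are hypothetical:

    import subprocess

    @cache('/etc/apt/sources.list')
    def get_apt_packages():
        # recomputed only when the mtime of /etc/apt/sources.list changes;
        # otherwise the previously cached value is returned
        return subprocess.check_output(['apt-cache', 'pkgnames'],
                                       universal_newlines=True).splitlines()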
<SYSTEM_TASK:> Creates single script from a list of script parts. <END_TASK> <USER_TASK:> Description: def format_raw_script(raw_script): """Creates single script from a list of script parts. :type raw_script: [basestring] :rtype: basestring """
if six.PY2:
        script = ' '.join(arg.decode('utf-8') for arg in raw_script)
    else:
        script = ' '.join(raw_script)

    return script.strip()
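Illustrative behavior sketch (not part of the original source):

    format_raw_script(['git', 'commit', '-m', 'fix'])   # 'git commit -m fix'
    format_raw_script(['', '  '])                       # '' -- which makes from_raw_script raise EmptyCommand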
<SYSTEM_TASK:> Exports latest saved model to .nn format for Unity embedding. <END_TASK> <USER_TASK:> Description: def export_model(self): """ Exports latest saved model to .nn format for Unity embedding. """
with self.graph.as_default():
            target_nodes = ','.join(self._process_graph())
            ckpt = tf.train.get_checkpoint_state(self.model_path)
            freeze_graph.freeze_graph(
                input_graph=self.model_path + '/raw_graph_def.pb',
                input_binary=True,
                input_checkpoint=ckpt.model_checkpoint_path,
                output_node_names=target_nodes,
                output_graph=(self.model_path + '/frozen_graph_def.pb'),
                clear_devices=True, initializer_nodes='', input_saver='',
                restore_op_name='save/restore_all',
                filename_tensor_name='save/Const:0')

        tf2bc.convert(self.model_path + '/frozen_graph_def.pb',
                      self.model_path + '.nn')
        logger.info('Exported ' + self.model_path + '.nn file')
<SYSTEM_TASK:> Resets all the local buffers <END_TASK> <USER_TASK:> Description: def reset_local_buffers(self): """ Resets all the local buffers """
agent_ids = list(self.keys())
        for k in agent_ids:
            self[k].reset_agent()
<SYSTEM_TASK:> A dict from brain name to the brain's curriculum's lesson number. <END_TASK> <USER_TASK:> Description: def lesson_nums(self): """A dict from brain name to the brain's curriculum's lesson number."""
lesson_nums = {}
        for brain_name, curriculum in self.brains_to_curriculums.items():
            lesson_nums[brain_name] = curriculum.lesson_num

        return lesson_nums
<SYSTEM_TASK:> Attempts to increment all the lessons of all the curriculums in this <END_TASK> <USER_TASK:> Description: def increment_lessons(self, measure_vals, reward_buff_sizes=None): """Attempts to increment all the lessons of all the curriculums in this MetaCurriculum. Note that calling this method does not guarantee the lesson of a curriculum will increment. The lesson of a curriculum will only increment if the specified measure threshold defined in the curriculum has been reached and the minimum number of episodes in the lesson have been completed. Args: measure_vals (dict): A dict of brain name to measure value. reward_buff_sizes (dict): A dict of brain names to the size of their corresponding reward buffers. Returns: A dict from brain name to whether that brain's lesson number was incremented. """
ret = {}
        if reward_buff_sizes:
            for brain_name, buff_size in reward_buff_sizes.items():
                if self._lesson_ready_to_increment(brain_name, buff_size):
                    measure_val = measure_vals[brain_name]
                    ret[brain_name] = (self.brains_to_curriculums[brain_name]
                                       .increment_lesson(measure_val))
        else:
            for brain_name, measure_val in measure_vals.items():
                ret[brain_name] = (self.brains_to_curriculums[brain_name]
                                   .increment_lesson(measure_val))
        return ret
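Illustrative call sketch (not part of the original source); the brain names, measure values, and buffer sizes below are hypothetical and assume `meta_curriculum` is an already constructed MetaCurriculum:

    lesson_changes = meta_curriculum.increment_lessons(
        measure_vals={'BrainA': 0.85, 'BrainB': 0.40},
        reward_buff_sizes={'BrainA': 1024, 'BrainB': 256})
    # e.g. {'BrainA': True, 'BrainB': False} if only BrainA met its threshold and minimum lesson length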
<SYSTEM_TASK:> Sets all the curriculums in this meta curriculum to a specified <END_TASK> <USER_TASK:> Description: def set_all_curriculums_to_lesson_num(self, lesson_num): """Sets all the curriculums in this meta curriculum to a specified lesson number. Args: lesson_num (int): The lesson number which all the curriculums will be set to. """
for _, curriculum in self.brains_to_curriculums.items():
            curriculum.lesson_num = lesson_num
<SYSTEM_TASK:> Get the combined configuration of all curriculums in this <END_TASK> <USER_TASK:> Description: def get_config(self): """Get the combined configuration of all curriculums in this MetaCurriculum. Returns: A dict from parameter to value. """
config = {}

        for _, curriculum in self.brains_to_curriculums.items():
            curr_config = curriculum.get_config()
            config.update(curr_config)

        return config
<SYSTEM_TASK:> Inform Metrics class that experience collection is done. <END_TASK> <USER_TASK:> Description: def end_experience_collection_timer(self): """ Inform Metrics class that experience collection is done. """
if self.time_start_experience_collection:
            curr_delta = time() - self.time_start_experience_collection
            if self.delta_last_experience_collection is None:
                self.delta_last_experience_collection = curr_delta
            else:
                self.delta_last_experience_collection += curr_delta
        self.time_start_experience_collection = None
<SYSTEM_TASK:> Inform Metrics class about time to step in environment. <END_TASK> <USER_TASK:> Description: def add_delta_step(self, delta: float): """ Inform Metrics class about time to step in environment. """
if self.delta_last_experience_collection:
            self.delta_last_experience_collection += delta
        else:
            self.delta_last_experience_collection = delta
<SYSTEM_TASK:> Inform Metrics class that the policy update has ended. <END_TASK> <USER_TASK:> Description: def end_policy_update(self): """ Inform Metrics class that the policy update has ended. """
if self.time_policy_update_start:
            self.delta_policy_update = time() - self.time_policy_update_start
        else:
            self.delta_policy_update = 0
        delta_train_start = time() - self.time_training_start
        LOGGER.debug(" Policy Update Training Metrics for {}: "
                     "\n\t\tTime to update Policy: {:0.3f} s \n"
                     "\t\tTime elapsed since training: {:0.3f} s \n"
                     "\t\tTime for experience collection: {:0.3f} s \n"
                     "\t\tBuffer Length: {} \n"
                     "\t\tReturns : {:0.3f}\n"
                     .format(self.brain_name,
                             self.delta_policy_update,
                             delta_train_start,
                             self.delta_last_experience_collection,
                             self.last_buffer_length,
                             self.last_mean_return))
        self._add_row(delta_train_start)
<SYSTEM_TASK:> Creates TF ops to track and increment recent average cumulative reward. <END_TASK> <USER_TASK:> Description: def create_reward_encoder(): """Creates TF ops to track and increment recent average cumulative reward."""
last_reward = tf.Variable(0, name="last_reward", trainable=False,
                                  dtype=tf.float32)
        new_reward = tf.placeholder(shape=[], dtype=tf.float32,
                                    name='new_reward')
        update_reward = tf.assign(last_reward, new_reward)
        return last_reward, new_reward, update_reward
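Illustrative TF1-style usage sketch (not part of the original source), assuming the static method is reachable as a plain function and run inside a session:

    import tensorflow as tf

    last_reward, new_reward, update_reward = create_reward_encoder()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(update_reward, feed_dict={new_reward: 1.5})
        print(sess.run(last_reward))  # 1.5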
<SYSTEM_TASK:> Creates TF ops to track and increment global training step. <END_TASK> <USER_TASK:> Description: def create_global_steps(): """Creates TF ops to track and increment global training step."""
global_step = tf.Variable(0, name="global_step", trainable=False,
                                  dtype=tf.int32)
        increment_step = tf.assign(global_step, tf.add(global_step, 1))
        return global_step, increment_step
<SYSTEM_TASK:> Yield items from any nested iterable; see REF. <END_TASK> <USER_TASK:> Description: def flatten(items,enter=lambda x:isinstance(x, list)): # http://stackoverflow.com/a/40857703 # https://github.com/ctmakro/canton/blob/master/canton/misc.py """Yield items from any nested iterable; see REF."""
for x in items:
        if enter(x):
            yield from flatten(x)
        else:
            yield x
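Illustrative behavior sketch (not part of the original source):

    list(flatten([1, [2, [3, 4]], 5]))    # [1, 2, 3, 4, 5]
    list(flatten([1, (2, 3)], enter=lambda x: isinstance(x, (list, tuple))))    # [1, 2, 3]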
<SYSTEM_TASK:> Exports latest saved models to .nn format for Unity embedding. <END_TASK> <USER_TASK:> Description: def _export_graph(self): """ Exports latest saved models to .nn format for Unity embedding. """
for brain_name in self.trainers.keys():
            self.trainers[brain_name].export_model()
<SYSTEM_TASK:> Resets the environment. <END_TASK> <USER_TASK:> Description: def _reset_env(self, env: BaseUnityEnvironment): """Resets the environment. Returns: A Data structure corresponding to the initial reset state of the environment. """
if self.meta_curriculum is not None:
            return env.reset(train_mode=self.fast_simulation,
                             config=self.meta_curriculum.get_config())
        else:
            return env.reset(train_mode=self.fast_simulation)