<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def accuracy(self, test_set, format=None): """Compute the accuracy on a test set. :param test_set: A list of tuples of the form ``(text, label)``, or a filename. :param format: If ``test_set`` is a filename, the file format, e.g. ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the file format. """
if isinstance(test_set, basestring):  # test_set is a filename
    test_data = self._read_data(test_set)
else:  # test_set is a list of tuples
    test_data = test_set
test_features = [(self.extract_features(d), c) for d, c in test_data]
return nltk.classify.accuracy(self.classifier, test_features)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def update(self, new_data, *args, **kwargs):
    '''Update the classifier with new training data and re-trains the
    classifier.

    :param new_data: New data as a list of tuples of the form
        ``(text, label)``.
    '''
    self.train_set += new_data
    self.train_features = [(self.extract_features(d), c)
                           for d, c in self.train_set]
    try:
        self.classifier = self.nltk_class.train(self.train_features,
                                                *args, **kwargs)
    except AttributeError:  # Descendant has not defined nltk_class
        raise ValueError("NLTKClassifier must have a nltk_class"
                         " variable that is not None.")
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def train(self, *args, **kwargs): """Train the classifier with labeled and unlabeled feature sets and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. :rtype: A classifier """
self.classifier = self.nltk_class.train(self.positive_features,
                                         self.unlabeled_features,
                                         self.positive_prob_prior)
return self.classifier
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def update(self, new_positive_data=None, new_unlabeled_data=None,
           positive_prob_prior=0.5, *args, **kwargs):
    '''Update the classifier with new data and re-trains the classifier.

    :param new_positive_data: List of new, labeled strings.
    :param new_unlabeled_data: List of new, unlabeled strings.
    '''
    self.positive_prob_prior = positive_prob_prior
    if new_positive_data:
        self.positive_set += new_positive_data
        self.positive_features += [self.extract_features(d)
                                   for d in new_positive_data]
    if new_unlabeled_data:
        self.unlabeled_set += new_unlabeled_data
        self.unlabeled_features += [self.extract_features(d)
                                    for d in new_unlabeled_data]
    self.classifier = self.nltk_class.train(self.positive_features,
                                            self.unlabeled_features,
                                            self.positive_prob_prior,
                                            *args, **kwargs)
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def variations(iterable, optional=lambda x: False): """ Returns all possible variations of a sequence with optional items. """
# For example: variations(["A?", "B?", "C"], optional=lambda s: s.endswith("?"))
# defines a sequence where constraint A and B are optional:
# [("A?", "B?", "C"), ("B?", "C"), ("A?", "C"), ("C")]
iterable = tuple(iterable)
# Create a boolean sequence where True means optional:
# ("A?", "B?", "C") => [True, True, False]
o = [optional(x) for x in iterable]
# Find all permutations of the boolean sequence:
# [True, False, True], [True, False, False], [False, False, True], [False, False, False].
# Map to sequences of constraints whose index in the boolean sequence yields True.
a = set()
for p in product([False, True], repeat=sum(o)):
    p = list(p)
    v = [b and (b and p.pop(0)) for b in o]
    v = tuple(iterable[i] for i in xrange(len(v)) if not v[i])
    a.add(v)
# Longest-first.
return sorted(a, cmp=lambda x, y: len(y) - len(x))
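For reference, a minimal sketch of the standard-library call the enumeration step above relies on; `itertools.product` with `repeat` yields every on/off combination for the optional positions (shown here in Python 3 syntax):

    from itertools import product

    # Every combination for two optional slots:
    print(list(product([False, True], repeat=2)))
    # => [(False, False), (False, True), (True, False), (True, True)]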
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parents(self, term, recursive=False, **kwargs): """ Returns a list of all semantic types for the given term. If recursive=True, traverses parents up to the root. """
def dfs(term, recursive=False, visited={}, **kwargs):
    if term in visited:  # Break on cyclic relations.
        return []
    visited[term], a = True, []
    if dict.__contains__(self, term):
        a = self[term][0].keys()
    for classifier in self.classifiers:
        a.extend(classifier.parents(term, **kwargs) or [])
    if recursive:
        for w in a:
            a += dfs(w, recursive, visited, **kwargs)
    return a
return unique(dfs(self._normalize(term), recursive, {}, **kwargs))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def constraint(self, word): """ Returns the constraint that matches the given Word, or None. """
if word.index in self._map1: return self._map1[word.index]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def constraints(self, chunk): """ Returns a list of constraints that match the given Chunk. """
a = [self._map1[w.index] for w in chunk.words if w.index in self._map1]
b = []; [b.append(constraint) for constraint in a if constraint not in b]
return b
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tree(s, token=[WORD, POS, CHUNK, PNP, REL, LEMMA]): """ Returns a parsed Text from the given parsed string. """
return Text(s, token)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def word_tokenize(text, tokenizer=None, include_punc=True, *args, **kwargs): """Convenience function for tokenizing text into words. NOTE: NLTK's word tokenizer expects sentences as input, so the text will be tokenized to sentences before being tokenized to words. This function returns an itertools chain object (generator). """
_tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer()
words = chain.from_iterable(
    WordTokenizer(tokenizer=_tokenizer).itokenize(sentence, include_punc,
                                                  *args, **kwargs)
    for sentence in sent_tokenize(text, tokenizer=_tokenizer))
return words
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def word_tokenize(self, text, include_punc=True): """The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. It assumes that the text has already been segmented into sentences, e.g. using ``self.sent_tokenize()``. This tokenizer performs the following steps: - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` - treat most punctuation characters as separate tokens - split off commas and single quotes, when followed by whitespace - separate periods that appear at the end of line Source: NLTK's docstring of ``TreebankWordTokenizer`` (accessed: 02/10/2014) """
#: Do not process empty strings (Issue #3)
if text.strip() == "":
    return []
_tokens = self.word_tok.tokenize(text)
#: Handle strings consisting of a single punctuation mark separately (Issue #4)
if len(_tokens) == 1:
    if _tokens[0] in PUNCTUATION:
        if include_punc:
            return _tokens
        else:
            return []
if include_punc:
    return _tokens
else:
    # Return each word token
    # Strips punctuation unless the word comes from a contraction
    # e.g. "gibt's" => ["gibt", "'s"] in "Heute gibt's viel zu tun!"
    # e.g. "hat's" => ["hat", "'s"]
    # e.g. "home." => ['home']
    words = [
        word if word.startswith("'") else strip_punc(word, all=False)
        for word in _tokens if strip_punc(word, all=False)]
    return list(words)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sent_tokenize(self, text, **kwargs): """Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks. """
sentences = find_sentences(text,
                           punctuation=kwargs.get("punctuation", PUNCTUATION),
                           abbreviations=kwargs.get("abbreviations", ABBREVIATIONS_DE),
                           replace=kwargs.get("replace", replacements),
                           linebreak=r"\n{2,}")
return sentences
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(self, text): """Parses the text. ``pattern.de.parse(**kwargs)`` can be passed to the parser instance and are documented in the main docstring of :class:`PatternParser() <textblob_de.parsers.PatternParser>`. :param str text: A string. """
#: Do not process empty strings (Issue #3)
if text.strip() == "":
    return ""
#: Do not process strings consisting of a single punctuation mark (Issue #4)
elif text.strip() in PUNCTUATION:
    _sym = text.strip()
    if _sym in tuple('.?!'):
        _tag = "."
    else:
        _tag = _sym
    if self.lemmata:
        return "{0}/{1}/O/O/{0}".format(_sym, _tag)
    else:
        return "{0}/{1}/O/O".format(_sym, _tag)
if self.tokenize:
    _tokenized = " ".join(self.tokenizer.tokenize(text))
else:
    _tokenized = text
_parsed = pattern_parse(_tokenized,
                        # text is tokenized before it is passed on to
                        # pattern.de.parse
                        tokenize=False,
                        tags=self.tags,
                        chunks=self.chunks,
                        relations=self.relations,
                        lemmata=self.lemmata,
                        encoding=self.encoding,
                        tagset=self.tagset)
if self.pprint:
    _parsed = pattern_pprint(_parsed)
return _parsed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _filter_extracted(self, extracted_list): """Filter insignificant words for key noun phrase extraction. determiners, relative pronouns, reflexive pronouns In general, pronouns are not useful, as you need context to know what they refer to. Most of the pronouns, however, are filtered out by blob.noun_phrase method's np length (>1) filter :param list extracted_list: A list of noun phrases extracted from parser output. """
_filtered = []
for np in extracted_list:
    _np = np.split()
    if _np[0] in INSIGNIFICANT:
        _np.pop(0)
    try:
        if _np[-1] in INSIGNIFICANT:
            _np.pop(-1)
        # e.g. 'welcher die ...'
        if _np[0] in INSIGNIFICANT:
            _np.pop(0)
    except IndexError:
        _np = []
    if len(_np) > 0:
        _filtered.append(" ".join(_np))
return _filtered
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tag(self, sentence, tokenize=True): """Tag a string `sentence`. :param str or list sentence: A string or a list of sentence strings. :param tokenize: (optional) If ``False`` string has to be tokenized before (space separated string). """
#: Do not process empty strings (Issue #3)
if sentence.strip() == "":
    return []
#: Do not process strings consisting of a single punctuation mark (Issue #4)
elif sentence.strip() in PUNCTUATION:
    if self.include_punc:
        _sym = sentence.strip()
        if _sym in tuple('.?!'):
            _tag = "."
        else:
            _tag = _sym
        return [(_sym, _tag)]
    else:
        return []
if tokenize:
    _tokenized = " ".join(self.tokenizer.tokenize(sentence))
    sentence = _tokenized
# Sentence is tokenized before it is passed on to pattern.de.tag
# (i.e. it is either submitted tokenized or if )
_tagged = pattern_tag(sentence, tokenize=False,
                      encoding=self.encoding, tagset=self.tagset)
if self.include_punc:
    return _tagged
else:
    _tagged = [
        (word, t) for word, t in _tagged
        if not PUNCTUATION_REGEX.match(unicode(t))]
    return _tagged
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. """
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
    return (os.path.exists(fn) and os.access(fn, mode)
            and not os.path.isdir(fn))

# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
    if _access_check(cmd, mode):
        return cmd
    return None

if path is None:
    path = os.environ.get("PATH", os.defpath)
if not path:
    return None
path = path.split(os.pathsep)

if sys.platform == "win32":
    # The current directory takes precedence on Windows.
    if not os.curdir in path:
        path.insert(0, os.curdir)
    # PATHEXT is necessary to check on Windows.
    pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
    # See if the given file matches any of the expected path extensions.
    # This will allow us to short circuit when given "python.exe".
    # If it does match, only test that one, otherwise we have to try
    # others.
    if any([cmd.lower().endswith(ext.lower()) for ext in pathext]):
        files = [cmd]
    else:
        files = [cmd + ext for ext in pathext]
else:
    # On other platforms you don't have things like PATHEXT to tell you
    # what file suffixes are executable, so just pass on cmd as-is.
    files = [cmd]

seen = set()
for dir in path:
    normdir = os.path.normcase(dir)
    if normdir not in seen:
        seen.add(normdir)
        for thefile in files:
            name = os.path.join(dir, thefile)
            if _access_check(name, mode):
                return name
return None
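For context, the function above backports lookup behaviour that the standard library has shipped since Python 3.3 as `shutil.which`; a minimal usage sketch (the resulting path depends on the local environment):

    import os
    import shutil

    # Returns the full path to the executable, or None if it is not on PATH.
    print(shutil.which("python"))
    print(shutil.which("python", mode=os.F_OK | os.X_OK))  # same default mode as above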
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def translate(self, from_lang=None, to="de"): """Translate the word to another language using Google's Translate API. .. versionadded:: 0.5.0 (``textblob``) """
if from_lang is None:
    from_lang = self.translator.detect(self.string)
return self.translator.translate(self.string,
                                 from_lang=from_lang, to_lang=to)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lemmatize(self): """Return the lemma of each word in this WordList. Currently using NLTKPunktTokenizer() for all lemmatization tasks. This might cause slightly different tokenization results compared to the TextBlob.words property. """
_lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer())
# WordList object --> Sentence.string
# add a period (improves parser accuracy)
_raw = " ".join(self) + "."
_lemmas = _lemmatizer.lemmatize(_raw)
return self.__class__([Word(l, t) for l, t in _lemmas])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tokenize(self, tokenizer=None): """Return a list of tokens, using ``tokenizer``. :param tokenizer: (optional) A tokenizer object. If None, defaults to this blob's default tokenizer. """
t = tokenizer if tokenizer is not None else self.tokenizer
return WordList(t.tokenize(self.raw))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def noun_phrases(self): """Returns a list of noun phrases for this blob."""
return WordList([phrase.strip() for phrase in self.np_extractor.extract(self.raw) if len(phrase.split()) > 1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def words(self): """Return a list of word tokens. This excludes punctuation characters. If you want to include punctuation characters, access the ``tokens`` property. :returns: A :class:`WordList <WordList>` of word tokens. """
return WordList( word_tokenize(self.raw, self.tokenizer, include_punc=False))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read(path, encoding="utf-8", comment=";;;"): """ Returns an iterator over the lines in the file at the given path, stripping comments and decoding each line to Unicode. """
if path:
    if isinstance(path, basestring) and os.path.exists(path):
        # From file path.
        if PY2:
            f = codecs.open(path, 'r', encoding='utf-8')
        else:
            f = open(path, 'r', encoding='utf-8')
    elif isinstance(path, basestring):
        # From string.
        f = path.splitlines()
    else:
        # From file or buffer.
        f = path
    for i, line in enumerate(f):
        line = line.strip(codecs.BOM_UTF8) if i == 0 and isinstance(line, binary_type) else line
        line = line.strip()
        line = decode_utf8(line, encoding)
        if not line or (comment and line.startswith(comment)):
            continue
        yield line
return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _suffix_rules(token, tag="NN"): """ Default morphological tagging rules for English, based on word suffixes. """
if isinstance(token, (list, tuple)):
    token, tag = token
if token.endswith("ing"):
    tag = "VBG"
if token.endswith("ly"):
    tag = "RB"
if token.endswith("s") and not token.endswith(("is", "ous", "ss")):
    tag = "NNS"
if token.endswith(("able", "al", "ful", "ible", "ient", "ish", "ive",
                   "less", "tic", "ous")) or "-" in token:
    tag = "JJ"
if token.endswith("ed"):
    tag = "VBN"
if token.endswith(("ate", "ify", "ise", "ize")):
    tag = "VBP"
return [token, tag]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _multilingual(function, *args, **kwargs): """ Returns the value from the function with the given name in the given language module. By default, language="en". """
return getattr(_module(kwargs.pop("language", "en")), function)(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_tokens(self, string, **kwargs): """ Returns a list of sentences from the given string. Punctuation marks are separated from each word by a space. """
# "The cat purs." => ["The cat purs ."]
return find_tokens(string,
                   punctuation=kwargs.get("punctuation", PUNCTUATION),
                   abbreviations=kwargs.get("abbreviations", ABBREVIATIONS),
                   replace=kwargs.get("replace", replacements),
                   linebreak=r"\n{2,}")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_chunks(self, tokens, **kwargs): """ Annotates the given list of tokens with chunk tags. Several tags can be added, for example chunk + preposition tags. """
# [["The", "DT"], ["cat", "NN"], ["purs", "VB"]] =>
# [["The", "DT", "B-NP"], ["cat", "NN", "I-NP"], ["purs", "VB", "B-VP"]]
return find_prepositions(
    find_chunks(tokens, language=kwargs.get("language", self.language)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split(self, sep=TOKENS): """ Returns a list of sentences, where each sentence is a list of tokens, where each token is a list of word + tags. """
if sep != TOKENS:
    return unicode.split(self, sep)
if len(self) == 0:
    return []
return [[[x.replace("&slash;", "/") for x in token.split("/")]
         for token in sentence.split(" ")]
        for sentence in unicode.split(self, "\n")]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lemma(self, verb, parse=True): """ Returns the infinitive form of the given verb, or None. """
if dict.__len__(self) == 0:
    self.load()
if verb.lower() in self._inverse:
    return self._inverse[verb.lower()]
if verb in self._inverse:
    return self._inverse[verb]
if parse is True:  # rule-based
    return self.find_lemma(verb)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lexeme(self, verb, parse=True): """ Returns a list of all possible inflections of the given verb. """
a = []
b = self.lemma(verb, parse=parse)
if b in self:
    a = [x for x in self[b] if x != ""]
elif parse is True:  # rule-based
    a = self.find_lexeme(b)
u = []; [u.append(x) for x in a if x not in u]
return u
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _edit1(self, w): """ Returns a set of words with edit distance 1 from the given word. """
# Of all spelling errors, 80% is covered by edit distance 1.
# Edit distance 1 = one character deleted, swapped, replaced or inserted.
split = [(w[:i], w[i:]) for i in range(len(w) + 1)]
delete, transpose, replace, insert = (
    [a + b[1:] for a, b in split if b],
    [a + b[1] + b[0] + b[2:] for a, b in split if len(b) > 1],
    [a + c + b[1:] for a, b in split for c in Spelling.ALPHA if b],
    [a + c + b[0:] for a, b in split for c in Spelling.ALPHA]
)
return set(delete + transpose + replace + insert)
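A minimal standalone sketch of the same edit-distance-1 candidate generation, written as a free function for illustration (a hypothetical helper, independent of the `Spelling` class and its `ALPHA` constant):

    ALPHABET = "abcdefghijklmnopqrstuvwxyz"

    def edit1(w):
        """Return all strings one deletion, transposition, replacement or insertion away."""
        splits = [(w[:i], w[i:]) for i in range(len(w) + 1)]
        deletes = [a + b[1:] for a, b in splits if b]
        transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b) > 1]
        replaces = [a + c + b[1:] for a, b in splits for c in ALPHABET if b]
        inserts = [a + c + b for a, b in splits for c in ALPHABET]
        return set(deletes + transposes + replaces + inserts)

    print(len(edit1("haus")))  # a four-letter word yields a few hundred candidates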
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _edit2(self, w): """ Returns a set of words with edit distance 2 from the given word """
# Of all spelling errors, 99% is covered by edit distance 2.
# Only keep candidates that are actually known words (20% speedup).
return set(e2 for e1 in self._edit1(w) for e2 in self._edit1(e1) if e2 in self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xml_encode(string): """ Returns the string with XML-safe special characters. """
string = string.replace("&", "&amp;")
string = string.replace("<", "&lt;")
string = string.replace(">", "&gt;")
string = string.replace("\"", "&quot;")
string = string.replace(SLASH, "/")
return string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xml_decode(string): """ Returns the string with special characters decoded. """
string = string.replace("&amp;", "&")
string = string.replace("&lt;", "<")
string = string.replace("&gt;", ">")
string = string.replace("&quot;", "\"")
string = string.replace("/", SLASH)
return string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nltk_tree(sentence): """ Returns an NLTK nltk.tree.Tree object from the given Sentence. The NLTK module should be on the search path somewhere. """
from nltk import tree

def do_pnp(pnp):
    # Returns the PNPChunk (and the contained Chunk objects) in NLTK bracket format.
    s = ' '.join([do_chunk(ch) for ch in pnp.chunks])
    return '(PNP %s)' % s

def do_chunk(ch):
    # Returns the Chunk in NLTK bracket format. Recurse attached PNP's.
    s = ' '.join(['(%s %s)' % (w.pos, w.string) for w in ch.words])
    s += ' '.join([do_pnp(pnp) for pnp in ch.attachments])
    return '(%s %s)' % (ch.type, s)

T = ['(S']
v = []  # PNP's already visited.
for ch in sentence.chunked():
    if not ch.pnp and isinstance(ch, Chink):
        T.append('(%s %s)' % (ch.words[0].pos, ch.words[0].string))
    elif not ch.pnp:
        T.append(do_chunk(ch))
    #elif ch.pnp not in v:
    elif ch.pnp.anchor is None and ch.pnp not in v:
        # The chunk is part of a PNP without an anchor.
        T.append(do_pnp(ch.pnp))
        v.append(ch.pnp)
T.append(')')
return tree.bracket_parse(' '.join(T))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def graphviz_dot(sentence, font="Arial", colors=BLUE): """ Returns a dot-formatted string that can be visualized as a graph in GraphViz. """
s = 'digraph sentence {\n'
s += '\tranksep=0.75;\n'
s += '\tnodesep=0.15;\n'
s += '\tnode [penwidth=1, fontname="%s", shape=record, margin=0.1, height=0.35];\n' % font
s += '\tedge [penwidth=1];\n'
s += '\t{ rank=same;\n'
# Create node groups for words, chunks and PNP chunks.
for w in sentence.words:
    s += '\t\tword%s [label="<f0>%s|<f1>%s"%s];\n' % (w.index, w.string, w.type, _colorize(w, colors))
for w in sentence.words[:-1]:
    # Invisible edges forces the words into the right order:
    s += '\t\tword%s -> word%s [color=none];\n' % (w.index, w.index+1)
s += '\t}\n'
s += '\t{ rank=same;\n'
for i, ch in enumerate(sentence.chunks):
    s += '\t\tchunk%s [label="<f0>%s"%s];\n' % (i+1, "-".join([x for x in (
        ch.type, ch.role, str(ch.relation or '')) if x]) or '-', _colorize(ch, colors))
for i, ch in enumerate(sentence.chunks[:-1]):
    # Invisible edges forces the chunks into the right order:
    s += '\t\tchunk%s -> chunk%s [color=none];\n' % (i+1, i+2)
s += '}\n'
s += '\t{ rank=same;\n'
for i, ch in enumerate(sentence.pnp):
    s += '\t\tpnp%s [label="<f0>PNP"%s];\n' % (i+1, _colorize(ch, colors))
s += '\t}\n'
s += '\t{ rank=same;\n S [shape=circle, margin=0.25, penwidth=2]; }\n'
# Connect words to chunks.
# Connect chunks to PNP or S.
for i, ch in enumerate(sentence.chunks):
    for w in ch:
        s += '\tword%s -> chunk%s;\n' % (w.index, i+1)
    if ch.pnp:
        s += '\tchunk%s -> pnp%s;\n' % (i+1, sentence.pnp.index(ch.pnp)+1)
    else:
        s += '\tchunk%s -> S;\n' % (i+1)
    if ch.type == 'VP':
        # Indicate related chunks with a dotted
        for r in ch.related:
            s += '\tchunk%s -> chunk%s [style=dotted, arrowhead=none];\n' % (
                i+1, sentence.chunks.index(r)+1)
# Connect PNP to anchor chunk or S.
for i, ch in enumerate(sentence.pnp):
    if ch.anchor:
        s += '\tpnp%s -> chunk%s;\n' % (i+1, sentence.chunks.index(ch.anchor)+1)
        s += '\tpnp%s -> S [color=none];\n' % (i+1)
    else:
        s += '\tpnp%s -> S;\n' % (i+1)
s += "}"
return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def next(self, type=None): """ Returns the next word in the sentence with the given type. """
i = self.index + 1
s = self.sentence
while i < len(s):
    if type in (s[i].type, None):
        return s[i]
    i += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def previous(self, type=None): """ Returns the next previous word in the sentence with the given type. """
i = self.index - 1
s = self.sentence
while i > 0:
    if type in (s[i].type, None):
        return s[i]
    i -= 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def related(self): """ Yields a list of all chunks in the sentence with the same relation id. """
return [ch for ch in self.sentence.chunks if ch != self and intersects(unzip(0, ch.relations), unzip(0, self.relations))]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def next(self, type=None): """ Returns the next chunk in the sentence with the given type. """
i = self.stop
s = self.sentence
while i < len(s):
    if s[i].chunk is not None and type in (s[i].chunk.type, None):
        return s[i].chunk
    i += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def previous(self, type=None): """ Returns the next previous chunk in the sentence with the given type. """
i = self.start - 1
s = self.sentence
while i > 0:
    if s[i].chunk is not None and type in (s[i].chunk.type, None):
        return s[i].chunk
    i -= 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _do_pnp(self, pnp, anchor=None): """ Attaches prepositional noun phrases. Identifies PNP's from either the PNP tag or the P-attachment tag. This does not determine the PP-anchor, it only groups words in a PNP chunk. """
if anchor or pnp and pnp.endswith("PNP"):
    if anchor is not None:
        m = find(lambda x: x.startswith("P"), anchor)
    else:
        m = None
    if self.pnp \
            and pnp \
            and pnp != OUTSIDE \
            and pnp.startswith("B-") is False \
            and self.words[-2].pnp is not None:
        self.pnp[-1].append(self.words[-1])
    elif m is not None and m == self._attachment:
        self.pnp[-1].append(self.words[-1])
    else:
        ch = PNPChunk(self, [self.words[-1]], type="PNP")
        self.pnp.append(ch)
    self._attachment = m
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _do_anchor(self, anchor): """ Collects preposition anchors and attachments in a dictionary. Once the dictionary has an entry for both the anchor and the attachment, they are linked. """
if anchor:
    for x in anchor.split("-"):
        A, P = None, None
        if x.startswith("A") and len(self.chunks) > 0:  # anchor
            A, P = x, x.replace("A", "P")
            self._anchors[A] = self.chunks[-1]
        if x.startswith("P") and len(self.pnp) > 0:  # attachment (PNP)
            A, P = x.replace("P", "A"), x
            self._anchors[P] = self.pnp[-1]
        if A in self._anchors and P in self._anchors and not self._anchors[P].anchor:
            pnp = self._anchors[P]
            pnp.anchor = self._anchors[A]
            pnp.anchor.attachments.append(pnp)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _do_conjunction(self, _and=("and", "e", "en", "et", "und", "y")): """ Attach conjunctions. CC-words like "and" and "or" between two chunks indicate a conjunction. """
w = self.words
if len(w) > 2 and w[-2].type == "CC" and w[-2].chunk is None:
    cc = w[-2].string.lower() in _and and AND or OR
    ch1 = w[-3].chunk
    ch2 = w[-1].chunk
    if ch1 is not None and \
       ch2 is not None:
        ch1.conjunctions.append(ch2, cc)
        ch2.conjunctions.append(ch1, cc)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, index, tag=LEMMA): """ Returns a tag for the word at the given index. The tag can be WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag. """
if tag == WORD:
    return self.words[index]
if tag == LEMMA:
    return self.words[index].lemma
if tag == POS:
    return self.words[index].type
if tag == CHUNK:
    return self.words[index].chunk
if tag == PNP:
    return self.words[index].pnp
if tag == REL:
    ch = self.words[index].chunk; return ch and ch.relation
if tag == ROLE:
    ch = self.words[index].chunk; return ch and ch.role
if tag == ANCHOR:
    ch = self.words[index].pnp; return ch and ch.anchor
if tag in self.words[index].custom_tags:
    return self.words[index].custom_tags[tag]
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def slice(self, start, stop): """ Returns a portion of the sentence from word start index to word stop index. The returned slice is a subclass of Sentence and a deep copy. """
s = Slice(token=self.token, language=self.language)
for i, word in enumerate(self.words[start:stop]):
    # The easiest way to copy (part of) a sentence
    # is by unpacking all of the token tags and passing them to Sentence.append().
    p0 = word.string  # WORD
    p1 = word.lemma   # LEMMA
    p2 = word.type    # POS
    p3 = word.chunk is not None and word.chunk.type or None  # CHUNK
    p4 = word.pnp is not None and "PNP" or None              # PNP
    p5 = word.chunk is not None and unzip(0, word.chunk.relations) or None  # REL
    p6 = word.chunk is not None and unzip(1, word.chunk.relations) or None  # ROLE
    p7 = word.chunk and word.chunk.anchor_id or None                        # ANCHOR
    p8 = word.chunk and word.chunk.start == start+i and BEGIN or None       # IOB
    p9 = word.custom_tags  # User-defined tags.
    # If the given range does not contain the chunk head, remove the chunk tags.
    if word.chunk is not None and (word.chunk.stop > stop):
        p3, p4, p5, p6, p7, p8 = None, None, None, None, None, None
    # If the word starts the preposition, add the IOB B-prefix (i.e., B-PNP).
    if word.pnp is not None and word.pnp.start == start+i:
        p4 = BEGIN+"-"+"PNP"
    # If the given range does not contain the entire PNP, remove the PNP tags.
    # The range must contain the entire PNP,
    # since it starts with the PP and ends with the chunk head (and is meaningless without these).
    if word.pnp is not None and (word.pnp.start < start or word.chunk.stop > stop):
        p4, p7 = None, None
    s.append(word=p0, lemma=p1, type=p2, chunk=p3, pnp=p4, relation=p5,
             role=p6, anchor=p7, iob=p8, custom=p9)
s.parent = self
s._start = start
return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def constituents(self, pnp=False): """ Returns an in-order list of mixed Chunk and Word objects. With pnp=True, also contains PNPChunk objects whenever possible. """
a = []
for word in self.words:
    if pnp and word.pnp is not None:
        if len(a) == 0 or a[-1] != word.pnp:
            a.append(word.pnp)
    elif word.chunk is not None:
        if len(a) == 0 or a[-1] != word.chunk:
            a.append(word.chunk)
    else:
        a.append(word)
return a
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_xml(cls, xml): """ Returns a new Text from the given XML string. """
s = parse_string(xml)
return Sentence(s.split("\n")[0], token=s.tags, language=s.language)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_credits(): """Extract credits from `AUTHORS.rst`"""
credits = read(os.path.join(_HERE, "AUTHORS.rst")).split("\n")
from_index = credits.index("Active Contributors")
credits = "\n".join(credits[from_index + 2:])
return credits
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_series(self, x0, params, varied_data, varied_idx, internal_x0=None, solver=None, propagate=True, **kwargs): """ Solve system for a set of parameters in which one is varied Parameters x0 : array_like Guess (subject to ``self.post_processors``) params : array_like Parameter values varied_data : array_like Numerical values of the varied parameter. varied_idx : int or str Index of the varied parameter (indexing starts at 0). If ``self.par_by_name`` this should be the name (str) of the varied parameter. internal_x0 : array_like (default: None) Guess (*not* subject to ``self.post_processors``). Overrides ``x0`` when given. solver : str or callback See :meth:`solve`. propagate : bool (default: True) Use last successful solution as ``x0`` in consecutive solves. \\*\\*kwargs : Keyword arguments passed along to :meth:`solve`. Returns ------- xout : array Of shape ``(varied_data.size, x0.size)``. info_dicts : list of dictionaries Dictionaries each containing keys such as 'success', 'nfev', 'njev' etc. """
if self.x_by_name and isinstance(x0, dict):
    x0 = [x0[k] for k in self.names]
if self.par_by_name:
    if isinstance(params, dict):
        params = [params[k] for k in self.param_names]
    if isinstance(varied_idx, str):
        varied_idx = self.param_names.index(varied_idx)
new_params = np.atleast_1d(np.array(params, dtype=np.float64))
xout = np.empty((len(varied_data), len(x0)))
self.internal_xout = np.empty_like(xout)
self.internal_params_out = np.empty((len(varied_data), len(new_params)))
info_dicts = []
new_x0 = np.array(x0, dtype=np.float64)  # copy
conds = kwargs.get('initial_conditions', None)  # see ConditionalNeqSys
for idx, value in enumerate(varied_data):
    try:
        new_params[varied_idx] = value
    except TypeError:
        new_params = value  # e.g. type(new_params) == int
    if conds is not None:
        kwargs['initial_conditions'] = conds
    x, info_dict = self.solve(new_x0, new_params, internal_x0, solver, **kwargs)
    if propagate:
        if info_dict['success']:
            try:
                # See ChainedNeqSys.solve
                new_x0 = info_dict['x_vecs'][0]
                internal_x0 = info_dict['internal_x_vecs'][0]
                conds = info_dict['intermediate_info'][0].get('conditions', None)
            except:
                new_x0 = x
                internal_x0 = None
                conds = info_dict.get('conditions', None)
    xout[idx, :] = x
    self.internal_xout[idx, :] = self.internal_x
    self.internal_params_out[idx, :] = self.internal_params
    info_dicts.append(info_dict)
return xout, info_dicts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_and_plot_series(self, x0, params, varied_data, varied_idx, solver=None, plot_kwargs=None, plot_residuals_kwargs=None, **kwargs): """ Solve and plot for a series of a varied parameter. Convenience method, see :meth:`solve_series`, :meth:`plot_series` & :meth:`plot_series_residuals_internal` for more information. """
sol, nfo = self.solve_series(
    x0, params, varied_data, varied_idx, solver=solver, **kwargs)
ax_sol = self.plot_series(sol, varied_data, varied_idx, info=nfo,
                          **(plot_kwargs or {}))
extra = dict(ax_sol=ax_sol, info=nfo)
if plot_residuals_kwargs:
    extra['ax_resid'] = self.plot_series_residuals_internal(
        varied_data, varied_idx, info=nfo,
        **(plot_residuals_kwargs or {})
    )
return sol, extra
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve(self, x0, params=(), internal_x0=None, solver=None, attached_solver=None, **kwargs): """ Solve with user specified ``solver`` choice. Parameters x0: 1D array of floats Guess (subject to ``self.post_processors``) params: 1D array_like of floats Parameters (subject to ``self.post_processors``) internal_x0: 1D array of floats When given it overrides (processed) ``x0``. ``internal_x0`` is not subject to ``self.post_processors``. solver: str or callable or None or iterable of such if str: uses _solve_``solver``(\*args, \*\*kwargs). if ``None``: chooses from PYNEQSYS_SOLVER environment variable. if iterable: chain solving. attached_solver: callable factory Invokes: solver = attached_solver(self). Returns ------- array: solution vector (post-processed by self.post_processors) dict: info dictionary containing 'success', 'nfev', 'njev' etc. Examples -------- [0.841163901914009663684741869855] [0.158836098085990336315258130144] """
if not isinstance(solver, (tuple, list)):
    solver = [solver]
if not isinstance(attached_solver, (tuple, list)):
    attached_solver = [attached_solver] + [None]*(len(solver) - 1)
_x0, self.internal_params = self.pre_process(x0, params)
for solv, attached_solv in zip(solver, attached_solver):
    if internal_x0 is not None:
        _x0 = internal_x0
    elif self.internal_x0_cb is not None:
        _x0 = self.internal_x0_cb(x0, params)
    nfo = self._get_solver_cb(solv, attached_solv)(_x0, **kwargs)
    _x0 = nfo['x'].copy()
self.internal_x = _x0
x0 = self.post_process(self.internal_x, self.internal_params)[0]
return x0, nfo
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _solve_scipy(self, intern_x0, tol=1e-8, method=None, **kwargs): """ Uses ``scipy.optimize.root`` See: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html Parameters intern_x0: array_like initial guess tol: float Tolerance method: str What method to use. Defaults to ``'lm'`` if ``self.nf > self.nx`` otherwise ``'hybr'``. """
from scipy.optimize import root
if method is None:
    if self.nf > self.nx:
        method = 'lm'
    elif self.nf == self.nx:
        method = 'hybr'
    else:
        raise ValueError('Underdetermined problem')
if 'band' in kwargs:
    raise ValueError("Set 'band' at initialization instead.")
if 'args' in kwargs:
    raise ValueError("Set 'args' as params in initialization instead.")
new_kwargs = kwargs.copy()
if self.band is not None:
    warnings.warn("Band argument ignored (see SciPy docs)")
    new_kwargs['band'] = self.band
new_kwargs['args'] = self.internal_params
return root(self.f_cb, intern_x0, jac=self.j_cb, method=method, tol=tol,
            **new_kwargs)
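For context, a minimal sketch of the underlying `scipy.optimize.root` call (standard SciPy API, independent of the wrapper above), here solving x**2 = 2:

    from scipy.optimize import root

    sol = root(lambda x: [x[0]**2 - 2], [1.0], method='hybr', tol=1e-8)
    print(sol.success, sol.x)  # True, approximately [1.41421356]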
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve(guess_a, guess_b, power, solver='scipy'): """ Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method. """
# The problem is 2 dimensional so we need 2 symbols
x = sp.symbols('x:2', real=True)
# There is a user specified parameter ``p`` in this problem:
p = sp.Symbol('p', real=True, negative=False, integer=True)
# Our system consists of 2-non-linear equations:
f = [x[0] + (x[0] - x[1])**p/2 - 1,
     (x[1] - x[0])**p/2 + x[1]]
# We construct our ``SymbolicSys`` instance by passing variables, equations and parameters:
neqsys = SymbolicSys(x, f, [p])  # (this will derive the Jacobian symbolically)
# Finally we solve the system using user-specified ``solver`` choice:
return neqsys.solve([guess_a, guess_b], [power], solver=solver)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(guess_a=1., guess_b=0., power=3, savetxt='None', verbose=False): """ Example demonstrating how to solve a system of non-linear equations defined as SymPy expressions. The example shows how a non-linear problem can be given a command-line interface which may be preferred by end-users who are not familiar with Python. """
x, sol = solve(guess_a, guess_b, power)  # see function definition above
assert sol.success
if savetxt != 'None':
    np.savetxt(savetxt, x)  # np.savetxt expects the filename as its first argument
else:
    if verbose:
        print(sol)
    else:
        print(x)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_series(xres, varied_data, indices=None, info=None, fail_vline=None, plot_kwargs_cb=None, ls=('-', '--', ':', '-.'), c=('k', 'r', 'g', 'b', 'c', 'm', 'y'), labels=None, ax=None, names=None, latex_names=None): """ Plot the values of the solution vector vs the varied parameter. Parameters xres : array Solution vector of shape ``(varied_data.size, x0.size)``. varied_data : array Numerical values of the varied parameter. indices : iterable of integers, optional Indices of variables to be plotted. default: all fail_vline : bool Show vertical lines where the solver failed. plot_kwargs_cb : callable Takes the index as single argument, returns a dict passed to the plotting function ls : iterable of str Linestyles. c : iterable of str Colors. labels : iterable of str ax : matplotlib Axes instance names : iterable of str latex_names : iterable of str """
import matplotlib.pyplot as plt
if indices is None:
    indices = range(xres.shape[1])
if fail_vline is None:
    if info is None:
        fail_vline = False
    else:
        fail_vline = True
if ax is None:
    ax = plt.subplot(1, 1, 1)
if labels is None:
    labels = names if latex_names is None else ['$%s$' % ln.strip('$') for ln in latex_names]
if plot_kwargs_cb is None:
    def plot_kwargs_cb(idx, labels=None):
        kwargs = {'ls': ls[idx % len(ls)], 'c': c[idx % len(c)]}
        if labels:
            kwargs['label'] = labels[idx]
        return kwargs
else:
    plot_kwargs_cb = plot_kwargs_cb or (lambda idx: {})
for idx in indices:
    ax.plot(varied_data, xres[:, idx], **plot_kwargs_cb(idx, labels=labels))
if fail_vline:
    for i, nfo in enumerate(info):
        if not nfo['success']:
            ax.axvline(varied_data[i], c='k', ls='--')
return ax
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mpl_outside_legend(ax, **kwargs): """ Places a legend box outside a matplotlib Axes instance. """
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='upper left', bbox_to_anchor=(1, 1), **kwargs)
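A hypothetical usage sketch of the helper above, assuming matplotlib is available (the plotted data is made up for illustration):

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4], label='series A')
    ax.plot([0, 1, 2], [0, 2, 3], label='series B')
    mpl_outside_legend(ax, fontsize='small')  # legend lands to the right of the shrunk axes
    plt.show()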
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def linear_rref(A, b, Matrix=None, S=None): """ Transform a linear system to reduced row-echelon form Transforms both the matrix and right-hand side of a linear system of equations to reduced row echelon form Parameters A : Matrix-like Iterable of rows. b : iterable Returns ------- A', b' - transformed versions """
if Matrix is None:
    from sympy import Matrix
if S is None:
    from sympy import S
mat_rows = [_map2l(S, list(row) + [v]) for row, v in zip(A, b)]
aug = Matrix(mat_rows)
raug, pivot = aug.rref()
nindep = len(pivot)
return raug[:nindep, :-1], raug[:nindep, -1]
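To illustrate what the reduced row-echelon transformation does, a small example using SymPy's `Matrix.rref` directly (the system values are chosen arbitrarily):

    from sympy import Matrix

    # Augmented matrix (A | b) for: x + y = 3, 2x + 2y + z = 7
    aug = Matrix([[1, 1, 0, 3],
                  [2, 2, 1, 7]])
    raug, pivots = aug.rref()
    print(raug)    # Matrix([[1, 1, 0, 3], [0, 0, 1, 1]])
    print(pivots)  # (0, 2)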
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def linear_exprs(A, x, b=None, rref=False, Matrix=None): """ Returns Ax - b Parameters A : matrix_like of numbers Of shape (len(b), len(x)). x : iterable of symbols b : array_like of numbers (default: None) When ``None``, assume zeros of length ``len(x)``. Matrix : class When ``rref == True``: A matrix class which supports slicing, and methods ``__mul__`` and ``rref``. Defaults to ``sympy.Matrix``. rref : bool Calculate the reduced row echelon form of (A | -b). Returns ------- A list of the elements in the resulting column vector. """
if b is None:
    b = [0]*len(x)
if rref:
    rA, rb = linear_rref(A, b, Matrix)
    if Matrix is None:
        from sympy import Matrix
    return [lhs - rhs for lhs, rhs in zip(rA * Matrix(len(x), 1, x), rb)]
else:
    return [sum([x0*x1 for x0, x1 in zip(row, x)]) - v
            for row, v in zip(A, b)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_callback(cls, cb, nx=None, nparams=None, **kwargs): """ Generate a SymbolicSys instance from a callback. Parameters cb : callable Should have the signature ``cb(x, p, backend) -> list of exprs``. nx : int Number of unknowns, when not given it is deduced from ``kwargs['names']``. nparams : int Number of parameters, when not given it is deduced from ``kwargs['param_names']``. \\*\\*kwargs : Keyword arguments passed on to :class:`SymbolicSys`. See also :class:`pyneqsys.NeqSys`. Examples -------- """
if kwargs.get('x_by_name', False):
    if 'names' not in kwargs:
        raise ValueError("Need ``names`` in kwargs.")
    if nx is None:
        nx = len(kwargs['names'])
    elif nx != len(kwargs['names']):
        raise ValueError("Inconsistency between nx and length of ``names``.")
if kwargs.get('par_by_name', False):
    if 'param_names' not in kwargs:
        raise ValueError("Need ``param_names`` in kwargs.")
    if nparams is None:
        nparams = len(kwargs['param_names'])
    elif nparams != len(kwargs['param_names']):
        raise ValueError("Inconsistency between ``nparam`` and length of ``param_names``.")
if nparams is None:
    nparams = 0
if nx is None:
    raise ValueError("Need ``nx`` of ``names`` together with ``x_by_name==True``.")
be = Backend(kwargs.pop('backend', None))
x = be.real_symarray('x', nx)
p = be.real_symarray('p', nparams)
_x = dict(zip(kwargs['names'], x)) if kwargs.get('x_by_name', False) else x
_p = dict(zip(kwargs['param_names'], p)) if kwargs.get('par_by_name', False) else p
try:
    exprs = cb(_x, _p, be)
except TypeError:
    exprs = _ensure_3args(cb)(_x, _p, be)
return cls(x, exprs, p, backend=be, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_jac(self): """ Return the jacobian of the expressions """
if self._jac is True:
    if self.band is None:
        f = self.be.Matrix(self.nf, 1, self.exprs)
        _x = self.be.Matrix(self.nx, 1, self.x)
        return f.jacobian(_x)
    else:
        # Banded
        return self.be.Matrix(banded_jacobian(
            self.exprs, self.x, *self.band))
elif self._jac is False:
    return False
else:
    return self._jac
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_callback(cls, cb, transf_cbs, nx, nparams=0, pre_adj=None, **kwargs): """ Generate a TransformedSys instance from a callback Parameters cb : callable Should have the signature ``cb(x, p, backend) -> list of exprs``. The callback ``cb`` should return *untransformed* expressions. transf_cbs : pair or iterable of pairs of callables Callables for forward- and backward-transformations. Each callable should take a single parameter (expression) and return a single expression. nx : int Number of unknowns. nparams : int Number of parameters. pre_adj : callable, optional To tweak expression prior to transformation. Takes a single argument (expression) and returns a single rewritten expression. \\*\\*kwargs : Keyword arguments passed on to :class:`TransformedSys`. See also :class:`SymbolicSys` and :class:`pyneqsys.NeqSys`. Examples -------- """
be = Backend(kwargs.pop('backend', None))
x = be.real_symarray('x', nx)
p = be.real_symarray('p', nparams)
try:
    transf = [(transf_cbs[idx][0](xi), transf_cbs[idx][1](xi))
              for idx, xi in enumerate(x)]
except TypeError:
    transf = zip(_map2(transf_cbs[0], x), _map2(transf_cbs[1], x))
try:
    exprs = cb(x, p, be)
except TypeError:
    exprs = _ensure_3args(cb)(x, p, be)
return cls(x, _map2l(pre_adj, exprs), transf, p, backend=be, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write(self, handle):
    '''Write binary header data to a file handle.

    This method writes exactly 512 bytes to the beginning of the given
    file handle.

    Parameters
    ----------
    handle : file handle
        The given handle will be reset to 0 using `seek` and then 512
        bytes will be written to describe the parameters in this Header.
        The handle must be writeable.
    '''
    handle.seek(0)
    handle.write(struct.pack(self.BINARY_FORMAT,
                             self.parameter_block,
                             0x50,
                             self.point_count,
                             self.analog_count,
                             self.first_frame,
                             self.last_frame,
                             self.max_gap,
                             self.scale_factor,
                             self.data_block,
                             self.analog_per_frame,
                             self.frame_rate,
                             b'',
                             self.long_event_labels and 0x3039 or 0x0,
                             self.label_block,
                             b''))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read(self, handle):
    '''Read and parse binary header data from a file handle.

    This method reads exactly 512 bytes from the beginning of the given
    file handle.

    Parameters
    ----------
    handle : file handle
        The given handle will be reset to 0 using `seek` and then 512
        bytes will be read to initialize the attributes in this Header.
        The handle must be readable.

    Raises
    ------
    AssertionError
        If the magic byte from the header is not 80 (the C3D magic value).
    '''
    handle.seek(0)
    (self.parameter_block,
     magic,
     self.point_count,
     self.analog_count,
     self.first_frame,
     self.last_frame,
     self.max_gap,
     self.scale_factor,
     self.data_block,
     self.analog_per_frame,
     self.frame_rate,
     _,
     self.long_event_labels,
     self.label_block,
     _) = struct.unpack(self.BINARY_FORMAT, handle.read(512))
    assert magic == 80, 'C3D magic {} != 80 !'.format(magic)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def binary_size(self):
    '''Return the number of bytes needed to store this parameter.'''
    return (
        1 +  # group_id
        2 +  # next offset marker
        1 + len(self.name.encode('utf-8')) +  # size of name and name bytes
        1 +  # data size
        1 + len(self.dimensions) +  # size of dimensions and dimension bytes
        self.total_bytes +  # data
        1 + len(self.desc.encode('utf-8'))  # size of desc and desc bytes
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write(self, group_id, handle):
    '''Write binary data for this parameter to a file handle.

    Parameters
    ----------
    group_id : int
        The numerical ID of the group that holds this parameter.
    handle : file handle
        An open, writable, binary file handle.
    '''
    name = self.name.encode('utf-8')
    handle.write(struct.pack('bb', len(name), group_id))
    handle.write(name)
    handle.write(struct.pack('<h', self.binary_size() - 2 - len(name)))
    handle.write(struct.pack('b', self.bytes_per_element))
    handle.write(struct.pack('B', len(self.dimensions)))
    handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
    if self.bytes:
        handle.write(self.bytes)
    desc = self.desc.encode('utf-8')
    handle.write(struct.pack('B', len(desc)))
    handle.write(desc)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read(self, handle):
    '''Read binary data for this parameter from a file handle.

    This reads exactly enough data from the current position in the file
    to initialize the parameter.
    '''
    self.bytes_per_element, = struct.unpack('b', handle.read(1))
    dims, = struct.unpack('B', handle.read(1))
    self.dimensions = [struct.unpack('B', handle.read(1))[0] for _ in range(dims)]
    self.bytes = b''
    if self.total_bytes:
        self.bytes = handle.read(self.total_bytes)
    size, = struct.unpack('B', handle.read(1))
    self.desc = size and handle.read(size).decode('utf-8') or ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _as_array(self, fmt):
    '''Unpack the raw bytes of this param using the given data format.'''
    assert self.dimensions, \
        '{}: cannot get value as {} array!'.format(self.name, fmt)
    elems = array.array(fmt)
    elems.fromstring(self.bytes)
    return np.array(elems).reshape(self.dimensions)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def bytes_array(self):
    '''Get the param as an array of raw byte strings.'''
    assert len(self.dimensions) == 2, \
        '{}: cannot get value as bytes array!'.format(self.name)
    l, n = self.dimensions
    return [self.bytes[i*l:(i+1)*l] for i in range(n)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def string_array(self):
    '''Get the param as an array of unicode strings.'''
    assert len(self.dimensions) == 2, \
        '{}: cannot get value as string array!'.format(self.name)
    l, n = self.dimensions
    return [self.bytes[i*l:(i+1)*l].decode('utf-8') for i in range(n)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def add_param(self, name, **kwargs):
    '''Add a parameter to this group.

    Parameters
    ----------
    name : str
        Name of the parameter to add to this group. The name will
        automatically be case-normalized.

    Additional keyword arguments will be passed to the `Param` constructor.
    '''
    self.params[name.upper()] = Param(name.upper(), **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def binary_size(self):
    '''Return the number of bytes to store this group and its parameters.'''
    return (
        1 +  # group_id
        1 + len(self.name.encode('utf-8')) +  # size of name and name bytes
        2 +  # next offset marker
        1 + len(self.desc.encode('utf-8')) +  # size of desc and desc bytes
        sum(p.binary_size() for p in self.params.values()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write(self, group_id, handle):
    '''Write this parameter group, with parameters, to a file handle.

    Parameters
    ----------
    group_id : int
        The numerical ID of the group.
    handle : file handle
        An open, writable, binary file handle.
    '''
    name = self.name.encode('utf-8')
    desc = self.desc.encode('utf-8')
    handle.write(struct.pack('bb', len(name), -group_id))
    handle.write(name)
    handle.write(struct.pack('<h', 3 + len(desc)))
    handle.write(struct.pack('B', len(desc)))
    handle.write(desc)
    for param in self.params.values():
        param.write(group_id, handle)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_metadata(self):
    '''Ensure that the metadata in our file is self-consistent.'''
    assert self.header.point_count == self.point_used, (
        'inconsistent point count! {} header != {} POINT:USED'.format(
            self.header.point_count,
            self.point_used,
        ))

    assert self.header.scale_factor == self.point_scale, (
        'inconsistent scale factor! {} header != {} POINT:SCALE'.format(
            self.header.scale_factor,
            self.point_scale,
        ))

    assert self.header.frame_rate == self.point_rate, (
        'inconsistent frame rate! {} header != {} POINT:RATE'.format(
            self.header.frame_rate,
            self.point_rate,
        ))

    ratio = self.analog_rate / self.point_rate
    # NOTE: the leading `True or` short-circuits, so this analog-rate check is
    # effectively disabled; only the message is kept for reference.
    assert True or self.header.analog_per_frame == ratio, (
        'inconsistent analog rate! {} header != {} analog-fps / {} point-fps'.format(
            self.header.analog_per_frame,
            self.analog_rate,
            self.point_rate,
        ))

    count = self.analog_used * self.header.analog_per_frame
    # NOTE: disabled in the same way as the analog-rate check above.
    assert True or self.header.analog_count == count, (
        'inconsistent analog count! {} header != {} analog used * {} per-frame'.format(
            self.header.analog_count,
            self.analog_used,
            self.header.analog_per_frame,
        ))

    start = self.get_uint16('POINT:DATA_START')
    assert self.header.data_block == start, (
        'inconsistent data block! {} header != {} POINT:DATA_START'.format(
            self.header.data_block, start))

    for name in ('POINT:LABELS', 'POINT:DESCRIPTIONS',
                 'ANALOG:LABELS', 'ANALOG:DESCRIPTIONS'):
        if self.get(name) is None:
            warnings.warn('missing parameter {}'.format(name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def add_group(self, group_id, name, desc):
    '''Add a new parameter group.

    Parameters
    ----------
    group_id : int
        The numeric ID for a group to check or create.
    name : str, optional
        If a group is created, assign this name to the group.
    desc : str, optional
        If a group is created, assign this description to the group.

    Returns
    -------
    group : :class:`Group`
        A group with the given ID, name, and description.

    Raises
    ------
    KeyError
        If a group with a duplicate ID or name already exists.
    '''
    if group_id in self.groups:
        raise KeyError(group_id)
    name = name.upper()
    if name in self.groups:
        raise KeyError(name)
    group = self.groups[name] = self.groups[group_id] = Group(name, desc)
    return group
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get(self, group, default=None):
    '''Get a group or parameter.

    Parameters
    ----------
    group : str
        If this string contains a period (.), then the part before the
        period will be used to retrieve a group, and the part after the
        period will be used to retrieve a parameter from that group. If this
        string does not contain a period, then just a group will be
        returned.
    default : any
        Return this value if the named group and parameter are not found.

    Returns
    -------
    value : :class:`Group` or :class:`Param`
        Either a group or parameter with the specified name(s). If neither
        is found, returns the default value.
    '''
    if isinstance(group, int):
        return self.groups.get(group, default)
    group = group.upper()
    param = None
    if '.' in group:
        group, param = group.split('.', 1)
    if ':' in group:
        group, param = group.split(':', 1)
    if group not in self.groups:
        return default
    group = self.groups[group]
    if param is not None:
        return group.get(param, default)
    return group
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _pad_block(self, handle):
    '''Pad the file with 0s to the end of the next block boundary.'''
    extra = handle.tell() % 512
    if extra:
        handle.write(b'\x00' * (512 - extra))
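A stand-alone sketch of the same padding arithmetic on an in-memory buffer (the 700-byte payload is hypothetical), showing how the write position ends up on the next 512-byte boundary.

import io

handle = io.BytesIO(b'\x01' * 700)   # pretend we have already written 700 bytes
handle.seek(0, io.SEEK_END)
extra = handle.tell() % 512
if extra:
    handle.write(b'\x00' * (512 - extra))
assert handle.tell() == 1024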
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _write_metadata(self, handle):
    '''Write metadata to a file handle.

    Parameters
    ----------
    handle : file
        Write metadata and C3D motion frames to the given file handle. The
        writer does not close the handle.
    '''
    self.check_metadata()

    # header
    self.header.write(handle)
    self._pad_block(handle)
    assert handle.tell() == 512

    # groups
    handle.write(struct.pack(
        'BBBB', 0, 0, self.parameter_blocks(), PROCESSOR_INTEL))
    id_groups = sorted(
        (i, g) for i, g in self.groups.items() if isinstance(i, int))
    for group_id, group in id_groups:
        group.write(group_id, handle)

    # padding
    self._pad_block(handle)
    while handle.tell() != 512 * (self.header.data_block - 1):
        handle.write(b'\x00' * 512)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _write_frames(self, handle):
    '''Write our frame data to the given file handle.

    Parameters
    ----------
    handle : file
        Write metadata and C3D motion frames to the given file handle. The
        writer does not close the handle.
    '''
    assert handle.tell() == 512 * (self.header.data_block - 1)
    scale = abs(self.point_scale)
    is_float = self.point_scale < 0
    point_dtype = [np.int16, np.float32][is_float]
    point_scale = [scale, 1][is_float]
    point_format = 'if'[is_float]
    raw = np.empty((self.point_used, 4), point_dtype)
    for points, analog in self._frames:
        valid = points[:, 3] > -1
        raw[~valid, 3] = -1
        raw[valid, :3] = points[valid, :3] / point_scale
        # Pack the camera byte into the high 8 bits and the scaled residual
        # into the low 8 bits of the fourth point word.
        raw[valid, 3] = (
            ((points[valid, 4]).astype(np.uint8) << 8) |
            (points[valid, 3] / scale).astype(np.uint16)
        )
        point = array.array(point_format)
        point.extend(raw.flatten())
        point.tofile(handle)
        analog_out = array.array(point_format)
        analog_out.extend(analog)
        analog_out.tofile(handle)
    self._pad_block(handle)
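A stand-alone sketch of the bit packing used for the fourth point word above: the camera contribution byte goes in the high 8 bits and the scaled residual in the low 8 bits. The camera mask, residual, and scale values here are hypothetical; the scale is chosen to divide exactly.

camera_mask = 0x14   # hypothetical camera-contribution bits
residual = 1.5       # residual in 3-D units
scale = 0.25         # POINT:SCALE magnitude
word = (int(camera_mask) << 8) | int(residual / scale)
assert word >> 8 == 0x14 and word & 0xFF == 6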
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _workaround_no_vector_images(project): """Replace vector images with fake ones."""
RED = (255, 0, 0)
PLACEHOLDER = kurt.Image.new((32, 32), RED)
for scriptable in [project.stage] + project.sprites:
    for costume in scriptable.costumes:
        if costume.image.format == "SVG":
            yield "%s - %s" % (scriptable.name, costume.name)
            costume.image = PLACEHOLDER
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_plugin(cls, name=None, **kwargs): """Returns the first format plugin whose attributes match kwargs. For example:: get_plugin(extension="scratch14") Will return the :class:`KurtPlugin` whose :attr:`extension <KurtPlugin.extension>` attribute is ``"scratch14"``. The :attr:`name <KurtPlugin.name>` is used as the ``format`` parameter to :attr:`Project.load` and :attr:`Project.save`. :raises: :class:`ValueError` if the format doesn't exist. :returns: :class:`KurtPlugin` """
if isinstance(name, KurtPlugin):
    return name

if 'extension' in kwargs:
    kwargs['extension'] = kwargs['extension'].lower()
if name:
    kwargs["name"] = name
if not kwargs:
    raise ValueError("No arguments")

for plugin in cls.plugins.values():
    for attr in kwargs:
        if getattr(plugin, attr) != kwargs[attr]:
            break
    else:
        return plugin

raise ValueError("Unknown format %r" % kwargs)
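A minimal stand-alone version of the same attribute-matching lookup, using a hypothetical plugin registry (a namedtuple stand-in) rather than kurt's real plugin list.

from collections import namedtuple

Plugin = namedtuple('Plugin', 'name extension')
plugins = {
    'scratch14': Plugin('scratch14', '.sb'),
    'scratch20': Plugin('scratch20', '.sb2'),
}

def find_plugin(**kwargs):
    # return the first plugin whose attributes all match the keyword filters
    for plugin in plugins.values():
        if all(getattr(plugin, k) == v for (k, v) in kwargs.items()):
            return plugin
    raise ValueError("Unknown format %r" % kwargs)

print(find_plugin(extension='.sb2').name)  # scratch20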
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_up(scripts): """Clean up the given list of scripts in-place so none of the scripts overlap. """
scripts_with_pos = [s for s in scripts if s.pos]
scripts_with_pos.sort(key=lambda s: (s.pos[1], s.pos[0]))
scripts = scripts_with_pos + [s for s in scripts if not s.pos]

y = 20
for script in scripts:
    script.pos = (20, y)
    if isinstance(script, kurt.Script):
        y += stack_height(script.blocks)
    elif isinstance(script, kurt.Comment):
        y += 14
    y += 15
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def obj_classes_from_module(module): """Return a list of classes in a module that have a 'classID' attribute."""
for name in dir(module):
    if not name.startswith('_'):
        cls = getattr(module, name)
        if getattr(cls, 'classID', None):
            yield (name, cls)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode_network(objects): """Return root object from ref-containing obj table entries"""
def resolve_ref(obj, objects=objects):
    if isinstance(obj, Ref):
        # first entry is 1
        return objects[obj.index - 1]
    else:
        return obj

# Reading the ObjTable backwards somehow makes more sense.
for i in xrange(len(objects)-1, -1, -1):
    obj = objects[i]
    if isinstance(obj, Container):
        obj.update((k, resolve_ref(v)) for (k, v) in obj.items())
    elif isinstance(obj, Dictionary):
        obj.value = dict(
            (resolve_ref(field), resolve_ref(value))
            for (field, value) in obj.value.items()
        )
    elif isinstance(obj, dict):
        obj = dict(
            (resolve_ref(field), resolve_ref(value))
            for (field, value) in obj.items()
        )
    elif isinstance(obj, list):
        obj = [resolve_ref(field) for field in obj]
    elif isinstance(obj, Form):
        for field in obj.value:
            value = getattr(obj, field)
            value = resolve_ref(value)
            setattr(obj, field, value)
    elif isinstance(obj, ContainsRefs):
        obj.value = [resolve_ref(field) for field in obj.value]
    objects[i] = obj

for obj in objects:
    if isinstance(obj, Form):
        obj.built()

root = objects[0]
return root
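A toy illustration of resolving 1-based Ref indices against a flat object table; the Ref class below is a stand-in for illustration, not kurt's own.

class Ref(object):
    def __init__(self, index):
        self.index = index

# entry 0 is the root; Ref(3) points at entry 3, i.e. objects[2]
objects = [['a', Ref(3), Ref(2)], 'hello', 'world']
root = [objects[o.index - 1] if isinstance(o, Ref) else o
        for o in objects[0]]
print(root)  # ['a', 'world', 'hello']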
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode_obj_table(table_entries, plugin): """Return root of obj table. Converts user-class objects"""
entries = []
for entry in table_entries:
    if isinstance(entry, Container):
        assert not hasattr(entry, '__recursion_lock__')
        user_obj_def = plugin.user_objects[entry.classID]
        assert entry.version == user_obj_def.version
        entry = Container(
            class_name=entry.classID,
            **dict(zip(user_obj_def.defaults.keys(), entry.values)))
    entries.append(entry)
return decode_network(entries)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encode_obj_table(root, plugin): """Return list of obj table entries. Converts user-class objects"""
entries = encode_network(root)
table_entries = []
for entry in entries:
    if isinstance(entry, Container):
        assert not hasattr(entry, '__recursion_lock__')
        user_obj_def = plugin.user_objects[entry.class_name]
        attrs = OrderedDict()
        for (key, default) in user_obj_def.defaults.items():
            attrs[key] = entry.get(key, default)
        entry = Container(
            classID=entry.class_name,
            length=len(attrs),
            version=user_obj_def.version,
            values=attrs.values())
    table_entries.append(entry)
return table_entries
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _encode(self, obj, context): """Encodes a class to a lower-level object using the class' own to_construct function. If no such function is defined, returns the object unchanged. """
func = getattr(obj, 'to_construct', None)
if callable(func):
    return func(context)
else:
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _decode(self, obj, context): """Initialises a new Python class from a construct using the mapping passed to the adapter. """
cls = self._get_class(obj.classID)
return cls.from_construct(obj, context)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_file(self, name, contents): """Write file contents string into archive."""
# TODO: find a way to make ZipFile accept a file object.
zi = zipfile.ZipInfo(name)
zi.date_time = time.localtime(time.time())[:6]
zi.compress_type = zipfile.ZIP_DEFLATED
# Unix permission bits (rwx for everyone) live in the high 16 bits of
# external_attr; the octal/long literals are Python 2 syntax.
zi.external_attr = 0777 << 16L
self.zip_file.writestr(zi, contents)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_string(cls, width, height, rgba_string): """Returns a Form with 32-bit RGBA pixels. Accepts a string containing raw RGBA color values. """
# Convert RGBA string to ARGB
raw = ""
for i in range(0, len(rgba_string), 4):
    raw += rgba_string[i+3]    # alpha
    raw += rgba_string[i:i+3]  # rgb

assert len(rgba_string) == width * height * 4

return Form(
    width=width,
    height=height,
    depth=32,
    bits=Bitmap(raw),
)
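A stand-alone sketch of the RGBA to ARGB reshuffle for a hypothetical two-pixel buffer, using byte slicing so it works on both str (Python 2) and bytes (Python 3).

rgba = b'\x10\x20\x30\xff' + b'\x40\x50\x60\x80'   # two pixels: R G B A each
argb = b''
for i in range(0, len(rgba), 4):
    argb += rgba[i+3:i+4] + rgba[i:i+3]            # alpha first, then RGB
assert argb == b'\xff\x10\x20\x30' + b'\x80\x40\x50\x60'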
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_file_path(self, path): """Update the file_path Entry widget"""
self.file_path.delete(0, END)
self.file_path.insert(0, path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(cls, path, format=None): """Load project from file. Use ``format`` to specify the file format to use. Path can be a file-like object, in which case format is required. Otherwise, can guess the appropriate format from the extension. If you pass a file-like object, you're responsible for closing the file. :param path: Path or file pointer. :param format: :attr:`KurtFileFormat.name` eg. ``"scratch14"``. Overrides the extension. :raises: :class:`UnknownFormat` if the extension is unrecognised. :raises: :py:class:`ValueError` if the format doesn't exist. """
path_was_string = isinstance(path, basestring)

if path_was_string:
    (folder, filename) = os.path.split(path)
    (name, extension) = os.path.splitext(filename)

    if format is None:
        plugin = kurt.plugin.Kurt.get_plugin(extension=extension)
        if not plugin:
            raise UnknownFormat(extension)

    fp = open(path, "rb")
else:
    fp = path
    assert format, "Format is required"

plugin = kurt.plugin.Kurt.get_plugin(format)
if not plugin:
    raise ValueError("Unknown format %r" % format)

project = plugin.load(fp)
if path_was_string:
    fp.close()
project.convert(plugin)
if isinstance(path, basestring):
    project.path = path
    if not project.name:
        project.name = name
return project
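A hedged usage sketch of the loader described above. It assumes kurt is installed and that the example paths exist; both paths are hypothetical.

import kurt

# format guessed from the .sb extension
project = kurt.Project.load("examples/game.sb")

# or, with an explicit format and a file object you close yourself
with open("examples/game.sb", "rb") as fp:
    project = kurt.Project.load(fp, format="scratch14")

print(project.name)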
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy(self): """Return a new Project instance, deep-copying all the attributes."""
p = Project()
p.name = self.name
p.path = self.path
p._plugin = self._plugin

p.stage = self.stage.copy()
p.stage.project = p
for sprite in self.sprites:
    s = sprite.copy()
    s.project = p
    p.sprites.append(s)

for actor in self.actors:
    if isinstance(actor, Sprite):
        p.actors.append(p.get_sprite(actor.name))
    else:
        a = actor.copy()
        if isinstance(a, Watcher):
            if isinstance(a.target, Project):
                a.target = p
            elif isinstance(a.target, Stage):
                a.target = p.stage
            else:
                a.target = p.get_sprite(a.target.name)
        p.actors.append(a)

p.variables = dict((n, v.copy()) for (n, v) in self.variables.items())
p.lists = dict((n, l.copy()) for (n, l) in self.lists.items())
p.thumbnail = self.thumbnail
p.tempo = self.tempo
p.notes = self.notes
p.author = self.author
return p
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert(self, format): """Convert the project in-place to a different file format. Returns a list of :class:`UnsupportedFeature` objects, which may give warnings about the conversion. :param format: :attr:`KurtFileFormat.name` eg. ``"scratch14"``. :raises: :class:`ValueError` if the format doesn't exist. """
self._plugin = kurt.plugin.Kurt.get_plugin(format)
return list(self._normalize())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, path=None, debug=False): """Save project to file. :param path: Path or file pointer. If you pass a file pointer, you're responsible for closing it. If path is not given, the :attr:`path` attribute is used, usually the original path given to :attr:`load()`. If `path` has the extension of an existing plugin, the project will be converted using :attr:`convert`. Otherwise, the extension will be replaced with the extension of the current plugin. (Note that log output for the conversion will be printed to stdout. If you want to deal with the output, call :attr:`convert` directly.) If the path ends in a folder instead of a file, the filename is based on the project's :attr:`name`. :param debug: If true, return debugging information from the format plugin instead of the path. :raises: :py:class:`ValueError` if there's no path or name. :returns: path to the saved file. """
p = self.copy()
plugin = p._plugin

# require path
p.path = path or self.path
if not p.path:
    raise ValueError("path is required")

if isinstance(p.path, basestring):
    # split path
    (folder, filename) = os.path.split(p.path)
    (name, extension) = os.path.splitext(filename)

    # get plugin from extension
    if path:  # only if not using self.path
        try:
            plugin = kurt.plugin.Kurt.get_plugin(extension=extension)
        except ValueError:
            pass

    # build output path
    if not name:
        name = _clean_filename(self.name)
        if not name:
            raise ValueError("name is required")
    filename = name + plugin.extension
    p.path = os.path.join(folder, filename)

    # open
    fp = open(p.path, "wb")
else:
    fp = p.path
    path = None

if not plugin:
    raise ValueError("must convert project to a format before saving")

for m in p.convert(plugin):
    print m

result = p._save(fp)
if path:
    fp.close()

return result if debug else p.path
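A hedged usage sketch combining convert() and save(). The paths are hypothetical, and it assumes the Scratch 2.0 plugin is registered under the name "scratch20".

import kurt

project = kurt.Project.load("examples/game.sb")
warnings = project.convert("scratch20")   # returns UnsupportedFeature notes
for w in warnings:
    print(w)
project.save("examples/game.sb2")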
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stringify(self): """Returns the color value in hexcode format. eg. ``'#ff1056'`` """
hexcode = "#"
for x in self.value:
    part = hex(x)[2:]
    if len(part) < 2:
        part = "0" + part
    hexcode += part
return hexcode
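The same hexcode formatting can be checked on a plain (r, g, b) tuple with only the standard library; the tuple below matches the docstring's example colour.

value = (255, 16, 86)
hexcode = "#" + "".join("%02x" % x for x in value)
assert hexcode == "#ff1056"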
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def options(self, scriptable=None): """Return a list of valid options to a menu insert, given a Scriptable for context. Mostly complete, excepting 'attribute'. """
options = list(Insert.KIND_OPTIONS.get(self.kind, []))
if scriptable:
    if self.kind == 'var':
        options += scriptable.variables.keys()
        options += scriptable.project.variables.keys()
    elif self.kind == 'list':
        options += scriptable.lists.keys()
        options += scriptable.project.lists.keys()
    elif self.kind == 'costume':
        options += [c.name for c in scriptable.costumes]
    elif self.kind == 'backdrop':
        options += [c.name for c in scriptable.project.stage.costumes]
    elif self.kind == 'sound':
        options += [c.name for c in scriptable.sounds]
        options += [c.name for c in scriptable.project.stage.sounds]
    elif self.kind in ('spriteOnly', 'spriteOrMouse', 'spriteOrStage',
                       'touching'):
        options += [s.name for s in scriptable.project.sprites]
    elif self.kind == 'attribute':
        pass  # TODO
    elif self.kind == 'broadcast':
        options += list(set(scriptable.project.get_broadcasts()))
return options
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def text(self): """The text displayed on the block. String containing ``"%s"`` in place of inserts. eg. ``'say %s for %s secs'`` """
parts = [("%s" if isinstance(p, Insert) else p) for p in self.parts]
parts = [("%%" if p == "%" else p) for p in parts]  # escape percent
return "".join(parts)
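A stand-alone illustration of how a parts list becomes the display text; the Insert class below is a bare stand-in for illustration, not kurt's real one.

class Insert(object):
    pass

parts = ["say ", Insert(), " for ", Insert(), " secs"]
text = "".join("%s" if isinstance(p, Insert) else p for p in parts)
print(text)  # say %s for %s secs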
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _strip_text(text): """Returns text with spaces and inserts removed."""
text = re.sub(r'[ ,?:]|%s', "", text.lower())
for chr in "-%":
    new_text = text.replace(chr, "")
    if new_text:
        text = new_text
return text.lower()
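A stand-alone look at what the regular expression pass does to a block's display text, using only the standard library.

import re

text = "say %s for %s secs".lower()
text = re.sub(r'[ ,?:]|%s', "", text)   # drop spaces, punctuation, and %s inserts
print(text)  # sayforsecs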
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_insert(self, shape): """Returns True if any of the inserts have the given shape."""
for insert in self.inserts:
    if insert.shape == shape:
        return True
return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_conversion(self, plugin, pbt): """Add a new PluginBlockType conversion. If the plugin already exists, do nothing. """
assert self.shape == pbt.shape
assert len(self.inserts) == len(pbt.inserts)
for (i, o) in zip(self.inserts, pbt.inserts):
    assert i.shape == o.shape
    assert i.kind == o.kind
    assert i.unevaluated == o.unevaluated

if plugin not in self._plugins:
    self._plugins[plugin] = pbt