Dataset schema:

  _id               string  (length 2 to 7)
  title             string  (length 1 to 88)
  partition         string  (3 classes)
  text              string  (length 31 to 13.1k)
  language          string  (1 class)
  meta_information  dict
q278500
ANY_mentions
test
def ANY_mentions(target_mentions, chain_mentions):
    '''
    For each name string in the target_mentions list, searches through
    all chain_mentions looking for any cleansed Token.token that
    contains the name.  Returns True if any of the target_mention
    strings appeared as substrings of any cleansed Token.token.
    Otherwise, returns False.

    :type target_mentions: list of basestring
    :type chain_mentions: list
python
{ "resource": "" }
q278501
look_ahead_match
test
def look_ahead_match(rating, tokens):
    '''iterate through all tokens looking for matches of cleansed
    tokens or token regexes, skipping tokens left empty by cleansing
    and coping with Token objects that produce multiple space-separated
    strings when cleansed.  Yields tokens that match.
    '''
    ## this ensures that all cleansed tokens are non-zero length
    all_mregexes = []
    for m in rating.mentions:
        mregexes = []
        mpatterns = m.decode('utf8').split(' ')
        for mpat in mpatterns:
            if mpat.startswith('ur"^') and mpat.endswith('$"'):
                # mpat is a regex literal
                ## chop out the meat of the regex so we can reconstitute it below
                mpat = mpat[4:-2]
            else:
                mpat = cleanse(mpat)
            if mpat:
                ## make a unicode raw string
                ## https://docs.python.org/2/reference/lexical_analysis.html#string-literals
                mpat = ur'^%s$' % mpat
                logger.debug('look_ahead_match compiling regex: %s', mpat)
                mregexes.append(re.compile(mpat, re.UNICODE | re.IGNORECASE))
        if not mregexes:
            logger.warn('got empty cleansed mention: %r\nrating=%r' % (m, rating))
        all_mregexes.append(mregexes)
    ## now that we have all_mregexes, go through all the tokens
    for i in range(len(tokens)):
        for mregexes in all_mregexes:
            if mregexes[0].match(tokens[i][0][0]):
                ## found the start of a possible match, so iterate
                ## through the tuples of cleansed strings for each
                ## Token while stepping through the cleansed strings
                ## for this mention.
                m_j = 1
                i_j = 0
                last_token_matched = 0
                matched = True
                while m_j < len(mregexes):
                    i_j += 1
python
{ "resource": "" }
q278502
multi_token_match
test
def multi_token_match(stream_item, aligner_data):
    '''
    iterate through tokens looking for near-exact matches to strings
    in si.ratings...mentions
    '''
    tagger_id = _get_tagger_id(stream_item, aligner_data)
    sentences = stream_item.body.sentences.get(tagger_id)
    if not sentences:
        return
    ## construct a list of tuples, where the first part of each tuple
    ## is a tuple of cleansed strings, and the second part is the
    ## Token object from which it came.
    tokens = map(lambda tok: (cleanse(tok.token.decode('utf8')).split(' '), tok),
                 itertools.chain(*[sent.tokens for sent in sentences]))
    required_annotator_id = aligner_data['annotator_id']
    for annotator_id, ratings in stream_item.ratings.items():
        if (required_annotator_id is None) or (annotator_id == required_annotator_id):
            for rating in ratings:
                label = Label(annotator=rating.annotator,
                              target=rating.target)
                num_tokens_matched = 0
                for tok in look_ahead_match(rating, tokens):
python
{ "resource": "" }
q278503
TaggerBatchTransform.make_ner_file
test
def make_ner_file(self, clean_visible_path, ner_xml_path):
    '''run tagger as a child process to get XML output'''
    if self.template is None:
        raise exceptions.NotImplementedError('''
Subclasses must specify a class property "template" that
provides command string format for running a tagger.  It should
take %(tagger_root_path)s as the path from the config file,
%(clean_visible_path)s as the input XML file, and
%(ner_xml_path)s as the output path to create.
''')
    tagger_config = dict(
        tagger_root_path=self.config['tagger_root_path'],
        clean_visible_path=clean_visible_path,
        ner_xml_path=ner_xml_path)
    ## get a java_heap_size or default to 1GB
    tagger_config['java_heap_size'] = self.config.get('java_heap_size', '')
    cmd = self.template % tagger_config
    start_time = time.time()
    ## make sure we are using as little memory as possible
    gc.collect()
    try:
        self._child = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True)
    except OSError, exc:
        msg = traceback.format_exc(exc)
        msg += make_memory_info_msg(clean_visible_path, ner_xml_path)
        raise PipelineOutOfMemory(msg)
    s_out, errors = self._child.communicate()
    if not self._child.returncode == 0:
python
{ "resource": "" }
q278504
TaggerBatchTransform.align_chunk_with_ner
test
def align_chunk_with_ner(self, ner_xml_path, i_chunk, o_chunk):
    '''
    iterate through ner_xml_path to fuse with i_chunk into o_chunk
    '''
    ## prepare to iterate over the input chunk
    input_iter = i_chunk.__iter__()
    all_ner = xml.dom.minidom.parse(open(ner_xml_path))
    ## this converts our UTF-8 data into unicode strings, so when
    ## we want to compute byte offsets or construct tokens, we
    ## must .encode('utf8')
    for ner_dom in all_ner.getElementsByTagName('FILENAME'):
        #for stream_id, raw_ner in files(open(ner_xml_path).read().decode('utf8')):
        stream_item = input_iter.next()
        ## get stream_id out of the XML
        stream_id = ner_dom.attributes.get('stream_id').value
        if stream_item.stream_id is None:
            assert not stream_id, 'out of sync: None != %r' % stream_id
            logger.critical('si.stream_id is None... ignoring')
            continue
        assert stream_id and stream_id == stream_item.stream_id, \
            '%s != %s' % (stream_id, stream_item.stream_id)
        if not stream_item.body:
            ## the XML better have had an empty clean_visible too...
            #assert not ner_dom....something
            continue
        tagging = Tagging()
        tagging.tagger_id = self.tagger_id  # pylint: disable=E1101
        '''
        ## get this one file out of its FILENAME tags
        tagged_doc_parts = list(files(ner_dom.toxml()))
        if not tagged_doc_parts:
            continue
        tagged_doc = tagged_doc_parts[0][1]

        ## hack
        hope_original = make_clean_visible(tagged_doc, '')
        open(ner_xml_path + '-clean', 'wb').write(hope_original.encode('utf-8'))
        print ner_xml_path + '-clean'
        '''
        #tagging.raw_tagging = tagged_doc
        tagging.generation_time = streamcorpus.make_stream_time()
        stream_item.body.taggings[self.tagger_id] = tagging  # pylint: disable=E1101
        ## could consume lots of memory here by instantiating everything
        sentences, relations, attributes = self.get_sentences(ner_dom)
        stream_item.body.sentences[self.tagger_id] = sentences  # pylint: disable=E1101
        stream_item.body.relations[self.tagger_id] = relations  # pylint: disable=E1101
        stream_item.body.attributes[self.tagger_id] = attributes  # pylint: disable=E1101
python
{ "resource": "" }
q278505
TaggerBatchTransform.shutdown
test
def shutdown(self):
    '''
    send SIGTERM to the tagger child process
    '''
    if self._child:
        try:
            self._child.terminate()
        except OSError, exc:
            if exc.errno == 3:
python
{ "resource": "" }
q278506
mult
test
def mult(p, n):
    """Returns a Pattern that matches exactly n repetitions of Pattern p.
    """
    np = P()
    while n >= 1:
        if n % 2:
python
{ "resource": "" }
q278507
fix_emails
test
def fix_emails(text):
    '''Replace all angle bracket emails with a unique key.'''
    emails = bracket_emails.findall(text)
    keys = []
    for email in emails:
        _email
python
{ "resource": "" }
q278508
nltk_tokenizer._sentences
test
def _sentences(self, clean_visible):
    'generate strings identified as sentences'
    previous_end = 0
    clean_visible = clean_visible.decode('utf8')
    for start, end in self.sentence_tokenizer.span_tokenize(clean_visible):
        # no need to check start, because the first byte of text
        # is always first byte of first sentence, and we will
        # have already made the previous sentence longer on the
        # end if there was an overlap.
        if start < previous_end:
            start = previous_end
            if start > end:
                # skip this sentence... because it was eaten by
                # an earlier sentence with a label
                continue
python
{ "resource": "" }
q278509
nltk_tokenizer.make_label_index
test
def make_label_index(self, stream_item):
    'make a sortedcollection on body.labels'
    labels = stream_item.body.labels.get(self.annotator_id)
    if not labels:
        labels = []
    self.label_index
python
{ "resource": "" }
q278510
nltk_tokenizer.make_sentences
test
def make_sentences(self, stream_item):
    'assemble Sentence and Token objects'
    self.make_label_index(stream_item)
    sentences = []
    token_num = 0
    new_mention_id = 0
    for sent_start, sent_end, sent_str in self._sentences(
            stream_item.body.clean_visible):
        assert isinstance(sent_str, unicode)
        sent = Sentence()
        sentence_pos = 0
        for start, end in self.word_tokenizer.span_tokenize(sent_str):
            token_str = sent_str[start:end].encode('utf8')
            tok = Token(
                token_num=token_num,
                token=token_str,
                sentence_pos=sentence_pos,
            )
            tok.offsets[OffsetType.CHARS] = Offset(
                type=OffsetType.CHARS,
                first=sent_start + start,
                length=end - start,
            )
            # whitespace tokenizer will never get a token
            # boundary in the middle of an 'author' label
            try:
                label = self.label_index.find_le(sent_start + start)
            except ValueError:
                label = None
            if label:
                off = label.offsets[OffsetType.CHARS]
                if off.first + off.length > sent_start + start:
                    streamcorpus.add_annotation(tok, label)
python
{ "resource": "" }
q278511
html_entities_to_unicode
test
def html_entities_to_unicode(text, space_padding=False, safe_only=False):
    '''
    Convert any HTML, XML, or numeric entities in the attribute values.
    For example '&amp;' becomes '&'.

    This is adapted from BeautifulSoup, which should be able to do the
    same thing when called like this --- but this fails to convert
    everything for some bug.

    text = unicode(BeautifulStoneSoup(text, convertEntities=BeautifulStoneSoup.XML_ENTITIES))
    '''
    def convert_entities(match):
        '''
        comes from BeautifulSoup.Tag._convertEntities
        '''
        x = match.group(1)
        if safe_only and x not in ENTITIES_THAT_ARE_SAFE_TO_STRING_PAD:
            return u'&%s;' % x
        if x in name2codepoint:
            ## handles most cases
            return unichr(name2codepoint[x])
        elif x in XML_ENTITIES_TO_SPECIAL_CHARS:
            return XML_ENTITIES_TO_SPECIAL_CHARS[x]
        elif len(x) > 0 and x[0] == '#':
            # Handle numeric entities
            if len(x) > 1 and x[1] == 'x':
                return unichr(int(x[2:], 16))
            else:
                return unichr(int(x[1:]))
        else:
            ## uh oh, failed to do anything
python
{ "resource": "" }
q278512
make_cleansed_file
test
def make_cleansed_file(i_chunk, tmp_cleansed_path):
    '''make a temp file of cleansed text'''
    tmp_cleansed = open(tmp_cleansed_path, 'wb')
    for idx, si in enumerate(i_chunk):
        tmp_cleansed.write('<FILENAME docid="%s">\n' % si.stream_id)
        tmp_cleansed.write(si.body.cleansed)
python
{ "resource": "" }
q278513
make_ner_file
test
def make_ner_file(tagger_id, tmp_cleansed_path, tmp_ner_path, pipeline_root):
    '''run child process to get OWPL output'''
    params = dict(INPUT_FILE=tmp_cleansed_path,
                  #RAW_OUTPUT_FILE=tmp_ner_raw_path,
                  OUTPUT_FILE=tmp_ner_path,
                  PIPELINE_ROOT=pipeline_root)
    pipeline_cmd = pipeline_cmd_templates[tagger_id] % params
    print pipeline_cmd
    ## replace this with log.info()
    print 'creating %s' % tmp_ner_path
    start_time = time.time()
    gpg_child = subprocess.Popen(
        pipeline_cmd,
        stderr=subprocess.PIPE,
        shell=True)
    s_out, errors = gpg_child.communicate()
    assert gpg_child.returncode == 0 and 'Exception' not in errors, errors
    elapsed = time.time() - start_time
    ## replace this with log.info()
    print 'created %s in %.1f sec' % (tmp_ner_path, elapsed)
    '''
    postproc_cmd = postproc_cmd_templates[tagger_id] % params
python
{ "resource": "" }
q278514
cleanse
test
def cleanse(span):
    '''Convert a string of text into a lowercase string with no
    punctuation and only spaces for whitespace.

    :param span: string
    '''
    try:
        ## attempt to force it to utf8, which might fail
        span = span.encode('utf8', 'ignore')
    except:
        pass
    ## lowercase, strip punctuation,
python
{ "resource": "" }
q278515
align_chunk_with_ner
test
def align_chunk_with_ner(tmp_ner_path, i_chunk, tmp_done_path):
    '''
    iterate through the i_chunk and tmp_ner_path to generate a new
    Chunk with body.ner
    '''
    o_chunk = Chunk()
    input_iter = i_chunk.__iter__()
    ner = ''
    stream_id = None
    all_ner = xml.dom.minidom.parse(open(tmp_ner_path))
    for raw_ner in all_ner.getElementsByTagName('FILENAME'):
        stream_item = input_iter.next()
        ## get stream_id out of the XML
        stream_id = raw_ner.attributes.get('docid').value
        assert stream_id and stream_id == stream_item.stream_id, \
            '%s != %s\nner=%r' % (stream_id, stream_item.stream_id, ner)
        tagger_id = 'lingpipe'
        tagging = Tagging()
        tagging.tagger_id = tagger_id
        ## get this one file out of its FILENAME tags
        tagged_doc = list(lingpipe.files(raw_ner.toxml()))[0][1]
        tagging.raw_tagging = tagged_doc
        tagging.generation_time = streamcorpus.make_stream_time()
        stream_item.body.taggings[tagger_id] = tagging
        sentences = list(lingpipe.sentences(tagged_doc))
        ## make JS labels on individual tokens
        assert stream_item.ratings[0].mentions, stream_item.stream_id
        john_smith_label = Label()
        john_smith_label.annotator = stream_item.ratings[0].annotator
        john_smith_label.target_id = stream_item.ratings[0].target_id
        # first map all corefchains to their words
        equiv_ids = collections.defaultdict(lambda: set())
        for sent in sentences:
            for tok in sent.tokens:
python
{ "resource": "" }
q278516
make_absolute_paths
test
def make_absolute_paths(config):
    '''given a config dict with streamcorpus_pipeline as a key, find
    all keys under streamcorpus_pipeline that end with "_path" and if
    the value of that key is a relative path, convert it to an
    absolute path using the value provided by root_path
    '''
    if not 'streamcorpus_pipeline' in config:
        logger.critical('bad config: %r', config)
        raise ConfigurationError('missing "streamcorpus_pipeline" from config')
    ## remove the root_path, so it does not get extended itself
    root_path = config['streamcorpus_pipeline'].pop('root_path', None)
    if not root_path:
python
{ "resource": "" }
q278517
instantiate_config
test
def instantiate_config(config):
    '''setup the config and load external modules

    This updates 'config' as follows:

    * All paths are replaced with absolute paths
    * A hash and JSON dump of the config are stored in the config
    * If 'pythonpath' is in the config, it is added to sys.path
    * If 'setup_modules' is in the config, all modules named in it
      are loaded
    '''
    make_absolute_paths(config)
    pipeline_config = config['streamcorpus_pipeline']
    pipeline_config['config_hash'] = make_hash(config)
    pipeline_config['config_json'] = json.dumps(config)
    logger.debug('running config: {0} = {1!r}'
python
{ "resource": "" }
q278518
generate_john_smith_chunk
test
def generate_john_smith_chunk(path_to_original):
    '''
    This _looks_ like a Chunk only in that it generates StreamItem
    instances when iterated upon.
    '''
    ## Every StreamItem has a stream_time property.  It usually comes
    ## from the document creation time.  Here, we assume the JS corpus
    ## was created at one moment at the end of 1998:
    creation_time = '1998-12-31T23:59:59.999999Z'
    correct_time = 915148799
    if not os.path.isabs(path_to_original):
        path_to_original = os.path.join(os.getcwd(), path_to_original)
    ## iterate over the files in the 35 input directories
    for label_id in range(35):
        dir_path = os.path.join(path_to_original, str(label_id))
        fnames = os.listdir(dir_path)
        fnames.sort()
        for fname in fnames:
            stream_item = streamcorpus.make_stream_item(
                creation_time,
                ## make up an abs_url
                os.path.join(
                    'john-smith-corpus', str(label_id), fname))
            if int(stream_item.stream_time.epoch_ticks) != correct_time:
                raise PipelineBaseException('wrong stream_time construction: %r-->%r != %r'
                                            % (creation_time, stream_item.stream_time.epoch_ticks, correct_time))
            ## These docs came from the authors of the paper cited above.
            stream_item.source = 'bagga-and-baldwin'
            ## build a ContentItem for the body
            body = streamcorpus.ContentItem()
            raw_string = open(os.path.join(dir_path, fname)).read()
            ## We know that this is already clean and has nothing
            ## tricky in it, because we manually cleansed it.  To
            ## illustrate how we stick all strings into thrift, we
            ## convert this to unicode (which introduces no changes)
            ## and then encode it as utf-8, which also introduces no
            ## changes.  Thrift stores strings as 8-bit character
            ## strings.
python
{ "resource": "" }
q278519
re_based_make_clean_visible
test
def re_based_make_clean_visible(html):
    '''
    Takes an HTML-like binary string as input and returns a binary
    string of the same length with all tags replaced by whitespace.
    This also detects script and style tags, and replaces the text
    between them with whitespace.

    Pre-existing whitespace of any kind (newlines, tabs) is converted
    to single spaces ' ', which has the same byte length (and
    character length).

    Note: this does not change any characters like &rsquo; and &nbsp;,
    so taggers operating on this text must cope with such symbols.
    Converting them to some other character would change their byte
    length, even if equivalent from a character perspective.

    This is regex based, which can occasionally just hang...
    '''
    text = ''
    # Fix emails
    html
python
{ "resource": "" }
q278520
make_clean_visible
test
def make_clean_visible(_html, tag_replacement_char=' '):
    '''
    Takes an HTML-like Unicode string as input and returns a UTF-8
    encoded string with all tags replaced by whitespace.  In
    particular, all Unicode characters inside HTML are replaced with a
    single whitespace character.

    This does not detect comments, style, script, link.  It also does
    not do anything with HTML-escaped characters.  All of these are
    handled by the clean_html pre-cursor step.

    Pre-existing whitespace of any kind (newlines, tabs) is converted
    to single spaces ' ', which has the same byte length (and
    character length).

    This is a simple state machine iterator without regexes
    '''
    def non_tag_chars(html):
        n = 0
        while n < len(html):
            angle = html.find('<', n)
            if angle == -1:
                yield html[n:]
                n = len(html)
                break
            yield html[n:angle]
            n = angle
            while n < len(html):
                nl = html.find('\n', n)
                angle = html.find('>', n)
                if angle == -1:
                    yield ' ' * (len(html) - n)
                    n = len(html)
                    break
python
{ "resource": "" }
q278521
make_clean_visible_file
test
def make_clean_visible_file(i_chunk, clean_visible_path):
    '''make a temp file of clean_visible text'''
    _clean = open(clean_visible_path, 'wb')
    _clean.write('<?xml version="1.0" encoding="UTF-8"?>')
    _clean.write('<root>')
    for idx, si in enumerate(i_chunk):
        if si.stream_id is None:
            # create the FILENAME element anyway, so the ordering
            # remains the same as the i_chunk and can be aligned.
            stream_id = ''
        else:
            stream_id = si.stream_id
        doc = lxml.etree.Element("FILENAME", stream_id=stream_id)
        if si.body and si.body.clean_visible:
            try:
                # is UTF-8, and etree wants .text to be unicode
                doc.text = si.body.clean_visible.decode('utf8')
            except ValueError:
                doc.text = drop_invalid_and_upper_utf8_chars(
                    si.body.clean_visible.decode('utf8'))
            except Exception, exc:
python
{ "resource": "" }
q278522
cleanse
test
def cleanse(span, lower=True):
    '''Convert a unicode string into a lowercase string with no
    punctuation and only spaces for whitespace.

    Replace PennTreebank escaped brackets with ' ':
    -LRB- -RRB- -LSB- -RSB- -LCB- -RCB-
    (The acronyms stand for (Left|Right) (Round|Square|Curly) Bracket.)
    http://www.cis.upenn.edu/~treebank/tokenization.html

    :param span: string
    '''
    assert isinstance(span, unicode), \
        'got non-unicode string %r' % span
    #
python
{ "resource": "" }
q278523
main
test
def main():
    '''manual test loop for make_clean_visible_from_raw
    '''
    import argparse
    import sys
    parser = argparse.ArgumentParser()
    parser.add_argument('path')
    args = parser.parse_args()
    html = open(args.path).read()
    html = html.decode('utf8')
    cursor = 0
    for s in non_tag_chars_from_raw(html):
        for c
python
{ "resource": "" }
q278524
StageRegistry.tryload_stage
test
def tryload_stage(self, moduleName, functionName, name=None):
    '''Try to load a stage into self, ignoring errors.

    If loading a module fails because of some subordinate load
    failure, just give a warning and move on.  On success the stage
    is added to the stage dictionary.

    :param str moduleName: name of the Python module
    :param str functionName: name of the stage constructor
    :param str name: name of the stage, defaults to `functionName`

    '''
    if name is None:
        name = functionName
    try:
        mod = __import__(moduleName, globals(), locals(), [functionName])
python
{ "resource": "" }
q278525
StageRegistry.load_external_stages
test
def load_external_stages(self, path):
    '''Add external stages from the Python module in `path`.

    `path` must be a path to a Python module source that
python
{ "resource": "" }
q278526
StageRegistry.load_module_stages
test
def load_module_stages(self, mod):
    '''Add external stages from the Python module `mod`.

    If `mod` is a string, then it will be interpreted as the name
    of a module; otherwise it is an actual module object.  The
    module should exist somewhere in :data:`sys.path`.  The module
    must contain a `Stages` dictionary, which is a map from stage
    name to callable.

    :param mod: name of the module or the module itself
    :raise exceptions.ImportError: if `mod` cannot be loaded or
      does not contain ``Stages``
python
{ "resource": "" }
q278527
StageRegistry.init_stage
test
def init_stage(self, name, config):
    '''Construct and configure a stage from known stages.

    `name` must be the name of one of the stages in this.  `config`
    is the configuration dictionary of the containing object, and
    its `name` member will be passed into the stage constructor.

    :param str name: name of the stage
python
{ "resource": "" }
q278528
read_to
test
def read_to( idx_bytes, stop_bytes=None, run_bytes=None ):
    '''
    iterates through idx_bytes until a byte in stop_bytes or a byte
    not in run_bytes.

    :rtype (int, string): idx of the last byte, and all of the bytes
    including the terminal byte from stop_bytes or not in run_bytes
    '''
    idx = None
    vals = []
    next_b = None
    while 1:
        try:
            idx, next_b = idx_bytes.next()
        except StopIteration:
            ## maybe something going wrong?
            idx = None
            next_b = None
python
{ "resource": "" }
q278529
hyperlink_labels.href_filter
test
def href_filter(self, href):
    '''
    Test whether an href string meets the criteria specified by the
    configuration parameters 'require_abs_url', which means "does it
    look like it is probably an absolute URL?", and
    'domain_substrings'.  It searches for each of the
    domain_substrings in the href individually, and if any match,
    then returns True.

    :param: href string
    :returns bool:
    '''
    if self.config['require_abs_url']:
python
{ "resource": "" }
q278530
hyperlink_labels.make_labels
test
def make_labels(self, clean_html, clean_visible=None):
    '''
    Make a list of Labels for 'author' and the filtered hrefs &
    anchors
    '''
    if self.offset_type == OffsetType.BYTES:
        parser = self.byte_href_anchors
    elif self.offset_type == OffsetType.CHARS:
        parser = self.char_href_anchors
    elif self.offset_type == OffsetType.LINES:
        parser = self.line_href_anchors
    labels = []
    ## make clean_html accessible as a class property so we can
    self.clean_html = clean_html
    for href, first, length, value in parser():
        if self.href_filter(href):
            '''
            if clean_visible:
                _check_html = self.clean_html.splitlines()[first-10:10+first+length]
                _check_visi = clean_visible.splitlines()[first:first+length]
                if not make_clean_visible(_check_html) == _check_visi:
                    print len(self.clean_html.splitlines())
                    print len(clean_visible.splitlines())
                    print href
python
{ "resource": "" }
q278531
paths
test
def paths(input_dir):
    'yield all file paths under input_dir'
    for root, dirs, fnames in os.walk(input_dir):
        for i_fname in fnames:
python
{ "resource": "" }
q278532
Cassa.tasks
test
def tasks(self, key_prefix=''):
    '''
    generate the data objects for every task
    '''
    for row in self._tasks.get_range():
        logger.debug(row)
        if not row[0].startswith(key_prefix):
python
{ "resource": "" }
q278533
Cassa.get_random_available
test
def get_random_available(self, max_iter=10000):
    '''
    get a random key out of the first max_iter rows
    '''
    c = 1
    keeper = None
    ## note the ConsistencyLevel here.  If we do not do this, and
    ## get all slick with things like column_count=0 and filter
    ## empty False, then we can get keys that were recently
    ## deleted... EVEN if the default consistency would seem to
    ## rule that out!
    ## note the random start key, so that we do not always hit the
    ## same place in the key range with all workers
    #random_key = hashlib.md5(str(random.random())).hexdigest()
    #random_key = '0' * 32
    #logger.debug('available.get_range(%r)' % random_key)
    ## scratch that idea: turns out that using a random start key
    ## OR using row_count=1 can cause get_range to hang for hours
    ## why we need ConsistencyLevel.ALL on a single node is not
    ## clear, but experience indicates it is needed.
    ## note that putting a finite row_count is problematic in two
    ## ways:
    # 1) if there are more workers than max_iter, some will not
    #    get tasks
    #
    # 2) if there
python
{ "resource": "" }
q278534
LingPipeParser.tokens
test
def tokens(self, sentence_dom):
    '''
    Tokenize all the words and preserve NER labels from ENAMEX tags
    '''
    ## keep track of sentence position, which is reset for each
    ## sentence, and used above in _make_token
    self.sent_pos = 0
    ## keep track of mention_id, so we can distinguish adjacent
    ## multi-token mentions within the same coref chain
    mention_id = 0
    while len(sentence_dom.childNodes) > 0:
        ## shrink the sentence_dom's child nodes.  In v0_2_0 this
        ## was required to cope with HitMaxi16.  Now it is just to
        ## save memory.
        node = sentence_dom.childNodes.pop(0)
        if node.nodeType == node.TEXT_NODE:
            ## process portion before an ENAMEX tag
            for line in node.data.splitlines(True):
                self._input_string = line
                for start, end in self.word_tokenizer.span_tokenize(line):
                    tok = self._make_token(start, end)
                    if tok:
                        yield tok
                if line.endswith('\n'):
                    ## maintain the index to the current line
                    self.line_idx += 1
                ## increment index past the 'before' portion
                self.byte_idx += len(line.encode('utf-8'))
        else:
            ## process text inside an ENAMEX tag
python
{ "resource": "" }
q278535
lingpipe.get_sentences
test
def get_sentences(self, ner_dom):
    '''parse the sentences and tokens out of the XML'''
    lp_parser = LingPipeParser(self.config)
    lp_parser.set(ner_dom)
    sentences
python
{ "resource": "" }
q278536
_retry
test
def _retry(func):
    '''
    Decorator for methods that need many retries, because of
    intermittent failures, such as AWS calls via boto, which has a
    non-back-off retry.
    '''
    def retry_func(self, *args, **kwargs):
        tries = 1
        while True:
            # If a handler allows execution to continue, then
            # fall through and do a back-off retry.
            try:
                return func(self, *args, **kwargs)
                break
            except OSError as exc:
                ## OSError: [Errno 24] Too many open files
                logger.error('assuming OSError unrecoverable')
                raise
            except FailedExtraction as exc:
                ## pass through exc to caller
                logger.error('FAIL(%d)', tries, exc_info=True)
                raise
            except FailedVerification as exc:
                logger.warn('FAIL(%d)', tries, exc_info=True)
                if tries >= self.config['tries']:
python
{ "resource": "" }
q278537
verify_md5
test
def verify_md5(md5_expected, data, other_errors=None):
    "return True if okay, raise Exception if not"
    # O_o ?
    md5_recv = hashlib.md5(data).hexdigest()
    if md5_expected != md5_recv:
        if other_errors is not None:
            logger.critical('\n'.join(other_errors))
python
{ "resource": "" }
q278538
get_bucket
test
def get_bucket(config, bucket_name=None):
    '''This function is mostly about managing configuration, and then
    finally returns a boto.Bucket object.

    AWS credentials come first from config keys
    aws_access_key_id_path, aws_secret_access_key_path (paths to one
    line files); secondly from environment variables
    AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY; also from
    $HOME/.aws/credentials or the magic Amazon http://169.254.169.254/
    service.  If credentials are not set in the config then behavior
    is the same as other AWS-based command-line tools.
    '''
    if not bucket_name:
        if 'bucket' not in config:
            raise ConfigurationError(
                'The "bucket" parameter is required for the s3 stages.')
        bucket_name = config['bucket']
    # get AWS credentials.  first, from config; else, from env vars.
python
{ "resource": "" }
q278539
from_s3_chunks._decode
test
def _decode(self, data):
    '''
    Given the raw data from s3, return a generator for the items
    contained in that data.  A generator is necessary to support
    chunk files, but non-chunk files can be provided by a generator
    that yields exactly one item.

    Decoding works by case analysis on the config option
    ``input_format``.  If an invalid ``input_format`` is given,
    then a ``ConfigurationError`` is raised.
    '''
    informat = self.config['input_format'].lower()
    if informat == 'spinn3r':
        return _generate_stream_items(data)
    elif informat == 'streamitem':
        ver = self.config['streamcorpus_version']
        if ver not in _message_versions:
            raise ConfigurationError(
                'Not a valid streamcorpus version: %s '
                '(choose from: %s)'
python
{ "resource": "" }
q278540
from_s3_chunks.get_chunk
test
def get_chunk(self, bucket_name, key_path):
    '''return Chunk object full of records

    bucket_name may be None'''
    bucket = get_bucket(self.config, bucket_name=bucket_name)
    key = bucket.get_key(key_path)
    if key is None:
        raise FailedExtraction('Key "%s" does not exist.' % key_path)
    fh = StringIO()
    key.get_contents_to_file(fh)
    data = fh.getvalue()
    if not data:
        raise FailedExtraction('%s: no data (does the key exist?)' % key.key)
    chunk_type, compression, encryption = parse_file_extensions(key_path)
    if encryption == 'gpg':
        if not self.gpg_decryption_key_path:
            raise FailedExtraction('%s ends with ".gpg" but gpg_decryption_key_path=%s'
                                   % (key.key, self.gpg_decryption_key_path))
    _errors = []
    if compression or encryption:
        _errors, data
python
{ "resource": "" }
q278541
stream_id_to_kvlayer_key
test
def stream_id_to_kvlayer_key(stream_id):
    '''Convert a text stream ID to a kvlayer key.

    The return tuple can be used directly as a key in the
    :data:`STREAM_ITEMS_TABLE` table.

    :param str stream_id: stream ID to convert
    :return: :mod:`kvlayer` key tuple
    :raise exceptions.KeyError: if `stream_id` is malformed

    '''
    # Reminder: stream_id is 1234567890-123456789abcdef...0
python
{ "resource": "" }
q278542
kvlayer_key_to_stream_id
test
def kvlayer_key_to_stream_id(k):
    '''Convert a kvlayer key to a text stream ID.

    `k` should be of the same form produced by
    :func:`stream_id_to_kvlayer_key`.
python
{ "resource": "" }
q278543
key_for_stream_item
test
def key_for_stream_item(si):
    '''Get a kvlayer key from a stream item.

    The return tuple can be used directly as a key in the
    :data:`STREAM_ITEMS_TABLE` table.  Note that this recalculates
    the stream ID, and if the internal data on the stream item is
    inconsistent then this could return a different result from
    :func:`stream_id_to_kvlayer_key`.

    :param si: stream item to get key
python
{ "resource": "" }
q278544
main
test
def main(argv=sys.argv):
    """Serve up some ponies."""
    args = parse(argv)
    hostname = args.listen
    port = args.port
    print(
        "Making all your dreams for a pony come true on http://{0}:{1}.\n"
        "Press Ctrl+C to quit.\n".format(hostname, port))
python
{ "resource": "" }
q278545
build_parser
test
def build_parser():
    """Build the parser that will have all available commands and options."""
    description = (
        'HTTPony (pronounced aych-tee-tee-pony) is a simple HTTP '
        'server that pretty prints HTTP requests to a terminal. It '
        'is a useful aide for developing clients that send HTTP '
        'requests. HTTPony acts as a sink for a client so that a '
        'developer can understand
python
{ "resource": "" }
q278546
add_xpaths_to_stream_item
test
def add_xpaths_to_stream_item(si):
    '''Mutably tag tokens with xpath offsets.

    Given some stream item, this will tag all tokens from all
    taggings in the document that contain character offsets.  Note
    that some tokens may not have computable xpath offsets, so an
    xpath offset for those tokens will not be set.  (See the
    documentation and comments for ``char_offsets_to_xpaths`` for
    what it means for a token to have a computable xpath.)

    If a token can have its xpath offset computed, it is added to
    its set of offsets with a ``OffsetType.XPATH_CHARS`` key.
    '''
    def sentences_to_xpaths(sentences):
python
{ "resource": "" }
q278547
sentences_to_char_tokens
test
def sentences_to_char_tokens(si_sentences):
    '''Convert stream item sentences to character ``Offset``s.'''
    for sentence in si_sentences:
        for token
python
{ "resource": "" }
q278548
char_tokens_to_char_offsets
test
def char_tokens_to_char_offsets(si_tokens):
    '''Convert character ``Offset``s to character ranges.'''
    for token in si_tokens:
        offset =
python
{ "resource": "" }
q278549
char_offsets_to_xpaths
test
def char_offsets_to_xpaths(html, char_offsets):
    '''Converts HTML and a sequence of char offsets to xpath offsets.

    Returns a generator of :class:`streamcorpus.XpathRange` objects
    in correspondence with the sequence of ``char_offsets`` given.
    Namely, each ``XpathRange`` should address precisely the same
    text as that ``char_offsets`` (sans the HTML).

    Depending on how ``char_offsets`` was tokenized, it's possible
    that some tokens cannot have their xpaths generated reliably.
    In this case, a ``None`` value is yielded instead of a
    ``XpathRange``.

    ``char_offsets`` must be a sorted and non-overlapping sequence
    of character ranges.  They do not have to be contiguous.
    '''
    html = uni(html)
    parser = XpathTextCollector()
    prev_end = 0
    prev_progress = True
    for start, end in char_offsets:
        if start == end:
            # Zero length tokens shall have no quarter!
            # Note that this is a special case.  If we let zero-length tokens
            # be handled normally, then it will be recorded as if the parser
            # did not make any progress.  But of course, there is no progress
            # to be had!
            yield None
            continue
        # If we didn't make any progress on the previous token, then we'll
        # need to try and make progress before we can start tracking offsets
        # again.  Otherwise the parser will report incorrect offset info.
        #
        # (The parser can fail to make progress when tokens are split at
        # weird boundaries, e.g., `&amp` followed by `;`.  The parser won't
        # make progress after `&amp` but will once `;` is given.)
        #
        # Here, we feed the parser one character at a time between where the
        # last token ended and where the next token will start.  In most cases,
        # this will be enough to nudge the parser along.  Once done, we can pick
        # up where we left off and start handing out offsets again.
        #
        # If this still doesn't let us make progress, then we'll have to skip
        # this token too.
        if not prev_progress:
            for i in xrange(prev_end, start):
                parser.feed(html[i])
                prev_end += 1
                if parser.made_progress:
                    break
python
{ "resource": "" }
q278550
DepthStackEntry.add_element
test
def add_element(self, tag):
    '''Record that `tag` has been seen at this depth.

    If `tag` is :class:`TextElement`, it records a text node.
    '''
    # Collapse adjacent text nodes
    if tag is TextElement and self.last_tag is TextElement:
python
{ "resource": "" }
q278551
DepthStackEntry.xpath_piece
test
def xpath_piece(self):
    '''Get an XPath fragment for this location.

    It is of the form ``tag[n]`` where `tag` is the most recent
    element added and n is its position.
    '''
    if self.last_tag is TextElement:
        return 'text()[{count}]'.format(count=self.text_index())
python
{ "resource": "" }
q278552
DepthStackEntry.text_index
test
def text_index(self):
    '''Returns the one-based index of the current text node.'''
    # This is the number of text nodes we've seen so far.
    # If
python
{ "resource": "" }
q278553
descendants
test
def descendants(elem):
    '''
    Yields all the elements descendant of elem in document order
    '''
    for child in elem.xml_children:
python
{ "resource": "" }
q278554
select_elements
test
def select_elements(source):
    '''
    Yields all the elements from the source

    source - if an element, yields all child elements in order; if
    any other iterator yields the elements from that iterator
    '''
python
{ "resource": "" }
q278555
select_name
test
def select_name(source, name):
    '''
    Yields all the elements with the given name

    source - if an element, starts with all child elements in order;
    can also be any other iterator
    name - will yield
python
{ "resource": "" }
q278556
select_name_pattern
test
def select_name_pattern(source, pat):
    '''
    Yields elements from the source whose name matches the given
    regular expression pattern

    source - if an element, starts with all child elements in order;
    can also be any other iterator
python
{ "resource": "" }
q278557
select_attribute
test
def select_attribute(source, name, val=None):
    '''
    Yields elements from the source having the given attribute,
    optionally with the given attribute value

    source - if an element, starts with all child elements in order;
    can also be any other iterator
    name - attribute name to check
    val - if None check only for the existence of the attribute,
    otherwise compare the given value as well
python
{ "resource": "" }
q278558
following_siblings
test
def following_siblings(elem):
    '''
    Yields elements and text which have the same parent as elem, but
    come afterward in document order
    '''
    it = itertools.dropwhile(lambda x: x
python
{ "resource": "" }
q278559
make_pretty
test
def make_pretty(elem, depth=0, indent='  '):
    '''
    Add text nodes as possible to all descendants of an element for
    spacing & indentation to make the MicroXML as printed easier for
    people to read.  Will not modify the value of any text node which
    is not already entirely whitespace.

    Warning: even though this operation avoids molesting text nodes
    which already have whitespace, it still makes changes which alter
    the text.  Not all whitespace in XML is ignorable.  In XML cues
    from the DTD indicate which whitespace can be ignored.  No such
    cues are available for MicroXML, so use this function with care.
    That said, in many real world applications of XML and MicroXML,
    this function causes no problems.

    elem - target element whose descendant nodes are to be modified.
    returns - the same element, which has been updated in place

    >>> from amara3.uxml import tree
    >>> from amara3.uxml.treeutil import *
    >>> DOC = '<a><b><x>1</x></b><c><x>2</x><d><x>3</x></d></c><x>4</x><y>5</y></a>'
    >>> tb = tree.treebuilder()
    >>> root = tb.parse(DOC)
    >>> len(root.xml_children)
    4
    >>> make_pretty(root)
    <uxml.element (8763373718343) "a" with 9 children>
    >>> len(root.xml_children)
    9
    >>> root.xml_encode()
    '<a>\n  <b>\n    <x>1</x>\n  </b>\n  <c>\n    <x>2</x>\n    <d>\n      <x>3</x>\n    </d>\n  </c>\n  <x>4</x>\n  <y>5</y>\n</a>'
    '''
    depth += 1
    updated_child_list = []
    updated_child_ix = 0
    for child in elem.xml_children:
        if isinstance(child, element):
            if updated_child_ix % 2:
                updated_child_list.append(child)
                updated_child_ix += 1
            else:
                # It's the turn for text, but we have an element
                new_text = text('\n' + indent*depth, elem)
                updated_child_list.append(new_text)
                updated_child_list.append(child)
                updated_child_ix += 2
python
{ "resource": "" }
q278560
call_inkscape
test
def call_inkscape(args_strings, inkscape_binpath=None):
    """Call inkscape CLI with arguments and return its return value.

    Parameters
    ----------
    args_strings: list of str

    inkscape_binpath: str

    Returns
    -------
    return_value
        Inkscape command CLI call return value.
    """
    log.debug('Looking for the binary file for inkscape.')
    if inkscape_binpath is None:
python
{ "resource": "" }
q278561
inkscape_export
test
def inkscape_export(input_file, output_file, export_flag="-A", dpi=90, inkscape_binpath=None):
    """Call Inkscape to export the input_file to output_file using the
    specific export argument flag for the output file type.

    Parameters
    ----------
    input_file: str
        Path to the input file

    output_file: str
        Path to the output file

    export_flag: str
        Inkscape CLI flag to indicate the type of the output file

    Returns
    -------
    return_value
        Command call return value
    """
    if not os.path.exists(input_file):
        log.error('File {} not found.'.format(input_file))
        raise IOError((0, 'File not found.', input_file))
python
{ "resource": "" }
q278562
svg2pdf
test
def svg2pdf(svg_file_path, pdf_file_path, dpi=150, command_binpath=None, support_unicode=False):
    """ Transform SVG file to PDF file
    """
    if support_unicode:
        return rsvg_export(svg_file_path, pdf_file_path, dpi=dpi, rsvg_binpath=command_binpath)
python
{ "resource": "" }
q278563
svg2png
test
def svg2png(svg_file_path, png_file_path, dpi=150, inkscape_binpath=None):
    """ Transform SVG file to PNG file
    """
python
{ "resource": "" }
q278564
get_environment_for
test
def get_environment_for(file_path):
    """Return a Jinja2 environment for where file_path is.

    Parameters
    ----------
    file_path: str

    Returns
    -------
    jinja_env: Jinja2.Environment
    """
    work_dir = os.path.dirname(os.path.abspath(file_path))
    if not os.path.exists(work_dir):
python
{ "resource": "" }
q278565
TextDocument._setup_template_file
test
def _setup_template_file(self, template_file_path):
    """ Set up self.template.

    Parameters
    ----------
    template_file_path: str
        Document template file path.
    """
    try:
        template_file = template_file_path
python
{ "resource": "" }
q278566
TextDocument.fill
test
def fill(self, doc_contents):
    """ Fill the content of the document with the information in doc_contents.

    Parameters
    ----------
    doc_contents: dict
        Set of values to set in the template document.

    Returns
    -------
    filled_doc: str
        The content of the document with the template information filled.
    """
    try:
        filled_doc = self.template.render(**doc_contents)
    except:
python
{ "resource": "" }
q278567
TextDocument.save_content
test
def save_content(self, file_path, encoding='utf-8'):
    """ Save the content of the .txt file in a text file.

    Parameters
    ----------
    file_path: str
        Path to the output file.
    """
    if self.file_content_ is None:
        msg = 'Template content has not been updated. ' \
              'Please fill the template before rendering it.'
        log.exception(msg)
        raise ValueError(msg)
    try:
python
{ "resource": "" }
q278568
TextDocument.from_template_file
test
def from_template_file(cls, template_file_path, command=None):
    """ Factory function to create a specific document of the class
    given by the `command` or the extension of `template_file_path`.
    See get_doctype_by_command and get_doctype_by_extension.

    Parameters
    ----------
    template_file_path: str

    command: str

    Returns
    -------
    doc
    """
    # get template file extension
python
{ "resource": "" }
q278569
SVGDocument.fill
test
def fill(self, doc_contents):
    """ Fill the content of the document with the information in doc_contents.
    This is different from the TextDocument fill function, because this
    will check for symbols in the values of `doc_content` and replace
    them with proper XML codes before filling the template.

    Parameters
    ----------
    doc_contents: dict
        Set of values to set in the template document.

    Returns
    -------
python
{ "resource": "" }
q278570
SVGDocument.render
test
def render(self, file_path, **kwargs):
    """ Save the content of the .svg file in the chosen rendered format.

    Parameters
    ----------
    file_path: str
        Path to the output file.

    Kwargs
    ------
    file_type: str
        Choices: 'png', 'pdf', 'svg'
        Default: 'pdf'

    dpi: int
        Dots-per-inch for the png and pdf.
        Default: 150

    support_unicode: bool
        Whether to allow unicode to be encoded in the PDF.
        Default: False
    """
    temp = get_tempfile(suffix='.svg')
    self.save_content(temp.name)
    file_type = kwargs.get('file_type', 'pdf')
    dpi = kwargs.get('dpi', 150)
python
{ "resource": "" }
q278571
LateXDocument.render
test
def render(self, file_path, **kwargs):
    """ Save the content of the .tex file in the PDF.

    Parameters
    ----------
    file_path: str
        Path to the output file.
    """
    temp = get_tempfile(suffix='.tex')
    self.save_content(temp.name)
python
{ "resource": "" }
q278572
parse
test
def parse(source, handler):
    '''
    Convert XML 1.0 to MicroXML

    source - XML 1.0 input
    handler - MicroXML events handler

    Returns uxml, extras

    uxml - MicroXML element extracted from the source
    extras - information to be preserved but not part of MicroXML,
    e.g. namespaces
    '''
    h = expat_callbacks(handler)
    p = xml.parsers.expat.ParserCreate(namespace_separator=' ')
    p.StartElementHandler = h.start_element
python
{ "resource": "" }
q278573
parse
test
def parse(source, prefixes=None, model=None, encoding=None, use_xhtml_ns=False):
    '''
    Parse an input source with HTML text into an Amara 3 tree

    >>> from amara3.uxml import html5
    >>> import urllib.request
    >>> with urllib.request.urlopen('http://uche.ogbuji.net/') as response:
    ...     html5.parse(response)

    Warning: if you pass a string, you must make sure it's a byte
    string, not a Unicode object.  You might also want to wrap it with
    amara.lib.inputsource.text if it's not obviously XML or HTML (for
    example it could be confused with a file name)
    '''
    def get_tree_instance(namespaceHTMLElements, use_xhtml_ns=use_xhtml_ns):
python
{ "resource": "" }
q278574
markup_fragment
test
def markup_fragment(source, encoding=None):
    '''
    Parse a fragment of markup in HTML mode, and return a bindery node

    Warning: if you pass a string, you must make sure it's a byte
    string, not a Unicode object.  You might also want to wrap it
    with amara.lib.inputsource.text if it's not obviously XML or HTML
    (for example it could be confused with a file name)

    from amara.lib import inputsource
    from amara.bindery import html
python
{ "resource": "" }
q278575
node.insertText
test
def insertText(self, data, insertBefore=None):
    """Insert data as text in the current node, positioned before the
    start of node insertBefore or to the end of the node's text.
    """
    if insertBefore:
python
{ "resource": "" }
q278576
node.insertBefore
test
def insertBefore(self, node, refNode):
    """Insert node as a child of the current node, before refNode in
    the list of child nodes.  Raises ValueError if refNode is not a
    child of
python
{ "resource": "" }
q278577
element.cloneNode
test
def cloneNode(self):
    """Return a shallow copy of the current node i.e. a node with the
    same name and attributes but with no parent or child nodes
python
{ "resource": "" }
q278578
execute
test
def execute(option):
    '''A script that melody calls with each valid set of options.
    This script runs the required code and returns the results.'''
    namelist_option = []
    makefile_option = []
    flags = ""
    for entry in option:
        key = entry.keys()[0]
        if key == "Problem Size":
            namelist_option.append({"SIZE": entry[key]})
        elif key == "F90":
            makefile_option.append(entry)
        else:
            flags += entry[key] + " "
    makefile_option.append({"F90FLAGS": flags})
    namelist = create_input(namelist_option, "namelist",
                            template_location="templates")
    makefile_include = create_input(makefile_option, "Makefile.include",
                                    template_location="templates")
    benchmark_base = "shallow"
    # save the input files in the appropriate place
    location = benchmark_base + "/original/namelist"
    my_file = open(location, 'w')
    my_file.write(namelist)
    my_file.flush()
    location = benchmark_base + "/common/Makefile.include"
    my_file = open(location, 'w')
    my_file.write(makefile_include)
    my_file.flush()
    # compile shallow if required
    base_path = benchmark_base + "/original"
    import subprocess
    make_process = subprocess.Popen(["make", "clean"], cwd=base_path,
                                    stderr=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
    if
python
{ "resource": "" }
q278579
strval
test
def strval(node, outermost=True):
    '''
    XPath-like string value of node
    '''
    if not isinstance(node, element):
        return node.xml_value if outermost else [node.xml_value]
    accumulator = []
    for child in node.xml_children:
        if isinstance(child, text):
            accumulator.append(child.xml_value)
python
{ "resource": "" }
q278580
element.xml_insert
test
def xml_insert(self, child, index=-1):
    '''
    Append a node as the last child

    child - the child to append.  If a string, convert to a text
    node, for convenience
    '''
    if isinstance(child, str):
        child = text(child, parent=self)
    else:
python
{ "resource": "" }
q278581
parse_config
test
def parse_config(options):
    """
    Get settings from config file.
    """
    if os.path.exists(options.config):
        config = ConfigParser.ConfigParser()
        try:
            config.read(options.config)
        except Exception, err:
            if not options.quiet:
                sys.stderr.write("ERROR: Config file read {config} error. {err}".format(config=options.config, err=err))
            sys.exit(-1)
        try:
            configdata = {
                "secrets": config.get("GOOGLE", "secrets"),
                "credentials": config.get("nagios-notification-google-calendar", "credentials"),
                "start": config.get("nagios-notification-google-calendar", "start"),
python
{ "resource": "" }
q278582
get_google_credentials
test
def get_google_credentials(options, config):
    """
    Get google API credentials for user.
    """
    try:
        if options.get_google_credentials:
            flow = flow_from_clientsecrets(config["secrets"], scope=SCOPE, redirect_uri="oob")
            sys.stdout.write("Follow this URL: {url} and grant access to calendar.\n".format(url=flow.step1_get_authorize_url()))
            token = raw_input("Enter token:")
            credentials = flow.step2_exchange(token)
            storage = Storage(os.path.join(config["credentials"], "{username}.json".format(username=options.username)))
            storage.put(credentials)
            credentials.set_store(storage)
        else:
python
{ "resource": "" }
q278583
create_event_datetimes
test
def create_event_datetimes(options, config):
    """
    Create event start and end datetimes.
    """
    now = datetime.datetime.now()
    return {
        "start": {
            "dateTime": (now + datetime.timedelta(minutes=int(config["start"]))).strftime(DT_FORMAT),
            "timeZone": options.timezone,
        },
        "end": {
python
{ "resource": "" }
q278584
create_event
test
def create_event(options, config, credentials):
    """
    Create event in calendar with sms reminder.
    """
    try:
        http = credentials.authorize(httplib2.Http())
        service = build("calendar", "v3", http=http)
        event = {
            "summary": options.message,
            "location": "",
            "reminders": {
                "useDefault": False,
                "overrides": [
                    {
                        "method": "sms",
                        "minutes": config["message"],
                    },
                ],
            }
python
{ "resource": "" }
q278585
main
test
def main(): """ Processing notification call main function. """ # getting info for creating event options = parse_options() config = parse_config(options)
python
{ "resource": "" }
q278586
get_extension
test
def get_extension(filepath, check_if_exists=False):
    """Return the extension of filepath.

    Parameters
    ----------
    filepath: string
        File name or path

    check_if_exists: bool

    Returns
    -------
    str
        The extension of the file name or path
    """
    if check_if_exists:
        if not os.path.exists(filepath):
python
{ "resource": "" }
q278587
add_extension_if_needed
test
def add_extension_if_needed(filepath, ext, check_if_exists=False):
    """Add the extension ext to filepath if it doesn't have it.

    Parameters
    ----------
    filepath: str
        File name or path

    ext: str
        File extension

    check_if_exists: bool

    Returns
    -------
    File name or path with extension added, if needed.
    """
    if not
python
{ "resource": "" }
q278588
get_tempfile
test
def get_tempfile(suffix='.txt', dirpath=None):
    """ Return a temporary file with the given suffix within dirpath.
    If dirpath is None, will look for a temporary folder in your system.

    Parameters
    ----------
    suffix: str
        Temporary file name suffix

    dirpath: str
        Folder path where to create the temporary file
python
{ "resource": "" }
q278589
cleanup
test
def cleanup(workdir, extension):
    """ Remove the files in workdir that have the given extension.

    Parameters
    ----------
    workdir:
python
{ "resource": "" }
q278590
csv_to_json
test
def csv_to_json(csv_filepath, json_filepath, fieldnames, ignore_first_line=True):
    """ Convert a CSV file in `csv_filepath` into a JSON file in `json_filepath`.

    Parameters
    ----------
    csv_filepath: str
        Path to the input CSV file.

    json_filepath: str
        Path to the output JSON file.  Will be overwritten if it exists.
python
{ "resource": "" }
q278591
replace_file_content
test
def replace_file_content(filepath, old, new, max=1):
    """ Modify the content of `filepath`, replacing `old` for `new`.

    Parameters
    ----------
    filepath: str
        Path to the file to be modified.  It will be overwritten.

    old: str
        The old substring to be replaced.

    new: str
        The new substring, which will replace the old substring.
python
{ "resource": "" }
q278592
CopyDoc.parse
test
def parse(self):
    """
    Run all parsing functions.
    """
    for tag in self.soup.findAll('span'):
        self.create_italic(tag)
        self.create_strong(tag)
        self.create_underline(tag)
        self.unwrap_span(tag)
    for tag in self.soup.findAll('a'):
        self.remove_comments(tag)
        self.check_next(tag)
    if self.soup.body:
        for tag in self.soup.body.findAll():
            self.remove_empty(tag)
python
{ "resource": "" }
q278593
CopyDoc.check_next
test
def check_next(self, tag):
    """
    If next tag is link with same href, combine them.
    """
    if (type(tag.next_sibling) == element.Tag and
            tag.next_sibling.name == 'a'):
        next_tag = tag.next_sibling
        if tag.get('href') and next_tag.get('href'):
            href = self._parse_href(tag.get('href'))
python
{ "resource": "" }
q278594
CopyDoc.create_italic
test
def create_italic(self, tag):
    """
    See if span tag has italic style and wrap with em tag.
python
{ "resource": "" }
q278595
CopyDoc.create_strong
test
def create_strong(self, tag):
    """
    See if span tag has bold style and wrap with strong tag.
    """
    style = tag.get('style')
    if (style and
python
{ "resource": "" }
q278596
CopyDoc.create_underline
test
def create_underline(self, tag):
    """
    See if span tag has underline style and wrap with u tag.
python
{ "resource": "" }
q278597
CopyDoc.parse_attrs
test
def parse_attrs(self, tag):
    """
    Reject attributes not defined in ATTR_WHITELIST.
    """
    if tag.name in ATTR_WHITELIST.keys():
        attrs = copy(tag.attrs)
        for attr, value in attrs.items():
            if attr in
python
{ "resource": "" }
q278598
CopyDoc.clean_linebreaks
test
def clean_linebreaks(self, tag):
    """
    Get the unicode string without any other content transformation,
    and clean extra spaces.
python
{ "resource": "" }
q278599
CopyDoc._parse_href
test
def _parse_href(self, href):
    """
    Extract the "real" URL from a Google-redirected URL by getting
    the `q` querystring parameter.
python
{ "resource": "" }