_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict)
---|---|---|---|---|---
q278500
|
ANY_mentions
|
test
|
def ANY_mentions(target_mentions, chain_mentions):
'''
For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True if any of the target_mention
strings appeared as substrings of any cleansed Token.token.
Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
'''
for name in target_mentions:
for chain_ment in chain_mentions:
if name in chain_ment:
return True
return False
|
python
|
{
"resource": ""
}
|
q278501
|
look_ahead_match
|
test
|
def look_ahead_match(rating, tokens):
'''iterate through all tokens looking for matches of cleansed tokens
or token regexes, skipping tokens left empty by cleansing and
coping with Token objects that produce multiple space-separated
strings when cleansed. Yields tokens that match.
'''
## this ensures that all cleansed tokens are non-zero length
all_mregexes = []
for m in rating.mentions:
mregexes = []
mpatterns = m.decode('utf8').split(' ')
for mpat in mpatterns:
            if mpat.startswith('ur"^') and mpat.endswith('$"'):  ## this mention is already a regex literal
## chop out the meat of the regex so we can reconstitute it below
mpat = mpat[4:-2]
else:
mpat = cleanse(mpat)
if mpat:
## make a unicode raw string
## https://docs.python.org/2/reference/lexical_analysis.html#string-literals
mpat = ur'^%s$' % mpat
logger.debug('look_ahead_match compiling regex: %s', mpat)
mregexes.append(re.compile(mpat, re.UNICODE | re.IGNORECASE))
if not mregexes:
logger.warn('got empty cleansed mention: %r\nrating=%r' % (m, rating))
all_mregexes.append(mregexes)
## now that we have all_mregexes, go through all the tokens
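    ## each entry in tokens is a (cleansed_strings, Token) pair, where
    ## cleansed_strings is the space-split cleansing of Token.token, so
    ## tokens[i][0][0] is the first cleansed string of the i-th Token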
for i in range(len(tokens)):
for mregexes in all_mregexes:
if mregexes[0].match(tokens[i][0][0]):
## found the start of a possible match, so iterate
## through the tuples of cleansed strings for each
## Token while stepping through the cleansed strings
## for this mention.
m_j = 1
i_j = 0
last_token_matched = 0
matched = True
while m_j < len(mregexes):
i_j += 1
if i_j == len(tokens[i + last_token_matched][0]):
i_j = 0
last_token_matched += 1
if i + last_token_matched == len(tokens):
matched = False
break
target_token = tokens[i + last_token_matched][0][i_j]
## this next line is the actual string comparison
if mregexes[m_j].match(target_token):
m_j += 1
elif target_token == '':
continue
else:
matched = False
break
if matched:
## yield each matched token only once
toks = set()
for j in xrange(last_token_matched + 1):
toks.add(tokens[i + j][1])
for tok in toks:
yield tok
|
python
|
{
"resource": ""
}
|
q278502
|
multi_token_match
|
test
|
def multi_token_match(stream_item, aligner_data):
'''
iterate through tokens looking for near-exact matches to strings
in si.ratings...mentions
'''
tagger_id = _get_tagger_id(stream_item, aligner_data)
sentences = stream_item.body.sentences.get(tagger_id)
if not sentences:
return
## construct a list of tuples, where the first part of each tuple
## is a tuple of cleansed strings, and the second part is the
## Token object from which it came.
tokens = map(lambda tok: (cleanse(tok.token.decode('utf8')).split(' '), tok),
itertools.chain(*[sent.tokens for sent in sentences]))
required_annotator_id = aligner_data['annotator_id']
for annotator_id, ratings in stream_item.ratings.items():
if (required_annotator_id is None) or (annotator_id == required_annotator_id):
for rating in ratings:
label = Label(annotator=rating.annotator,
target=rating.target)
num_tokens_matched = 0
for tok in look_ahead_match(rating, tokens):
if aligner_data.get('update_labels'):
tok.labels.pop(annotator_id, None)
add_annotation(tok, label)
num_tokens_matched += 1
if num_tokens_matched == 0:
logger.warning('multi_token_match didn\'t actually match '
'entity %r in stream_id %r',
rating.target.target_id,
stream_item.stream_id)
else:
logger.debug('matched %d tokens for %r in %r',
num_tokens_matched, rating.target.target_id,
stream_item.stream_id)
|
python
|
{
"resource": ""
}
|
q278503
|
TaggerBatchTransform.make_ner_file
|
test
|
def make_ner_file(self, clean_visible_path, ner_xml_path):
    '''run the tagger as a child process to get XML output'''
if self.template is None:
raise exceptions.NotImplementedError('''
Subclasses must specify a class property "template" that provides
command string format for running a tagger. It should take
%(tagger_root_path)s as the path from the config file,
%(clean_visible_path)s as the input XML file, and %(ner_xml_path)s as
the output path to create.
''')
tagger_config = dict(
tagger_root_path=self.config['tagger_root_path'],
clean_visible_path=clean_visible_path,
ner_xml_path=ner_xml_path)
## get a java_heap_size or default to 1GB
tagger_config['java_heap_size'] = self.config.get('java_heap_size', '')
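    ## a (hypothetical) template might look like:
    ## 'java -Xmx%(java_heap_size)s -jar %(tagger_root_path)s/tagger.jar '
    ## '%(clean_visible_path)s %(ner_xml_path)s'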
cmd = self.template % tagger_config
start_time = time.time()
## make sure we are using as little memory as possible
gc.collect()
try:
self._child = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True)
except OSError, exc:
msg = traceback.format_exc(exc)
msg += make_memory_info_msg(clean_visible_path, ner_xml_path)
raise PipelineOutOfMemory(msg)
s_out, errors = self._child.communicate()
if not self._child.returncode == 0:
if 'java.lang.OutOfMemoryError' in errors:
msg = errors + make_memory_info_msg(clean_visible_path, ner_xml_path)
raise PipelineOutOfMemory(msg)
elif self._child.returncode == 137:
msg = 'tagger returncode = 137\n' + errors
msg += make_memory_info_msg(clean_visible_path, ner_xml_path)
# maybe get a tail of /var/log/messages
raise PipelineOutOfMemory(msg)
elif 'Exception' in errors:
raise PipelineBaseException(errors)
else:
raise PipelineBaseException('tagger exited with %r' % self._child.returncode)
elapsed = time.time() - start_time
logger.info('finished tagging in %.1f seconds' % elapsed)
return elapsed
|
python
|
{
"resource": ""
}
|
q278504
|
TaggerBatchTransform.align_chunk_with_ner
|
test
|
def align_chunk_with_ner(self, ner_xml_path, i_chunk, o_chunk):
''' iterate through ner_xml_path to fuse with i_chunk into o_chunk '''
## prepare to iterate over the input chunk
input_iter = i_chunk.__iter__()
all_ner = xml.dom.minidom.parse(open(ner_xml_path))
## this converts our UTF-8 data into unicode strings, so when
## we want to compute byte offsets or construct tokens, we
## must .encode('utf8')
for ner_dom in all_ner.getElementsByTagName('FILENAME'):
#for stream_id, raw_ner in files(open(ner_xml_path).read().decode('utf8')):
stream_item = input_iter.next()
## get stream_id out of the XML
stream_id = ner_dom.attributes.get('stream_id').value
if stream_item.stream_id is None:
assert not stream_id, 'out of sync: None != %r' % stream_id
logger.critical('si.stream_id is None... ignoring')
continue
assert stream_id and stream_id == stream_item.stream_id, \
'%s != %s' % (stream_id, stream_item.stream_id)
if not stream_item.body:
## the XML better have had an empty clean_visible too...
#assert not ner_dom....something
continue
tagging = Tagging()
tagging.tagger_id = self.tagger_id # pylint: disable=E1101
'''
## get this one file out of its FILENAME tags
tagged_doc_parts = list(files(ner_dom.toxml()))
if not tagged_doc_parts:
continue
tagged_doc = tagged_doc_parts[0][1]
## hack
hope_original = make_clean_visible(tagged_doc, '')
open(ner_xml_path + '-clean', 'wb').write(hope_original.encode('utf-8'))
print ner_xml_path + '-clean'
'''
#tagging.raw_tagging = tagged_doc
tagging.generation_time = streamcorpus.make_stream_time()
stream_item.body.taggings[self.tagger_id] = tagging # pylint: disable=E1101
## could consume lots of memory here by instantiating everything
sentences, relations, attributes = self.get_sentences(ner_dom)
stream_item.body.sentences[self.tagger_id] = sentences # pylint: disable=E1101
stream_item.body.relations[self.tagger_id] = relations # pylint: disable=E1101
stream_item.body.attributes[self.tagger_id] = attributes # pylint: disable=E1101
logger.debug('finished aligning tokens %s' % stream_item.stream_id)
'''
for num, sent in enumerate(sentences):
for tok in sent.tokens:
print '%d\t%d\t%s' % (num, tok.offsets[OffsetType.LINES].first, repr(tok.token))
'''
if 'align_labels_by' in self.config and self.config['align_labels_by']:
assert 'aligner_data' in self.config, 'config missing "aligner_data"'
aligner = AlignmentStrategies[ self.config['align_labels_by'] ]
aligner( stream_item, self.config['aligner_data'] )
## forcibly collect dereferenced objects
gc.collect()
try:
o_chunk.add(stream_item)
except MemoryError, exc:
msg = traceback.format_exc(exc)
msg += make_memory_info_msg()
logger.critical(msg)
raise PipelineOutOfMemory(msg)
## all done, so close the o_chunk
try:
o_chunk.close()
logger.info('finished chunk for %r' % ner_xml_path)
except MemoryError, exc:
msg = traceback.format_exc(exc)
msg += make_memory_info_msg()
logger.critical(msg)
raise PipelineOutOfMemory(msg)
|
python
|
{
"resource": ""
}
|
q278505
|
TaggerBatchTransform.shutdown
|
test
|
def shutdown(self):
'''
send SIGTERM to the tagger child process
'''
if self._child:
try:
self._child.terminate()
except OSError, exc:
if exc.errno == 3:
## child is already gone, possibly because it ran
## out of memory and caused us to shutdown
pass
|
python
|
{
"resource": ""
}
|
q278506
|
mult
|
test
|
def mult(p, n):
"""Returns a Pattern that matches exactly n repetitions of Pattern p.
"""
np = P()
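    ## exponentiation by squaring over Pattern concatenation: whenever the
    ## low bit of n is set, append the current power of p to the accumulator
    ## np, then double p and halve n, so only O(log n) additions are needed.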
while n >= 1:
if n % 2:
np = np + p
p = p + p
n = n // 2
return np
|
python
|
{
"resource": ""
}
|
q278507
|
fix_emails
|
test
|
def fix_emails(text):
'''Replace all angle bracket emails with a unique key.'''
emails = bracket_emails.findall(text)
keys = []
for email in emails:
_email = email.replace("<","<").replace(">",">")
text = text.replace(email, _email)
return text
|
python
|
{
"resource": ""
}
|
q278508
|
nltk_tokenizer._sentences
|
test
|
def _sentences(self, clean_visible):
'generate strings identified as sentences'
previous_end = 0
clean_visible = clean_visible.decode('utf8')
for start, end in self.sentence_tokenizer.span_tokenize(clean_visible):
# no need to check start, because the first byte of text
# is always first byte of first sentence, and we will
# have already made the previous sentence longer on the
# end if there was an overlap.
if start < previous_end:
start = previous_end
if start > end:
# skip this sentence... because it was eaten by
# an earlier sentence with a label
continue
try:
label = self.label_index.find_le(end)
except ValueError:
label = None
if label:
## avoid splitting a label
off = label.offsets[OffsetType.CHARS]
end = max(off.first + off.length, end)
previous_end = end
sent_str = clean_visible[start:end]
yield start, end, sent_str
|
python
|
{
"resource": ""
}
|
q278509
|
nltk_tokenizer.make_label_index
|
test
|
def make_label_index(self, stream_item):
'make a sortedcollection on body.labels'
labels = stream_item.body.labels.get(self.annotator_id)
if not labels:
labels = []
self.label_index = SortedCollection(
[l for l in labels if OffsetType.CHARS in l.offsets],
key=lambda label: label.offsets[OffsetType.CHARS].first)
|
python
|
{
"resource": ""
}
|
q278510
|
nltk_tokenizer.make_sentences
|
test
|
def make_sentences(self, stream_item):
'assemble Sentence and Token objects'
self.make_label_index(stream_item)
sentences = []
token_num = 0
new_mention_id = 0
for sent_start, sent_end, sent_str in self._sentences(
stream_item.body.clean_visible):
assert isinstance(sent_str, unicode)
sent = Sentence()
sentence_pos = 0
for start, end in self.word_tokenizer.span_tokenize(sent_str):
token_str = sent_str[start:end].encode('utf8')
tok = Token(
token_num=token_num,
token=token_str,
sentence_pos=sentence_pos,
)
tok.offsets[OffsetType.CHARS] = Offset(
type=OffsetType.CHARS,
first=sent_start + start,
length=end - start,
)
# whitespace tokenizer will never get a token
# boundary in the middle of an 'author' label
try:
label = self.label_index.find_le(sent_start + start)
except ValueError:
label = None
if label:
off = label.offsets[OffsetType.CHARS]
if off.first + off.length > sent_start + start:
streamcorpus.add_annotation(tok, label)
logger.debug('adding label to tok: %r has %r',
tok.token, label.target.target_id)
if label in self.label_to_mention_id:
mention_id = self.label_to_mention_id[label]
else:
mention_id = new_mention_id
new_mention_id += 1
self.label_to_mention_id[label] = mention_id
tok.mention_id = mention_id
token_num += 1
sentence_pos += 1
sent.tokens.append(tok)
sentences.append(sent)
return sentences
|
python
|
{
"resource": ""
}
|
q278511
|
html_entities_to_unicode
|
test
|
def html_entities_to_unicode(text, space_padding=False, safe_only=False):
'''
Convert any HTML, XML, or numeric entities in the attribute values.
    For example '&amp;' becomes '&'.
    This is adapted from BeautifulSoup, which should be able to do the
    same thing when called like this --- but it fails to convert
    everything due to a bug.
text = unicode(BeautifulStoneSoup(text, convertEntities=BeautifulStoneSoup.XML_ENTITIES))
'''
def convert_entities(match):
'''
comes from BeautifulSoup.Tag._convertEntities
'''
x = match.group(1)
if safe_only and x not in ENTITIES_THAT_ARE_SAFE_TO_STRING_PAD:
return u'&%s;' % x
if x in name2codepoint:
## handles most cases
return unichr(name2codepoint[x])
elif x in XML_ENTITIES_TO_SPECIAL_CHARS:
return XML_ENTITIES_TO_SPECIAL_CHARS[x]
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
else:
            ## uh oh, failed to convert anything
return u'&%s;' % x
    def convert_to_padded_entities(match):
converted_string = convert_entities(match)
num_spaces_needed = len(match.group(0)) - len(converted_string)
assert num_spaces_needed >= 0, \
'len(%r) !<= len(%r)' % (converted_string, match.group(0))
## Where to put the spaces? Before, after, symmetric?
# Let's do symmetric.
## cast to int in prep for python3
num_left = int(num_spaces_needed / 2)
num_right = num_spaces_needed - num_left
return (' ' * num_left) + converted_string + (' ' * num_right)
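    ## e.g. (hypothetical, assuming the module-level `tags` regex matches
    ## '&amp;'): with space_padding=True, the 5-character '&amp;' becomes
    ## '  &  ' (two spaces, '&', two spaces), so character offsets into the
    ## original text stay valid.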
## brute force regex through all the characters...
if space_padding:
        return tags.sub(
            convert_to_padded_entities,
            text)
else:
return tags.sub(
convert_entities,
text)
|
python
|
{
"resource": ""
}
|
q278512
|
make_cleansed_file
|
test
|
def make_cleansed_file(i_chunk, tmp_cleansed_path):
'''make a temp file of cleansed text'''
tmp_cleansed = open(tmp_cleansed_path, 'wb')
for idx, si in enumerate(i_chunk):
tmp_cleansed.write('<FILENAME docid="%s">\n' % si.stream_id)
tmp_cleansed.write(si.body.cleansed)
## how to deal with other_content?
tmp_cleansed.write('</FILENAME>\n')
tmp_cleansed.close()
## replace this with log.info()
print 'created %s' % tmp_cleansed_path
|
python
|
{
"resource": ""
}
|
q278513
|
make_ner_file
|
test
|
def make_ner_file(tagger_id, tmp_cleansed_path, tmp_ner_path, pipeline_root):
'''run child process to get OWPL output'''
params = dict(INPUT_FILE=tmp_cleansed_path,
#RAW_OUTPUT_FILE=tmp_ner_raw_path,
OUTPUT_FILE=tmp_ner_path,
PIPELINE_ROOT=pipeline_root)
pipeline_cmd = pipeline_cmd_templates[tagger_id] % params
print pipeline_cmd
## replace this with log.info()
print 'creating %s' % tmp_ner_path
start_time = time.time()
gpg_child = subprocess.Popen(
pipeline_cmd,
stderr=subprocess.PIPE, shell=True)
s_out, errors = gpg_child.communicate()
assert gpg_child.returncode == 0 and 'Exception' not in errors, errors
elapsed = time.time() - start_time
## replace this with log.info()
print 'created %s in %.1f sec' % (tmp_ner_path, elapsed)
'''
postproc_cmd = postproc_cmd_templates[tagger_id] % params
print postproc_cmd
## replace this with log.info()
print 'creating %s' % tmp_ner_raw_path
start_time = time.time()
gpg_child = subprocess.Popen(
postproc_cmd,
stderr=subprocess.PIPE, shell=True)
s_out, errors = gpg_child.communicate()
assert gpg_child.returncode == 0 and 'Exception' not in errors, errors
elapsed = time.time() - start_time
## replace this with log.info()
print 'created %s in %.1f sec' % (tmp_ner_path, elapsed)
'''
|
python
|
{
"resource": ""
}
|
q278514
|
cleanse
|
test
|
def cleanse(span):
'''Convert a string of text into a lowercase string with no
punctuation and only spaces for whitespace.
:param span: string
'''
try:
## attempt to force it to utf8, which might fail
span = span.encode('utf8', 'ignore')
except:
pass
## lowercase, strip punctuation, and shrink all whitespace
span = span.lower()
span = span.translate(strip_punctuation)
span = whitespace.sub(' ', span)
## trim any leading or trailing whitespace
return span.strip()
|
python
|
{
"resource": ""
}
|
q278515
|
align_chunk_with_ner
|
test
|
def align_chunk_with_ner(tmp_ner_path, i_chunk, tmp_done_path):
'''
iterate through the i_chunk and tmp_ner_path to generate a new
Chunk with body.ner
'''
o_chunk = Chunk()
input_iter = i_chunk.__iter__()
ner = ''
stream_id = None
all_ner = xml.dom.minidom.parse(open(tmp_ner_path))
for raw_ner in all_ner.getElementsByTagName('FILENAME'):
stream_item = input_iter.next()
## get stream_id out of the XML
stream_id = raw_ner.attributes.get('docid').value
assert stream_id and stream_id == stream_item.stream_id, \
'%s != %s\nner=%r' % (stream_id, stream_item.stream_id, ner)
tagger_id = 'lingpipe'
tagging = Tagging()
tagging.tagger_id = tagger_id
## get this one file out of its FILENAME tags
tagged_doc = list(lingpipe.files(raw_ner.toxml()))[0][1]
tagging.raw_tagging = tagged_doc
tagging.generation_time = streamcorpus.make_stream_time()
stream_item.body.taggings[tagger_id] = tagging
sentences = list(lingpipe.sentences(tagged_doc))
## make JS labels on individual tokens
assert stream_item.ratings[0].mentions, stream_item.stream_id
john_smith_label = Label()
john_smith_label.annotator = stream_item.ratings[0].annotator
john_smith_label.target_id = stream_item.ratings[0].target_id
# first map all corefchains to their words
equiv_ids = collections.defaultdict(lambda: set())
for sent in sentences:
for tok in sent.tokens:
if tok.entity_type is not None:
equiv_ids[tok.equiv_id].add(cleanse(tok.token))
## find all the chains that are John Smith
johnsmiths = set()
for equiv_id, names in equiv_ids.items():
## detect 'smith' in 'smithye'
_names = cleanse(' '.join(names))
if 'john' in _names and 'smith' in _names:
johnsmiths.add(equiv_id)
print len(johnsmiths)
## now apply the label
for sent in sentences:
for tok in sent.tokens:
if tok.equiv_id in johnsmiths:
tok.labels = [john_smith_label]
stream_item.body.sentences[tagger_id] = sentences
o_chunk.add(stream_item)
## put the o_chunk bytes into the specified file
open(tmp_done_path, 'wb').write(str(o_chunk))
## replace this with log.info()
print 'created %s' % tmp_done_path
|
python
|
{
"resource": ""
}
|
q278516
|
make_absolute_paths
|
test
|
def make_absolute_paths(config):
'''given a config dict with streamcorpus_pipeline as a key, find all
keys under streamcorpus_pipeline that end with "_path" and if the
value of that key is a relative path, convert it to an absolute
path using the value provided by root_path
'''
if not 'streamcorpus_pipeline' in config:
logger.critical('bad config: %r', config)
raise ConfigurationError('missing "streamcorpus_pipeline" from config')
## remove the root_path, so it does not get extended itself
root_path = config['streamcorpus_pipeline'].pop('root_path', None)
if not root_path:
root_path = os.getcwd()
if not root_path.startswith('/'):
root_path = os.path.join( os.getcwd(), root_path )
def recursive_abs_path( sub_config, root_path ):
for key, val in sub_config.items():
if isinstance(val, basestring):
if key.endswith('path'):
## ignore URLs in *_path parameters
if re.match('^http.?://', val): continue
## we have a path... is it already absolute?
if not val.startswith('/'):
## make the path absolute
sub_config[key] = os.path.join(root_path, val)
elif isinstance(val, dict):
recursive_abs_path( val, root_path )
recursive_abs_path( config, root_path )
## put the root_path back
config['root_path'] = root_path
|
python
|
{
"resource": ""
}
|
q278517
|
instantiate_config
|
test
|
def instantiate_config(config):
'''setup the config and load external modules
This updates 'config' as follows:
* All paths are replaced with absolute paths
* A hash and JSON dump of the config are stored in the config
* If 'pythonpath' is in the config, it is added to sys.path
* If 'setup_modules' is in the config, all modules named in it are loaded
'''
make_absolute_paths(config)
pipeline_config = config['streamcorpus_pipeline']
pipeline_config['config_hash'] = make_hash(config)
pipeline_config['config_json'] = json.dumps(config)
logger.debug('running config: {0} = {1!r}'
.format(pipeline_config['config_hash'], config))
## Load modules
# This is a method of using settings in yaml configs to load plugins.
die = False
for pathstr in pipeline_config.get('pythonpath', {}).itervalues():
if pathstr not in sys.path:
sys.path.append(pathstr)
for modname in pipeline_config.get('setup_modules', {}).itervalues():
try:
m = importlib.import_module(modname)
if not m:
logger.critical('could not load module %r', modname)
die = True
continue
if hasattr(m, 'setup'):
m.setup()
logger.debug('loaded and setup %r', modname)
else:
logger.debug('loaded %r', modname)
except Exception:
logger.critical('error loading and initting module %r', modname, exc_info=True)
die = True
if die:
sys.exit(1)
|
python
|
{
"resource": ""
}
|
q278518
|
generate_john_smith_chunk
|
test
|
def generate_john_smith_chunk(path_to_original):
'''
This _looks_ like a Chunk only in that it generates StreamItem
instances when iterated upon.
'''
## Every StreamItem has a stream_time property. It usually comes
## from the document creation time. Here, we assume the JS corpus
## was created at one moment at the end of 1998:
creation_time = '1998-12-31T23:59:59.999999Z'
correct_time = 915148799
if not os.path.isabs(path_to_original):
path_to_original = os.path.join(os.getcwd(), path_to_original)
## iterate over the files in the 35 input directories
for label_id in range(35):
dir_path = os.path.join(path_to_original, str(label_id))
fnames = os.listdir(dir_path)
fnames.sort()
for fname in fnames:
stream_item = streamcorpus.make_stream_item(
creation_time,
## make up an abs_url
os.path.join(
'john-smith-corpus', str(label_id), fname))
if int(stream_item.stream_time.epoch_ticks) != correct_time:
raise PipelineBaseException('wrong stream_time construction: %r-->%r != %r'\
% (creation_time, stream_item.stream_time.epoch_ticks,
correct_time))
## These docs came from the authors of the paper cited above.
stream_item.source = 'bagga-and-baldwin'
## build a ContentItem for the body
body = streamcorpus.ContentItem()
raw_string = open(os.path.join(dir_path, fname)).read()
## We know that this is already clean and has nothing
## tricky in it, because we manually cleansed it. To
## illustrate how we stick all strings into thrift, we
## convert this to unicode (which introduces no changes)
## and then encode it as utf-8, which also introduces no
## changes. Thrift stores strings as 8-bit character
## strings.
# http://www.mail-archive.com/[email protected]/msg00210.html
body.clean_visible = unicode(raw_string).encode('utf8')
## attach the content_item to the stream_item
stream_item.body = body
stream_item.body.language = streamcorpus.Language(code='en', name='ENGLISH')
## The authors also annotated the corpus
anno = streamcorpus.Annotator()
anno.annotator_id = 'bagga-and-baldwin'
anno.annotation_time = stream_item.stream_time
## build a Label for the doc-level label:
rating = streamcorpus.Rating()
rating.annotator = anno
rating.target = streamcorpus.Target(target_id = str(label_id)) # must be string
rating.contains_mention = True
rating.mentions = ['john', 'smith']
## put this one label in the array of labels
streamcorpus.add_annotation(stream_item, rating)
## provide this stream_item to the pipeline
yield stream_item
|
python
|
{
"resource": ""
}
|
q278519
|
re_based_make_clean_visible
|
test
|
def re_based_make_clean_visible(html):
'''
Takes an HTML-like binary string as input and returns a binary
string of the same length with all tags replaced by whitespace.
This also detects script and style tags, and replaces the text
between them with whitespace.
Pre-existing whitespace of any kind (newlines, tabs) is converted
to single spaces ' ', which has the same byte length (and
character length).
    Note: this does not change any special characters or HTML entities (such as '&rsquo;'),
so taggers operating on this text must cope with such symbols.
Converting them to some other character would change their byte
length, even if equivalent from a character perspective.
    This is regex based, which can occasionally just hang...
'''
text = ''
# Fix emails
html = fix_emails(html)
for m in invisible.finditer(html):
text += m.group('before')
text += ' ' * len(m.group('invisible'))
    # the original must be at least as long as the text accumulated so far
    assert len(html) >= len(text), '%d !>= %d' % (len(html), len(text))
    # capture any characters after the last tag... such as newlines
    tail = len(html) - len(text)
    if tail:
        # guard against tail == 0, where html[-0:] would re-append the whole string
        text += html[-tail:]
# now they must be equal
assert len(html) == len(text), '%d != %d' % (len(html), len(text))
return text
|
python
|
{
"resource": ""
}
|
q278520
|
make_clean_visible
|
test
|
def make_clean_visible(_html, tag_replacement_char=' '):
'''
Takes an HTML-like Unicode string as input and returns a UTF-8
encoded string with all tags replaced by whitespace. In particular,
    all Unicode characters inside HTML tags are each replaced with a single
    whitespace character.
    This does not detect comments, style, script, link. It also does
    not do anything with HTML-escaped characters. All of these are
    handled by the clean_html pre-cursor step.
Pre-existing whitespace of any kind (newlines, tabs) is converted
to single spaces ' ', which has the same byte length (and
character length).
This is a simple state machine iterator without regexes
'''
def non_tag_chars(html):
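        ## two alternating scan phases: outside a tag, yield the text verbatim
        ## up to the next '<'; inside a tag, yield one space per character
        ## (keeping newlines as newlines) until the closing '>', so every
        ## offset in the output lines up with the input.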
n = 0
while n < len(html):
angle = html.find('<', n)
if angle == -1:
yield html[n:]
n = len(html)
break
yield html[n:angle]
n = angle
while n < len(html):
nl = html.find('\n', n)
angle = html.find('>', n)
if angle == -1:
yield ' ' * (len(html) - n)
n = len(html)
break
elif nl == -1 or angle < nl:
yield ' ' * (angle + 1 - n)
n = angle + 1
break
else:
yield ' ' * (nl - n) + '\n'
n = nl + 1
# do not break
if not isinstance(_html, unicode):
_html = unicode(_html, 'utf-8')
# Protect emails by substituting with unique key
_html = fix_emails(_html)
#Strip tags with previous logic
non_tag = ''.join(non_tag_chars(_html))
return non_tag.encode('utf-8')
|
python
|
{
"resource": ""
}
|
q278521
|
make_clean_visible_file
|
test
|
def make_clean_visible_file(i_chunk, clean_visible_path):
'''make a temp file of clean_visible text'''
_clean = open(clean_visible_path, 'wb')
_clean.write('<?xml version="1.0" encoding="UTF-8"?>')
_clean.write('<root>')
for idx, si in enumerate(i_chunk):
if si.stream_id is None:
# create the FILENAME element anyway, so the ordering
# remains the same as the i_chunk and can be aligned.
stream_id = ''
else:
stream_id = si.stream_id
doc = lxml.etree.Element("FILENAME", stream_id=stream_id)
if si.body and si.body.clean_visible:
try:
# is UTF-8, and etree wants .text to be unicode
doc.text = si.body.clean_visible.decode('utf8')
except ValueError:
doc.text = drop_invalid_and_upper_utf8_chars(
si.body.clean_visible.decode('utf8'))
except Exception, exc:
# this should never ever fail, because if it does,
# then it means that clean_visible (or more likely
# clean_html) is not what it is supposed to be.
# Therefore, do not take it lightly:
logger.critical(traceback.format_exc(exc))
logger.critical('failed on stream_id=%s to follow:',
si.stream_id)
logger.critical(repr(si.body.clean_visible))
logger.critical('above was stream_id=%s', si.stream_id)
# [I don't know who calls this, but note that this
# will *always* fail if clean_visible isn't valid UTF-8.]
raise
else:
doc.text = ''
_clean.write(lxml.etree.tostring(doc, encoding='UTF-8'))
_clean.write('</root>')
_clean.close()
logger.info(clean_visible_path)
'''
## hack to capture html for inspection
_html = open(clean_visible_path + '-html', 'wb')
for idx, si in enumerate(i_chunk):
_html.write('<FILENAME docid="%s">' % si.stream_id)
if si.body and si.body.clean_html:
_html.write(si.body.clean_html)
_html.write('</FILENAME>\n')
_html.close()
## replace this with log.info()
print clean_visible_path + '-html'
'''
|
python
|
{
"resource": ""
}
|
q278522
|
cleanse
|
test
|
def cleanse(span, lower=True):
'''Convert a unicode string into a lowercase string with no
punctuation and only spaces for whitespace.
Replace PennTreebank escaped brackets with ' ':
    -LRB- -RRB- -LSB- -RSB- -LCB- -RCB-
(The acronyms stand for (Left|Right) (Round|Square|Curly) Bracket.)
http://www.cis.upenn.edu/~treebank/tokenization.html
:param span: string
'''
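    ## hypothetical example (depends on the module-level strip_punctuation
    ## and whitespace patterns): cleanse(u'-LRB- Smith, John! -RRB-')
    ## should yield u'smith john'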
assert isinstance(span, unicode), \
'got non-unicode string %r' % span
# lowercase, strip punctuation, and shrink all whitespace
span = penn_treebank_brackets.sub(' ', span)
if lower:
span = span.lower()
span = span.translate(strip_punctuation)
span = whitespace.sub(' ', span)
# trim any leading or trailing whitespace
return span.strip()
|
python
|
{
"resource": ""
}
|
q278523
|
main
|
test
|
def main():
'''manual test loop for make_clean_visible_from_raw
'''
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('path')
args = parser.parse_args()
html = open(args.path).read()
html = html.decode('utf8')
cursor = 0
for s in non_tag_chars_from_raw(html):
for c in s:
if c != ' ' and c != html[cursor]:
import pdb; pdb.set_trace()
sys.stdout.write(c.encode('utf8'))
sys.stdout.flush()
cursor += 1
|
python
|
{
"resource": ""
}
|
q278524
|
StageRegistry.tryload_stage
|
test
|
def tryload_stage(self, moduleName, functionName, name=None):
'''Try to load a stage into self, ignoring errors.
If loading a module fails because of some subordinate load
failure, just give a warning and move on. On success the
stage is added to the stage dictionary.
:param str moduleName: name of the Python module
:param str functionName: name of the stage constructor
:param str name: name of the stage, defaults to `functionName`
'''
if name is None:
name = functionName
try:
mod = __import__(moduleName, globals(), locals(), [functionName])
except ImportError, exc:
logger.warn('cannot load stage {0}: cannot load module {1}'
.format(name, moduleName), exc_info=exc)
return
if not hasattr(mod, functionName):
logger.warn('cannot load stage {0}: module {1} missing {2}'
.format(name, moduleName, functionName))
return
self[name] = getattr(mod, functionName)
|
python
|
{
"resource": ""
}
|
q278525
|
StageRegistry.load_external_stages
|
test
|
def load_external_stages(self, path):
'''Add external stages from the Python module in `path`.
`path` must be a path to a Python module source that contains
a `Stages` dictionary, which is a map from stage name to callable.
:param str path: path to the module file
'''
mod = imp.load_source('', path)
self.update(mod.Stages)
|
python
|
{
"resource": ""
}
|
q278526
|
StageRegistry.load_module_stages
|
test
|
def load_module_stages(self, mod):
'''Add external stages from the Python module `mod`.
If `mod` is a string, then it will be interpreted as the name
of a module; otherwise it is an actual module object. The
module should exist somewhere in :data:`sys.path`. The module
must contain a `Stages` dictionary, which is a map from stage
name to callable.
:param mod: name of the module or the module itself
:raise exceptions.ImportError: if `mod` cannot be loaded or does
not contain ``Stages``
'''
if isinstance(mod, basestring):
mod = __import__(mod, globals=globals(), locals=locals(),
fromlist=['Stages'], level=0)
if not hasattr(mod, 'Stages'):
raise ImportError(mod)
self.update(mod.Stages)
|
python
|
{
"resource": ""
}
|
q278527
|
StageRegistry.init_stage
|
test
|
def init_stage(self, name, config):
'''Construct and configure a stage from known stages.
`name` must be the name of one of the stages in this. `config`
is the configuration dictionary of the containing object, and its `name`
member will be passed into the stage constructor.
:param str name: name of the stage
:param dict config: parent object configuration
:return: callable stage
:raise exceptions.KeyError: if `name` is not a known stage
'''
subconfig = config.get(name, {})
ctor = self[name]
return ctor(subconfig)
|
python
|
{
"resource": ""
}
|
q278528
|
read_to
|
test
|
def read_to( idx_bytes, stop_bytes=None, run_bytes=None ):
'''
iterates through idx_bytes until a byte in stop_bytes or a byte
not in run_bytes.
    :rtype (int, bytes, byte): index of the last byte read, the bytes
    accumulated before (but not including) the terminal byte, and the
    terminal byte itself (from stop_bytes, not in run_bytes, or None at the end)
'''
idx = None
vals = []
next_b = None
while 1:
try:
idx, next_b = idx_bytes.next()
except StopIteration:
## maybe something going wrong?
idx = None
next_b = None
break
## stop when we see any byte in stop_bytes
if stop_bytes and next_b in stop_bytes:
break
## stop when we see any byte not in run_bytes
if run_bytes and next_b not in run_bytes:
break
## assemble the ret_val
vals.append( next_b )
## return whatever we have assembled
return idx, b''.join(vals), next_b
|
python
|
{
"resource": ""
}
|
q278529
|
hyperlink_labels.href_filter
|
test
|
def href_filter(self, href):
'''
Test whether an href string meets criteria specified by
configuration parameters 'require_abs_url', which means "does
it look like it is probably an absolute URL?" and
'domain_substrings'. It searches for each of the
domain_substrings in the href individually, and if any match,
then returns True.
:param: href string
:returns bool:
'''
if self.config['require_abs_url']:
if not href.lower().startswith(('http://', 'https://')):
return False
if self.config['all_domains']:
## blanket accept all domains as labels
return True
if self.config['domain_substrings']:
parts = href.split('/')
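        ## for an absolute URL 'scheme://host/path', split('/') gives
        ## ['scheme:', '', 'host', 'path', ...], so parts[2] is the host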
if len(parts) < 3:
return False
domain = parts[2].lower()
for substring in self.config['domain_substrings']:
try:
if substring in domain:
return True
except Exception, exc:
logger.warn('%r in %r raised', substring, domain, exc_info=True)
return False
|
python
|
{
"resource": ""
}
|
q278530
|
hyperlink_labels.make_labels
|
test
|
def make_labels(self, clean_html, clean_visible=None):
'''
Make a list of Labels for 'author' and the filtered hrefs &
anchors
'''
if self.offset_type == OffsetType.BYTES:
parser = self.byte_href_anchors
elif self.offset_type == OffsetType.CHARS:
parser = self.char_href_anchors
elif self.offset_type == OffsetType.LINES:
parser = self.line_href_anchors
labels = []
    ## make clean_html accessible as an instance attribute so the
    ## parser methods can read it
    self.clean_html = clean_html
for href, first, length, value in parser():
if self.href_filter(href):
'''
if clean_visible:
_check_html = self.clean_html.splitlines()[first-10:10+first+length]
_check_visi = clean_visible.splitlines()[first:first+length]
if not make_clean_visible(_check_html) == _check_visi:
print len(self.clean_html.splitlines())
print len(clean_visible.splitlines())
print href
print '\t html: %r' % _check_html
print '\t visi: %r' % _check_visi
'''
## add a label for every href
label = Label(
annotator = Annotator(annotator_id = 'author'),
target = Target(target_id = href),
)
## the offset type is specified by the config
label.offsets[self.offset_type] = Offset(
first=first, length=length,
value=value,
## the string name of the content field, not the
## content itself :-)
content_form='clean_html')
labels.append(label)
return labels
|
python
|
{
"resource": ""
}
|
q278531
|
paths
|
test
|
def paths(input_dir):
'yield all file paths under input_dir'
for root, dirs, fnames in os.walk(input_dir):
for i_fname in fnames:
i_path = os.path.join(root, i_fname)
yield i_path
|
python
|
{
"resource": ""
}
|
q278532
|
Cassa.tasks
|
test
|
def tasks(self, key_prefix=''):
'''
generate the data objects for every task
'''
for row in self._tasks.get_range():
logger.debug(row)
if not row[0].startswith(key_prefix):
continue
data = json.loads(row[1]['task_data'])
data['task_key'] = row[0]
yield data
|
python
|
{
"resource": ""
}
|
q278533
|
Cassa.get_random_available
|
test
|
def get_random_available(self, max_iter=10000):
'''
get a random key out of the first max_iter rows
'''
c = 1
keeper = None
## note the ConsistencyLevel here. If we do not do this, and
## get all slick with things like column_count=0 and filter
## empty False, then we can get keys that were recently
## deleted... EVEN if the default consistency would seem to
## rule that out!
## note the random start key, so that we do not always hit the
## same place in the key range with all workers
#random_key = hashlib.md5(str(random.random())).hexdigest()
#random_key = '0' * 32
#logger.debug('available.get_range(%r)' % random_key)
## scratch that idea: turns out that using a random start key
## OR using row_count=1 can cause get_range to hang for hours
## why we need ConsistencyLevel.ALL on a single node is not
## clear, but experience indicates it is needed.
## note that putting a finite row_count is problematic in two
## ways:
# 1) if there are more workers than max_iter, some will not
# get tasks
#
# 2) if there are more than max_iter records, then all workers
# have to wade through all of these just to get a task! What
# we really want is a "pick random row" function, and that is
# probably best implemented using CQL3 token function via the
# cql python module instead of pycassa...
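    ## below is reservoir sampling with a reservoir of size one: the c-th
    ## row examined replaces the current keeper with probability 1/c, so
    ## each of the first max_iter rows has an equal chance of being chosen.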
for row in self._available.get_range(row_count=max_iter, read_consistency_level=pycassa.ConsistencyLevel.ALL):
#for row in self._available.get_range(row_count=100):
logger.debug('considering %r' % (row,))
        ## use float division: in Python 2, integer 1 / c would be 0 for c > 1
        if random.random() < 1.0 / c:
keeper = row[0]
if c == max_iter:
break
c += 1
return keeper
|
python
|
{
"resource": ""
}
|
q278534
|
LingPipeParser.tokens
|
test
|
def tokens(self, sentence_dom):
'''
Tokenize all the words and preserve NER labels from ENAMEX tags
'''
## keep track of sentence position, which is reset for each
## sentence, and used above in _make_token
self.sent_pos = 0
## keep track of mention_id, so we can distinguish adjacent
## multi-token mentions within the same coref chain
mention_id = 0
while len(sentence_dom.childNodes) > 0:
## shrink the sentence_dom's child nodes. In v0_2_0 this
## was required to cope with HitMaxi16. Now it is just to
## save memory.
node = sentence_dom.childNodes.pop(0)
if node.nodeType == node.TEXT_NODE:
## process portion before an ENAMEX tag
for line in node.data.splitlines(True):
self._input_string = line
for start, end in self.word_tokenizer.span_tokenize(line):
tok = self._make_token(start, end)
if tok:
yield tok
if line.endswith('\n'):
## maintain the index to the current line
self.line_idx += 1
                ## increment index past the 'before' portion
self.byte_idx += len(line.encode('utf-8'))
else:
## process text inside an ENAMEX tag
assert node.nodeName == 'ENAMEX', node.nodeName
chain_id = node.attributes.get('ID').value
entity_type = node.attributes.get('TYPE').value
for node in node.childNodes:
assert node.nodeType == node.TEXT_NODE, node.nodeType
for line in node.data.splitlines(True):
self._input_string = line
for start, end in self.word_tokenizer.span_tokenize(line):
tok = self._make_token(start, end)
if tok:
if entity_type in _PRONOUNS:
tok.mention_type = MentionType.PRO
tok.entity_type = _ENTITY_TYPES[entity_type]
## create an attribute
attr = Attribute(
attribute_type=AttributeType.PER_GENDER,
value=str(_PRONOUNS[entity_type])
)
self.attributes.append(attr)
else:
## regular entity_type
tok.mention_type = MentionType.NAME
tok.entity_type = _ENTITY_TYPES[entity_type]
tok.equiv_id = int(chain_id)
tok.mention_id = mention_id
yield tok
if line.endswith('\n'):
## maintain the index to the current line
self.line_idx += 1
                    ## increment index past this line of the ENAMEX text
self.byte_idx += len(line.encode('utf-8'))
## increment mention_id within this sentence
mention_id += 1
|
python
|
{
"resource": ""
}
|
q278535
|
lingpipe.get_sentences
|
test
|
def get_sentences(self, ner_dom):
'''parse the sentences and tokens out of the XML'''
lp_parser = LingPipeParser(self.config)
lp_parser.set(ner_dom)
sentences = list( lp_parser.sentences() )
return sentences, lp_parser.relations, lp_parser.attributes
|
python
|
{
"resource": ""
}
|
q278536
|
_retry
|
test
|
def _retry(func):
'''
Decorator for methods that need many retries, because of
intermittent failures, such as AWS calls via boto, which has a
non-back-off retry.
'''
def retry_func(self, *args, **kwargs):
tries = 1
while True:
# If a handler allows execution to continue, then
# fall through and do a back-off retry.
try:
return func(self, *args, **kwargs)
break
except OSError as exc:
## OSError: [Errno 24] Too many open files
logger.error('assuming OSError unrecoverable')
raise
except FailedExtraction as exc:
## pass through exc to caller
logger.error('FAIL(%d)', tries, exc_info=True)
raise
except FailedVerification as exc:
logger.warn('FAIL(%d)', tries, exc_info=True)
if tries >= self.config['tries']:
if self.config.get('suppress_failures'):
logger.warn('suppressing failure and breaking out of this loop; data may be corrupt, downstream will have to cope')
break
else:
raise
except Exception as exc:
logger.warn('FAIL(%d): having I/O trouble with S3', tries, exc_info=True)
if tries >= self.config['tries']:
raise
logger.warn('RETRYING (%d left)', self.config['tries'] - tries)
time.sleep(3 * tries)
tries += 1
return retry_func
|
python
|
{
"resource": ""
}
|
q278537
|
verify_md5
|
test
|
def verify_md5(md5_expected, data, other_errors=None):
"return True if okay, raise Exception if not" # O_o ?
md5_recv = hashlib.md5(data).hexdigest()
if md5_expected != md5_recv:
if other_errors is not None:
logger.critical('\n'.join(other_errors))
raise FailedVerification('original md5 = %r != %r = received md5' \
% (md5_expected, md5_recv))
return True
|
python
|
{
"resource": ""
}
|
q278538
|
get_bucket
|
test
|
def get_bucket(config, bucket_name=None):
'''This function is mostly about managing configuration, and then
finally returns a boto.Bucket object.
AWS credentials come first from config keys
aws_access_key_id_path, aws_secret_access_key_path (paths to one
line files); secondly from environment variables
AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY; also from $HOME/.aws/credentials
or the magic Amazon http://169.254.169.254/ service. If credentials
are not set in the config then behavior is the same as other AWS-based
command-line tools.
'''
if not bucket_name:
if 'bucket' not in config:
raise ConfigurationError(
'The "bucket" parameter is required for the s3 stages.')
bucket_name = config['bucket']
# get AWS credentials. first, from config; else, from env vars.
# (boto will read environment variables and other normal places.)
aws_access_key_id_path = config.get('aws_access_key_id_path')
aws_secret_access_key_path = config.get('aws_secret_access_key_path')
params = ()
if aws_access_key_id_path and aws_secret_access_key_path:
try:
access = open(aws_access_key_id_path).read().strip()
secret = open(aws_secret_access_key_path).read().strip()
params = (access, secret)
except:
logger.error('failed reading aws credentials from configured file', exc_info=True)
raise
conn = S3Connection(*params)
bucket = conn.get_bucket(bucket_name)
return bucket
|
python
|
{
"resource": ""
}
|
q278539
|
from_s3_chunks._decode
|
test
|
def _decode(self, data):
'''
Given the raw data from s3, return a generator for the items
contained in that data. A generator is necessary to support
chunk files, but non-chunk files can be provided by a generator
that yields exactly one item.
Decoding works by case analysis on the config option
``input_format``. If an invalid ``input_format`` is given, then
a ``ConfigurationError`` is raised.
'''
informat = self.config['input_format'].lower()
if informat == 'spinn3r':
return _generate_stream_items(data)
elif informat == 'streamitem':
ver = self.config['streamcorpus_version']
if ver not in _message_versions:
raise ConfigurationError(
'Not a valid streamcorpus version: %s '
'(choose from: %s)'
% (ver, ', '.join(_message_versions.keys())))
message = _message_versions[ver]
return streamcorpus.Chunk(data=data, message=message)
elif informat == 'featurecollection' and FCChunk is not None:
return FCChunk(data=data)
else:
raise ConfigurationError(
'from_s3_chunks unknown input_format = %r'
% informat)
|
python
|
{
"resource": ""
}
|
q278540
|
from_s3_chunks.get_chunk
|
test
|
def get_chunk(self, bucket_name, key_path):
'''return Chunk object full of records
bucket_name may be None'''
bucket = get_bucket(self.config, bucket_name=bucket_name)
key = bucket.get_key(key_path)
if key is None:
raise FailedExtraction('Key "%s" does not exist.' % key_path)
fh = StringIO()
key.get_contents_to_file(fh)
data = fh.getvalue()
if not data:
raise FailedExtraction('%s: no data (does the key exist?)'
% key.key)
chunk_type, compression, encryption = parse_file_extensions(key_path)
if encryption == 'gpg':
if not self.gpg_decryption_key_path:
raise FailedExtraction('%s ends with ".gpg" but gpg_decryption_key_path=%s'
% (key.key, self.gpg_decryption_key_path))
_errors = []
if compression or encryption:
_errors, data = decrypt_and_uncompress(
data,
self.gpg_decryption_key_path,
tmp_dir=self.config.get('tmp_dir_path'),
compression=compression,
)
if not data:
            msg = ('decrypt_and_uncompress got no data for {0!r}, from {1} bytes'
                   ' downloaded, errors: {2}'
                   .format(key_path, len(data), '\n'.join(_errors)))
logger.error(msg)
raise FailedExtraction(msg)
logger.info( '\n'.join(_errors) )
if not self.config['compare_md5_in_file_name']:
logger.warn('not checking md5 in file name, consider setting '
'from_s3_chunks:compare_md5_in_file_name')
else:
logger.info('Verifying md5 for "%s"...' % key.key)
# The regex hammer.
m = re.search('([a-z0-9]{32})(?:\.|$)', key.key)
if m is None:
raise FailedExtraction(
'Could not extract md5 from key "%s". '
'Perhaps you should disable compare_md5_in_file_name?'
% key.key)
i_content_md5 = m.group(1)
#import pdb; pdb.set_trace()
verify_md5(i_content_md5, data, other_errors=_errors)
return self._decode(data)
|
python
|
{
"resource": ""
}
|
q278541
|
stream_id_to_kvlayer_key
|
test
|
def stream_id_to_kvlayer_key(stream_id):
'''Convert a text stream ID to a kvlayer key.
The return tuple can be used directly as a key in the
:data:`STREAM_ITEMS_TABLE` table.
:param str stream_id: stream ID to convert
:return: :mod:`kvlayer` key tuple
:raise exceptions.KeyError: if `stream_id` is malformed
'''
# Reminder: stream_id is 1234567890-123456789abcdef...0
# where the first part is the (decimal) epoch_ticks and the second
# part is the (hex) doc_id
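    ## hypothetical example: '1234567890-' + ('00' * 16) would come back as
    ## (b'\x00' * 16, 1234567890), since the doc_id hex is base16-decoded
    ## into its raw 16 bytes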
parts = stream_id.split('-')
if len(parts) != 2:
raise KeyError('invalid stream_id ' + stream_id)
epoch_ticks_s = parts[0]
doc_id_s = parts[1]
if not epoch_ticks_s.isdigit():
raise KeyError('invalid stream_id ' + stream_id)
if doc_id_s.lstrip(string.hexdigits) != '':
raise KeyError('invalid stream_id ' + stream_id)
return (base64.b16decode(doc_id_s.upper()), int(epoch_ticks_s))
|
python
|
{
"resource": ""
}
|
q278542
|
kvlayer_key_to_stream_id
|
test
|
def kvlayer_key_to_stream_id(k):
'''Convert a kvlayer key to a text stream ID.
`k` should be of the same form produced by
:func:`stream_id_to_kvlayer_key`.
:param k: :mod:`kvlayer` key tuple
:return: converted stream ID
:returntype str:
'''
abs_url_hash, epoch_ticks = k
return '{0}-{1}'.format(epoch_ticks,
base64.b16encode(abs_url_hash).lower())
|
python
|
{
"resource": ""
}
|
q278543
|
key_for_stream_item
|
test
|
def key_for_stream_item(si):
'''Get a kvlayer key from a stream item.
The return tuple can be used directly as a key in the
:data:`STREAM_ITEMS_TABLE` table. Note that this recalculates the
stream ID, and if the internal data on the stream item is inconsistent
then this could return a different result from
:func:`stream_id_to_kvlayer_key`.
:param si: stream item to get key for
:return: :mod:`kvlayer` key tuple
'''
# get binary 16 byte digest
urlhash = hashlib.md5(si.abs_url).digest()
return (urlhash, int(si.stream_time.epoch_ticks))
|
python
|
{
"resource": ""
}
|
q278544
|
main
|
test
|
def main(argv=sys.argv):
    """Serve up some ponies."""
    args = parse(argv)
hostname = args.listen
port = args.port
print(
"Making all your dreams for a pony come true on http://{0}:{1}.\n"
"Press Ctrl+C to quit.\n".format(hostname, port))
# Hush, werkzeug.
logging.getLogger('werkzeug').setLevel(logging.CRITICAL)
plugin_manager.load_installed_plugins()
app = make_app()
run_simple(hostname, port, app)
|
python
|
{
"resource": ""
}
|
q278545
|
build_parser
|
test
|
def build_parser():
"""Build the parser that will have all available commands and options."""
description = (
'HTTPony (pronounced aych-tee-tee-pony) is a simple HTTP '
'server that pretty prints HTTP requests to a terminal. It '
'is a useful aide for developing clients that send HTTP '
'requests. HTTPony acts as a sink for a client so that a '
'developer can understand what the client is sending.')
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'-l', '--listen', help='set the IP address or hostname',
default='localhost')
parser.add_argument(
'-p', '--port', help='set the port', default=8000, type=int)
return parser
|
python
|
{
"resource": ""
}
|
q278546
|
add_xpaths_to_stream_item
|
test
|
def add_xpaths_to_stream_item(si):
'''Mutably tag tokens with xpath offsets.
Given some stream item, this will tag all tokens from all taggings
in the document that contain character offsets. Note that some
tokens may not have computable xpath offsets, so an xpath offset
for those tokens will not be set. (See the documentation and
comments for ``char_offsets_to_xpaths`` for what it means for a
token to have a computable xpath.)
If a token can have its xpath offset computed, it is added to its
set of offsets with a ``OffsetType.XPATH_CHARS`` key.
'''
def sentences_to_xpaths(sentences):
tokens = sentences_to_char_tokens(sentences)
offsets = char_tokens_to_char_offsets(tokens)
return char_offsets_to_xpaths(html, offsets)
def xprange_to_offset(xprange):
return Offset(type=OffsetType.XPATH_CHARS,
first=xprange.start_offset, length=0,
xpath=xprange.start_xpath,
content_form='clean_html', value=None,
xpath_end=xprange.end_xpath,
xpath_end_offset=xprange.end_offset)
html = unicode(si.body.clean_html, 'utf-8')
for sentences in si.body.sentences.itervalues():
tokens = sentences_to_char_tokens(sentences)
for token, xprange in izip(tokens, sentences_to_xpaths(sentences)):
if xprange is None:
continue
offset = xprange_to_offset(xprange)
token.offsets[OffsetType.XPATH_CHARS] = offset
|
python
|
{
"resource": ""
}
|
q278547
|
sentences_to_char_tokens
|
test
|
def sentences_to_char_tokens(si_sentences):
'''Convert stream item sentences to character ``Offset``s.'''
for sentence in si_sentences:
for token in sentence.tokens:
if OffsetType.CHARS in token.offsets:
yield token
|
python
|
{
"resource": ""
}
|
q278548
|
char_tokens_to_char_offsets
|
test
|
def char_tokens_to_char_offsets(si_tokens):
'''Convert character ``Offset``s to character ranges.'''
for token in si_tokens:
offset = token.offsets[OffsetType.CHARS]
yield offset.first, offset.first + offset.length
|
python
|
{
"resource": ""
}
|
q278549
|
char_offsets_to_xpaths
|
test
|
def char_offsets_to_xpaths(html, char_offsets):
'''Converts HTML and a sequence of char offsets to xpath offsets.
Returns a generator of :class:`streamcorpus.XpathRange` objects
in correspondences with the sequence of ``char_offsets`` given.
Namely, each ``XpathRange`` should address precisely the same text
as that ``char_offsets`` (sans the HTML).
Depending on how ``char_offsets`` was tokenized, it's possible that
some tokens cannot have their xpaths generated reliably. In this
case, a ``None`` value is yielded instead of a ``XpathRange``.
``char_offsets`` must be a sorted and non-overlapping sequence of
character ranges. They do not have to be contiguous.
'''
html = uni(html)
parser = XpathTextCollector()
prev_end = 0
prev_progress = True
for start, end in char_offsets:
if start == end:
# Zero length tokens shall have no quarter!
# Note that this is a special case. If we let zero-length tokens
# be handled normally, then it will be recorded as if the parser
# did not make any progress. But of course, there is no progress
# to be had!
yield None
continue
# If we didn't make any progress on the previous token, then we'll
# need to try and make progress before we can start tracking offsets
# again. Otherwise the parser will report incorrect offset info.
#
# (The parser can fail to make progress when tokens are split at
# weird boundaries, e.g., `&` followed by `;`. The parser won't
# make progress after `&` but will once `;` is given.)
#
# Here, we feed the parser one character at a time between where the
# last token ended and where the next token will start. In most cases,
# this will be enough to nudge the parser along. Once done, we can pick
# up where we left off and start handing out offsets again.
#
# If this still doesn't let us make progress, then we'll have to skip
# this token too.
if not prev_progress:
for i in xrange(prev_end, start):
parser.feed(html[i])
prev_end += 1
if parser.made_progress:
break
if not parser.made_progress:
yield None
continue
# Hand the parser everything from the end of the last token to the
# start of this one. Then ask for the Xpath, which should be at the
# start of `char_offsets`.
if prev_end < start:
parser.feed(html[prev_end:start])
if not parser.made_progress:
parser.feed(html[start:end])
prev_progress = parser.made_progress
prev_end = end
yield None
continue
xstart = parser.xpath_offset()
# print('START', xstart)
# Hand it the actual token and ask for the ending offset.
parser.feed(html[start:end])
xend = parser.xpath_offset()
# print('END', xend)
prev_end = end
# If we couldn't make progress then the xpaths generated are probably
# incorrect. (If the parser doesn't make progress, then we can't rely
# on the callbacks to have been called, which means we may not have
# captured all state correctly.)
#
# Therefore, we simply give up and claim this token is not addressable.
if not parser.made_progress:
prev_progress = False
yield None
else:
prev_progress = True
yield XpathRange(xstart[0], xstart[1], xend[0], xend[1])
parser.feed(html[prev_end:])
parser.close()
|
python
|
{
"resource": ""
}
|
q278550
|
DepthStackEntry.add_element
|
test
|
def add_element(self, tag):
'''Record that `tag` has been seen at this depth.
If `tag` is :class:`TextElement`, it records a text node.
'''
# Collapse adjacent text nodes
if tag is TextElement and self.last_tag is TextElement:
return
self.last_tag = tag
if tag not in self.tags:
self.tags[tag] = 1
else:
self.tags[tag] += 1
|
python
|
{
"resource": ""
}
|
q278551
|
DepthStackEntry.xpath_piece
|
test
|
def xpath_piece(self):
'''Get an XPath fragment for this location.
It is of the form ``tag[n]`` where `tag` is the most recent
element added and n is its position.
'''
if self.last_tag is TextElement:
return 'text()[{count}]'.format(count=self.text_index())
else:
return '{tag}[{count}]'.format(tag=self.last_tag,
count=self.tags[self.last_tag])
|
python
|
{
"resource": ""
}
|
q278552
|
DepthStackEntry.text_index
|
test
|
def text_index(self):
'''Returns the one-based index of the current text node.'''
# This is the number of text nodes we've seen so far.
# If we are currently in a text node, great; if not then add
# one for the text node that's about to begin.
i = self.tags.get(TextElement, 0)
if self.last_tag is not TextElement:
i += 1
return i
|
python
|
{
"resource": ""
}
|
q278553
|
descendants
|
test
|
def descendants(elem):
'''
Yields all the elements descendant of elem in document order
'''
for child in elem.xml_children:
if isinstance(child, element):
yield child
yield from descendants(child)
|
python
|
{
"resource": ""
}
|
q278554
|
select_elements
|
test
|
def select_elements(source):
'''
Yields all the elements from the source
    source - if an element, yields all child elements in order; if given any other iterator, yields the elements from that iterator
'''
if isinstance(source, element):
source = source.xml_children
return filter(lambda x: isinstance(x, element), source)
|
python
|
{
"resource": ""
}
|
q278555
|
select_name
|
test
|
def select_name(source, name):
'''
Yields all the elements with the given name
source - if an element, starts with all child elements in order; can also be any other iterator
name - will yield only elements with this name
'''
return filter(lambda x: x.xml_name == name, select_elements(source))
|
python
|
{
"resource": ""
}
|
q278556
|
select_name_pattern
|
test
|
def select_name_pattern(source, pat):
'''
Yields elements from the source whose name matches the given regular expression pattern
source - if an element, starts with all child elements in order; can also be any other iterator
pat - re.pattern object
'''
return filter(lambda x: pat.match(x.xml_name) is not None, select_elements(source))
|
python
|
{
"resource": ""
}
|
q278557
|
select_attribute
|
test
|
def select_attribute(source, name, val=None):
'''
    Yields elements from the source having the given attribute, optionally with the given attribute value
source - if an element, starts with all child elements in order; can also be any other iterator
name - attribute name to check
val - if None check only for the existence of the attribute, otherwise compare the given value as well
'''
def check(x):
if val is None:
return name in x.xml_attributes
else:
return name in x.xml_attributes and x.xml_attributes[name] == val
return filter(check, select_elements(source))
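
# Usage sketch (assumes the amara3.uxml tree API, as in the make_pretty doctest):
from amara3.uxml import tree

_doc = tree.treebuilder().parse('<a><b id="1">x</b><b id="2">y</b><c>z</c></a>')
_with_id = list(select_attribute(_doc, 'id'))          # both <b> elements
_id_two = list(select_attribute(_doc, 'id', val='2'))  # only the second <b>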
|
python
|
{
"resource": ""
}
|
q278558
|
following_siblings
|
test
|
def following_siblings(elem):
'''
Yields elements and text which have the same parent as elem, but come afterward in document order
'''
it = itertools.dropwhile(lambda x: x != elem, elem.xml_parent.xml_children)
next(it) #Skip the element itself
return it
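
# Usage sketch (assumes the amara3.uxml tree API, as in the make_pretty doctest):
from amara3.uxml import tree

_doc = tree.treebuilder().parse('<a><b>1</b><c>2</c><d>3</d></a>')
_c = _doc.xml_children[1]              # the <c> element
_after = list(following_siblings(_c))  # the <d> element, in document order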
|
python
|
{
"resource": ""
}
|
q278559
|
make_pretty
|
test
|
def make_pretty(elem, depth=0, indent=' '):
'''
    Add whitespace-only text nodes, where possible, to all descendants of an element
    for spacing & indentation, to make the MicroXML as printed easier for people to read.
    Will not modify the value of any text node which is not already entirely whitespace.
    Warning: even though this operation avoids touching text nodes that contain
    non-whitespace content, it still makes changes which alter the text. Not all whitespace
    in XML is ignorable. In XML, cues from the DTD indicate which whitespace can be ignored.
    No such cues are available for MicroXML, so use this function with care. That said,
    in many real world applications of XML and MicroXML, this function causes no problems.
elem - target element whose descendant nodes are to be modified.
returns - the same element, which has been updated in place
>>> from amara3.uxml import tree
>>> from amara3.uxml.treeutil import *
>>> DOC = '<a><b><x>1</x></b><c><x>2</x><d><x>3</x></d></c><x>4</x><y>5</y></a>'
>>> tb = tree.treebuilder()
>>> root = tb.parse(DOC)
>>> len(root.xml_children)
4
>>> make_pretty(root)
<uxml.element (8763373718343) "a" with 9 children>
>>> len(root.xml_children)
9
>>> root.xml_encode()
'<a>\n <b>\n <x>1</x>\n </b>\n <c>\n <x>2</x>\n <d>\n <x>3</x>\n </d>\n </c>\n <x>4</x>\n <y>5</y>\n</a>'
'''
depth += 1
updated_child_list = []
updated_child_ix = 0
for child in elem.xml_children:
if isinstance(child, element):
if updated_child_ix % 2:
updated_child_list.append(child)
updated_child_ix += 1
else:
#It's the turn for text, but we have an element
new_text = text('\n' + indent*depth, elem)
updated_child_list.append(new_text)
updated_child_list.append(child)
updated_child_ix += 2
make_pretty(child, depth)
else:
if child.xml_value.strip():
#More to it than whitespace, so leave alone
#Note: if only whitespace entities are used, will still be left alone
updated_child_list.append(child)
updated_child_ix += 1
else:
#Only whitespace, so replace with proper indentation
new_text = text('\n' + indent*depth, elem)
updated_child_list.append(new_text)
updated_child_ix += 1
#Trailing indentation might be needed
if not(updated_child_ix % 2):
new_text = text('\n' + indent*(depth-1), elem)
updated_child_list.append(new_text)
#updated_child_ix += 1 #About to be done, so not really needed
elem.xml_children = updated_child_list
return elem
|
python
|
{
"resource": ""
}
|
q278560
|
call_inkscape
|
test
|
def call_inkscape(args_strings, inkscape_binpath=None):
"""Call inkscape CLI with arguments and returns its return value.
Parameters
----------
    args_strings: list of str
        Arguments to pass to the Inkscape CLI.
inkscape_binpath: str
Returns
-------
return_value
Inkscape command CLI call return value.
"""
log.debug('Looking for the binary file for inkscape.')
if inkscape_binpath is None:
inkscape_binpath = get_inkscape_binpath()
if inkscape_binpath is None or not os.path.exists(inkscape_binpath):
raise IOError(
'Inkscape binary has not been found. Please check configuration.'
)
return call_command(inkscape_binpath, args_strings)
|
python
|
{
"resource": ""
}
|
q278561
|
inkscape_export
|
test
|
def inkscape_export(input_file, output_file, export_flag="-A", dpi=90, inkscape_binpath=None):
""" Call Inkscape to export the input_file to output_file using the
specific export argument flag for the output file type.
Parameters
----------
input_file: str
Path to the input file
output_file: str
Path to the output file
export_flag: str
        Inkscape CLI flag to indicate the type of the output file
    dpi: int
        Dots-per-inch resolution for the exported file.
    inkscape_binpath: str
        Path to the Inkscape binary; if None it will be looked up automatically.
Returns
-------
return_value
Command call return value
"""
if not os.path.exists(input_file):
log.error('File {} not found.'.format(input_file))
raise IOError((0, 'File not found.', input_file))
if '=' not in export_flag:
export_flag += ' '
arg_strings = []
arg_strings += ['--without-gui']
arg_strings += ['--export-text-to-path']
arg_strings += ['{}"{}"'.format(export_flag, output_file)]
arg_strings += ['--export-dpi={}'.format(dpi)]
arg_strings += ['"{}"'.format(input_file)]
return call_inkscape(arg_strings, inkscape_binpath=inkscape_binpath)
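
# Usage sketch (hypothetical file names; requires a working Inkscape install).
# The '-A' and '-e' flags are the ones used by svg2pdf and svg2png below.
#     inkscape_export('figure.svg', 'figure.pdf', export_flag='-A', dpi=150)
#     inkscape_export('figure.svg', 'figure.png', export_flag='-e', dpi=300)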
|
python
|
{
"resource": ""
}
|
q278562
|
svg2pdf
|
test
|
def svg2pdf(svg_file_path, pdf_file_path, dpi=150, command_binpath=None, support_unicode=False):
""" Transform SVG file to PDF file
"""
if support_unicode:
return rsvg_export(svg_file_path, pdf_file_path, dpi=dpi, rsvg_binpath=command_binpath)
return inkscape_export(svg_file_path, pdf_file_path, export_flag="-A",
dpi=dpi, inkscape_binpath=command_binpath)
|
python
|
{
"resource": ""
}
|
q278563
|
svg2png
|
test
|
def svg2png(svg_file_path, png_file_path, dpi=150, inkscape_binpath=None):
""" Transform SVG file to PNG file
"""
return inkscape_export(svg_file_path, png_file_path, export_flag="-e",
dpi=dpi, inkscape_binpath=inkscape_binpath)
|
python
|
{
"resource": ""
}
|
q278564
|
get_environment_for
|
test
|
def get_environment_for(file_path):
"""Return a Jinja2 environment for where file_path is.
Parameters
----------
file_path: str
Returns
-------
jinja_env: Jinja2.Environment
"""
work_dir = os.path.dirname(os.path.abspath(file_path))
if not os.path.exists(work_dir):
raise IOError('Could not find folder for dirname of file {}.'.format(file_path))
try:
jinja_env = Environment(loader=FileSystemLoader(work_dir))
except:
raise
else:
return jinja_env
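
# Usage sketch (file path and the 'title' placeholder are hypothetical): the
# environment loads templates from the folder containing the given file.
#     env = get_environment_for('/path/to/templates/report.txt')
#     template = env.get_template('report.txt')
#     filled = template.render(title='Monthly report')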
|
python
|
{
"resource": ""
}
|
q278565
|
TextDocument._setup_template_file
|
test
|
def _setup_template_file(self, template_file_path):
""" Setup self.template
Parameters
----------
template_file_path: str
Document template file path.
"""
try:
template_file = template_file_path
template_env = get_environment_for(template_file_path)
template = template_env.get_template(os.path.basename(template_file))
except:
raise
else:
self._template_file = template_file
self._template_env = template_env
self.template = template
|
python
|
{
"resource": ""
}
|
q278566
|
TextDocument.fill
|
test
|
def fill(self, doc_contents):
""" Fill the content of the document with the information in doc_contents.
Parameters
----------
doc_contents: dict
Set of values to set the template document.
Returns
-------
filled_doc: str
The content of the document with the template information filled.
"""
try:
filled_doc = self.template.render(**doc_contents)
except:
log.exception('Error rendering Document '
'for {}.'.format(doc_contents))
raise
else:
self.file_content_ = filled_doc
return filled_doc
|
python
|
{
"resource": ""
}
|
q278567
|
TextDocument.save_content
|
test
|
def save_content(self, file_path, encoding='utf-8'):
""" Save the content of the .txt file in a text file.
Parameters
----------
file_path: str
Path to the output file.
"""
if self.file_content_ is None:
msg = 'Template content has not been updated. \
Please fill the template before rendering it.'
log.exception(msg)
raise ValueError(msg)
try:
write_to_file(file_path, content=self.file_content_,
encoding=encoding)
except Exception as exc:
msg = 'Document of type {} got an error when \
writing content.'.format(self.__class__)
log.exception(msg)
raise Exception(msg) from exc
|
python
|
{
"resource": ""
}
|
q278568
|
TextDocument.from_template_file
|
test
|
def from_template_file(cls, template_file_path, command=None):
""" Factory function to create a specific document of the
class given by the `command` or the extension of `template_file_path`.
See get_doctype_by_command and get_doctype_by_extension.
Parameters
----------
template_file_path: str
command: str
Returns
-------
doc
"""
# get template file extension
ext = os.path.basename(template_file_path).split('.')[-1]
try:
doc_type = get_doctype_by_command(command)
except ValueError:
doc_type = get_doctype_by_extension(ext)
except:
raise
else:
return doc_type(template_file_path)
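
# Usage sketch (hypothetical template file and placeholder name): the factory
# picks the document class from the command or the file extension.
#     doc = TextDocument.from_template_file('report.tex')
#     doc.fill({'title': 'Monthly report'})
#     doc.save_content('report_filled.tex')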
|
python
|
{
"resource": ""
}
|
q278569
|
SVGDocument.fill
|
test
|
def fill(self, doc_contents):
""" Fill the content of the document with the information in doc_contents.
        This is different from the TextDocument fill function, because this will
        check for special symbols in the values of `doc_contents` and replace them
        with valid XML character codes before filling the template.
Parameters
----------
doc_contents: dict
Set of values to set the template document.
Returns
-------
filled_doc: str
The content of the document with the template information filled.
"""
for key, content in doc_contents.items():
doc_contents[key] = replace_chars_for_svg_code(content)
return super(SVGDocument, self).fill(doc_contents=doc_contents)
|
python
|
{
"resource": ""
}
|
q278570
|
SVGDocument.render
|
test
|
def render(self, file_path, **kwargs):
""" Save the content of the .svg file in the chosen rendered format.
Parameters
----------
file_path: str
Path to the output file.
Kwargs
------
file_type: str
Choices: 'png', 'pdf', 'svg'
Default: 'pdf'
dpi: int
Dots-per-inch for the png and pdf.
Default: 150
support_unicode: bool
Whether to allow unicode to be encoded in the PDF.
Default: False
"""
temp = get_tempfile(suffix='.svg')
self.save_content(temp.name)
file_type = kwargs.get('file_type', 'pdf')
dpi = kwargs.get('dpi', 150)
support_unicode = kwargs.get('support_unicode', False)
try:
if file_type == 'svg':
shutil.copyfile(temp.name, file_path)
elif file_type == 'png':
svg2png(temp.name, file_path, dpi=dpi)
elif file_type == 'pdf':
svg2pdf(temp.name, file_path, dpi=dpi, support_unicode=support_unicode)
except:
log.exception(
'Error exporting file {} to {}'.format(file_path, file_type)
)
raise
|
python
|
{
"resource": ""
}
|
q278571
|
LateXDocument.render
|
test
|
def render(self, file_path, **kwargs):
""" Save the content of the .text file in the PDF.
Parameters
----------
file_path: str
Path to the output file.
"""
temp = get_tempfile(suffix='.tex')
self.save_content(temp.name)
try:
self._render_function(temp.name, file_path, output_format='pdf')
except:
log.exception('Error exporting file {} to PDF.'.format(file_path))
raise
|
python
|
{
"resource": ""
}
|
q278572
|
parse
|
test
|
def parse(source, handler):
'''
Convert XML 1.0 to MicroXML
source - XML 1.0 input
handler - MicroXML events handler
Returns uxml, extras
uxml - MicroXML element extracted from the source
extras - information to be preserved but not part of MicroXML, e.g. namespaces
'''
h = expat_callbacks(handler)
p = xml.parsers.expat.ParserCreate(namespace_separator=' ')
p.StartElementHandler = h.start_element
p.EndElementHandler = h.end_element
p.CharacterDataHandler = h.char_data
p.StartNamespaceDeclHandler = h.start_namespace
p.EndNamespaceDeclHandler = h.end_namespace
p.Parse(source)
return p
|
python
|
{
"resource": ""
}
|
q278573
|
parse
|
test
|
def parse(source, prefixes=None, model=None, encoding=None, use_xhtml_ns=False):
'''
Parse an input source with HTML text into an Amara 3 tree
>>> from amara3.uxml import html5
>>> import urllib.request
>>> with urllib.request.urlopen('http://uche.ogbuji.net/') as response:
... html5.parse(response)
#Warning: if you pass a string, you must make sure it's a byte string, not a Unicode object. You might also want to wrap it with amara.lib.inputsource.text if it's not obviously XML or HTML (for example it could be confused with a file name)
'''
def get_tree_instance(namespaceHTMLElements, use_xhtml_ns=use_xhtml_ns):
#use_xhtml_ns is a boolean, whether or not to use http://www.w3.org/1999/xhtml
return treebuilder(use_xhtml_ns)
parser = html5lib.HTMLParser(tree=get_tree_instance)
#doc = parser.parse(inputsource(source, None).stream, encoding=encoding)
#doc = parser.parse(source, encoding=encoding)
doc = parser.parse(source)
first_element = next((e for e in doc.root_nodes if isinstance(e, element)), None)
return first_element
|
python
|
{
"resource": ""
}
|
q278574
|
markup_fragment
|
test
|
def markup_fragment(source, encoding=None):
'''
    Parse a fragment of markup in HTML mode, and return a bindery node
Warning: if you pass a string, you must make sure it's a byte string, not a Unicode object. You might also want to wrap it with amara.lib.inputsource.text if it's not obviously XML or HTML (for example it could be confused with a file name)
from amara.lib import inputsource
from amara.bindery import html
doc = html.markup_fragment(inputsource.text('XXX<html><body onload="" color="white"><p>Spam!<p>Eggs!</body></html>YYY'))
See also: http://wiki.xml3k.org/Amara2/Tagsoup
'''
doc = parse(source, encoding=encoding)
frag = doc.html.body
return frag
|
python
|
{
"resource": ""
}
|
q278575
|
node.insertText
|
test
|
def insertText(self, data, insertBefore=None):
"""Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node's text.
"""
if insertBefore:
self.insertBefore(tree.text(data), insertBefore)
else:
self.xml_append(tree.text(data))
|
python
|
{
"resource": ""
}
|
q278576
|
node.insertBefore
|
test
|
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
offset = self.xml_children.index(refNode)
self.xml_insert(node, offset)
|
python
|
{
"resource": ""
}
|
q278577
|
element.cloneNode
|
test
|
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
attrs = self.xml_attributes.copy()
return element(self.xml_name, attrs=attrs)
|
python
|
{
"resource": ""
}
|
q278578
|
execute
|
test
|
def execute(option):
'''A script that melody calls with each valid set of options. This
script runs the required code and returns the results.'''
namelist_option = []
makefile_option = []
flags = ""
for entry in option:
key = entry.keys()[0]
if key == "Problem Size":
namelist_option.append({"SIZE": entry[key]})
elif key == "F90":
makefile_option.append(entry)
else:
flags += entry[key] + " "
makefile_option.append({"F90FLAGS": flags})
namelist = create_input(namelist_option, "namelist",
template_location="templates")
makefile_include = create_input(makefile_option, "Makefile.include",
template_location="templates")
benchmark_base = "shallow"
# save the input files in the appropriate place
location = benchmark_base + "/original/namelist"
my_file = open(location, 'w')
my_file.write(namelist)
my_file.flush()
location = benchmark_base + "/common/Makefile.include"
my_file = open(location, 'w')
my_file.write(makefile_include)
my_file.flush()
# compile shallow if required
base_path = benchmark_base + "/original"
import subprocess
make_process = subprocess.Popen(["make", "clean"], cwd=base_path,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
if make_process.wait() != 0:
return False, []
make_process = subprocess.Popen(["make"], cwd=base_path,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
if make_process.wait() != 0:
return False, []
# run shallow
make_process = subprocess.Popen(["./shallow_base"], cwd=base_path,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
if make_process.wait() != 0:
return False, []
# _ = make_process.stderr.read()
stdout = make_process.stdout.read()
# determine if the results are correct. We will need to look at
# the results from stdout but for the moment we assume they are
# correct
# extract the required outputs
for line in stdout.split("\n"):
if "Time-stepping" in line:
total_time = line.split()[2]
return True, total_time
|
python
|
{
"resource": ""
}
|
q278579
|
strval
|
test
|
def strval(node, outermost=True):
'''
XPath-like string value of node
'''
if not isinstance(node, element):
return node.xml_value if outermost else [node.xml_value]
accumulator = []
for child in node.xml_children:
if isinstance(child, text):
accumulator.append(child.xml_value)
elif isinstance(child, element):
accumulator.extend(strval(child, outermost=False))
if outermost: accumulator = ''.join(accumulator)
return accumulator
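
# Usage sketch (assumes the amara3.uxml tree API, as in the make_pretty doctest):
from amara3.uxml import tree

_root = tree.treebuilder().parse('<a>Hello <b>world</b>!</a>')
_s = strval(_root)  # 'Hello world!'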
|
python
|
{
"resource": ""
}
|
q278580
|
element.xml_insert
|
test
|
def xml_insert(self, child, index=-1):
'''
        Insert a node as a child; by default (index=-1) append it as the last child
        child - the child to insert. If a string, convert to a text node, for convenience
        index - position in xml_children at which to insert; -1 (the default) appends at the end
'''
if isinstance(child, str):
child = text(child, parent=self)
else:
child._xml_parent = weakref.ref(self)
if index == -1:
self.xml_children.append(child)
else:
self.xml_children.insert(index, child)
return
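
# Usage sketch (assumes this is the xml_insert method of an amara3.uxml tree element):
from amara3.uxml import tree

_root = tree.treebuilder().parse('<a><b>1</b></a>')
_root.xml_insert(' tail')      # string is converted to a text node and appended
_root.xml_insert('lead ', 0)   # inserted before <b>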
|
python
|
{
"resource": ""
}
|
q278581
|
parse_config
|
test
|
def parse_config(options):
"""
Get settings from config file.
"""
if os.path.exists(options.config):
config = ConfigParser.ConfigParser()
try:
config.read(options.config)
except Exception, err:
if not options.quiet:
sys.stderr.write("ERROR: Config file read {config} error. {err}".format(config=options.config, err=err))
sys.exit(-1)
try:
configdata = {
"secrets": config.get("GOOGLE", "secrets"),
"credentials": config.get("nagios-notification-google-calendar", "credentials"),
"start": config.get("nagios-notification-google-calendar", "start"),
"end": config.get("nagios-notification-google-calendar", "end"),
"message": config.get("nagios-notification-google-calendar", "message"),
}
except ConfigParser.NoOptionError, err:
if not options.quiet:
sys.stderr.write("ERROR: Config file missing option error. {err}\n".format(err=err))
sys.exit(-1)
# check mandatory config options supplied
mandatories = ["secrets", "credentials", "start", "end", "message", ]
if not all(configdata[mandatory] for mandatory in mandatories):
if not options.quiet:
sys.stdout.write("Mandatory config option missing\n")
sys.exit(0)
return configdata
else:
if not options.quiet:
sys.stderr.write("ERROR: Config file {config} does not exist\n".format(config=options.config))
sys.exit(0)
|
python
|
{
"resource": ""
}
|
q278582
|
get_google_credentials
|
test
|
def get_google_credentials(options, config):
"""
Get google API credentials for user.
"""
try:
if options.get_google_credentials:
flow = flow_from_clientsecrets(config["secrets"], scope=SCOPE, redirect_uri="oob")
sys.stdout.write("Follow this URL: {url} and grant access to calendar.\n".format(url=flow.step1_get_authorize_url()))
token = raw_input("Enter token:")
credentials = flow.step2_exchange(token)
storage = Storage(os.path.join(config["credentials"], "{username}.json".format(username=options.username)))
storage.put(credentials)
credentials.set_store(storage)
else:
storage = Storage(os.path.join(config["credentials"], "{username}.json".format(username=options.username)))
credentials = storage.get()
except Exception, err:
if not options.quiet:
sys.stderr.write("ERROR: Getting google API credentials error. {err}\n".format(err=err))
sys.exit(-1)
return credentials
|
python
|
{
"resource": ""
}
|
q278583
|
create_event_datetimes
|
test
|
def create_event_datetimes(options, config):
"""
Create event start and end datetimes.
"""
now = datetime.datetime.now()
return {
"start": {
"dateTime": (now + datetime.timedelta(minutes=int(config["start"]))).strftime(DT_FORMAT),
"timeZone": options.timezone,
},
"end": {
"dateTime": (now + datetime.timedelta(minutes=int(config["end"]))).strftime(DT_FORMAT),
"timeZone": options.timezone,
},
}
|
python
|
{
"resource": ""
}
|
q278584
|
create_event
|
test
|
def create_event(options, config, credentials):
"""
Create event in calendar with sms reminder.
"""
try:
http = credentials.authorize(httplib2.Http())
service = build("calendar", "v3", http=http)
event = {
"summary": options.message,
"location": "",
"reminders": {
"useDefault": False,
"overrides": [
{
"method": "sms",
"minutes": config["message"],
},
],
}
}
event.update(create_event_datetimes(options, config))
service.events().insert(calendarId=options.calendar, sendNotifications=True, body=event).execute()
except Exception, err:
if not options.quiet:
sys.stderr.write("ERROR: Creating google calendar event error. {err}\n".format(err=err))
sys.exit(-1)
|
python
|
{
"resource": ""
}
|
q278585
|
main
|
test
|
def main():
"""
Processing notification call main function.
"""
# getting info for creating event
options = parse_options()
config = parse_config(options)
credentials = get_google_credentials(options, config)
if not options.get_google_credentials:
create_event(options, config, credentials)
|
python
|
{
"resource": ""
}
|
q278586
|
get_extension
|
test
|
def get_extension(filepath, check_if_exists=False):
"""Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
Returns
-------
str
The extension of the file name or path
"""
if check_if_exists:
if not os.path.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
try:
rest, ext = os.path.splitext(filepath)
except:
raise
else:
return ext
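
# Usage sketch:
#     get_extension('figure.svg')      # '.svg'
#     get_extension('archive.tar.gz')  # '.gz' (os.path.splitext keeps only the last suffix)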
|
python
|
{
"resource": ""
}
|
q278587
|
add_extension_if_needed
|
test
|
def add_extension_if_needed(filepath, ext, check_if_exists=False):
"""Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
"""
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not os.path.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
return filepath
|
python
|
{
"resource": ""
}
|
q278588
|
get_tempfile
|
test
|
def get_tempfile(suffix='.txt', dirpath=None):
""" Return a temporary file with the given suffix within dirpath.
If dirpath is None, will look for a temporary folder in your system.
Parameters
----------
suffix: str
Temporary file name suffix
dirpath: str
Folder path where create the temporary file
Returns
    -------
    temp_file: tempfile.NamedTemporaryFile
        The temporary file object; its `.name` attribute holds the path.
"""
if dirpath is None:
dirpath = get_temp_dir()
return tempfile.NamedTemporaryFile(suffix=suffix, dir=dirpath)
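
# Usage sketch (hypothetical suffix): the returned object is a NamedTemporaryFile,
# so the path is available via its .name attribute.
#     tmp = get_tempfile(suffix='.svg')
#     svg_path = tmp.name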
|
python
|
{
"resource": ""
}
|
q278589
|
cleanup
|
test
|
def cleanup(workdir, extension):
""" Remove the files in workdir that have the given extension.
Parameters
----------
    workdir: str
        Folder path from where to clean the files.
    extension: str
        File extension without the dot, e.g., 'txt'
    """
    for filepath in glob(os.path.join(workdir, '*.' + extension)):
        os.remove(filepath)
|
python
|
{
"resource": ""
}
|
q278590
|
csv_to_json
|
test
|
def csv_to_json(csv_filepath, json_filepath, fieldnames, ignore_first_line=True):
""" Convert a CSV file in `csv_filepath` into a JSON file in `json_filepath`.
Parameters
----------
csv_filepath: str
Path to the input CSV file.
json_filepath: str
Path to the output JSON file. Will be overwritten if exists.
fieldnames: List[str]
Names of the fields in the CSV file.
ignore_first_line: bool
"""
import csv
import json
csvfile = open(csv_filepath, 'r')
jsonfile = open(json_filepath, 'w')
reader = csv.DictReader(csvfile, fieldnames)
rows = []
if ignore_first_line:
next(reader)
for row in reader:
rows.append(row)
json.dump(rows, jsonfile)
jsonfile.close()
csvfile.close()
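
# Usage sketch (hypothetical file and field names): a CSV with a header line
# and rows such as "Ada,36" becomes [{"name": "Ada", "age": "36"}, ...].
#     csv_to_json('people.csv', 'people.json',
#                 fieldnames=['name', 'age'], ignore_first_line=True)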
|
python
|
{
"resource": ""
}
|
q278591
|
replace_file_content
|
test
|
def replace_file_content(filepath, old, new, max=1):
""" Modify the content of `filepath`, replacing `old` for `new`.
Parameters
----------
filepath: str
Path to the file to be modified. It will be overwritten.
old: str
This is old substring to be replaced.
new: str
This is new substring, which would replace old substring.
max: int
        If larger than 0, only the first `max` occurrences are replaced.
"""
with open(filepath, 'r') as f:
content = f.read()
content = content.replace(old, new, max)
with open(filepath, 'w') as f:
f.write(content)
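
# Usage sketch (hypothetical file name): replace only the first occurrence of
# 'DRAFT' with 'FINAL', rewriting the file in place.
#     replace_file_content('report.txt', 'DRAFT', 'FINAL', max=1)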
|
python
|
{
"resource": ""
}
|
q278592
|
CopyDoc.parse
|
test
|
def parse(self):
"""
Run all parsing functions.
"""
for tag in self.soup.findAll('span'):
self.create_italic(tag)
self.create_strong(tag)
self.create_underline(tag)
self.unwrap_span(tag)
for tag in self.soup.findAll('a'):
self.remove_comments(tag)
self.check_next(tag)
if self.soup.body:
for tag in self.soup.body.findAll():
self.remove_empty(tag)
self.remove_inline_comment(tag)
self.parse_attrs(tag)
for token, target in self.tokens:
self.find_token(tag, token, target)
self.remove_blacklisted_tags(tag)
|
python
|
{
"resource": ""
}
|
q278593
|
CopyDoc.check_next
|
test
|
def check_next(self, tag):
"""
If next tag is link with same href, combine them.
"""
if (type(tag.next_sibling) == element.Tag and
tag.next_sibling.name == 'a'):
next_tag = tag.next_sibling
if tag.get('href') and next_tag.get('href'):
href = self._parse_href(tag.get('href'))
next_href = self._parse_href(next_tag.get('href'))
if href == next_href:
next_text = next_tag.get_text()
tag.append(next_text)
self.tags_blacklist.append(next_tag)
|
python
|
{
"resource": ""
}
|
q278594
|
CopyDoc.create_italic
|
test
|
def create_italic(self, tag):
"""
See if span tag has italic style and wrap with em tag.
"""
style = tag.get('style')
if style and 'font-style:italic' in style:
tag.wrap(self.soup.new_tag('em'))
|
python
|
{
"resource": ""
}
|
q278595
|
CopyDoc.create_strong
|
test
|
def create_strong(self, tag):
"""
See if span tag has bold style and wrap with strong tag.
"""
style = tag.get('style')
if (style and
('font-weight:bold' in style or 'font-weight:700' in style)):
tag.wrap(self.soup.new_tag('strong'))
|
python
|
{
"resource": ""
}
|
q278596
|
CopyDoc.create_underline
|
test
|
def create_underline(self, tag):
"""
See if span tag has underline style and wrap with u tag.
"""
style = tag.get('style')
if style and 'text-decoration:underline' in style:
tag.wrap(self.soup.new_tag('u'))
|
python
|
{
"resource": ""
}
|
q278597
|
CopyDoc.parse_attrs
|
test
|
def parse_attrs(self, tag):
"""
Reject attributes not defined in ATTR_WHITELIST.
"""
if tag.name in ATTR_WHITELIST.keys():
attrs = copy(tag.attrs)
for attr, value in attrs.items():
if attr in ATTR_WHITELIST[tag.name]:
tag.attrs[attr] = self._parse_attr(tag.name, attr, value)
else:
del tag.attrs[attr]
else:
tag.attrs = {}
|
python
|
{
"resource": ""
}
|
q278598
|
CopyDoc.clean_linebreaks
|
test
|
def clean_linebreaks(self, tag):
"""
        Get the unicode string without any other content transformation,
        and clean extra spaces.
        """
        stripped = tag.decode(formatter=None)
        stripped = re.sub(r'\s+', ' ', stripped)
        stripped = re.sub(r'\n', '', stripped)
return stripped
|
python
|
{
"resource": ""
}
|
q278599
|
CopyDoc._parse_href
|
test
|
def _parse_href(self, href):
"""
Extract "real" URL from Google redirected url by getting `q`
querystring parameter.
"""
params = parse_qs(urlsplit(href).query)
return params.get('q')
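
# Usage sketch of the expected behaviour (parse_qs returns a list per parameter):
#     self._parse_href('https://www.google.com/url?q=https://example.com&sa=D')
#     # -> ['https://example.com']; returns None when no 'q' parameter is present.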
|
python
|
{
"resource": ""
}
|