id | repo | path | func_name | language | code | sha | url
6,400 | mcs07/ChemDataExtractor | chemdataextractor/biblio/bibtex.py | BibtexParser.parse_names | python

def parse_names(cls, names):
    """Parse a string of names separated by "and" like in a BibTeX authors field."""
    names = [latex_to_unicode(n) for n in re.split(r'\sand\s(?=[^{}]*(?:\{|$))', names) if n]
    return names

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/biblio/bibtex.py#L172-L175
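A quick standalone illustration of the split pattern (`latex_to_unicode` is omitted here, since it only affects LaTeX-encoded input): the lookahead ensures that an "and" inside a braced group, such as a corporate author name, is not treated as a separator.

import re

authors = 'Smith, John and {Barnes and Noble}'
# Split on ' and ' only when the remaining text up to the next '{' (or end of
# string) contains no braces, i.e. when we are not inside a braced group.
print(re.split(r'\sand\s(?=[^{}]*(?:\{|$))', authors))
# ['Smith, John', '{Barnes and Noble}']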
6,401 | mcs07/ChemDataExtractor | chemdataextractor/biblio/bibtex.py | BibtexParser.metadata | python

def metadata(self):
    """Return metadata for the parsed collection of records."""
    auto = {u'records': self.size}
    auto.update(self.meta)
    return auto

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/biblio/bibtex.py#L188-L192
6,402 | mcs07/ChemDataExtractor | chemdataextractor/biblio/bibtex.py | BibtexParser.json | python

def json(self):
    """Return a list of records as a JSON string. Follows the BibJSON convention."""
    # Note: this is Python 2-era code; on Python 3, self.records.values() is a
    # view and would need wrapping in list() to be JSON-serializable.
    return json.dumps(OrderedDict([('metadata', self.metadata), ('records', self.records.values())]))

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/biblio/bibtex.py#L195-L197
6,403 | mcs07/ChemDataExtractor | chemdataextractor/config.py | Config._flush | python

def _flush(self):
    """Save the contents of data to the file on disk. You should not need to call this manually."""
    d = os.path.dirname(self.path)
    if not os.path.isdir(d):
        os.makedirs(d)
    with io.open(self.path, 'w', encoding='utf8') as f:
        yaml.safe_dump(self._data, f, default_flow_style=False, encoding=None)

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/config.py#L79-L85
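For reference, a minimal sketch of what `yaml.safe_dump(..., default_flow_style=False, encoding=None)` emits (the config keys here are made up). With `encoding=None`, PyYAML produces text rather than bytes, which matches the text-mode handle opened with `io.open(..., 'w', encoding='utf8')`.

import yaml

# Hypothetical config data; block style (default_flow_style=False) gives one key per line.
print(yaml.safe_dump({'PubMed': {'api_key': 'abc123'}}, default_flow_style=False))
# PubMed:
#   api_key: abc123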
6,404 | mcs07/ChemDataExtractor | chemdataextractor/nlp/abbrev.py | AbbreviationDetector._is_allowed_abbr | python

def _is_allowed_abbr(self, tokens):
    """Return True if text is an allowed abbreviation."""
    if len(tokens) <= 2:
        abbr_text = ''.join(tokens)
        if self.abbr_min <= len(abbr_text) <= self.abbr_max and bracket_level(abbr_text) == 0:
            if abbr_text[0].isalnum() and any(c.isalpha() for c in abbr_text):
                # Disallow property values
                if re.match(r'^\d+(\.\d+)?(g|m[lL]|cm)$', abbr_text):
                    return False
                return True
    return False

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/abbrev.py#L43-L53
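The property-value filter rejects candidate abbreviations that are really measured quantities. A standalone re-check of the regex, with illustrative strings:

import re

prop_value = re.compile(r'^\d+(\.\d+)?(g|m[lL]|cm)$')

for candidate in ['100mL', '2.5g', '10cm', 'DMF', 'TiO2']:
    print(candidate, bool(prop_value.match(candidate)))
# 100mL True / 2.5g True / 10cm True / DMF False / TiO2 False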
6,405 | mcs07/ChemDataExtractor | chemdataextractor/scrape/base.py | BaseScraper.name | python

def name(self):
    """A unique name for this scraper."""
    return ''.join('_%s' % c if c.isupper() else c for c in self.__class__.__name__).strip('_').lower()

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/base.py#L41-L43
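The generator expression converts the scraper's CamelCase class name to snake_case. A standalone trace with a hypothetical class name:

class_name = 'NatureScraper'  # hypothetical subclass name
# Insert '_' before each uppercase letter, then strip the leading '_' and lowercase.
print(''.join('_%s' % c if c.isupper() else c for c in class_name).strip('_').lower())
# nature_scraper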
6,406 | mcs07/ChemDataExtractor | chemdataextractor/scrape/base.py | BaseField._post_scrape | python

def _post_scrape(self, value, processor=None):
    """Apply processing to the scraped value."""
    # Pass each value through the field's clean method
    value = [self.process(v) for v in value]
    # Filter None values
    value = [v for v in value if v is not None]
    # Pass each value through processors defined on the entity
    if processor:
        value = [processor(v) for v in value]
        value = [v for v in value if v is not None]
    # Take first unless all is specified
    if not self.all:
        value = value[0] if value else None
    log.debug('Scraped %s: %s from %s' % (self.name, value, self.selection))
    return value

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/base.py#L197-L211
6,407 | mcs07/ChemDataExtractor | chemdataextractor/scrape/entity.py | Entity.scrape | python

def scrape(cls, selector, root, xpath=False):
    """Return EntityList for the given selector."""
    log.debug('Called scrape classmethod with root: %s' % root)
    roots = selector.xpath(root) if xpath else selector.css(root)
    results = [cls(r) for r in roots]
    return EntityList(*results)

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/entity.py#L95-L100
6,408 | mcs07/ChemDataExtractor | chemdataextractor/scrape/entity.py | Entity.serialize | python

def serialize(self):
    """Convert Entity to python dictionary."""
    # Serialize fields to a dict
    data = {}
    for field_name in self:
        value = self._values.get(field_name)
        field = self.fields.get(field_name)
        if value is not None:
            if field.all:
                value = [field.serialize(v) for v in value]
            else:
                value = field.serialize(value)
        # Skip empty fields unless field.null
        if not field.null and ((field.all and value == []) or (not field.all and value in {None, ''})):
            continue
        data[field.name] = value
    return data

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/entity.py#L102-L118
6,409 | mcs07/ChemDataExtractor | chemdataextractor/scrape/entity.py | Entity.to_json | python

def to_json(self, *args, **kwargs):
    """Convert Entity to JSON."""
    return json.dumps(self.serialize(), *args, **kwargs)

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/entity.py#L120-L122
6,410 | mcs07/ChemDataExtractor | chemdataextractor/doc/document.py | Document.from_file | python

def from_file(cls, f, fname=None, readers=None):
    """Create a Document from a file.

    Usage::

        with open('paper.html', 'rb') as f:
            doc = Document.from_file(f)

    .. note::

        Always open files in binary mode by using the 'rb' parameter.

    :param file|string f: A file-like object or path to a file.
    :param string fname: (Optional) The filename. Used to help determine file format.
    :param list[chemdataextractor.reader.base.BaseReader] readers: (Optional) List of readers to use.
    """
    if isinstance(f, six.string_types):
        f = io.open(f, 'rb')
    if not fname and hasattr(f, 'name'):
        fname = f.name
    return cls.from_string(f.read(), fname=fname, readers=readers)

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/document.py#L87-L107
6,411 | mcs07/ChemDataExtractor | chemdataextractor/doc/document.py | Document.from_string | python

def from_string(cls, fstring, fname=None, readers=None):
    """Create a Document from a byte string containing the contents of a file.

    Usage::

        contents = open('paper.html', 'rb').read()
        doc = Document.from_string(contents)

    .. note::

        This method expects a byte string, not a unicode string (in contrast to most methods in ChemDataExtractor).

    :param bytes fstring: A byte string containing the contents of a file.
    :param string fname: (Optional) The filename. Used to help determine file format.
    :param list[chemdataextractor.reader.base.BaseReader] readers: (Optional) List of readers to use.
    """
    if readers is None:
        from ..reader import DEFAULT_READERS
        readers = DEFAULT_READERS
    if isinstance(fstring, six.text_type):
        raise ReaderError('from_string expects a byte string, not a unicode string')
    for reader in readers:
        # Skip reader if we don't think it can read file
        if not reader.detect(fstring, fname=fname):
            continue
        try:
            d = reader.readstring(fstring)
            log.debug('Parsed document with %s' % reader.__class__.__name__)
            return d
        except ReaderError:
            pass
    raise ReaderError('Unable to read document')

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/document.py#L110-L143
6,412 | mcs07/ChemDataExtractor | chemdataextractor/doc/document.py | Document.get_element_with_id | python

def get_element_with_id(self, id):
    """Return the element with the specified ID."""
    # Should we maintain a hashmap of ids to make this more efficient? Probably overkill.
    # TODO: Elements can contain nested elements (captions, footnotes, table cells, etc.)
    return next((el for el in self.elements if el.id == id), None)

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/document.py#L300-L304
6,413 | mcs07/ChemDataExtractor | chemdataextractor/doc/document.py | Document.serialize | python

def serialize(self):
    """Convert Document to python dictionary."""
    # Serialize fields to a dict
    elements = []
    for element in self.elements:
        elements.append(element.serialize())
    data = {'type': 'document', 'elements': elements}
    return data

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/document.py#L357-L364
6,414 | mcs07/ChemDataExtractor | chemdataextractor/biblio/xmp.py | XmpParser.parse | python

def parse(self, xmp):
    """Run parser and return a dictionary of all the parsed metadata."""
    tree = etree.fromstring(xmp)
    rdf_tree = tree.find(RDF_NS + 'RDF')
    meta = defaultdict(dict)
    for desc in rdf_tree.findall(RDF_NS + 'Description'):
        for el in desc.getchildren():
            ns, tag = self._parse_tag(el)
            value = self._parse_value(el)
            meta[ns][tag] = value
    return dict(meta)

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/biblio/xmp.py#L60-L70
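The same RDF/Description traversal can be reproduced with the standard library on a hypothetical XMP packet (`RDF_NS` here is the Clark-notation RDF namespace prefix, matching what the parser uses):

from xml.etree import ElementTree as etree

RDF_NS = '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}'

xmp = (
    '<x:xmpmeta xmlns:x="adobe:ns:meta/">'
    '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"'
    ' xmlns:dc="http://purl.org/dc/elements/1.1/">'
    '<rdf:Description><dc:format>application/pdf</dc:format></rdf:Description>'
    '</rdf:RDF>'
    '</x:xmpmeta>'
)
tree = etree.fromstring(xmp)
for desc in tree.find(RDF_NS + 'RDF').findall(RDF_NS + 'Description'):
    for el in desc:
        print(el.tag, '->', el.text)
# {http://purl.org/dc/elements/1.1/}format -> application/pdf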
6,415 | mcs07/ChemDataExtractor | chemdataextractor/biblio/xmp.py | XmpParser._parse_tag | python

def _parse_tag(self, el):
    """Extract the namespace and tag from an element."""
    ns = None
    tag = el.tag
    if tag[0] == '{':
        ns, tag = tag[1:].split('}', 1)
        if self.ns_map and ns in self.ns_map:
            ns = self.ns_map[ns]
    return ns, tag

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/biblio/xmp.py#L72-L80
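lxml and ElementTree report namespaced tags in Clark notation, `{uri}local`; the split above separates the two parts. A standalone trace:

tag = '{http://purl.org/dc/elements/1.1/}creator'
if tag[0] == '{':
    ns, local = tag[1:].split('}', 1)
print(ns)     # http://purl.org/dc/elements/1.1/
print(local)  # creator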
6,416 | mcs07/ChemDataExtractor | chemdataextractor/biblio/xmp.py | XmpParser._parse_value | python

def _parse_value(self, el):
    """Extract the metadata value from an element."""
    if el.find(RDF_NS + 'Bag') is not None:
        value = []
        for li in el.findall(RDF_NS + 'Bag/' + RDF_NS + 'li'):
            value.append(li.text)
    elif el.find(RDF_NS + 'Seq') is not None:
        value = []
        for li in el.findall(RDF_NS + 'Seq/' + RDF_NS + 'li'):
            value.append(li.text)
    elif el.find(RDF_NS + 'Alt') is not None:
        value = {}
        for li in el.findall(RDF_NS + 'Alt/' + RDF_NS + 'li'):
            value[li.get(XML_NS + 'lang')] = li.text
    else:
        value = el.text
    return value

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/biblio/xmp.py#L82-L98
6,417 | mcs07/ChemDataExtractor | chemdataextractor/scrape/pub/rsc.py | parse_rsc_html | python

def parse_rsc_html(htmlstring):
    """Messy RSC HTML needs this special parser to fix problems before creating selector."""
    converted = UnicodeDammit(htmlstring)
    if not converted.unicode_markup:
        raise UnicodeDecodeError('Failed to detect encoding, tried [%s]')
    root = fromstring(htmlstring, parser=HTMLParser(recover=True, encoding=converted.original_encoding))
    # Add p.otherpara tags around orphan text
    newp = None
    for child in root.get_element_by_id('wrapper'):
        if newp is not None:
            if child.tag in BLOCK_ELEMENTS or child.get('id', '').startswith('sect') or child.getnext() is None:
                child.addprevious(newp)
                newp = None
            else:
                newp.append(child)
        if newp is None and child.tag in BLOCK_ELEMENTS and child.tail and child.tail.strip():
            newp = Element('p', **{'class': 'otherpara'})
            newp.text = child.tail
            child.tail = ''
    return root

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/pub/rsc.py#L242-L261
6,418 | mcs07/ChemDataExtractor | chemdataextractor/scrape/pub/rsc.py | replace_rsc_img_chars | python

def replace_rsc_img_chars(document):
    """Replace image characters with unicode equivalents."""
    image_re = re.compile(r'http://www.rsc.org/images/entities/(?:h[23]+_)?(?:[ib]+_)?char_([0-9a-f]{4})(?:_([0-9a-f]{4}))?\.gif')
    for img in document.xpath('.//img[starts-with(@src, "http://www.rsc.org/images/entities/")]'):
        m = image_re.match(img.get('src'))
        if m:
            u1, u2 = m.group(1), m.group(2)
            if not u2 and u1 in RSC_IMG_CHARS:
                rep = RSC_IMG_CHARS[u1]
            else:
                rep = ('\\u%s' % u1).encode('ascii').decode('unicode-escape')
                if u2:
                    rep += ('\\u%s' % u2).encode('ascii').decode('unicode-escape')
            if img.tail is not None:
                rep += img.tail  # Make sure we don't remove any tail text
            parent = img.getparent()
            if parent is not None:
                previous = img.getprevious()
                if previous is not None:
                    previous.tail = (previous.tail or '') + rep
                else:
                    parent.text = (parent.text or '') + rep
                parent.remove(img)
    return document

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/pub/rsc.py#L264-L287
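The fallback branch decodes a hex codepoint taken from the entity-image filename into the actual character. A standalone check of that trick (the codepoint here is an example):

u1 = '03b1'  # hex codepoint as parsed from a char_03b1.gif-style filename
rep = ('\\u%s' % u1).encode('ascii').decode('unicode-escape')
print(rep)  # α (GREEK SMALL LETTER ALPHA)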
6,419 | mcs07/ChemDataExtractor | chemdataextractor/scrape/pub/rsc.py | space_references | python

def space_references(document):
    """Ensure a space around reference links, so there's a gap when they are removed."""
    for ref in document.xpath('.//a/sup/span[@class="sup_ref"]'):
        a = ref.getparent().getparent()
        if a is not None:
            atail = a.tail or ''
            if not atail.startswith(')') and not atail.startswith(',') and not atail.startswith(' '):
                a.tail = ' ' + atail
    return document

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/pub/rsc.py#L290-L298
6,420 | mcs07/ChemDataExtractor | chemdataextractor/cli/cluster.py | load | python

def load(ctx, input, output):
    """Read clusters from file and save to model file."""
    log.debug('chemdataextractor.cluster.load')
    import pickle
    click.echo('Reading %s' % input.name)
    clusters = {}
    for line in input.readlines():
        cluster, word, freq = line.split()
        clusters[word] = cluster
    pickle.dump(clusters, output, protocol=pickle.HIGHEST_PROTOCOL)

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/cluster.py#L32-L41
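Each input line is expected to carry three whitespace-separated fields. A sketch with a made-up cluster line:

line = '10110001 benzene 2341'  # hypothetical '<cluster> <word> <frequency>' record
cluster, word, freq = line.split()
print({word: cluster})  # {'benzene': '10110001'}; freq is read but unused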
6,421 | mcs07/ChemDataExtractor | chemdataextractor/scrape/pub/nlm.py | space_labels | python

def space_labels(document):
    """Ensure space around bold compound labels."""
    for label in document.xpath('.//bold'):
        # TODO: Make this more permissive to match chemical_label in parser
        if not label.text or not re.match(r'^\(L?\d\d?[a-z]?\):?$', label.text, re.I):
            continue
        parent = label.getparent()
        previous = label.getprevious()
        if previous is None:
            text = parent.text or ''
            if not text.endswith(' '):
                parent.text = text + ' '
        else:
            text = previous.tail or ''
            if not text.endswith(' '):
                previous.tail = text + ' '
        text = label.tail or ''
        if not text.endswith(' '):
            label.tail = text + ' '
    return document

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/pub/nlm.py#L34-L53
6,422 | mcs07/ChemDataExtractor | chemdataextractor/scrape/pub/nlm.py | tidy_nlm_references | python

def tidy_nlm_references(document):
    """Remove punctuation around references like brackets, commas, hyphens."""

    def strip_preceding(text):
        stext = text.rstrip()
        if stext.endswith('[') or stext.endswith('('):
            # log.debug('%s -> %s' % (text, stext[:-1]))
            return stext[:-1]
        return text

    def strip_between(text):
        stext = text.strip()
        if stext in {',', '-', '\u2013', '\u2212'}:
            # log.debug('%s -> %s' % (text, ''))
            return ''
        return text

    def strip_following(text):
        stext = text.lstrip()
        if stext.startswith(']') or stext.startswith(')'):
            # log.debug('%s -> %s' % (text, stext[1:]))
            return stext[1:]
        return text

    for ref in document.xpath('.//xref[@ref-type="bibr"]'):
        parent = ref.getparent()
        previous = ref.getprevious()
        next = ref.getnext()
        if previous is None:
            parent.text = strip_preceding(parent.text or '')
        else:
            previous.tail = strip_preceding(previous.tail or '')
        if next is not None and next.tag == 'xref' and next.get('ref-type') == 'bibr':
            ref.tail = strip_between(ref.tail or '')
        ref.tail = strip_following(ref.tail or '')
    return document

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/pub/nlm.py#L56-L91
6,423 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tokenize.py | regex_span_tokenize | python

def regex_span_tokenize(s, regex):
    """Return spans that identify tokens in s split using regex."""
    left = 0
    for m in re.finditer(regex, s, re.U):
        right, next = m.span()
        if right != 0:
            yield left, right
        left = next
    yield left, len(s)

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tokenize.py#L70-L78
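Given the generator above, the yielded spans cover the gaps between regex matches, so the original tokens can always be recovered from the offsets:

import re  # used by regex_span_tokenize

s = 'alpha  beta gamma'
spans = list(regex_span_tokenize(s, r'\s+'))
print(spans)                       # [(0, 5), (7, 11), (12, 17)]
print([s[i:j] for i, j in spans])  # ['alpha', 'beta', 'gamma']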
6,424 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tokenize.py | BaseTokenizer.tokenize | python

def tokenize(self, s):
    """Return a list of token strings from the given sentence.

    :param string s: The sentence string to tokenize.
    :rtype: iter(str)
    """
    return [s[start:end] for start, end in self.span_tokenize(s)]

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tokenize.py#L35-L41
6,425 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tokenize.py | SentenceTokenizer.span_tokenize | python

def span_tokenize(self, s):
    """Return a list of integer offsets that identify sentences in the given text.

    :param string s: The text to tokenize into sentences.
    :rtype: iter(tuple(int, int))
    """
    if self._tokenizer is None:
        self._tokenizer = load_model(self.model)
    # for debug in tokenizer.debug_decisions(s):
    #     log.debug(format_debug_decision(debug))
    return self._tokenizer.span_tokenize(s)

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tokenize.py#L91-L101
6,426 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tokenize.py | WordTokenizer._split_span | python

def _split_span(self, span, index, length=0):
    """Split a span into two or three separate spans at certain indices."""
    offset = span[1] + index if index < 0 else span[0] + index
    # log.debug([(span[0], offset), (offset, offset + length), (offset + length, span[1])])
    return [(span[0], offset), (offset, offset + length), (offset + length, span[1])]

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tokenize.py#L247-L251
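A negative `index` counts from the end of the span, and `length` reserves a middle span for the separator itself. Treating the method as a free function for illustration:

def split_span(span, index, length=0):
    offset = span[1] + index if index < 0 else span[0] + index
    return [(span[0], offset), (offset, offset + length), (offset + length, span[1])]

print(split_span((10, 20), 4, 1))  # [(10, 14), (14, 15), (15, 20)]
print(split_span((10, 20), -3))    # [(10, 17), (17, 17), (17, 20)]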
6,427 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tokenize.py | ChemWordTokenizer._closing_bracket_index | python

def _closing_bracket_index(self, text, bpair=('(', ')')):
    """Return the index of the closing bracket that matches the opening bracket at the start of the text."""
    level = 1
    for i, char in enumerate(text[1:]):
        if char == bpair[0]:
            level += 1
        elif char == bpair[1]:
            level -= 1
        if level == 0:
            return i + 1

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tokenize.py#L535-L544
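The counter starts at 1 because `text[0]` is assumed to be the opening bracket. As a free function on an example chemical-name fragment:

def closing_bracket_index(text, bpair=('(', ')')):
    level = 1
    for i, char in enumerate(text[1:]):
        if char == bpair[0]:
            level += 1
        elif char == bpair[1]:
            level -= 1
        if level == 0:
            return i + 1

text = '(2-(dimethylamino)ethyl) ester'
i = closing_bracket_index(text)
print(i, text[i])  # 23 )  -- the bracket matching text[0], not the inner one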
6,428 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tokenize.py | ChemWordTokenizer._opening_bracket_index | python

def _opening_bracket_index(self, text, bpair=('(', ')')):
    """Return the index of the opening bracket that matches the closing bracket at the end of the text."""
    level = 1
    for i, char in enumerate(reversed(text[:-1])):
        if char == bpair[1]:
            level += 1
        elif char == bpair[0]:
            level -= 1
        if level == 0:
            return len(text) - i - 2

sha: 349a3bea965f2073141d62043b89319222e46af1 | url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tokenize.py#L546-L555
6,429 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tokenize.py | ChemWordTokenizer._is_saccharide_arrow | def _is_saccharide_arrow(self, before, after):
"""Return True if the arrow is in a chemical name."""
if (before and after and before[-1].isdigit() and after[0].isdigit() and
before.rstrip('0123456789').endswith('(') and after.lstrip('0123456789').startswith(')-')):
return True
else:
return False | python | def _is_saccharide_arrow(self, before, after):
"""Return True if the arrow is in a chemical name."""
if (before and after and before[-1].isdigit() and after[0].isdigit() and
before.rstrip('0123456789').endswith('(') and after.lstrip('0123456789').startswith(')-')):
return True
else:
return False | [
"def",
"_is_saccharide_arrow",
"(",
"self",
",",
"before",
",",
"after",
")",
":",
"if",
"(",
"before",
"and",
"after",
"and",
"before",
"[",
"-",
"1",
"]",
".",
"isdigit",
"(",
")",
"and",
"after",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
"and",
"before",
".",
"rstrip",
"(",
"'0123456789'",
")",
".",
"endswith",
"(",
"'('",
")",
"and",
"after",
".",
"lstrip",
"(",
"'0123456789'",
")",
".",
"startswith",
"(",
"')-'",
")",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | Return True if the arrow is in a chemical name. | [
"Return",
"True",
"if",
"the",
"arrow",
"is",
"in",
"a",
"chemical",
"name",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tokenize.py#L565-L571 |
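A standalone version of the saccharide-arrow test above, applied to the text on either side of a candidate arrow token. For glycosidic linkage notation such as 'Galp-(1→4)-Glc', splitting at the arrow gives before='Galp-(1' and after='4)-Glc', which satisfies all four conditions, while an ordinary reaction arrow does not:

# Standalone version of the check above; the helper name is illustrative.
def is_saccharide_arrow(before, after):
    return bool(before and after and before[-1].isdigit() and after[0].isdigit()
                and before.rstrip('0123456789').endswith('(')
                and after.lstrip('0123456789').startswith(')-'))

assert is_saccharide_arrow('Galp-(1', '4)-Glc')        # glycosidic linkage: keep as one name
assert not is_saccharide_arrow('A + B ', ' C')          # reaction arrow: split into tokens
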
6,430 | mcs07/ChemDataExtractor | chemdataextractor/cli/evaluate.py | get_names | def get_names(cs):
"""Return list of every name."""
records = []
for c in cs:
records.extend(c.get('names', []))
return records | python | def get_names(cs):
"""Return list of every name."""
records = []
for c in cs:
records.extend(c.get('names', []))
return records | [
"def",
"get_names",
"(",
"cs",
")",
":",
"records",
"=",
"[",
"]",
"for",
"c",
"in",
"cs",
":",
"records",
".",
"extend",
"(",
"c",
".",
"get",
"(",
"'names'",
",",
"[",
"]",
")",
")",
"return",
"records"
] | Return list of every name. | [
"Return",
"list",
"of",
"every",
"name",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/evaluate.py#L75-L80 |
6,431 | mcs07/ChemDataExtractor | chemdataextractor/cli/evaluate.py | get_labels | def get_labels(cs):
"""Return list of every label."""
records = []
for c in cs:
records.extend(c.get('labels', []))
return records | python | def get_labels(cs):
"""Return list of every label."""
records = []
for c in cs:
records.extend(c.get('labels', []))
return records | [
"def",
"get_labels",
"(",
"cs",
")",
":",
"records",
"=",
"[",
"]",
"for",
"c",
"in",
"cs",
":",
"records",
".",
"extend",
"(",
"c",
".",
"get",
"(",
"'labels'",
",",
"[",
"]",
")",
")",
"return",
"records"
] | Return list of every label. | [
"Return",
"list",
"of",
"every",
"label",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/evaluate.py#L83-L88 |
6,432 | mcs07/ChemDataExtractor | chemdataextractor/cli/evaluate.py | get_ids | def get_ids(cs):
"""Return chemical identifier records."""
records = []
for c in cs:
records.append({k: c[k] for k in c if k in {'names', 'labels'}})
return records | python | def get_ids(cs):
"""Return chemical identifier records."""
records = []
for c in cs:
records.append({k: c[k] for k in c if k in {'names', 'labels'}})
return records | [
"def",
"get_ids",
"(",
"cs",
")",
":",
"records",
"=",
"[",
"]",
"for",
"c",
"in",
"cs",
":",
"records",
".",
"append",
"(",
"{",
"k",
":",
"c",
"[",
"k",
"]",
"for",
"k",
"in",
"c",
"if",
"k",
"in",
"{",
"'names'",
",",
"'labels'",
"}",
"}",
")",
"return",
"records"
] | Return chemical identifier records. | [
"Return",
"chemical",
"identifier",
"records",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/evaluate.py#L91-L96 |
6,433 | mcs07/ChemDataExtractor | chemdataextractor/utils.py | memoized_property | def memoized_property(fget):
"""Decorator to create memoized properties."""
attr_name = '_{}'.format(fget.__name__)
@functools.wraps(fget)
def fget_memoized(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fget(self))
return getattr(self, attr_name)
return property(fget_memoized) | python | def memoized_property(fget):
"""Decorator to create memoized properties."""
attr_name = '_{}'.format(fget.__name__)
@functools.wraps(fget)
def fget_memoized(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fget(self))
return getattr(self, attr_name)
return property(fget_memoized) | [
"def",
"memoized_property",
"(",
"fget",
")",
":",
"attr_name",
"=",
"'_{}'",
".",
"format",
"(",
"fget",
".",
"__name__",
")",
"@",
"functools",
".",
"wraps",
"(",
"fget",
")",
"def",
"fget_memoized",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"attr_name",
")",
":",
"setattr",
"(",
"self",
",",
"attr_name",
",",
"fget",
"(",
"self",
")",
")",
"return",
"getattr",
"(",
"self",
",",
"attr_name",
")",
"return",
"property",
"(",
"fget_memoized",
")"
] | Decorator to create memoized properties. | [
"Decorator",
"to",
"create",
"memoized",
"properties",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/utils.py#L25-L34 |
6,434 | mcs07/ChemDataExtractor | chemdataextractor/utils.py | memoize | def memoize(obj):
"""Decorator to create memoized functions, methods or classes."""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
if args not in cache:
cache[args] = obj(*args, **kwargs)
return cache[args]
return memoizer | python | def memoize(obj):
"""Decorator to create memoized functions, methods or classes."""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
if args not in cache:
cache[args] = obj(*args, **kwargs)
return cache[args]
return memoizer | [
"def",
"memoize",
"(",
"obj",
")",
":",
"cache",
"=",
"obj",
".",
"cache",
"=",
"{",
"}",
"@",
"functools",
".",
"wraps",
"(",
"obj",
")",
"def",
"memoizer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"args",
"not",
"in",
"cache",
":",
"cache",
"[",
"args",
"]",
"=",
"obj",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"cache",
"[",
"args",
"]",
"return",
"memoizer"
] | Decorator to create memoized functions, methods or classes. | [
"Decorator",
"to",
"create",
"memoized",
"functions",
"methods",
"or",
"classes",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/utils.py#L37-L46 |
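A usage sketch for the two decorators above, assuming both are in scope. One caveat visible in the code: `memoize` keys its cache on positional arguments only, so calls that differ only in keyword arguments would share a cache entry.

@memoize
def slow_square(x):
    print('computing %s' % x)   # printed once per distinct argument
    return x * x

slow_square(4)   # computes and stores in slow_square.cache
slow_square(4)   # served from the cache, nothing printed

class Circle(object):
    def __init__(self, r):
        self.r = r

    @memoized_property
    def area(self):
        return 3.141592653589793 * self.r ** 2   # evaluated once, stored as self._area

c = Circle(2.0)
c.area   # computed and cached on the instance
c.area   # returns the cached self._area
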
6,435 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | BaseTagger.evaluate | def evaluate(self, gold):
"""Evaluate the accuracy of this tagger using a gold standard corpus.
:param list(list(tuple(str, str))) gold: The list of tagged sentences to score the tagger on.
:returns: Tagger accuracy value.
:rtype: float
"""
tagged_sents = self.tag_sents([w for (w, t) in sent] for sent in gold)
gold_tokens = sum(gold, [])
test_tokens = sum(tagged_sents, [])
accuracy = float(sum(x == y for x, y in six.moves.zip(gold_tokens, test_tokens))) / len(test_tokens)
return accuracy | python | def evaluate(self, gold):
"""Evaluate the accuracy of this tagger using a gold standard corpus.
:param list(list(tuple(str, str))) gold: The list of tagged sentences to score the tagger on.
:returns: Tagger accuracy value.
:rtype: float
"""
tagged_sents = self.tag_sents([w for (w, t) in sent] for sent in gold)
gold_tokens = sum(gold, [])
test_tokens = sum(tagged_sents, [])
accuracy = float(sum(x == y for x, y in six.moves.zip(gold_tokens, test_tokens))) / len(test_tokens)
return accuracy | [
"def",
"evaluate",
"(",
"self",
",",
"gold",
")",
":",
"tagged_sents",
"=",
"self",
".",
"tag_sents",
"(",
"[",
"w",
"for",
"(",
"w",
",",
"t",
")",
"in",
"sent",
"]",
"for",
"sent",
"in",
"gold",
")",
"gold_tokens",
"=",
"sum",
"(",
"gold",
",",
"[",
"]",
")",
"test_tokens",
"=",
"sum",
"(",
"tagged_sents",
",",
"[",
"]",
")",
"accuracy",
"=",
"float",
"(",
"sum",
"(",
"x",
"==",
"y",
"for",
"x",
",",
"y",
"in",
"six",
".",
"moves",
".",
"zip",
"(",
"gold_tokens",
",",
"test_tokens",
")",
")",
")",
"/",
"len",
"(",
"test_tokens",
")",
"return",
"accuracy"
] | Evaluate the accuracy of this tagger using a gold standard corpus.
:param list(list(tuple(str, str))) gold: The list of tagged sentences to score the tagger on.
:returns: Tagger accuracy value.
:rtype: float | [
"Evaluate",
"the",
"accuracy",
"of",
"this",
"tagger",
"using",
"a",
"gold",
"standard",
"corpus",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L51-L62 |
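A toy illustration of the accuracy computation inside `evaluate` (the tags here are invented for the example): both the gold and the predicted sentences are flattened with `sum(..., [])`, and matching (token, tag) pairs are counted.

import six

gold = [[('H2O', 'CM'), ('boils', 'VBZ')], [('NaCl', 'CM')]]
pred = [[('H2O', 'CM'), ('boils', 'NN')], [('NaCl', 'CM')]]
gold_tokens = sum(gold, [])   # flatten sentences into one token list
test_tokens = sum(pred, [])
accuracy = float(sum(x == y for x, y in six.moves.zip(gold_tokens, test_tokens))) / len(test_tokens)
assert accuracy == 2.0 / 3.0  # one of the three (token, tag) pairs disagrees
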
6,436 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | AveragedPerceptron.predict | def predict(self, features):
"""Dot-product the features and current weights and return the best label."""
scores = defaultdict(float)
for feat in features:
if feat not in self.weights:
continue
weights = self.weights[feat]
for label, weight in weights.items():
scores[label] += weight
# Do a secondary alphabetic sort, for stability
return max(self.classes, key=lambda label: (scores[label], label)) | python | def predict(self, features):
"""Dot-product the features and current weights and return the best label."""
scores = defaultdict(float)
for feat in features:
if feat not in self.weights:
continue
weights = self.weights[feat]
for label, weight in weights.items():
scores[label] += weight
# Do a secondary alphabetic sort, for stability
return max(self.classes, key=lambda label: (scores[label], label)) | [
"def",
"predict",
"(",
"self",
",",
"features",
")",
":",
"scores",
"=",
"defaultdict",
"(",
"float",
")",
"for",
"feat",
"in",
"features",
":",
"if",
"feat",
"not",
"in",
"self",
".",
"weights",
":",
"continue",
"weights",
"=",
"self",
".",
"weights",
"[",
"feat",
"]",
"for",
"label",
",",
"weight",
"in",
"weights",
".",
"items",
"(",
")",
":",
"scores",
"[",
"label",
"]",
"+=",
"weight",
"# Do a secondary alphabetic sort, for stability",
"return",
"max",
"(",
"self",
".",
"classes",
",",
"key",
"=",
"lambda",
"label",
":",
"(",
"scores",
"[",
"label",
"]",
",",
"label",
")",
")"
] | Dot-product the features and current weights and return the best label. | [
"Dot",
"-",
"product",
"the",
"features",
"and",
"current",
"weights",
"and",
"return",
"the",
"best",
"label",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L141-L151 |
6,437 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | AveragedPerceptron.update | def update(self, truth, guess, features):
"""Update the feature weights."""
def upd_feat(c, f, w, v):
param = (f, c)
self._totals[param] += (self.i - self._tstamps[param]) * w
self._tstamps[param] = self.i
self.weights[f][c] = w + v
self.i += 1
if truth == guess:
return None
for f in features:
weights = self.weights.setdefault(f, {})
upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
upd_feat(guess, f, weights.get(guess, 0.0), -1.0)
return None | python | def update(self, truth, guess, features):
"""Update the feature weights."""
def upd_feat(c, f, w, v):
param = (f, c)
self._totals[param] += (self.i - self._tstamps[param]) * w
self._tstamps[param] = self.i
self.weights[f][c] = w + v
self.i += 1
if truth == guess:
return None
for f in features:
weights = self.weights.setdefault(f, {})
upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
upd_feat(guess, f, weights.get(guess, 0.0), -1.0)
return None | [
"def",
"update",
"(",
"self",
",",
"truth",
",",
"guess",
",",
"features",
")",
":",
"def",
"upd_feat",
"(",
"c",
",",
"f",
",",
"w",
",",
"v",
")",
":",
"param",
"=",
"(",
"f",
",",
"c",
")",
"self",
".",
"_totals",
"[",
"param",
"]",
"+=",
"(",
"self",
".",
"i",
"-",
"self",
".",
"_tstamps",
"[",
"param",
"]",
")",
"*",
"w",
"self",
".",
"_tstamps",
"[",
"param",
"]",
"=",
"self",
".",
"i",
"self",
".",
"weights",
"[",
"f",
"]",
"[",
"c",
"]",
"=",
"w",
"+",
"v",
"self",
".",
"i",
"+=",
"1",
"if",
"truth",
"==",
"guess",
":",
"return",
"None",
"for",
"f",
"in",
"features",
":",
"weights",
"=",
"self",
".",
"weights",
".",
"setdefault",
"(",
"f",
",",
"{",
"}",
")",
"upd_feat",
"(",
"truth",
",",
"f",
",",
"weights",
".",
"get",
"(",
"truth",
",",
"0.0",
")",
",",
"1.0",
")",
"upd_feat",
"(",
"guess",
",",
"f",
",",
"weights",
".",
"get",
"(",
"guess",
",",
"0.0",
")",
",",
"-",
"1.0",
")",
"return",
"None"
] | Update the feature weights. | [
"Update",
"the",
"feature",
"weights",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L153-L168 |
6,438 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | AveragedPerceptron.average_weights | def average_weights(self):
"""Average weights from all iterations."""
for feat, weights in self.weights.items():
new_feat_weights = {}
for clas, weight in weights.items():
param = (feat, clas)
total = self._totals[param]
total += (self.i - self._tstamps[param]) * weight
averaged = round(total / float(self.i), 3)
if averaged:
new_feat_weights[clas] = averaged
self.weights[feat] = new_feat_weights
return None | python | def average_weights(self):
"""Average weights from all iterations."""
for feat, weights in self.weights.items():
new_feat_weights = {}
for clas, weight in weights.items():
param = (feat, clas)
total = self._totals[param]
total += (self.i - self._tstamps[param]) * weight
averaged = round(total / float(self.i), 3)
if averaged:
new_feat_weights[clas] = averaged
self.weights[feat] = new_feat_weights
return None | [
"def",
"average_weights",
"(",
"self",
")",
":",
"for",
"feat",
",",
"weights",
"in",
"self",
".",
"weights",
".",
"items",
"(",
")",
":",
"new_feat_weights",
"=",
"{",
"}",
"for",
"clas",
",",
"weight",
"in",
"weights",
".",
"items",
"(",
")",
":",
"param",
"=",
"(",
"feat",
",",
"clas",
")",
"total",
"=",
"self",
".",
"_totals",
"[",
"param",
"]",
"total",
"+=",
"(",
"self",
".",
"i",
"-",
"self",
".",
"_tstamps",
"[",
"param",
"]",
")",
"*",
"weight",
"averaged",
"=",
"round",
"(",
"total",
"/",
"float",
"(",
"self",
".",
"i",
")",
",",
"3",
")",
"if",
"averaged",
":",
"new_feat_weights",
"[",
"clas",
"]",
"=",
"averaged",
"self",
".",
"weights",
"[",
"feat",
"]",
"=",
"new_feat_weights",
"return",
"None"
] | Average weights from all iterations. | [
"Average",
"weights",
"from",
"all",
"iterations",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L170-L182 |
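A minimal training loop over the three methods above, on a toy task where the feature 'suffix=ing' should select the tag 'VBG'. This assumes the class constructor initialises `weights={}`, `classes=set()`, `_totals`/`_tstamps` as `defaultdict(int)` and `i=0`, as in the full ChemDataExtractor source.

ap = AveragedPerceptron()
ap.classes = {'VBG', 'NN'}
# Alternating examples: 'suffix=ing' -> VBG, 'suffix=ion' -> NN
examples = [({'suffix=ing', 'bias'}, 'VBG'), ({'suffix=ion', 'bias'}, 'NN')] * 5
for features, truth in examples:
    guess = ap.predict(features)
    ap.update(truth, guess, features)   # no-op on weights when guess is correct
ap.average_weights()                     # replace weights with their time-averages
assert ap.predict({'suffix=ing', 'bias'}) == 'VBG'
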
6,439 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | AveragedPerceptron.save | def save(self, path):
"""Save the pickled model weights."""
with io.open(path, 'wb') as fout:
return pickle.dump(dict(self.weights), fout) | python | def save(self, path):
"""Save the pickled model weights."""
with io.open(path, 'wb') as fout:
return pickle.dump(dict(self.weights), fout) | [
"def",
"save",
"(",
"self",
",",
"path",
")",
":",
"with",
"io",
".",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"fout",
":",
"return",
"pickle",
".",
"dump",
"(",
"dict",
"(",
"self",
".",
"weights",
")",
",",
"fout",
")"
] | Save the pickled model weights. | [
"Save",
"the",
"pickled",
"model",
"weights",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L184-L187 |
6,440 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | AveragedPerceptron.load | def load(self, path):
"""Load the pickled model weights."""
with io.open(path, 'rb') as fin:
self.weights = pickle.load(fin) | python | def load(self, path):
"""Load the pickled model weights."""
with io.open(path, 'rb') as fin:
self.weights = pickle.load(fin) | [
"def",
"load",
"(",
"self",
",",
"path",
")",
":",
"with",
"io",
".",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"fin",
":",
"self",
".",
"weights",
"=",
"pickle",
".",
"load",
"(",
"fin",
")"
] | Load the pickled model weights. | [
"Load",
"the",
"pickled",
"model",
"weights",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L189-L192 |
6,441 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | ApTagger.train | def train(self, sentences, nr_iter=5):
"""Train a model from sentences.
:param sentences: A list of sentences, each of which is a list of (token, tag) tuples.
:param nr_iter: Number of training iterations.
"""
self._make_tagdict(sentences)
self.perceptron.classes = self.classes
for iter_ in range(nr_iter):
c = 0
n = 0
for sentence in sentences:
prev, prev2 = self.START
context = [t[0] for t in sentence]
for i, (token, tag) in enumerate(sentence):
guess = self.tagdict.get(token)
if not guess:
feats = self._get_features(i, context, prev, prev2)
guess = self.perceptron.predict(feats)
self.perceptron.update(tag, guess, feats)
prev2 = prev
prev = guess
c += guess == tag
n += 1
random.shuffle(sentences)
log.debug('Iter %s: %s/%s=%s' % (iter_, c, n, (float(c) / n) * 100))
self.perceptron.average_weights() | python | def train(self, sentences, nr_iter=5):
"""Train a model from sentences.
:param sentences: A list of sentences, each of which is a list of (token, tag) tuples.
:param nr_iter: Number of training iterations.
"""
self._make_tagdict(sentences)
self.perceptron.classes = self.classes
for iter_ in range(nr_iter):
c = 0
n = 0
for sentence in sentences:
prev, prev2 = self.START
context = [t[0] for t in sentence]
for i, (token, tag) in enumerate(sentence):
guess = self.tagdict.get(token)
if not guess:
feats = self._get_features(i, context, prev, prev2)
guess = self.perceptron.predict(feats)
self.perceptron.update(tag, guess, feats)
prev2 = prev
prev = guess
c += guess == tag
n += 1
random.shuffle(sentences)
log.debug('Iter %s: %s/%s=%s' % (iter_, c, n, (float(c) / n) * 100))
self.perceptron.average_weights() | [
"def",
"train",
"(",
"self",
",",
"sentences",
",",
"nr_iter",
"=",
"5",
")",
":",
"self",
".",
"_make_tagdict",
"(",
"sentences",
")",
"self",
".",
"perceptron",
".",
"classes",
"=",
"self",
".",
"classes",
"for",
"iter_",
"in",
"range",
"(",
"nr_iter",
")",
":",
"c",
"=",
"0",
"n",
"=",
"0",
"for",
"sentence",
"in",
"sentences",
":",
"prev",
",",
"prev2",
"=",
"self",
".",
"START",
"context",
"=",
"[",
"t",
"[",
"0",
"]",
"for",
"t",
"in",
"sentence",
"]",
"for",
"i",
",",
"(",
"token",
",",
"tag",
")",
"in",
"enumerate",
"(",
"sentence",
")",
":",
"guess",
"=",
"self",
".",
"tagdict",
".",
"get",
"(",
"token",
")",
"if",
"not",
"guess",
":",
"feats",
"=",
"self",
".",
"_get_features",
"(",
"i",
",",
"context",
",",
"prev",
",",
"prev2",
")",
"guess",
"=",
"self",
".",
"perceptron",
".",
"predict",
"(",
"feats",
")",
"self",
".",
"perceptron",
".",
"update",
"(",
"tag",
",",
"guess",
",",
"feats",
")",
"prev2",
"=",
"prev",
"prev",
"=",
"guess",
"c",
"+=",
"guess",
"==",
"tag",
"n",
"+=",
"1",
"random",
".",
"shuffle",
"(",
"sentences",
")",
"log",
".",
"debug",
"(",
"'Iter %s: %s/%s=%s'",
"%",
"(",
"iter_",
",",
"c",
",",
"n",
",",
"(",
"float",
"(",
"c",
")",
"/",
"n",
")",
"*",
"100",
")",
")",
"self",
".",
"perceptron",
".",
"average_weights",
"(",
")"
] | Train a model from sentences.
:param sentences: A list of sentences, each of which is a list of (token, tag) tuples.
:param nr_iter: Number of training iterations. | [
"Train",
"a",
"model",
"from",
"sentences",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L235-L261 |
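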
6,442 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | ApTagger.save | def save(self, f):
"""Save pickled model to file."""
return pickle.dump((self.perceptron.weights, self.tagdict, self.classes, self.clusters), f, protocol=pickle.HIGHEST_PROTOCOL) | python | def save(self, f):
"""Save pickled model to file."""
return pickle.dump((self.perceptron.weights, self.tagdict, self.classes, self.clusters), f, protocol=pickle.HIGHEST_PROTOCOL) | [
"def",
"save",
"(",
"self",
",",
"f",
")",
":",
"return",
"pickle",
".",
"dump",
"(",
"(",
"self",
".",
"perceptron",
".",
"weights",
",",
"self",
".",
"tagdict",
",",
"self",
".",
"classes",
",",
"self",
".",
"clusters",
")",
",",
"f",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")"
] | Save pickled model to file. | [
"Save",
"pickled",
"model",
"to",
"file",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L263-L265 |
6,443 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | ApTagger.load | def load(self, model):
"""Load pickled model."""
self.perceptron.weights, self.tagdict, self.classes, self.clusters = load_model(model)
self.perceptron.classes = self.classes | python | def load(self, model):
"""Load pickled model."""
self.perceptron.weights, self.tagdict, self.classes, self.clusters = load_model(model)
self.perceptron.classes = self.classes | [
"def",
"load",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"perceptron",
".",
"weights",
",",
"self",
".",
"tagdict",
",",
"self",
".",
"classes",
",",
"self",
".",
"clusters",
"=",
"load_model",
"(",
"model",
")",
"self",
".",
"perceptron",
".",
"classes",
"=",
"self",
".",
"classes"
] | Load pickled model. | [
"Load",
"pickled",
"model",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L267-L270 |
6,444 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | CrfTagger.train | def train(self, sentences, model):
"""Train the CRF tagger using CRFSuite.
:param sentences: Annotated sentences.
:param model: Path to save pickled model.
"""
trainer = pycrfsuite.Trainer(verbose=True)
trainer.set_params(self.params)
for sentence in sentences:
tokens, labels = zip(*sentence)
features = [self._get_features(tokens, i) for i in range(len(tokens))]
trainer.append(features, labels)
trainer.train(model)
self.load(model) | python | def train(self, sentences, model):
"""Train the CRF tagger using CRFSuite.
:param sentences: Annotated sentences.
:param model: Path to save pickled model.
"""
trainer = pycrfsuite.Trainer(verbose=True)
trainer.set_params(self.params)
for sentence in sentences:
tokens, labels = zip(*sentence)
features = [self._get_features(tokens, i) for i in range(len(tokens))]
trainer.append(features, labels)
trainer.train(model)
self.load(model) | [
"def",
"train",
"(",
"self",
",",
"sentences",
",",
"model",
")",
":",
"trainer",
"=",
"pycrfsuite",
".",
"Trainer",
"(",
"verbose",
"=",
"True",
")",
"trainer",
".",
"set_params",
"(",
"self",
".",
"params",
")",
"for",
"sentence",
"in",
"sentences",
":",
"tokens",
",",
"labels",
"=",
"zip",
"(",
"*",
"sentence",
")",
"features",
"=",
"[",
"self",
".",
"_get_features",
"(",
"tokens",
",",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"tokens",
")",
")",
"]",
"trainer",
".",
"append",
"(",
"features",
",",
"labels",
")",
"trainer",
".",
"train",
"(",
"model",
")",
"self",
".",
"load",
"(",
"model",
")"
] | Train the CRF tagger using CRFSuite.
:param sentences: Annotated sentences.
:param model: Path to save pickled model. | [
"Train",
"the",
"CRF",
"tagger",
"using",
"CRFSuite",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L335-L348 |
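A sketch of the data format the python-crfsuite calls above expect: one feature list per token and one label per token, appended sentence by sentence. The features, tags, and file name here are illustrative; the real tagger derives far richer features per token.

import pycrfsuite

sentences = [
    [('2-chloropyridine', 'B-CM'), ('was', 'O'), ('added', 'O')],
]
trainer = pycrfsuite.Trainer(verbose=False)
for sentence in sentences:
    tokens, labels = zip(*sentence)
    # Toy per-token features, one list per token
    features = [['w=%s' % t.lower(), 'isdigit=%s' % t.isdigit()] for t in tokens]
    trainer.append(features, list(labels))
trainer.train('toy.crfsuite')   # writes the model file

tagger = pycrfsuite.Tagger()
tagger.open('toy.crfsuite')
print(tagger.tag([['w=2-chloropyridine', 'isdigit=False']]))
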
6,445 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | DictionaryTagger.load | def load(self, model):
"""Load pickled DAWG from disk."""
self._dawg.load(find_data(model))
self._loaded_model = True | python | def load(self, model):
"""Load pickled DAWG from disk."""
self._dawg.load(find_data(model))
self._loaded_model = True | [
"def",
"load",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"_dawg",
".",
"load",
"(",
"find_data",
"(",
"model",
")",
")",
"self",
".",
"_loaded_model",
"=",
"True"
] | Load pickled DAWG from disk. | [
"Load",
"pickled",
"DAWG",
"from",
"disk",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L379-L382 |
6,446 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | DictionaryTagger.build | def build(self, words):
"""Construct dictionary DAWG from tokenized words."""
words = [self._normalize(tokens) for tokens in words]
self._dawg = dawg.CompletionDAWG(words)
self._loaded_model = True | python | def build(self, words):
"""Construct dictionary DAWG from tokenized words."""
words = [self._normalize(tokens) for tokens in words]
self._dawg = dawg.CompletionDAWG(words)
self._loaded_model = True | [
"def",
"build",
"(",
"self",
",",
"words",
")",
":",
"words",
"=",
"[",
"self",
".",
"_normalize",
"(",
"tokens",
")",
"for",
"tokens",
"in",
"words",
"]",
"self",
".",
"_dawg",
"=",
"dawg",
".",
"CompletionDAWG",
"(",
"words",
")",
"self",
".",
"_loaded_model",
"=",
"True"
] | Construct dictionary DAWG from tokenized words. | [
"Construct",
"dictionary",
"DAWG",
"from",
"tokenized",
"words",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L388-L392 |
6,447 | mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | DictionaryTagger._normalize | def _normalize(self, tokens):
"""Normalization transform to apply to both dictionary words and input tokens."""
if self.case_sensitive:
return ' '.join(self.lexicon[t].normalized for t in tokens)
else:
return ' '.join(self.lexicon[t].lower for t in tokens) | python | def _normalize(self, tokens):
"""Normalization transform to apply to both dictionary words and input tokens."""
if self.case_sensitive:
return ' '.join(self.lexicon[t].normalized for t in tokens)
else:
return ' '.join(self.lexicon[t].lower for t in tokens) | [
"def",
"_normalize",
"(",
"self",
",",
"tokens",
")",
":",
"if",
"self",
".",
"case_sensitive",
":",
"return",
"' '",
".",
"join",
"(",
"self",
".",
"lexicon",
"[",
"t",
"]",
".",
"normalized",
"for",
"t",
"in",
"tokens",
")",
"else",
":",
"return",
"' '",
".",
"join",
"(",
"self",
".",
"lexicon",
"[",
"t",
"]",
".",
"lower",
"for",
"t",
"in",
"tokens",
")"
] | Normalization transform to apply to both dictionary words and input tokens. | [
"Normalization",
"transform",
"to",
"apply",
"to",
"both",
"dictionary",
"words",
"and",
"input",
"tokens",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L394-L399 |
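A sketch of the dictionary construction above using the `dawg` package directly: each entry is normalized to a space-joined, lower-cased token string before being frozen into the CompletionDAWG, whose prefix scan is what supports longest-match tagging.

import dawg

entries = [u'acetic acid', u'acetone', u'sodium chloride']
# Case-insensitive normalization, mirroring the non-case-sensitive branch above
words = [u' '.join(w.lower() for w in entry.split()) for entry in entries]
d = dawg.CompletionDAWG(words)

assert u'acetone' in d
assert d.keys(u'acet') == [u'acetic acid', u'acetone']   # prefix scan for longest match
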
6,448 | mcs07/ChemDataExtractor | chemdataextractor/parse/cem.py | standardize_role | def standardize_role(role):
"""Convert role text into standardized form."""
role = role.lower()
if any(c in role for c in {'synthesis', 'give', 'yield', 'afford', 'product', 'preparation of'}):
return 'product'
return role | python | def standardize_role(role):
"""Convert role text into standardized form."""
role = role.lower()
if any(c in role for c in {'synthesis', 'give', 'yield', 'afford', 'product', 'preparation of'}):
return 'product'
return role | [
"def",
"standardize_role",
"(",
"role",
")",
":",
"role",
"=",
"role",
".",
"lower",
"(",
")",
"if",
"any",
"(",
"c",
"in",
"role",
"for",
"c",
"in",
"{",
"'synthesis'",
",",
"'give'",
",",
"'yield'",
",",
"'afford'",
",",
"'product'",
",",
"'preparation of'",
"}",
")",
":",
"return",
"'product'",
"return",
"role"
] | Convert role text into standardized form. | [
"Convert",
"role",
"text",
"into",
"standardized",
"form",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/parse/cem.py#L279-L284 |
6,449 | mcs07/ChemDataExtractor | chemdataextractor/cli/data.py | list | def list(ctx):
"""List active data packages."""
log.debug('chemdataextractor.data.list')
click.echo('Downloaded\tPackage')
for package in PACKAGES:
click.echo('%s\t%s' % (package.local_exists(), package.path)) | python | def list(ctx):
"""List active data packages."""
log.debug('chemdataextractor.data.list')
click.echo('Downloaded\tPackage')
for package in PACKAGES:
click.echo('%s\t%s' % (package.local_exists(), package.path)) | [
"def",
"list",
"(",
"ctx",
")",
":",
"log",
".",
"debug",
"(",
"'chemdataextractor.data.list'",
")",
"click",
".",
"echo",
"(",
"'Downloaded\\tPackage'",
")",
"for",
"package",
"in",
"PACKAGES",
":",
"click",
".",
"echo",
"(",
"'%s\\t%s'",
"%",
"(",
"package",
".",
"local_exists",
"(",
")",
",",
"package",
".",
"path",
")",
")"
] | List active data packages. | [
"List",
"active",
"data",
"packages",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/data.py#L40-L45 |
6,450 | mcs07/ChemDataExtractor | chemdataextractor/cli/data.py | download | def download(ctx):
"""Download data."""
log.debug('chemdataextractor.data.download')
count = 0
for package in PACKAGES:
success = package.download()
if success:
count += 1
click.echo('Successfully downloaded %s new data packages (%s existing)' % (count, len(PACKAGES) - count)) | python | def download(ctx):
"""Download data."""
log.debug('chemdataextractor.data.download')
count = 0
for package in PACKAGES:
success = package.download()
if success:
count += 1
click.echo('Successfully downloaded %s new data packages (%s existing)' % (count, len(PACKAGES) - count)) | [
"def",
"download",
"(",
"ctx",
")",
":",
"log",
".",
"debug",
"(",
"'chemdataextractor.data.download'",
")",
"count",
"=",
"0",
"for",
"package",
"in",
"PACKAGES",
":",
"success",
"=",
"package",
".",
"download",
"(",
")",
"if",
"success",
":",
"count",
"+=",
"1",
"click",
".",
"echo",
"(",
"'Successfully downloaded %s new data packages (%s existing)'",
"%",
"(",
"count",
",",
"len",
"(",
"PACKAGES",
")",
"-",
"count",
")",
")"
] | Download data. | [
"Download",
"data",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/data.py#L50-L58 |
6,451 | mcs07/ChemDataExtractor | chemdataextractor/data.py | find_data | def find_data(path, warn=True):
"""Return the absolute path to a data file within the data directory."""
full_path = os.path.join(get_data_dir(), path)
if warn and not os.path.isfile(full_path):
for package in PACKAGES:
if path == package.path:
log.warn('%s doesn\'t exist. Run `cde data download` to get it.' % path)
break
return full_path | python | def find_data(path, warn=True):
"""Return the absolute path to a data file within the data directory."""
full_path = os.path.join(get_data_dir(), path)
if warn and not os.path.isfile(full_path):
for package in PACKAGES:
if path == package.path:
log.warn('%s doesn\'t exist. Run `cde data download` to get it.' % path)
break
return full_path | [
"def",
"find_data",
"(",
"path",
",",
"warn",
"=",
"True",
")",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_data_dir",
"(",
")",
",",
"path",
")",
"if",
"warn",
"and",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"full_path",
")",
":",
"for",
"package",
"in",
"PACKAGES",
":",
"if",
"path",
"==",
"package",
".",
"path",
":",
"log",
".",
"warn",
"(",
"'%s doesn\\'t exist. Run `cde data download` to get it.'",
"%",
"path",
")",
"break",
"return",
"full_path"
] | Return the absolute path to a data file within the data directory. | [
"Return",
"the",
"absolute",
"path",
"to",
"a",
"data",
"file",
"within",
"the",
"data",
"directory",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/data.py#L119-L127 |
6,452 | mcs07/ChemDataExtractor | chemdataextractor/data.py | load_model | def load_model(path):
"""Load a model from a pickle file in the data directory. Cached so model is only loaded once."""
abspath = find_data(path)
cached = _model_cache.get(abspath)
if cached is not None:
log.debug('Using cached copy of %s' % path)
return cached
log.debug('Loading model %s' % path)
try:
with io.open(abspath, 'rb') as f:
model = six.moves.cPickle.load(f)
except IOError:
raise ModelNotFoundError('Could not load %s. Have you run `cde data download`?' % path)
_model_cache[abspath] = model
return model | python | def load_model(path):
"""Load a model from a pickle file in the data directory. Cached so model is only loaded once."""
abspath = find_data(path)
cached = _model_cache.get(abspath)
if cached is not None:
log.debug('Using cached copy of %s' % path)
return cached
log.debug('Loading model %s' % path)
try:
with io.open(abspath, 'rb') as f:
model = six.moves.cPickle.load(f)
except IOError:
raise ModelNotFoundError('Could not load %s. Have you run `cde data download`?' % path)
_model_cache[abspath] = model
return model | [
"def",
"load_model",
"(",
"path",
")",
":",
"abspath",
"=",
"find_data",
"(",
"path",
")",
"cached",
"=",
"_model_cache",
".",
"get",
"(",
"abspath",
")",
"if",
"cached",
"is",
"not",
"None",
":",
"log",
".",
"debug",
"(",
"'Using cached copy of %s'",
"%",
"path",
")",
"return",
"cached",
"log",
".",
"debug",
"(",
"'Loading model %s'",
"%",
"path",
")",
"try",
":",
"with",
"io",
".",
"open",
"(",
"abspath",
",",
"'rb'",
")",
"as",
"f",
":",
"model",
"=",
"six",
".",
"moves",
".",
"cPickle",
".",
"load",
"(",
"f",
")",
"except",
"IOError",
":",
"raise",
"ModelNotFoundError",
"(",
"'Could not load %s. Have you run `cde data download`?'",
"%",
"path",
")",
"_model_cache",
"[",
"abspath",
"]",
"=",
"model",
"return",
"model"
] | Load a model from a pickle file in the data directory. Cached so model is only loaded once. | [
"Load",
"a",
"model",
"from",
"a",
"pickle",
"file",
"in",
"the",
"data",
"directory",
".",
"Cached",
"so",
"model",
"is",
"only",
"loaded",
"once",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/data.py#L134-L148 |
6,453 | mcs07/ChemDataExtractor | chemdataextractor/text/normalize.py | ChemNormalizer.normalize | def normalize(self, text):
"""Normalize unicode, hyphens, whitespace, and some chemistry terms and formatting."""
text = super(ChemNormalizer, self).normalize(text)
# Normalize element spelling
if self.chem_spell:
text = re.sub(r'sulph', r'sulf', text, flags=re.I)
text = re.sub(r'aluminum', r'aluminium', text, flags=re.I)
text = re.sub(r'cesium', r'caesium', text, flags=re.I)
return text | python | def normalize(self, text):
"""Normalize unicode, hyphens, whitespace, and some chemistry terms and formatting."""
text = super(ChemNormalizer, self).normalize(text)
# Normalize element spelling
if self.chem_spell:
text = re.sub(r'sulph', r'sulf', text, flags=re.I)
text = re.sub(r'aluminum', r'aluminium', text, flags=re.I)
text = re.sub(r'cesium', r'caesium', text, flags=re.I)
return text | [
"def",
"normalize",
"(",
"self",
",",
"text",
")",
":",
"text",
"=",
"super",
"(",
"ChemNormalizer",
",",
"self",
")",
".",
"normalize",
"(",
"text",
")",
"# Normalize element spelling",
"if",
"self",
".",
"chem_spell",
":",
"text",
"=",
"re",
".",
"sub",
"(",
"r'sulph'",
",",
"r'sulf'",
",",
"text",
",",
"flags",
"=",
"re",
".",
"I",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"r'aluminum'",
",",
"r'aluminium'",
",",
"text",
",",
"flags",
"=",
"re",
".",
"I",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"r'cesium'",
",",
"r'caesium'",
",",
"text",
",",
"flags",
"=",
"re",
".",
"I",
")",
"return",
"text"
] | Normalize unicode, hyphens, whitespace, and some chemistry terms and formatting. | [
"Normalize",
"unicode",
"hyphens",
"whitespace",
"and",
"some",
"chemistry",
"terms",
"and",
"formatting",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/text/normalize.py#L181-L189 |
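The three spelling substitutions above applied standalone. `flags=re.I` makes the matches case-insensitive while the replacements stay lowercase, which is how the regexes behave mid-word:

import re

text = 'Sulphur dioxide and aluminum cesium sulphate'
text = re.sub(r'sulph', r'sulf', text, flags=re.I)
text = re.sub(r'aluminum', r'aluminium', text, flags=re.I)
text = re.sub(r'cesium', r'caesium', text, flags=re.I)
assert text == 'sulfur dioxide and aluminium caesium sulfate'
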
6,454 | mcs07/ChemDataExtractor | chemdataextractor/nlp/lexicon.py | Lexicon.add | def add(self, text):
"""Add text to the lexicon.
:param string text: The text to add.
"""
# logging.debug('Adding to lexicon: %s' % text)
if text not in self.lexemes:
normalized = self.normalized(text)
self.lexemes[text] = Lexeme(
text=text,
normalized=normalized,
lower=self.lower(normalized),
first=self.first(normalized),
suffix=self.suffix(normalized),
shape=self.shape(normalized),
length=self.length(normalized),
upper_count=self.upper_count(normalized),
lower_count=self.lower_count(normalized),
digit_count=self.digit_count(normalized),
is_alpha=self.is_alpha(normalized),
is_ascii=self.is_ascii(normalized),
is_digit=self.is_digit(normalized),
is_lower=self.is_lower(normalized),
is_upper=self.is_upper(normalized),
is_title=self.is_title(normalized),
is_punct=self.is_punct(normalized),
is_hyphenated=self.is_hyphenated(normalized),
like_url=self.like_url(normalized),
like_number=self.like_number(normalized),
cluster=self.cluster(normalized)
) | python | def add(self, text):
"""Add text to the lexicon.
:param string text: The text to add.
"""
# logging.debug('Adding to lexicon: %s' % text)
if text not in self.lexemes:
normalized = self.normalized(text)
self.lexemes[text] = Lexeme(
text=text,
normalized=normalized,
lower=self.lower(normalized),
first=self.first(normalized),
suffix=self.suffix(normalized),
shape=self.shape(normalized),
length=self.length(normalized),
upper_count=self.upper_count(normalized),
lower_count=self.lower_count(normalized),
digit_count=self.digit_count(normalized),
is_alpha=self.is_alpha(normalized),
is_ascii=self.is_ascii(normalized),
is_digit=self.is_digit(normalized),
is_lower=self.is_lower(normalized),
is_upper=self.is_upper(normalized),
is_title=self.is_title(normalized),
is_punct=self.is_punct(normalized),
is_hyphenated=self.is_hyphenated(normalized),
like_url=self.like_url(normalized),
like_number=self.like_number(normalized),
cluster=self.cluster(normalized)
) | [
"def",
"add",
"(",
"self",
",",
"text",
")",
":",
"# logging.debug('Adding to lexicon: %s' % text)",
"if",
"text",
"not",
"in",
"self",
".",
"lexemes",
":",
"normalized",
"=",
"self",
".",
"normalized",
"(",
"text",
")",
"self",
".",
"lexemes",
"[",
"text",
"]",
"=",
"Lexeme",
"(",
"text",
"=",
"text",
",",
"normalized",
"=",
"normalized",
",",
"lower",
"=",
"self",
".",
"lower",
"(",
"normalized",
")",
",",
"first",
"=",
"self",
".",
"first",
"(",
"normalized",
")",
",",
"suffix",
"=",
"self",
".",
"suffix",
"(",
"normalized",
")",
",",
"shape",
"=",
"self",
".",
"shape",
"(",
"normalized",
")",
",",
"length",
"=",
"self",
".",
"length",
"(",
"normalized",
")",
",",
"upper_count",
"=",
"self",
".",
"upper_count",
"(",
"normalized",
")",
",",
"lower_count",
"=",
"self",
".",
"lower_count",
"(",
"normalized",
")",
",",
"digit_count",
"=",
"self",
".",
"digit_count",
"(",
"normalized",
")",
",",
"is_alpha",
"=",
"self",
".",
"is_alpha",
"(",
"normalized",
")",
",",
"is_ascii",
"=",
"self",
".",
"is_ascii",
"(",
"normalized",
")",
",",
"is_digit",
"=",
"self",
".",
"is_digit",
"(",
"normalized",
")",
",",
"is_lower",
"=",
"self",
".",
"is_lower",
"(",
"normalized",
")",
",",
"is_upper",
"=",
"self",
".",
"is_upper",
"(",
"normalized",
")",
",",
"is_title",
"=",
"self",
".",
"is_title",
"(",
"normalized",
")",
",",
"is_punct",
"=",
"self",
".",
"is_punct",
"(",
"normalized",
")",
",",
"is_hyphenated",
"=",
"self",
".",
"is_hyphenated",
"(",
"normalized",
")",
",",
"like_url",
"=",
"self",
".",
"like_url",
"(",
"normalized",
")",
",",
"like_number",
"=",
"self",
".",
"like_number",
"(",
"normalized",
")",
",",
"cluster",
"=",
"self",
".",
"cluster",
"(",
"normalized",
")",
")"
] | Add text to the lexicon.
:param string text: The text to add. | [
"Add",
"text",
"to",
"the",
"lexicon",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/lexicon.py#L99-L129 |
6,455 | mcs07/ChemDataExtractor | chemdataextractor/doc/table.py | Table.serialize | def serialize(self):
"""Convert Table element to python dictionary."""
data = {
'type': self.__class__.__name__,
'caption': self.caption.serialize(),
'headings': [[cell.serialize() for cell in hrow] for hrow in self.headings],
'rows': [[cell.serialize() for cell in row] for row in self.rows],
}
return data | python | def serialize(self):
"""Convert Table element to python dictionary."""
data = {
'type': self.__class__.__name__,
'caption': self.caption.serialize(),
'headings': [[cell.serialize() for cell in hrow] for hrow in self.headings],
'rows': [[cell.serialize() for cell in row] for row in self.rows],
}
return data | [
"def",
"serialize",
"(",
"self",
")",
":",
"data",
"=",
"{",
"'type'",
":",
"self",
".",
"__class__",
".",
"__name__",
",",
"'caption'",
":",
"self",
".",
"caption",
".",
"serialize",
"(",
")",
",",
"'headings'",
":",
"[",
"[",
"cell",
".",
"serialize",
"(",
")",
"for",
"cell",
"in",
"hrow",
"]",
"for",
"hrow",
"in",
"self",
".",
"headings",
"]",
",",
"'rows'",
":",
"[",
"[",
"cell",
".",
"serialize",
"(",
")",
"for",
"cell",
"in",
"row",
"]",
"for",
"row",
"in",
"self",
".",
"rows",
"]",
",",
"}",
"return",
"data"
] | Convert Table element to python dictionary. | [
"Convert",
"Table",
"element",
"to",
"python",
"dictionary",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/table.py#L78-L86 |
6,456 | mcs07/ChemDataExtractor | chemdataextractor/model.py | Compound.merge | def merge(self, other):
"""Merge data from another Compound into this Compound."""
log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))
for k in self.keys():
for new_item in other[k]:
if new_item not in self[k]:
self[k].append(new_item)
log.debug('Result: %s' % self.serialize())
return self | python | def merge(self, other):
"""Merge data from another Compound into this Compound."""
log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))
for k in self.keys():
for new_item in other[k]:
if new_item not in self[k]:
self[k].append(new_item)
log.debug('Result: %s' % self.serialize())
return self | [
"def",
"merge",
"(",
"self",
",",
"other",
")",
":",
"log",
".",
"debug",
"(",
"'Merging: %s and %s'",
"%",
"(",
"self",
".",
"serialize",
"(",
")",
",",
"other",
".",
"serialize",
"(",
")",
")",
")",
"for",
"k",
"in",
"self",
".",
"keys",
"(",
")",
":",
"for",
"new_item",
"in",
"other",
"[",
"k",
"]",
":",
"if",
"new_item",
"not",
"in",
"self",
"[",
"k",
"]",
":",
"self",
"[",
"k",
"]",
".",
"append",
"(",
"new_item",
")",
"log",
".",
"debug",
"(",
"'Result: %s'",
"%",
"self",
".",
"serialize",
"(",
")",
")",
"return",
"self"
] | Merge data from another Compound into this Compound. | [
"Merge",
"data",
"from",
"another",
"Compound",
"into",
"this",
"Compound",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/model.py#L451-L459 |
6,457 | mcs07/ChemDataExtractor | chemdataextractor/model.py | Compound.merge_contextual | def merge_contextual(self, other):
"""Merge in contextual info from a template Compound."""
# TODO: This is currently dependent on our data model? Make more robust to schema changes
# Currently we assume all lists at Compound level, with 1 further potential nested level of lists
for k in self.keys():
# print('key: %s' % k)
for item in self[k]:
# print('item: %s' % item)
for other_item in other.get(k, []):
# Skip text properties (don't merge names, labels, roles)
if isinstance(other_item, six.text_type):
continue
for otherk in other_item.keys():
if isinstance(other_item[otherk], list):
if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:
other_nested_item = other_item[otherk][0]
for othernestedk in other_nested_item.keys():
for nested_item in item[otherk]:
if not nested_item[othernestedk]:
nested_item[othernestedk] = other_nested_item[othernestedk]
elif not item[otherk]:
item[otherk] = other_item[otherk]
log.debug('Result: %s' % self.serialize())
return self | python | def merge_contextual(self, other):
"""Merge in contextual info from a template Compound."""
# TODO: This is currently dependent on our data model? Make more robust to schema changes
# Currently we assume all lists at Compound level, with 1 further potential nested level of lists
for k in self.keys():
# print('key: %s' % k)
for item in self[k]:
# print('item: %s' % item)
for other_item in other.get(k, []):
# Skip text properties (don't merge names, labels, roles)
if isinstance(other_item, six.text_type):
continue
for otherk in other_item.keys():
if isinstance(other_item[otherk], list):
if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:
other_nested_item = other_item[otherk][0]
for othernestedk in other_nested_item.keys():
for nested_item in item[otherk]:
if not nested_item[othernestedk]:
nested_item[othernestedk] = other_nested_item[othernestedk]
elif not item[otherk]:
item[otherk] = other_item[otherk]
log.debug('Result: %s' % self.serialize())
return self | [
"def",
"merge_contextual",
"(",
"self",
",",
"other",
")",
":",
"# TODO: This is currently dependent on our data model? Make more robust to schema changes",
"# Currently we assume all lists at Compound level, with 1 further potential nested level of lists",
"for",
"k",
"in",
"self",
".",
"keys",
"(",
")",
":",
"# print('key: %s' % k)",
"for",
"item",
"in",
"self",
"[",
"k",
"]",
":",
"# print('item: %s' % item)",
"for",
"other_item",
"in",
"other",
".",
"get",
"(",
"k",
",",
"[",
"]",
")",
":",
"# Skip text properties (don't merge names, labels, roles)",
"if",
"isinstance",
"(",
"other_item",
",",
"six",
".",
"text_type",
")",
":",
"continue",
"for",
"otherk",
"in",
"other_item",
".",
"keys",
"(",
")",
":",
"if",
"isinstance",
"(",
"other_item",
"[",
"otherk",
"]",
",",
"list",
")",
":",
"if",
"len",
"(",
"other_item",
"[",
"otherk",
"]",
")",
">",
"0",
"and",
"len",
"(",
"item",
"[",
"otherk",
"]",
")",
">",
"0",
":",
"other_nested_item",
"=",
"other_item",
"[",
"otherk",
"]",
"[",
"0",
"]",
"for",
"othernestedk",
"in",
"other_nested_item",
".",
"keys",
"(",
")",
":",
"for",
"nested_item",
"in",
"item",
"[",
"otherk",
"]",
":",
"if",
"not",
"nested_item",
"[",
"othernestedk",
"]",
":",
"nested_item",
"[",
"othernestedk",
"]",
"=",
"other_nested_item",
"[",
"othernestedk",
"]",
"elif",
"not",
"item",
"[",
"otherk",
"]",
":",
"item",
"[",
"otherk",
"]",
"=",
"other_item",
"[",
"otherk",
"]",
"log",
".",
"debug",
"(",
"'Result: %s'",
"%",
"self",
".",
"serialize",
"(",
")",
")",
"return",
"self"
] | Merge in contextual info from a template Compound. | [
"Merge",
"in",
"contextual",
"info",
"from",
"a",
"template",
"Compound",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/model.py#L461-L484 |
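A toy illustration of the contextual-merge rule above: empty fields on the target's nested records are filled from the template, populated fields are left alone. The field names assume the ChemDataExtractor 1.x model schema (Compound.melting_points holding MeltingPoint records with value/units).

from chemdataextractor.model import Compound, MeltingPoint

c = Compound(names=['benzene'], melting_points=[MeltingPoint(value='5.5')])
template = Compound(melting_points=[MeltingPoint(units='°C')])   # contextual info only
c.merge_contextual(template)
assert c.melting_points[0].value == '5.5'   # populated field kept
assert c.melting_points[0].units == '°C'    # empty field filled from context
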
6,458 | mcs07/ChemDataExtractor | chemdataextractor/model.py | Compound.is_id_only | def is_id_only(self):
"""Return True if identifier information only."""
for key, value in self.items():
if key not in {'names', 'labels', 'roles'} and value:
return False
if self.names or self.labels:
return True
return False | python | def is_id_only(self):
"""Return True if identifier information only."""
for key, value in self.items():
if key not in {'names', 'labels', 'roles'} and value:
return False
if self.names or self.labels:
return True
return False | [
"def",
"is_id_only",
"(",
"self",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"{",
"'names'",
",",
"'labels'",
",",
"'roles'",
"}",
"and",
"value",
":",
"return",
"False",
"if",
"self",
".",
"names",
"or",
"self",
".",
"labels",
":",
"return",
"True",
"return",
"False"
] | Return True if identifier information only. | [
"Return",
"True",
"if",
"identifier",
"information",
"only",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/model.py#L502-L509 |
6,459 | mcs07/ChemDataExtractor | chemdataextractor/parse/actions.py | join | def join(tokens, start, result):
"""Join tokens into a single string with spaces between."""
texts = []
if len(result) > 0:
for e in result:
for child in e.iter():
if child.text is not None:
texts.append(child.text)
return [E(result[0].tag, ' '.join(texts))] | python | def join(tokens, start, result):
"""Join tokens into a single string with spaces between."""
texts = []
if len(result) > 0:
for e in result:
for child in e.iter():
if child.text is not None:
texts.append(child.text)
return [E(result[0].tag, ' '.join(texts))] | [
"def",
"join",
"(",
"tokens",
",",
"start",
",",
"result",
")",
":",
"texts",
"=",
"[",
"]",
"if",
"len",
"(",
"result",
")",
">",
"0",
":",
"for",
"e",
"in",
"result",
":",
"for",
"child",
"in",
"e",
".",
"iter",
"(",
")",
":",
"if",
"child",
".",
"text",
"is",
"not",
"None",
":",
"texts",
".",
"append",
"(",
"child",
".",
"text",
")",
"return",
"[",
"E",
"(",
"result",
"[",
"0",
"]",
".",
"tag",
",",
"' '",
".",
"join",
"(",
"texts",
")",
")",
"]"
] | Join tokens into a single string with spaces between. | [
"Join",
"tokens",
"into",
"a",
"single",
"string",
"with",
"spaces",
"between",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/parse/actions.py#L33-L41 |
6,460 | mcs07/ChemDataExtractor | chemdataextractor/parse/actions.py | strip_stop | def strip_stop(tokens, start, result):
"""Remove trailing full stop from tokens."""
for e in result:
for child in e.iter():
if child.text.endswith('.'):
child.text = child.text[:-1]
return result | python | def strip_stop(tokens, start, result):
"""Remove trailing full stop from tokens."""
for e in result:
for child in e.iter():
if child.text.endswith('.'):
child.text = child.text[:-1]
return result | [
"def",
"strip_stop",
"(",
"tokens",
",",
"start",
",",
"result",
")",
":",
"for",
"e",
"in",
"result",
":",
"for",
"child",
"in",
"e",
".",
"iter",
"(",
")",
":",
"if",
"child",
".",
"text",
".",
"endswith",
"(",
"'.'",
")",
":",
"child",
".",
"text",
"=",
"child",
".",
"text",
"[",
":",
"-",
"1",
"]",
"return",
"result"
] | Remove trailing full stop from tokens. | [
"Remove",
"trailing",
"full",
"stop",
"from",
"tokens",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/parse/actions.py#L55-L61 |
6,461 | mcs07/ChemDataExtractor | chemdataextractor/parse/actions.py | fix_whitespace | def fix_whitespace(tokens, start, result):
"""Fix whitespace around hyphens and commas. Can be used to remove whitespace tokenization artefacts."""
for e in result:
for child in e.iter():
child.text = child.text.replace(' , ', ', ')
for hyphen in HYPHENS:
child.text = child.text.replace(' %s ' % hyphen, '%s' % hyphen)
child.text = re.sub(r'- (.) -', r'-\1-', child.text)
return result | python | def fix_whitespace(tokens, start, result):
"""Fix whitespace around hyphens and commas. Can be used to remove whitespace tokenization artefacts."""
for e in result:
for child in e.iter():
child.text = child.text.replace(' , ', ', ')
for hyphen in HYPHENS:
child.text = child.text.replace(' %s ' % hyphen, '%s' % hyphen)
child.text = re.sub(r'- (.) -', r'-\1-', child.text)
return result | [
"def",
"fix_whitespace",
"(",
"tokens",
",",
"start",
",",
"result",
")",
":",
"for",
"e",
"in",
"result",
":",
"for",
"child",
"in",
"e",
".",
"iter",
"(",
")",
":",
"child",
".",
"text",
"=",
"child",
".",
"text",
".",
"replace",
"(",
"' , '",
",",
"', '",
")",
"for",
"hyphen",
"in",
"HYPHENS",
":",
"child",
".",
"text",
"=",
"child",
".",
"text",
".",
"replace",
"(",
"' %s '",
"%",
"hyphen",
",",
"'%s'",
"%",
"hyphen",
")",
"child",
".",
"text",
"=",
"re",
".",
"sub",
"(",
"r'- (.) -'",
",",
"r'-\\1-'",
",",
"child",
".",
"text",
")",
"return",
"result"
] | Fix whitespace around hyphens and commas. Can be used to remove whitespace tokenization artefacts. | [
"Fix",
"whitespace",
"around",
"hyphens",
"and",
"commas",
".",
"Can",
"be",
"used",
"to",
"remove",
"whitespace",
"tokenization",
"artefacts",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/parse/actions.py#L64-L72 |
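The `join` and `fix_whitespace` actions above composed on a tokenized chemical name: `join` glues the token elements into one element, then `fix_whitespace` collapses the spacing artefacts around commas and hyphens (this assumes '-' is in the module's HYPHENS set, as in the ChemDataExtractor source).

from lxml.builder import E
from chemdataextractor.parse.actions import join, fix_whitespace

result = [E('name', '2'), E('name', ','), E('name', '2'), E('name', '-'), E('name', 'dimethylbutane')]
joined = join(None, 0, result)           # one element with text '2 , 2 - dimethylbutane'
fixed = fix_whitespace(None, 0, joined)
print(fixed[0].text)                     # 2, 2-dimethylbutane
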
6,462 | mcs07/ChemDataExtractor | chemdataextractor/reader/plaintext.py | PlainTextReader.detect | def detect(self, fstring, fname=None):
"""Have a stab at most files."""
if fname is not None and '.' in fname:
extension = fname.rsplit('.', 1)[1]
if extension in {'pdf', 'html', 'xml'}:
return False
return True | python | def detect(self, fstring, fname=None):
"""Have a stab at most files."""
if fname is not None and '.' in fname:
extension = fname.rsplit('.', 1)[1]
if extension in {'pdf', 'html', 'xml'}:
return False
return True | [
"def",
"detect",
"(",
"self",
",",
"fstring",
",",
"fname",
"=",
"None",
")",
":",
"if",
"fname",
"is",
"not",
"None",
"and",
"'.'",
"in",
"fname",
":",
"extension",
"=",
"fname",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"1",
"]",
"if",
"extension",
"in",
"{",
"'pdf'",
",",
"'html'",
",",
"'xml'",
"}",
":",
"return",
"False",
"return",
"True"
] | Have a stab at most files. | [
"Have",
"a",
"stab",
"at",
"most",
"files",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/reader/plaintext.py#L26-L32 |
6,463 | mcs07/ChemDataExtractor | chemdataextractor/reader/pdf.py | PdfReader._process_layout | def _process_layout(self, layout):
"""Process an LTPage layout and return a list of elements."""
# Here we just group text into paragraphs
elements = []
for lt_obj in layout:
if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):
elements.append(Paragraph(lt_obj.get_text().strip()))
elif isinstance(lt_obj, LTFigure):
# Recursive...
elements.extend(self._process_layout(lt_obj))
return elements | python | def _process_layout(self, layout):
"""Process an LTPage layout and return a list of elements."""
# Here we just group text into paragraphs
elements = []
for lt_obj in layout:
if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):
elements.append(Paragraph(lt_obj.get_text().strip()))
elif isinstance(lt_obj, LTFigure):
# Recursive...
elements.extend(self._process_layout(lt_obj))
return elements | [
"def",
"_process_layout",
"(",
"self",
",",
"layout",
")",
":",
"# Here we just group text into paragraphs",
"elements",
"=",
"[",
"]",
"for",
"lt_obj",
"in",
"layout",
":",
"if",
"isinstance",
"(",
"lt_obj",
",",
"LTTextBox",
")",
"or",
"isinstance",
"(",
"lt_obj",
",",
"LTTextLine",
")",
":",
"elements",
".",
"append",
"(",
"Paragraph",
"(",
"lt_obj",
".",
"get_text",
"(",
")",
".",
"strip",
"(",
")",
")",
")",
"elif",
"isinstance",
"(",
"lt_obj",
",",
"LTFigure",
")",
":",
"# Recursive...",
"elements",
".",
"extend",
"(",
"self",
".",
"_process_layout",
"(",
"lt_obj",
")",
")",
"return",
"elements"
] | Process an LTPage layout and return a list of elements. | [
"Process",
"an",
"LTPage",
"layout",
"and",
"return",
"a",
"list",
"of",
"elements",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/reader/pdf.py#L38-L48 |
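A sketch of feeding LTPage layouts to a `_process_layout`-style walk, here driven through pdfminer.six's high-level API (an assumption; the reader above runs the lower-level PDFPageInterpreter itself, and 'paper.pdf' is a hypothetical input file).

from pdfminer.high_level import extract_pages
from pdfminer.layout import LTTextBox, LTTextLine, LTFigure

def paragraphs(layout):
    out = []
    for lt_obj in layout:
        if isinstance(lt_obj, (LTTextBox, LTTextLine)):
            out.append(lt_obj.get_text().strip())
        elif isinstance(lt_obj, LTFigure):
            out.extend(paragraphs(lt_obj))   # figures can nest further text objects
    return out

for page_layout in extract_pages('paper.pdf'):
    for para in paragraphs(page_layout):
        print(para)
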
6,464 | mcs07/ChemDataExtractor | chemdataextractor/text/__init__.py | get_encoding | def get_encoding(input_string, guesses=None, is_html=False):
"""Return the encoding of a byte string. Uses bs4 UnicodeDammit.
:param string input_string: Encoded byte string.
:param list[string] guesses: (Optional) List of encoding guesses to prioritize.
:param bool is_html: Whether the input is HTML.
"""
converted = UnicodeDammit(input_string, override_encodings=[guesses] if guesses else [], is_html=is_html)
return converted.original_encoding | python | def get_encoding(input_string, guesses=None, is_html=False):
"""Return the encoding of a byte string. Uses bs4 UnicodeDammit.
:param string input_string: Encoded byte string.
:param list[string] guesses: (Optional) List of encoding guesses to prioritize.
:param bool is_html: Whether the input is HTML.
"""
converted = UnicodeDammit(input_string, override_encodings=[guesses] if guesses else [], is_html=is_html)
return converted.original_encoding | [
"def",
"get_encoding",
"(",
"input_string",
",",
"guesses",
"=",
"None",
",",
"is_html",
"=",
"False",
")",
":",
"converted",
"=",
"UnicodeDammit",
"(",
"input_string",
",",
"override_encodings",
"=",
"[",
"guesses",
"]",
"if",
"guesses",
"else",
"[",
"]",
",",
"is_html",
"=",
"is_html",
")",
"return",
"converted",
".",
"original_encoding"
] | Return the encoding of a byte string. Uses bs4 UnicodeDammit.
:param string input_string: Encoded byte string.
:param list[string] guesses: (Optional) List of encoding guesses to prioritize.
:param bool is_html: Whether the input is HTML. | [
"Return",
"the",
"encoding",
"of",
"a",
"byte",
"string",
".",
"Uses",
"bs4",
"UnicodeDammit",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/text/__init__.py#L221-L229 |
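For reference, a minimal sketch of what the wrapper above delegates to; the sample bytes are illustrative. Note that `get_encoding` wraps its `guesses` argument in a list, so a list passed in arrives nested inside `override_encodings`.

```python
from bs4 import UnicodeDammit

raw = 'café'.encode('latin-1')                     # bytes of unknown encoding
dammit = UnicodeDammit(raw, ['latin-1', 'utf-8'])  # second arg: override_encodings
print(dammit.original_encoding)                    # detected encoding, e.g. 'latin-1'
print(dammit.unicode_markup)                       # decoded text: 'café'
```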
6,465 | mcs07/ChemDataExtractor | chemdataextractor/text/__init__.py | levenshtein | def levenshtein(s1, s2, allow_substring=False):
"""Return the Levenshtein distance between two strings.
The Levenshtein distance (a.k.a. "edit distance") is the number of characters that need to be substituted,
inserted or deleted to transform s1 into s2.
Setting the `allow_substring` parameter to True allows s1 to be a
substring of s2, so that, for example, "hello" and "hello there" would have a distance of zero.
:param string s1: The first string
:param string s2: The second string
:param bool allow_substring: Whether to allow s1 to be a substring of s2
:returns: Levenshtein distance.
:rtype: int
"""
len1, len2 = len(s1), len(s2)
lev = []
for i in range(len1 + 1):
lev.append([0] * (len2 + 1))
for i in range(len1 + 1):
lev[i][0] = i
for j in range(len2 + 1):
lev[0][j] = 0 if allow_substring else j
for i in range(len1):
for j in range(len2):
lev[i + 1][j + 1] = min(lev[i][j + 1] + 1, lev[i + 1][j] + 1, lev[i][j] + (s1[i] != s2[j]))
return min(lev[len1]) if allow_substring else lev[len1][len2] | python | def levenshtein(s1, s2, allow_substring=False):
"""Return the Levenshtein distance between two strings.
The Levenshtein distance (a.k.a. "edit distance") is the number of characters that need to be substituted,
inserted or deleted to transform s1 into s2.
Setting the `allow_substring` parameter to True allows s1 to be a
substring of s2, so that, for example, "hello" and "hello there" would have a distance of zero.
:param string s1: The first string
:param string s2: The second string
:param bool allow_substring: Whether to allow s1 to be a substring of s2
:returns: Levenshtein distance.
:rtype: int
"""
len1, len2 = len(s1), len(s2)
lev = []
for i in range(len1 + 1):
lev.append([0] * (len2 + 1))
for i in range(len1 + 1):
lev[i][0] = i
for j in range(len2 + 1):
lev[0][j] = 0 if allow_substring else j
for i in range(len1):
for j in range(len2):
lev[i + 1][j + 1] = min(lev[i][j + 1] + 1, lev[i + 1][j] + 1, lev[i][j] + (s1[i] != s2[j]))
return min(lev[len1]) if allow_substring else lev[len1][len2] | [
"def",
"levenshtein",
"(",
"s1",
",",
"s2",
",",
"allow_substring",
"=",
"False",
")",
":",
"len1",
",",
"len2",
"=",
"len",
"(",
"s1",
")",
",",
"len",
"(",
"s2",
")",
"lev",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len1",
"+",
"1",
")",
":",
"lev",
".",
"append",
"(",
"[",
"0",
"]",
"*",
"(",
"len2",
"+",
"1",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len1",
"+",
"1",
")",
":",
"lev",
"[",
"i",
"]",
"[",
"0",
"]",
"=",
"i",
"for",
"j",
"in",
"range",
"(",
"len2",
"+",
"1",
")",
":",
"lev",
"[",
"0",
"]",
"[",
"j",
"]",
"=",
"0",
"if",
"allow_substring",
"else",
"j",
"for",
"i",
"in",
"range",
"(",
"len1",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len2",
")",
":",
"lev",
"[",
"i",
"+",
"1",
"]",
"[",
"j",
"+",
"1",
"]",
"=",
"min",
"(",
"lev",
"[",
"i",
"]",
"[",
"j",
"+",
"1",
"]",
"+",
"1",
",",
"lev",
"[",
"i",
"+",
"1",
"]",
"[",
"j",
"]",
"+",
"1",
",",
"lev",
"[",
"i",
"]",
"[",
"j",
"]",
"+",
"(",
"s1",
"[",
"i",
"]",
"!=",
"s2",
"[",
"j",
"]",
")",
")",
"return",
"min",
"(",
"lev",
"[",
"len1",
"]",
")",
"if",
"allow_substring",
"else",
"lev",
"[",
"len1",
"]",
"[",
"len2",
"]"
] | Return the Levenshtein distance between two strings.
The Levenshtein distance (a.k.a. "edit distance") is the number of characters that need to be substituted,
inserted or deleted to transform s1 into s2.
Setting the `allow_substring` parameter to True allows s1 to be a
substring of s2, so that, for example, "hello" and "hello there" would have a distance of zero.
:param string s1: The first string
:param string s2: The second string
:param bool allow_substring: Whether to allow s1 to be a substring of s2
:returns: Levenshtein distance.
:rtype: int | [
"Return",
"the",
"Levenshtein",
"distance",
"between",
"two",
"strings",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/text/__init__.py#L232-L258 |
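A quick check of the two modes, assuming the function is importable from `chemdataextractor.text` as the path above suggests:

```python
from chemdataextractor.text import levenshtein

print(levenshtein('kitten', 'sitting'))                           # 3
print(levenshtein('hello', 'hello there'))                        # 6 (insertions count)
print(levenshtein('hello', 'hello there', allow_substring=True))  # 0 (s1 is a substring)
```

With `allow_substring=True` the first row of the table is zeroed and the minimum of the last row is returned, which is what lets a leading match cost nothing.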
6,466 | mcs07/ChemDataExtractor | chemdataextractor/text/__init__.py | bracket_level | def bracket_level(text, open={'(', '[', '{'}, close={')', ']', '}'}):
"""Return 0 if string contains balanced brackets or no brackets."""
level = 0
for c in text:
if c in open:
level += 1
elif c in close:
level -= 1
return level | python | def bracket_level(text, open={'(', '[', '{'}, close={')', ']', '}'}):
"""Return 0 if string contains balanced brackets or no brackets."""
level = 0
for c in text:
if c in open:
level += 1
elif c in close:
level -= 1
return level | [
"def",
"bracket_level",
"(",
"text",
",",
"open",
"=",
"{",
"'('",
",",
"'['",
",",
"'{'",
"}",
",",
"close",
"=",
"{",
"')'",
",",
"']'",
",",
"'}'",
"}",
")",
":",
"level",
"=",
"0",
"for",
"c",
"in",
"text",
":",
"if",
"c",
"in",
"open",
":",
"level",
"+=",
"1",
"elif",
"c",
"in",
"close",
":",
"level",
"-=",
"1",
"return",
"level"
] | Return 0 if string contains balanced brackets or no brackets. | [
"Return",
"0",
"if",
"string",
"contains",
"balanced",
"brackets",
"or",
"no",
"brackets",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/text/__init__.py#L261-L269 |
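Despite the terse docstring, the return value is the net nesting count, so the sign tells you which side is unbalanced. A small illustration, assuming the same import path:

```python
from chemdataextractor.text import bracket_level

print(bracket_level('Ni(OH)2'))      # 0  - balanced
print(bracket_level('4-(2-amino'))   # 1  - one unclosed opening bracket
print(bracket_level(')-isomer'))     # -1 - one unmatched closing bracket
```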
6,467 | mcs07/ChemDataExtractor | chemdataextractor/cli/config.py | list | def list(ctx):
"""List all config values."""
log.debug('chemdataextractor.config.list')
for k in config:
click.echo('%s : %s' % (k, config[k])) | python | def list(ctx):
"""List all config values."""
log.debug('chemdataextractor.config.list')
for k in config:
click.echo('%s : %s' % (k, config[k])) | [
"def",
"list",
"(",
"ctx",
")",
":",
"log",
".",
"debug",
"(",
"'chemdataextractor.config.list'",
")",
"for",
"k",
"in",
"config",
":",
"click",
".",
"echo",
"(",
"'%s : %s'",
"%",
"(",
"k",
",",
"config",
"[",
"k",
"]",
")",
")"
] | List all config values. | [
"List",
"all",
"config",
"values",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/config.py#L33-L37 |
6,468 | mcs07/ChemDataExtractor | chemdataextractor/cli/cem.py | train_crf | def train_crf(ctx, input, output, clusters):
"""Train CRF CEM recognizer."""
click.echo('chemdataextractor.crf.train')
sentences = []
for line in input:
sentence = []
for t in line.split():
token, tag, iob = t.rsplit('/', 2)
sentence.append(((token, tag), iob))
if sentence:
sentences.append(sentence)
tagger = CrfCemTagger(clusters=clusters)
tagger.train(sentences, output) | python | def train_crf(ctx, input, output, clusters):
"""Train CRF CEM recognizer."""
click.echo('chemdataextractor.crf.train')
sentences = []
for line in input:
sentence = []
for t in line.split():
token, tag, iob = t.rsplit('/', 2)
sentence.append(((token, tag), iob))
if sentence:
sentences.append(sentence)
tagger = CrfCemTagger(clusters=clusters)
tagger.train(sentences, output) | [
"def",
"train_crf",
"(",
"ctx",
",",
"input",
",",
"output",
",",
"clusters",
")",
":",
"click",
".",
"echo",
"(",
"'chemdataextractor.crf.train'",
")",
"sentences",
"=",
"[",
"]",
"for",
"line",
"in",
"input",
":",
"sentence",
"=",
"[",
"]",
"for",
"t",
"in",
"line",
".",
"split",
"(",
")",
":",
"token",
",",
"tag",
",",
"iob",
"=",
"t",
".",
"rsplit",
"(",
"'/'",
",",
"2",
")",
"sentence",
".",
"append",
"(",
"(",
"(",
"token",
",",
"tag",
")",
",",
"iob",
")",
")",
"if",
"sentence",
":",
"sentences",
".",
"append",
"(",
"sentence",
")",
"tagger",
"=",
"CrfCemTagger",
"(",
"clusters",
"=",
"clusters",
")",
"tagger",
".",
"train",
"(",
"sentences",
",",
"output",
")"
] | Train CRF CEM recognizer. | [
"Train",
"CRF",
"CEM",
"recognizer",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/cem.py#L31-L44 |
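The `rsplit('/', 2)` above is what keeps slashes inside tokens intact, since only the last two separators delimit the POS tag and IOB label. A toy line, with made-up tags:

```python
line = 'mol/L/NN/O benzene/NN/B-CM'   # illustrative training line
for t in line.split():
    token, tag, iob = t.rsplit('/', 2)
    print((token, tag, iob))
# ('mol/L', 'NN', 'O')
# ('benzene', 'NN', 'B-CM')
```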
6,469 | mcs07/ChemDataExtractor | chemdataextractor/doc/text.py | Text.sentences | def sentences(self):
"""Return a list of Sentences that make up this text passage."""
sents = []
spans = self.sentence_tokenizer.span_tokenize(self.text)
for span in spans:
sent = Sentence(
text=self.text[span[0]:span[1]],
start=span[0],
end=span[1],
word_tokenizer=self.word_tokenizer,
lexicon=self.lexicon,
abbreviation_detector=self.abbreviation_detector,
pos_tagger=self.pos_tagger,
ner_tagger=self.ner_tagger,
parsers=self.parsers,
document=self.document
)
sents.append(sent)
return sents | python | def sentences(self):
"""Return a list of Sentences that make up this text passage."""
sents = []
spans = self.sentence_tokenizer.span_tokenize(self.text)
for span in spans:
sent = Sentence(
text=self.text[span[0]:span[1]],
start=span[0],
end=span[1],
word_tokenizer=self.word_tokenizer,
lexicon=self.lexicon,
abbreviation_detector=self.abbreviation_detector,
pos_tagger=self.pos_tagger,
ner_tagger=self.ner_tagger,
parsers=self.parsers,
document=self.document
)
sents.append(sent)
return sents | [
"def",
"sentences",
"(",
"self",
")",
":",
"sents",
"=",
"[",
"]",
"spans",
"=",
"self",
".",
"sentence_tokenizer",
".",
"span_tokenize",
"(",
"self",
".",
"text",
")",
"for",
"span",
"in",
"spans",
":",
"sent",
"=",
"Sentence",
"(",
"text",
"=",
"self",
".",
"text",
"[",
"span",
"[",
"0",
"]",
":",
"span",
"[",
"1",
"]",
"]",
",",
"start",
"=",
"span",
"[",
"0",
"]",
",",
"end",
"=",
"span",
"[",
"1",
"]",
",",
"word_tokenizer",
"=",
"self",
".",
"word_tokenizer",
",",
"lexicon",
"=",
"self",
".",
"lexicon",
",",
"abbreviation_detector",
"=",
"self",
".",
"abbreviation_detector",
",",
"pos_tagger",
"=",
"self",
".",
"pos_tagger",
",",
"ner_tagger",
"=",
"self",
".",
"ner_tagger",
",",
"parsers",
"=",
"self",
".",
"parsers",
",",
"document",
"=",
"self",
".",
"document",
")",
"sents",
".",
"append",
"(",
"sent",
")",
"return",
"sents"
] | Return a list of Sentences that make up this text passage. | [
"Return",
"a",
"list",
"of",
"Sentences",
"that",
"make",
"up",
"this",
"text",
"passage",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/text.py#L139-L157 |
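A minimal usage sketch; `Paragraph` is a `Text` subclass, and the import path follows the module layout above:

```python
from chemdataextractor.doc import Paragraph

p = Paragraph('The melting point was 34 °C. The product was recrystallised.')
for sent in p.sentences:
    print(sent.start, sent.end, sent.text)   # character spans into the paragraph
```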
6,470 | mcs07/ChemDataExtractor | chemdataextractor/doc/text.py | Text.records | def records(self):
"""Return a list of records for this text passage."""
return ModelList(*[r for sent in self.sentences for r in sent.records]) | python | def records(self):
"""Return a list of records for this text passage."""
return ModelList(*[r for sent in self.sentences for r in sent.records]) | [
"def",
"records",
"(",
"self",
")",
":",
"return",
"ModelList",
"(",
"*",
"[",
"r",
"for",
"sent",
"in",
"self",
".",
"sentences",
"for",
"r",
"in",
"sent",
".",
"records",
"]",
")"
] | Return a list of records for this text passage. | [
"Return",
"a",
"list",
"of",
"records",
"for",
"this",
"text",
"passage",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/text.py#L231-L233 |
6,471 | mcs07/ChemDataExtractor | chemdataextractor/doc/text.py | Sentence.tokens | def tokens(self):
"""Return a list of token Spans for this sentence."""
spans = self.word_tokenizer.span_tokenize(self.text)
toks = [Token(
text=self.text[span[0]:span[1]],
start=span[0] + self.start,
end=span[1] + self.start,
lexicon=self.lexicon
) for span in spans]
return toks | python | def tokens(self):
"""Return a list of token Spans for this sentence."""
spans = self.word_tokenizer.span_tokenize(self.text)
toks = [Token(
text=self.text[span[0]:span[1]],
start=span[0] + self.start,
end=span[1] + self.start,
lexicon=self.lexicon
) for span in spans]
return toks | [
"def",
"tokens",
"(",
"self",
")",
":",
"spans",
"=",
"self",
".",
"word_tokenizer",
".",
"span_tokenize",
"(",
"self",
".",
"text",
")",
"toks",
"=",
"[",
"Token",
"(",
"text",
"=",
"self",
".",
"text",
"[",
"span",
"[",
"0",
"]",
":",
"span",
"[",
"1",
"]",
"]",
",",
"start",
"=",
"span",
"[",
"0",
"]",
"+",
"self",
".",
"start",
",",
"end",
"=",
"span",
"[",
"1",
"]",
"+",
"self",
".",
"start",
",",
"lexicon",
"=",
"self",
".",
"lexicon",
")",
"for",
"span",
"in",
"spans",
"]",
"return",
"toks"
] | Return a list of token Spans for this sentence. | [
"Return",
"a",
"list",
"of",
"token",
"Spans",
"for",
"this",
"sentence",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/text.py#L322-L331 |
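Because each span is shifted by `self.start`, token offsets index into the enclosing text rather than the sentence. A sketch, assuming the `Paragraph` wrapper used above:

```python
from chemdataextractor.doc import Paragraph

p = Paragraph('First sentence. Second sentence here.')
tok = p.sentences[1].tokens[0]   # 'Second'
print(tok.start, tok.end)        # offsets into the full paragraph text
print(p.text[tok.start:tok.end]) # 'Second'
```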
6,472 | mcs07/ChemDataExtractor | chemdataextractor/doc/text.py | Sentence.tags | def tags(self):
"""Return combined POS and NER tags."""
tags = self.pos_tags
for i, tag in enumerate(self.ner_tags):
if tag is not None:
tags[i] = tag
return tags | python | def tags(self):
"""Return combined POS and NER tags."""
tags = self.pos_tags
for i, tag in enumerate(self.ner_tags):
if tag is not None:
tags[i] = tag
return tags | [
"def",
"tags",
"(",
"self",
")",
":",
"tags",
"=",
"self",
".",
"pos_tags",
"for",
"i",
",",
"tag",
"in",
"enumerate",
"(",
"self",
".",
"ner_tags",
")",
":",
"if",
"tag",
"is",
"not",
"None",
":",
"tags",
"[",
"i",
"]",
"=",
"tag",
"return",
"tags"
] | Return combined POS and NER tags. | [
"Return",
"combined",
"POS",
"and",
"NER",
"tags",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/text.py#L492-L498 |
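The overlay logic in isolation, with toy tag lists: wherever the NER tagger produced a tag, it overrides the POS tag.

```python
pos_tags = ['DT', 'NN', 'VBD', 'JJ']
ner_tags = [None, 'B-CM', None, None]   # illustrative values

tags = list(pos_tags)                   # copy so pos_tags is untouched
for i, tag in enumerate(ner_tags):
    if tag is not None:
        tags[i] = tag
print(tags)                             # ['DT', 'B-CM', 'VBD', 'JJ']
```

Note that the method above assigns into the list returned by `self.pos_tags` rather than copying it first; whether that matters depends on whether `pos_tags` returns a fresh list on each call.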
6,473 | mcs07/ChemDataExtractor | chemdataextractor/doc/text.py | Sentence.records | def records(self):
"""Return a list of records for this sentence."""
compounds = ModelList()
seen_labels = set()
# Ensure no control characters are sent to a parser (need to be XML compatible)
tagged_tokens = [(CONTROL_RE.sub('', token), tag) for token, tag in self.tagged_tokens]
for parser in self.parsers:
for record in parser.parse(tagged_tokens):
p = record.serialize()
if not p: # TODO: Potential performance issues?
continue
# Skip duplicate records
if record in compounds:
continue
# Skip just labels that have already been seen (bit of a hack)
if all(k in {'labels', 'roles'} for k in p.keys()) and set(record.labels).issubset(seen_labels):
continue
seen_labels.update(record.labels)
compounds.append(record)
return compounds | python | def records(self):
"""Return a list of records for this sentence."""
compounds = ModelList()
seen_labels = set()
# Ensure no control characters are sent to a parser (need to be XML compatible)
tagged_tokens = [(CONTROL_RE.sub('', token), tag) for token, tag in self.tagged_tokens]
for parser in self.parsers:
for record in parser.parse(tagged_tokens):
p = record.serialize()
if not p: # TODO: Potential performance issues?
continue
# Skip duplicate records
if record in compounds:
continue
# Skip just labels that have already been seen (bit of a hack)
if all(k in {'labels', 'roles'} for k in p.keys()) and set(record.labels).issubset(seen_labels):
continue
seen_labels.update(record.labels)
compounds.append(record)
return compounds | [
"def",
"records",
"(",
"self",
")",
":",
"compounds",
"=",
"ModelList",
"(",
")",
"seen_labels",
"=",
"set",
"(",
")",
"# Ensure no control characters are sent to a parser (need to be XML compatible)",
"tagged_tokens",
"=",
"[",
"(",
"CONTROL_RE",
".",
"sub",
"(",
"''",
",",
"token",
")",
",",
"tag",
")",
"for",
"token",
",",
"tag",
"in",
"self",
".",
"tagged_tokens",
"]",
"for",
"parser",
"in",
"self",
".",
"parsers",
":",
"for",
"record",
"in",
"parser",
".",
"parse",
"(",
"tagged_tokens",
")",
":",
"p",
"=",
"record",
".",
"serialize",
"(",
")",
"if",
"not",
"p",
":",
"# TODO: Potential performance issues?",
"continue",
"# Skip duplicate records",
"if",
"record",
"in",
"compounds",
":",
"continue",
"# Skip just labels that have already been seen (bit of a hack)",
"if",
"all",
"(",
"k",
"in",
"{",
"'labels'",
",",
"'roles'",
"}",
"for",
"k",
"in",
"p",
".",
"keys",
"(",
")",
")",
"and",
"set",
"(",
"record",
".",
"labels",
")",
".",
"issubset",
"(",
"seen_labels",
")",
":",
"continue",
"seen_labels",
".",
"update",
"(",
"record",
".",
"labels",
")",
"compounds",
".",
"append",
"(",
"record",
")",
"return",
"compounds"
] | Return a list of records for this sentence. | [
"Return",
"a",
"list",
"of",
"records",
"for",
"this",
"sentence",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/text.py#L505-L524 |
6,474 | mcs07/ChemDataExtractor | chemdataextractor/cli/chemdner.py | prepare_gold | def prepare_gold(ctx, annotations, gout):
"""Prepare bc-evaluate gold file from annotations supplied by CHEMDNER."""
click.echo('chemdataextractor.chemdner.prepare_gold')
for line in annotations:
pmid, ta, start, end, text, category = line.strip().split('\t')
gout.write('%s\t%s:%s:%s\n' % (pmid, ta, start, end)) | python | def prepare_gold(ctx, annotations, gout):
"""Prepare bc-evaluate gold file from annotations supplied by CHEMDNER."""
click.echo('chemdataextractor.chemdner.prepare_gold')
for line in annotations:
pmid, ta, start, end, text, category = line.strip().split('\t')
gout.write('%s\t%s:%s:%s\n' % (pmid, ta, start, end)) | [
"def",
"prepare_gold",
"(",
"ctx",
",",
"annotations",
",",
"gout",
")",
":",
"click",
".",
"echo",
"(",
"'chemdataextractor.chemdner.prepare_gold'",
")",
"for",
"line",
"in",
"annotations",
":",
"pmid",
",",
"ta",
",",
"start",
",",
"end",
",",
"text",
",",
"category",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"gout",
".",
"write",
"(",
"'%s\\t%s:%s:%s\\n'",
"%",
"(",
"pmid",
",",
"ta",
",",
"start",
",",
"end",
")",
")"
] | Prepare bc-evaluate gold file from annotations supplied by CHEMDNER. | [
"Prepare",
"bc",
"-",
"evaluate",
"gold",
"file",
"from",
"annotations",
"supplied",
"by",
"CHEMDNER",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/chemdner.py#L33-L38 |
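One annotation line in, one gold line out; the PMID, offsets, and entity are made-up values for illustration:

```python
line = '21826085\tA\t946\t957\tmethanamine\tSYSTEMATIC\n'
pmid, ta, start, end, text, category = line.strip().split('\t')
print('%s\t%s:%s:%s' % (pmid, ta, start, end))
# 21826085	A:946:957
```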
6,475 | mcs07/ChemDataExtractor | chemdataextractor/cli/chemdner.py | prepare_tokens | def prepare_tokens(ctx, input, annotations, tout, lout):
"""Prepare tokenized and tagged corpus file from those supplied by CHEMDNER."""
click.echo('chemdataextractor.chemdner.prepare_tokens')
# Collect the annotations into a dict
anndict = defaultdict(list)
for line in annotations:
pmid, ta, start, end, text, category = line.strip().split('\t')
anndict[(pmid, ta)].append((int(start), int(end), text))
# Process the corpus
for line in input:
pmid, title, abstract = line.strip().split(u'\t')
for t, section, anns in [(Title(title), 'T', anndict.get((pmid, u'T'), [])), (Paragraph(abstract), u'A', anndict.get((pmid, u'A'), []))]:
# Write our tokens with POS and IOB tags
tagged = _prep_tags(t, anns)
for i, sentence in enumerate(tagged):
tout.write(u' '.join([u'/'.join([token, tag, label]) for token, tag, label in sentence]))
lout.write(u' '.join([u'/'.join([token, label]) for token, tag, label in sentence]))
tout.write(u'\n')
lout.write(u'\n')
tout.write(u'\n')
lout.write(u'\n') | python | def prepare_tokens(ctx, input, annotations, tout, lout):
"""Prepare tokenized and tagged corpus file from those supplied by CHEMDNER."""
click.echo('chemdataextractor.chemdner.prepare_tokens')
# Collect the annotations into a dict
anndict = defaultdict(list)
for line in annotations:
pmid, ta, start, end, text, category = line.strip().split('\t')
anndict[(pmid, ta)].append((int(start), int(end), text))
# Process the corpus
for line in input:
pmid, title, abstract = line.strip().split(u'\t')
for t, section, anns in [(Title(title), 'T', anndict.get((pmid, u'T'), [])), (Paragraph(abstract), u'A', anndict.get((pmid, u'A'), []))]:
# Write our tokens with POS and IOB tags
tagged = _prep_tags(t, anns)
for i, sentence in enumerate(tagged):
tout.write(u' '.join([u'/'.join([token, tag, label]) for token, tag, label in sentence]))
lout.write(u' '.join([u'/'.join([token, label]) for token, tag, label in sentence]))
tout.write(u'\n')
lout.write(u'\n')
tout.write(u'\n')
lout.write(u'\n') | [
"def",
"prepare_tokens",
"(",
"ctx",
",",
"input",
",",
"annotations",
",",
"tout",
",",
"lout",
")",
":",
"click",
".",
"echo",
"(",
"'chemdataextractor.chemdner.prepare_tokens'",
")",
"# Collect the annotations into a dict",
"anndict",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"line",
"in",
"annotations",
":",
"pmid",
",",
"ta",
",",
"start",
",",
"end",
",",
"text",
",",
"category",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"anndict",
"[",
"(",
"pmid",
",",
"ta",
")",
"]",
".",
"append",
"(",
"(",
"int",
"(",
"start",
")",
",",
"int",
"(",
"end",
")",
",",
"text",
")",
")",
"# Process the corpus",
"for",
"line",
"in",
"input",
":",
"pmid",
",",
"title",
",",
"abstract",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"u'\\t'",
")",
"for",
"t",
",",
"section",
",",
"anns",
"in",
"[",
"(",
"Title",
"(",
"title",
")",
",",
"'T'",
",",
"anndict",
".",
"get",
"(",
"(",
"pmid",
",",
"u'T'",
")",
",",
"[",
"]",
")",
")",
",",
"(",
"Paragraph",
"(",
"abstract",
")",
",",
"u'A'",
",",
"anndict",
".",
"get",
"(",
"(",
"pmid",
",",
"u'A'",
")",
",",
"[",
"]",
")",
")",
"]",
":",
"# Write our tokens with POS and IOB tags",
"tagged",
"=",
"_prep_tags",
"(",
"t",
",",
"anns",
")",
"for",
"i",
",",
"sentence",
"in",
"enumerate",
"(",
"tagged",
")",
":",
"tout",
".",
"write",
"(",
"u' '",
".",
"join",
"(",
"[",
"u'/'",
".",
"join",
"(",
"[",
"token",
",",
"tag",
",",
"label",
"]",
")",
"for",
"token",
",",
"tag",
",",
"label",
"in",
"sentence",
"]",
")",
")",
"lout",
".",
"write",
"(",
"u' '",
".",
"join",
"(",
"[",
"u'/'",
".",
"join",
"(",
"[",
"token",
",",
"label",
"]",
")",
"for",
"token",
",",
"tag",
",",
"label",
"in",
"sentence",
"]",
")",
")",
"tout",
".",
"write",
"(",
"u'\\n'",
")",
"lout",
".",
"write",
"(",
"u'\\n'",
")",
"tout",
".",
"write",
"(",
"u'\\n'",
")",
"lout",
".",
"write",
"(",
"u'\\n'",
")"
] | Prepare tokenized and tagged corpus file from those supplied by CHEMDNER. | [
"Prepare",
"tokenized",
"and",
"tagged",
"corpus",
"file",
"from",
"those",
"supplied",
"by",
"CHEMDNER",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/chemdner.py#L47-L67 |
6,476 | mcs07/ChemDataExtractor | chemdataextractor/cli/chemdner.py | _prep_tags | def _prep_tags(t, annotations):
"""Apply IOB chemical entity tags and POS tags to text."""
tags = [['O' for _ in sent.tokens] for sent in t.sentences]
for start, end, text in annotations:
done_first = False
for i, sent in enumerate(t.sentences):
for j, token in enumerate(sent.tokens):
if start <= token.start < end or start < token.end <= end:
# Token start or end occurs within the annotation
tags[i][j] = 'I-CM' if done_first else 'B-CM'
done_first = True
tagged = [[(token[0], token[1], tags[i][j]) for j, token in enumerate(sentence.pos_tagged_tokens)] for i, sentence in enumerate(t.sentences)]
return tagged | python | def _prep_tags(t, annotations):
"""Apply IOB chemical entity tags and POS tags to text."""
tags = [['O' for _ in sent.tokens] for sent in t.sentences]
for start, end, text in annotations:
done_first = False
for i, sent in enumerate(t.sentences):
for j, token in enumerate(sent.tokens):
if start <= token.start < end or start < token.end <= end:
# Token start or end occurs within the annotation
tags[i][j] = 'I-CM' if done_first else 'B-CM'
done_first = True
tagged = [[(token[0], token[1], tags[i][j]) for j, token in enumerate(sentence.pos_tagged_tokens)] for i, sentence in enumerate(t.sentences)]
return tagged | [
"def",
"_prep_tags",
"(",
"t",
",",
"annotations",
")",
":",
"tags",
"=",
"[",
"[",
"'O'",
"for",
"_",
"in",
"sent",
".",
"tokens",
"]",
"for",
"sent",
"in",
"t",
".",
"sentences",
"]",
"for",
"start",
",",
"end",
",",
"text",
"in",
"annotations",
":",
"done_first",
"=",
"False",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"t",
".",
"sentences",
")",
":",
"for",
"j",
",",
"token",
"in",
"enumerate",
"(",
"sent",
".",
"tokens",
")",
":",
"if",
"start",
"<=",
"token",
".",
"start",
"<",
"end",
"or",
"start",
"<",
"token",
".",
"end",
"<=",
"end",
":",
"# Token start or end occurs within the annotation",
"tags",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"'I-CM'",
"if",
"done_first",
"else",
"'B-CM'",
"done_first",
"=",
"True",
"tagged",
"=",
"[",
"[",
"(",
"token",
"[",
"0",
"]",
",",
"token",
"[",
"1",
"]",
",",
"tags",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"for",
"j",
",",
"token",
"in",
"enumerate",
"(",
"sentence",
".",
"pos_tagged_tokens",
")",
"]",
"for",
"i",
",",
"sentence",
"in",
"enumerate",
"(",
"t",
".",
"sentences",
")",
"]",
"return",
"tagged"
] | Apply IOB chemical entity tags and POS tags to text. | [
"Apply",
"IOB",
"chemical",
"entity",
"tags",
"and",
"POS",
"tags",
"to",
"text",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/chemdner.py#L70-L82 |
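The span test in isolation: a token is tagged whenever its start or end falls inside the annotation's character range, with the first hit getting `B-CM` and the rest `I-CM`. Toy offsets:

```python
tokens = [(0, 3), (4, 11), (12, 16)]   # (start, end) of 'The aspirin dose'
start, end = 4, 11                     # annotation covering 'aspirin'

tags, done_first = [], False
for tok_start, tok_end in tokens:
    if start <= tok_start < end or start < tok_end <= end:
        tags.append('I-CM' if done_first else 'B-CM')
        done_first = True
    else:
        tags.append('O')
print(tags)                            # ['O', 'B-CM', 'O']
```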
6,477 | mcs07/ChemDataExtractor | chemdataextractor/cli/pos.py | train_all | def train_all(ctx, output):
"""Train POS tagger on WSJ, GENIA, and both. With and without cluster features."""
click.echo('chemdataextractor.pos.train_all')
click.echo('Output: %s' % output)
ctx.invoke(train, output='%s_wsj_nocluster.pickle' % output, corpus='wsj', clusters=False)
ctx.invoke(train, output='%s_wsj.pickle' % output, corpus='wsj', clusters=True)
ctx.invoke(train, output='%s_genia_nocluster.pickle' % output, corpus='genia', clusters=False)
ctx.invoke(train, output='%s_genia.pickle' % output, corpus='genia', clusters=True)
ctx.invoke(train, output='%s_wsj_genia_nocluster.pickle' % output, corpus='wsj+genia', clusters=False)
ctx.invoke(train, output='%s_wsj_genia.pickle' % output, corpus='wsj+genia', clusters=True) | python | def train_all(ctx, output):
"""Train POS tagger on WSJ, GENIA, and both. With and without cluster features."""
click.echo('chemdataextractor.pos.train_all')
click.echo('Output: %s' % output)
ctx.invoke(train, output='%s_wsj_nocluster.pickle' % output, corpus='wsj', clusters=False)
ctx.invoke(train, output='%s_wsj.pickle' % output, corpus='wsj', clusters=True)
ctx.invoke(train, output='%s_genia_nocluster.pickle' % output, corpus='genia', clusters=False)
ctx.invoke(train, output='%s_genia.pickle' % output, corpus='genia', clusters=True)
ctx.invoke(train, output='%s_wsj_genia_nocluster.pickle' % output, corpus='wsj+genia', clusters=False)
ctx.invoke(train, output='%s_wsj_genia.pickle' % output, corpus='wsj+genia', clusters=True) | [
"def",
"train_all",
"(",
"ctx",
",",
"output",
")",
":",
"click",
".",
"echo",
"(",
"'chemdataextractor.pos.train_all'",
")",
"click",
".",
"echo",
"(",
"'Output: %s'",
"%",
"output",
")",
"ctx",
".",
"invoke",
"(",
"train",
",",
"output",
"=",
"'%s_wsj_nocluster.pickle'",
"%",
"output",
",",
"corpus",
"=",
"'wsj'",
",",
"clusters",
"=",
"False",
")",
"ctx",
".",
"invoke",
"(",
"train",
",",
"output",
"=",
"'%s_wsj.pickle'",
"%",
"output",
",",
"corpus",
"=",
"'wsj'",
",",
"clusters",
"=",
"True",
")",
"ctx",
".",
"invoke",
"(",
"train",
",",
"output",
"=",
"'%s_genia_nocluster.pickle'",
"%",
"output",
",",
"corpus",
"=",
"'genia'",
",",
"clusters",
"=",
"False",
")",
"ctx",
".",
"invoke",
"(",
"train",
",",
"output",
"=",
"'%s_genia.pickle'",
"%",
"output",
",",
"corpus",
"=",
"'genia'",
",",
"clusters",
"=",
"True",
")",
"ctx",
".",
"invoke",
"(",
"train",
",",
"output",
"=",
"'%s_wsj_genia_nocluster.pickle'",
"%",
"output",
",",
"corpus",
"=",
"'wsj+genia'",
",",
"clusters",
"=",
"False",
")",
"ctx",
".",
"invoke",
"(",
"train",
",",
"output",
"=",
"'%s_wsj_genia.pickle'",
"%",
"output",
",",
"corpus",
"=",
"'wsj+genia'",
",",
"clusters",
"=",
"True",
")"
] | Train POS tagger on WSJ, GENIA, and both. With and without cluster features. | [
"Train",
"POS",
"tagger",
"on",
"WSJ",
"GENIA",
"and",
"both",
".",
"With",
"and",
"without",
"cluster",
"features",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/pos.py#L35-L44 |
6,478 | mcs07/ChemDataExtractor | chemdataextractor/cli/pos.py | evaluate_all | def evaluate_all(ctx, model):
"""Evaluate POS taggers on WSJ and GENIA."""
click.echo('chemdataextractor.pos.evaluate_all')
click.echo('Model: %s' % model)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True) | python | def evaluate_all(ctx, model):
"""Evaluate POS taggers on WSJ and GENIA."""
click.echo('chemdataextractor.pos.evaluate_all')
click.echo('Model: %s' % model)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True) | [
"def",
"evaluate_all",
"(",
"ctx",
",",
"model",
")",
":",
"click",
".",
"echo",
"(",
"'chemdataextractor.pos.evaluate_all'",
")",
"click",
".",
"echo",
"(",
"'Model: %s'",
"%",
"model",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_wsj_nocluster.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'wsj'",
",",
"clusters",
"=",
"False",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_wsj_nocluster.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'genia'",
",",
"clusters",
"=",
"False",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_wsj.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'wsj'",
",",
"clusters",
"=",
"True",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_wsj.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'genia'",
",",
"clusters",
"=",
"True",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_genia_nocluster.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'wsj'",
",",
"clusters",
"=",
"False",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_genia_nocluster.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'genia'",
",",
"clusters",
"=",
"False",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_genia.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'wsj'",
",",
"clusters",
"=",
"True",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_genia.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'genia'",
",",
"clusters",
"=",
"True",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_wsj_genia_nocluster.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'wsj'",
",",
"clusters",
"=",
"False",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_wsj_genia_nocluster.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'genia'",
",",
"clusters",
"=",
"False",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_wsj_genia.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'wsj'",
",",
"clusters",
"=",
"True",
")",
"ctx",
".",
"invoke",
"(",
"evaluate",
",",
"model",
"=",
"'%s_wsj_genia.pickle'",
"%",
"model",
",",
"corpus",
"=",
"'genia'",
",",
"clusters",
"=",
"True",
")"
] | Evaluate POS taggers on WSJ and GENIA. | [
"Evaluate",
"POS",
"taggers",
"on",
"WSJ",
"and",
"GENIA",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/pos.py#L50-L65 |
6,479 | mcs07/ChemDataExtractor | chemdataextractor/cli/pos.py | train | def train(ctx, output, corpus, clusters):
"""Train POS Tagger."""
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
genia_sents[i][j] = (token, '-LRB-') # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(clusters=clusters)
tagger.train(training_corpus, output) | python | def train(ctx, output, corpus, clusters):
"""Train POS Tagger."""
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
genia_sents[i][j] = (token, '-LRB-') # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(clusters=clusters)
tagger.train(training_corpus, output) | [
"def",
"train",
"(",
"ctx",
",",
"output",
",",
"corpus",
",",
"clusters",
")",
":",
"click",
".",
"echo",
"(",
"'chemdataextractor.pos.train'",
")",
"click",
".",
"echo",
"(",
"'Output: %s'",
"%",
"output",
")",
"click",
".",
"echo",
"(",
"'Corpus: %s'",
"%",
"corpus",
")",
"click",
".",
"echo",
"(",
"'Clusters: %s'",
"%",
"clusters",
")",
"wsj_sents",
"=",
"[",
"]",
"genia_sents",
"=",
"[",
"]",
"if",
"corpus",
"==",
"'wsj'",
"or",
"corpus",
"==",
"'wsj+genia'",
":",
"wsj_sents",
"=",
"list",
"(",
"wsj_training",
".",
"tagged_sents",
"(",
")",
")",
"# For WSJ, remove all tokens with -NONE- tag",
"for",
"i",
",",
"wsj_sent",
"in",
"enumerate",
"(",
"wsj_sents",
")",
":",
"wsj_sents",
"[",
"i",
"]",
"=",
"[",
"t",
"for",
"t",
"in",
"wsj_sent",
"if",
"not",
"t",
"[",
"1",
"]",
"==",
"'-NONE-'",
"]",
"if",
"corpus",
"==",
"'genia'",
"or",
"corpus",
"==",
"'wsj+genia'",
":",
"genia_sents",
"=",
"list",
"(",
"genia_training",
".",
"tagged_sents",
"(",
")",
")",
"# Translate GENIA",
"for",
"i",
",",
"genia_sent",
"in",
"enumerate",
"(",
"genia_sents",
")",
":",
"for",
"j",
",",
"(",
"token",
",",
"tag",
")",
"in",
"enumerate",
"(",
"genia_sent",
")",
":",
"if",
"tag",
"==",
"'('",
":",
"genia_sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"'-LRB-'",
")",
"# ( to -RLB- (also do for evaluation)",
"elif",
"tag",
"==",
"')'",
":",
"genia_sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"'-RRB-'",
")",
"# ) to -RRB- (also do for evaluation)",
"elif",
"tag",
"==",
"'CT'",
":",
"genia_sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"'DT'",
")",
"# Typo?",
"elif",
"tag",
"==",
"'XT'",
":",
"genia_sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"'DT'",
")",
"# Typo?",
"elif",
"tag",
"==",
"'-'",
":",
"genia_sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"':'",
")",
"# Single hyphen character for dash",
"elif",
"tag",
"==",
"'N'",
":",
"genia_sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"'NN'",
")",
"# Typo?",
"elif",
"tag",
"==",
"'PP'",
":",
"genia_sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"'PRP'",
")",
"# Typo?",
"elif",
"tag",
"==",
"''",
"and",
"token",
"==",
"')'",
":",
"genia_sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"'-RRB-'",
")",
"# Typo?",
"elif",
"tag",
"==",
"''",
"and",
"token",
"==",
"'IFN-gamma'",
":",
"genia_sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"'NN'",
")",
"# Typo?",
"elif",
"'|'",
"in",
"tag",
":",
"genia_sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"tag",
".",
"split",
"(",
"'|'",
")",
"[",
"0",
"]",
")",
"# If contains |, choose first part",
"# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)",
"genia_sents",
"[",
"i",
"]",
"=",
"[",
"t",
"for",
"t",
"in",
"genia_sent",
"if",
"t",
"[",
"1",
"]",
"in",
"TAGS",
"]",
"if",
"corpus",
"==",
"'wsj'",
":",
"training_corpus",
"=",
"wsj_sents",
"elif",
"corpus",
"==",
"'genia'",
":",
"training_corpus",
"=",
"genia_sents",
"elif",
"corpus",
"==",
"'wsj+genia'",
":",
"training_corpus",
"=",
"wsj_sents",
"+",
"genia_sents",
"else",
":",
"raise",
"click",
".",
"ClickException",
"(",
"'Invalid corpus'",
")",
"tagger",
"=",
"ChemCrfPosTagger",
"(",
"clusters",
"=",
"clusters",
")",
"tagger",
".",
"train",
"(",
"training_corpus",
",",
"output",
")"
] | Train POS Tagger. | [
"Train",
"POS",
"Tagger",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/pos.py#L73-L127 |
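The GENIA tag fixes lend themselves to a lookup table; this sketch covers the tag-only substitutions and the `|` disambiguation, but omits the two empty-tag special cases above that also key on the token (`)` and `IFN-gamma`):

```python
GENIA_TAG_FIXES = {
    '(': '-LRB-', ')': '-RRB-',   # Penn Treebank bracket tags
    'CT': 'DT', 'XT': 'DT',       # apparent typos for determiners
    '-': ':',                     # bare hyphen used as a dash
    'N': 'NN', 'PP': 'PRP',       # apparent typos
}

def normalise(token, tag):
    if '|' in tag:
        tag = tag.split('|')[0]   # ambiguous tag: keep the first reading
    return token, GENIA_TAG_FIXES.get(tag, tag)

print(normalise('(', '('))          # ('(', '-LRB-')
print(normalise('cell', 'NN|JJ'))   # ('cell', 'NN')
```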
6,480 | mcs07/ChemDataExtractor | chemdataextractor/cli/pos.py | evaluate | def evaluate(ctx, model, corpus, clusters):
"""Evaluate performance of POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
sents[i][j] = (token, '-LRB-')
elif tag == ')':
sents[i][j] = (token, '-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(model=model, clusters=clusters)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy)) | python | def evaluate(ctx, model, corpus, clusters):
"""Evaluate performance of POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
sents[i][j] = (token, '-LRB-')
elif tag == ')':
sents[i][j] = (token, '-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(model=model, clusters=clusters)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy)) | [
"def",
"evaluate",
"(",
"ctx",
",",
"model",
",",
"corpus",
",",
"clusters",
")",
":",
"click",
".",
"echo",
"(",
"'chemdataextractor.pos.evaluate'",
")",
"if",
"corpus",
"==",
"'wsj'",
":",
"evaluation",
"=",
"wsj_evaluation",
"sents",
"=",
"list",
"(",
"evaluation",
".",
"tagged_sents",
"(",
")",
")",
"for",
"i",
",",
"wsj_sent",
"in",
"enumerate",
"(",
"sents",
")",
":",
"sents",
"[",
"i",
"]",
"=",
"[",
"t",
"for",
"t",
"in",
"wsj_sent",
"if",
"not",
"t",
"[",
"1",
"]",
"==",
"'-NONE-'",
"]",
"elif",
"corpus",
"==",
"'genia'",
":",
"evaluation",
"=",
"genia_evaluation",
"sents",
"=",
"list",
"(",
"evaluation",
".",
"tagged_sents",
"(",
")",
")",
"# Translate GENIA bracket tags",
"for",
"i",
",",
"genia_sent",
"in",
"enumerate",
"(",
"sents",
")",
":",
"for",
"j",
",",
"(",
"token",
",",
"tag",
")",
"in",
"enumerate",
"(",
"genia_sent",
")",
":",
"if",
"tag",
"==",
"'('",
":",
"sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"'-LRB-'",
")",
"elif",
"tag",
"==",
"')'",
":",
"sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"'-RRB-'",
")",
"else",
":",
"raise",
"click",
".",
"ClickException",
"(",
"'Invalid corpus'",
")",
"tagger",
"=",
"ChemCrfPosTagger",
"(",
"model",
"=",
"model",
",",
"clusters",
"=",
"clusters",
")",
"accuracy",
"=",
"tagger",
".",
"evaluate",
"(",
"sents",
")",
"click",
".",
"echo",
"(",
"'%s on %s: %s'",
"%",
"(",
"model",
",",
"evaluation",
",",
"accuracy",
")",
")"
] | Evaluate performance of POS Tagger. | [
"Evaluate",
"performance",
"of",
"POS",
"Tagger",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/pos.py#L135-L157 |
6,481 | mcs07/ChemDataExtractor | chemdataextractor/cli/pos.py | evaluate_perceptron | def evaluate_perceptron(ctx, model, corpus):
"""Evaluate performance of Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == u'-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == u'(':
sents[i][j] = (token, u'-LRB-')
elif tag == u')':
sents[i][j] = (token, u'-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(model=model)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy)) | python | def evaluate_perceptron(ctx, model, corpus):
"""Evaluate performance of Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == u'-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == u'(':
sents[i][j] = (token, u'-LRB-')
elif tag == u')':
sents[i][j] = (token, u'-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(model=model)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy)) | [
"def",
"evaluate_perceptron",
"(",
"ctx",
",",
"model",
",",
"corpus",
")",
":",
"click",
".",
"echo",
"(",
"'chemdataextractor.pos.evaluate'",
")",
"if",
"corpus",
"==",
"'wsj'",
":",
"evaluation",
"=",
"wsj_evaluation",
"sents",
"=",
"list",
"(",
"evaluation",
".",
"tagged_sents",
"(",
")",
")",
"for",
"i",
",",
"wsj_sent",
"in",
"enumerate",
"(",
"sents",
")",
":",
"sents",
"[",
"i",
"]",
"=",
"[",
"t",
"for",
"t",
"in",
"wsj_sent",
"if",
"not",
"t",
"[",
"1",
"]",
"==",
"u'-NONE-'",
"]",
"elif",
"corpus",
"==",
"'genia'",
":",
"evaluation",
"=",
"genia_evaluation",
"sents",
"=",
"list",
"(",
"evaluation",
".",
"tagged_sents",
"(",
")",
")",
"# Translate GENIA bracket tags",
"for",
"i",
",",
"genia_sent",
"in",
"enumerate",
"(",
"sents",
")",
":",
"for",
"j",
",",
"(",
"token",
",",
"tag",
")",
"in",
"enumerate",
"(",
"genia_sent",
")",
":",
"if",
"tag",
"==",
"u'('",
":",
"sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"u'-LRB-'",
")",
"elif",
"tag",
"==",
"u')'",
":",
"sents",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"(",
"token",
",",
"u'-RRB-'",
")",
"else",
":",
"raise",
"click",
".",
"ClickException",
"(",
"'Invalid corpus'",
")",
"tagger",
"=",
"ChemApPosTagger",
"(",
"model",
"=",
"model",
")",
"accuracy",
"=",
"tagger",
".",
"evaluate",
"(",
"sents",
")",
"click",
".",
"echo",
"(",
"'%s on %s: %s'",
"%",
"(",
"model",
",",
"evaluation",
",",
"accuracy",
")",
")"
] | Evaluate performance of Averaged Perceptron POS Tagger. | [
"Evaluate",
"performance",
"of",
"Averaged",
"Perceptron",
"POS",
"Tagger",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/pos.py#L227-L249 |
6,482 | mcs07/ChemDataExtractor | chemdataextractor/cli/pos.py | tag | def tag(ctx, input, output):
"""Output POS-tagged tokens."""
log.info('chemdataextractor.pos.tag')
log.info('Reading %s' % input.name)
doc = Document.from_file(input)
for element in doc.elements:
if isinstance(element, Text):
for sentence in element.sentences:
output.write(u' '.join(u'/'.join([token, tag]) for token, tag in sentence.pos_tagged_tokens))
output.write(u'\n') | python | def tag(ctx, input, output):
"""Output POS-tagged tokens."""
log.info('chemdataextractor.pos.tag')
log.info('Reading %s' % input.name)
doc = Document.from_file(input)
for element in doc.elements:
if isinstance(element, Text):
for sentence in element.sentences:
output.write(u' '.join(u'/'.join([token, tag]) for token, tag in sentence.pos_tagged_tokens))
output.write(u'\n') | [
"def",
"tag",
"(",
"ctx",
",",
"input",
",",
"output",
")",
":",
"log",
".",
"info",
"(",
"'chemdataextractor.pos.tag'",
")",
"log",
".",
"info",
"(",
"'Reading %s'",
"%",
"input",
".",
"name",
")",
"doc",
"=",
"Document",
".",
"from_file",
"(",
"input",
")",
"for",
"element",
"in",
"doc",
".",
"elements",
":",
"if",
"isinstance",
"(",
"element",
",",
"Text",
")",
":",
"for",
"sentence",
"in",
"element",
".",
"sentences",
":",
"output",
".",
"write",
"(",
"u' '",
".",
"join",
"(",
"u'/'",
".",
"join",
"(",
"[",
"token",
",",
"tag",
"]",
")",
"for",
"token",
",",
"tag",
"in",
"sentence",
".",
"pos_tagged_tokens",
")",
")",
"output",
".",
"write",
"(",
"u'\\n'",
")"
] | Output POS-tagged tokens. | [
"Output",
"POS",
"-",
"tagged",
"tokens",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/pos.py#L256-L265 |
6,483 | mcs07/ChemDataExtractor | chemdataextractor/scrape/scraper.py | GetRequester.make_request | def make_request(self, session, url, **kwargs):
"""Make a HTTP GET request.
:param url: The URL to get.
:returns: The response to the request.
:rtype: requests.Response
"""
log.debug('Making request: GET %s %s' % (url, kwargs))
return session.get(url, **kwargs) | python | def make_request(self, session, url, **kwargs):
"""Make a HTTP GET request.
:param url: The URL to get.
:returns: The response to the request.
:rtype: requests.Response
"""
log.debug('Making request: GET %s %s' % (url, kwargs))
return session.get(url, **kwargs) | [
"def",
"make_request",
"(",
"self",
",",
"session",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"debug",
"(",
"'Making request: GET %s %s'",
"%",
"(",
"url",
",",
"kwargs",
")",
")",
"return",
"session",
".",
"get",
"(",
"url",
",",
"*",
"*",
"kwargs",
")"
] | Make an HTTP GET request.
:param url: The URL to get.
:returns: The response to the request.
:rtype: requests.Response | [
"Make",
"a",
"HTTP",
"GET",
"request",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/scraper.py#L45-L53 |
6,484 | mcs07/ChemDataExtractor | chemdataextractor/scrape/scraper.py | PostRequester.make_request | def make_request(self, session, url, **kwargs):
"""Make a HTTP POST request.
:param url: The URL to post to.
:param data: The data to post.
:returns: The response to the request.
:rtype: requests.Response
"""
log.debug('Making request: POST %s %s' % (url, kwargs))
return session.post(url, **kwargs) | python | def make_request(self, session, url, **kwargs):
"""Make a HTTP POST request.
:param url: The URL to post to.
:param data: The data to post.
:returns: The response to the request.
:rtype: requests.Response
"""
log.debug('Making request: POST %s %s' % (url, kwargs))
return session.post(url, **kwargs) | [
"def",
"make_request",
"(",
"self",
",",
"session",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"debug",
"(",
"'Making request: POST %s %s'",
"%",
"(",
"url",
",",
"kwargs",
")",
")",
"return",
"session",
".",
"post",
"(",
"url",
",",
"*",
"*",
"kwargs",
")"
] | Make an HTTP POST request.
:param url: The URL to post to.
:param data: The data to post.
:returns: The response to the request.
:rtype: requests.Response | [
"Make",
"a",
"HTTP",
"POST",
"request",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/scraper.py#L58-L67 |
6,485 | mcs07/ChemDataExtractor | chemdataextractor/scrape/scraper.py | UrlScraper.run | def run(self, url):
"""Request URL, scrape response and return an EntityList."""
url = self.process_url(url)
if not url:
return
response = self.make_request(self.http, url)
selector = self.process_response(response)
entities = []
for root in self.get_roots(selector):
entity = self.entity(root)
entity = self.process_entity(entity)
if entity:
entities.append(entity)
return EntityList(*entities) | python | def run(self, url):
"""Request URL, scrape response and return an EntityList."""
url = self.process_url(url)
if not url:
return
response = self.make_request(self.http, url)
selector = self.process_response(response)
entities = []
for root in self.get_roots(selector):
entity = self.entity(root)
entity = self.process_entity(entity)
if entity:
entities.append(entity)
return EntityList(*entities) | [
"def",
"run",
"(",
"self",
",",
"url",
")",
":",
"url",
"=",
"self",
".",
"process_url",
"(",
"url",
")",
"if",
"not",
"url",
":",
"return",
"response",
"=",
"self",
".",
"make_request",
"(",
"self",
".",
"http",
",",
"url",
")",
"selector",
"=",
"self",
".",
"process_response",
"(",
"response",
")",
"entities",
"=",
"[",
"]",
"for",
"root",
"in",
"self",
".",
"get_roots",
"(",
"selector",
")",
":",
"entity",
"=",
"self",
".",
"entity",
"(",
"root",
")",
"entity",
"=",
"self",
".",
"process_entity",
"(",
"entity",
")",
"if",
"entity",
":",
"entities",
".",
"append",
"(",
"entity",
")",
"return",
"EntityList",
"(",
"*",
"entities",
")"
] | Request URL, scrape response and return an EntityList. | [
"Request",
"URL",
"scrape",
"response",
"and",
"return",
"an",
"EntityList",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/scraper.py#L77-L90 |
6,486 | mcs07/ChemDataExtractor | chemdataextractor/scrape/clean.py | Cleaner.clean_html | def clean_html(self, html):
"""Apply ``Cleaner`` to HTML string or document and return a cleaned string or document."""
result_type = type(html)
if isinstance(html, six.string_types):
doc = html_fromstring(html)
else:
doc = copy.deepcopy(html)
self(doc)
if issubclass(result_type, six.binary_type):
return tostring(doc, encoding='utf-8')
elif issubclass(result_type, six.text_type):
return tostring(doc, encoding='unicode')
else:
return doc | python | def clean_html(self, html):
"""Apply ``Cleaner`` to HTML string or document and return a cleaned string or document."""
result_type = type(html)
if isinstance(html, six.string_types):
doc = html_fromstring(html)
else:
doc = copy.deepcopy(html)
self(doc)
if issubclass(result_type, six.binary_type):
return tostring(doc, encoding='utf-8')
elif issubclass(result_type, six.text_type):
return tostring(doc, encoding='unicode')
else:
return doc | [
"def",
"clean_html",
"(",
"self",
",",
"html",
")",
":",
"result_type",
"=",
"type",
"(",
"html",
")",
"if",
"isinstance",
"(",
"html",
",",
"six",
".",
"string_types",
")",
":",
"doc",
"=",
"html_fromstring",
"(",
"html",
")",
"else",
":",
"doc",
"=",
"copy",
".",
"deepcopy",
"(",
"html",
")",
"self",
"(",
"doc",
")",
"if",
"issubclass",
"(",
"result_type",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"tostring",
"(",
"doc",
",",
"encoding",
"=",
"'utf-8'",
")",
"elif",
"issubclass",
"(",
"result_type",
",",
"six",
".",
"text_type",
")",
":",
"return",
"tostring",
"(",
"doc",
",",
"encoding",
"=",
"'unicode'",
")",
"else",
":",
"return",
"doc"
] | Apply ``Cleaner`` to HTML string or document and return a cleaned string or document. | [
"Apply",
"Cleaner",
"to",
"HTML",
"string",
"or",
"document",
"and",
"return",
"a",
"cleaned",
"string",
"or",
"document",
"."
] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/clean.py#L159-L172 |
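The type juggling above means `clean_html` hands back whatever form it was given: bytes in, bytes out; text in, text out; parsed tree in, a cleaned copy out. A sketch, assuming `Cleaner()` constructs with usable defaults:

```python
from chemdataextractor.scrape.clean import Cleaner

cleaner = Cleaner()   # default configuration is an assumption here
html = '<html><body><p>Keep me</p></body></html>'
print(type(cleaner.clean_html(html)))                   # <class 'str'>
print(type(cleaner.clean_html(html.encode('utf-8'))))   # <class 'bytes'>
```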
6,487 | mcs07/ChemDataExtractor | chemdataextractor/scrape/clean.py | Cleaner.clean_markup | def clean_markup(self, markup, parser=None):
        """Apply ``Cleaner`` to markup string or document and return a cleaned string or document."""
        result_type = type(markup)
        if isinstance(markup, six.string_types):
            doc = fromstring(markup, parser=parser)
        else:
            doc = copy.deepcopy(markup)
        self(doc)
        if issubclass(result_type, six.binary_type):
            return tostring(doc, encoding='utf-8')
        elif issubclass(result_type, six.text_type):
            return tostring(doc, encoding='unicode')
        else:
            return doc | python | def clean_markup(self, markup, parser=None):
        """Apply ``Cleaner`` to markup string or document and return a cleaned string or document."""
        result_type = type(markup)
        if isinstance(markup, six.string_types):
            doc = fromstring(markup, parser=parser)
        else:
            doc = copy.deepcopy(markup)
        self(doc)
        if issubclass(result_type, six.binary_type):
            return tostring(doc, encoding='utf-8')
        elif issubclass(result_type, six.text_type):
            return tostring(doc, encoding='unicode')
        else:
            return doc | [ "def", "clean_markup", "(", "self", ",", "markup", ",", "parser", "=", "None", ")", ":", "result_type", "=", "type", "(", "markup", ")", "if", "isinstance", "(", "markup", ",", "six", ".", "string_types", ")", ":", "doc", "=", "fromstring", "(", "markup", ",", "parser", "=", "parser", ")", "else", ":", "doc", "=", "copy", ".", "deepcopy", "(", "markup", ")", "self", "(", "doc", ")", "if", "issubclass", "(", "result_type", ",", "six", ".", "binary_type", ")", ":", "return", "tostring", "(", "doc", ",", "encoding", "=", "'utf-8'", ")", "elif", "issubclass", "(", "result_type", ",", "six", ".", "text_type", ")", ":", "return", "tostring", "(", "doc", ",", "encoding", "=", "'unicode'", ")", "else", ":", "return", "doc" ] | Apply ``Cleaner`` to markup string or document and return a cleaned string or document. | [ "Apply", "Cleaner", "to", "markup", "string", "or", "document", "and", "return", "a", "cleaned", "string", "or", "document", "." ] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/clean.py#L174-L187 |
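`clean_markup` is the same routine generalized to take an explicit lxml parser; a sketch reusing the `cleaner` from the previous example (the XML parser choice is illustrative):

    from lxml.etree import XMLParser
    cleaned = cleaner.clean_markup(u'<root><a>text</a></root>', parser=XMLParser())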
6,488 | mcs07/ChemDataExtractor | chemdataextractor/text/processors.py | floats | def floats(s):
        """Convert string to float. Handles more string formats than the standard python conversion."""
        try:
            return float(s)
        except ValueError:
            s = re.sub(r'(\d)\s*\(\d+(\.\d+)?\)', r'\1', s)  # Remove bracketed numbers from end
            s = re.sub(r'(\d)\s*±\s*\d+(\.\d+)?', r'\1', s)  # Remove uncertainties from end
            s = s.rstrip('\'"+-=<>/,.:;!?)]}…∼~≈×*_≥≤')  # Remove trailing punctuation
            s = s.lstrip('\'"+=<>/([{∼~≈×*_≥≤£$€#§')  # Remove leading punctuation
            s = s.replace(',', '')  # Remove commas
            s = ''.join(s.split())  # Strip whitespace
            s = re.sub(r'(\d)\s*[×x]\s*10\^?(-?\d)', r'\1e\2', s)  # Convert scientific notation
            return float(s) | python | def floats(s):
        """Convert string to float. Handles more string formats than the standard python conversion."""
        try:
            return float(s)
        except ValueError:
            s = re.sub(r'(\d)\s*\(\d+(\.\d+)?\)', r'\1', s)  # Remove bracketed numbers from end
            s = re.sub(r'(\d)\s*±\s*\d+(\.\d+)?', r'\1', s)  # Remove uncertainties from end
            s = s.rstrip('\'"+-=<>/,.:;!?)]}…∼~≈×*_≥≤')  # Remove trailing punctuation
            s = s.lstrip('\'"+=<>/([{∼~≈×*_≥≤£$€#§')  # Remove leading punctuation
            s = s.replace(',', '')  # Remove commas
            s = ''.join(s.split())  # Strip whitespace
            s = re.sub(r'(\d)\s*[×x]\s*10\^?(-?\d)', r'\1e\2', s)  # Convert scientific notation
            return float(s) | [ "def", "floats", "(", "s", ")", ":", "try", ":", "return", "float", "(", "s", ")", "except", "ValueError", ":", "s", "=", "re", ".", "sub", "(", "r'(\\d)\\s*\\(\\d+(\\.\\d+)?\\)'", ",", "r'\\1'", ",", "s", ")", "# Remove bracketed numbers from end", "s", "=", "re", ".", "sub", "(", "r'(\\d)\\s*±\\s*\\d+(\\.\\d+)?'", ",", "r'\\1'", ",", "s", ")", "# Remove uncertainties from end", "s", "=", "s", ".", "rstrip", "(", "'\\'\"+-=<>/,.:;!?)]}…∼~≈×*_≥≤'", ")", "# Remove trailing punctuation", "s", "=", "s", ".", "lstrip", "(", "'\\'\"+=<>/([{∼~≈×*_≥≤£$€#§'", ")", "# Remove leading punctuation", "s", "=", "s", ".", "replace", "(", "','", ",", "''", ")", "# Remove commas", "s", "=", "''", ".", "join", "(", "s", ".", "split", "(", ")", ")", "# Strip whitespace", "s", "=", "re", ".", "sub", "(", "r'(\\d)\\s*[×x]\\s*10\\^?(-?\\d)'", ",", "r'\\1e\\2'", ",", "s", ")", "# Convert scientific notation", "return", "float", "(", "s", ")" ] | Convert string to float. Handles more string formats than the standard python conversion. | [ "Convert", "string", "to", "float", ".", "Handles", "more", "string", "formats", "than", "the", "standard", "python", "conversion", "." ] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/text/processors.py#L111-L123 |
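Worked examples of the string formats `floats` is written to rescue (input values are illustrative):

    floats('1.5')          # 1.5, plain float() already succeeds
    floats('1,000.5')      # 1000.5, commas stripped
    floats('5.5 ± 0.2')    # 5.5, trailing uncertainty removed
    floats('3.2 × 10^-4')  # 0.00032, rewritten to '3.2e-4' first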
6,489 | mcs07/ChemDataExtractor | chemdataextractor/text/processors.py | strip_querystring | def strip_querystring(url):
        """Remove the querystring from the end of a URL."""
        p = six.moves.urllib.parse.urlparse(url)
        return p.scheme + "://" + p.netloc + p.path | python | def strip_querystring(url):
        """Remove the querystring from the end of a URL."""
        p = six.moves.urllib.parse.urlparse(url)
        return p.scheme + "://" + p.netloc + p.path | [ "def", "strip_querystring", "(", "url", ")", ":", "p", "=", "six", ".", "moves", ".", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", "return", "p", ".", "scheme", "+", "\"://\"", "+", "p", ".", "netloc", "+", "p", ".", "path" ] | Remove the querystring from the end of a URL. | [ "Remove", "the", "querystring", "from", "the", "end", "of", "a", "URL", "." ] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/text/processors.py#L126-L129 |
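For example:

    strip_querystring('https://example.org/search?q=benzene&page=2')
    # -> 'https://example.org/search'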
6,490 | mcs07/ChemDataExtractor | chemdataextractor/text/processors.py | extract_emails | def extract_emails(text):
        """Return a list of email addresses extracted from the string."""
        text = text.replace(u'\u2024', '.')
        emails = []
        for m in EMAIL_RE.findall(text):
            emails.append(m[0])
        return emails | python | def extract_emails(text):
        """Return a list of email addresses extracted from the string."""
        text = text.replace(u'\u2024', '.')
        emails = []
        for m in EMAIL_RE.findall(text):
            emails.append(m[0])
        return emails | [ "def", "extract_emails", "(", "text", ")", ":", "text", "=", "text", ".", "replace", "(", "u'\\u2024'", ",", "'.'", ")", "emails", "=", "[", "]", "for", "m", "in", "EMAIL_RE", ".", "findall", "(", "text", ")", ":", "emails", ".", "append", "(", "m", "[", "0", "]", ")", "return", "emails" ] | Return a list of email addresses extracted from the string. | [ "Return", "a", "list", "of", "email", "addresses", "extracted", "from", "the", "string", "." ] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/text/processors.py#L159-L165 |
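A sketch of the intended behaviour, assuming `EMAIL_RE` matches ordinary addresses; the `\u2024` replace first normalizes the "one dot leader" character sometimes used to obfuscate addresses:

    extract_emails(u'Contact jane\u2024doe@example.org or bob@example.com')
    # -> ['jane.doe@example.org', 'bob@example.com'] (illustrative output)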
6,491 | mcs07/ChemDataExtractor | chemdataextractor/text/processors.py | unapostrophe | def unapostrophe(text):
        """Strip apostrophe and 's' from the end of a string."""
        text = re.sub(r'[%s]s?$' % ''.join(APOSTROPHES), '', text)
        return text | python | def unapostrophe(text):
        """Strip apostrophe and 's' from the end of a string."""
        text = re.sub(r'[%s]s?$' % ''.join(APOSTROPHES), '', text)
        return text | [ "def", "unapostrophe", "(", "text", ")", ":", "text", "=", "re", ".", "sub", "(", "r'[%s]s?$'", "%", "''", ".", "join", "(", "APOSTROPHES", ")", ",", "''", ",", "text", ")", "return", "text" ] | Strip apostrophe and 's' from the end of a string. | [ "Strip", "apostrophe", "and", "s", "from", "the", "end", "of", "a", "string", "." ] | 349a3bea965f2073141d62043b89319222e46af1 | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/text/processors.py#L168-L171 |
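For example, assuming `APOSTROPHES` includes the straight quote:

    unapostrophe("Alzheimer's")   # -> 'Alzheimer'
    unapostrophe("researchers'")  # -> 'researchers'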
6,492 | linuxsoftware/ls.joyous | ls/joyous/utils/telltime.py | getLocalTime | def getLocalTime(date, time, *args, **kwargs):
        """
        Get the time in the local timezone from date and time
        """
        if time is not None:
            return getLocalDateAndTime(date, time, *args, **kwargs)[1] | python | def getLocalTime(date, time, *args, **kwargs):
        """
        Get the time in the local timezone from date and time
        """
        if time is not None:
            return getLocalDateAndTime(date, time, *args, **kwargs)[1] | [ "def", "getLocalTime", "(", "date", ",", "time", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "time", "is", "not", "None", ":", "return", "getLocalDateAndTime", "(", "date", ",", "time", ",", "*", "args", ",", "*", "*", "kwargs", ")", "[", "1", "]" ] | Get the time in the local timezone from date and time | [ "Get", "the", "time", "in", "the", "local", "timezone", "from", "date", "and", "time" ] | 316283140ca5171a68ad3170a5964fdc89be0b56 | https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/utils/telltime.py#L19-L24 |
6,493 | linuxsoftware/ls.joyous | ls/joyous/utils/telltime.py | getLocalDateAndTime | def getLocalDateAndTime(date, time, *args, **kwargs):
        """
        Get the date and time in the local timezone from date and optionally time
        """
        localDt = getLocalDatetime(date, time, *args, **kwargs)
        if time is not None:
            return (localDt.date(), localDt.timetz())
        else:
            return (localDt.date(), None) | python | def getLocalDateAndTime(date, time, *args, **kwargs):
        """
        Get the date and time in the local timezone from date and optionally time
        """
        localDt = getLocalDatetime(date, time, *args, **kwargs)
        if time is not None:
            return (localDt.date(), localDt.timetz())
        else:
            return (localDt.date(), None) | [ "def", "getLocalDateAndTime", "(", "date", ",", "time", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "localDt", "=", "getLocalDatetime", "(", "date", ",", "time", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "time", "is", "not", "None", ":", "return", "(", "localDt", ".", "date", "(", ")", ",", "localDt", ".", "timetz", "(", ")", ")", "else", ":", "return", "(", "localDt", ".", "date", "(", ")", ",", "None", ")" ] | Get the date and time in the local timezone from date and optionally time | [ "Get", "the", "date", "and", "time", "in", "the", "local", "timezone", "from", "date", "and", "optionally", "time" ] | 316283140ca5171a68ad3170a5964fdc89be0b56 | https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/utils/telltime.py#L26-L34 |
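This and `getLocalTime` above are thin wrappers that split the result of `getLocalDatetime` (next record); a sketch, with an illustrative timezone and an active Django current timezone assumed:

    import datetime as dt
    import pytz
    date, time = getLocalDateAndTime(dt.date(2019, 1, 31), dt.time(22, 0), pytz.timezone('US/Eastern'))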
6,494 | linuxsoftware/ls.joyous | ls/joyous/utils/telltime.py | getLocalDatetime | def getLocalDatetime(date, time, tz=None, timeDefault=dt.time.max):
        """
        Get a datetime in the local timezone from date and optionally time
        """
        localTZ = timezone.get_current_timezone()
        if tz is None or tz == localTZ:
            localDt = getAwareDatetime(date, time, tz, timeDefault)
        else:
            # create in event's time zone
            eventDt = getAwareDatetime(date, time, tz, timeDefault)
            # convert to local time zone
            localDt = eventDt.astimezone(localTZ)
            if time is None:
                localDt = getAwareDatetime(localDt.date(), None, localTZ, timeDefault)
        return localDt | python | def getLocalDatetime(date, time, tz=None, timeDefault=dt.time.max):
        """
        Get a datetime in the local timezone from date and optionally time
        """
        localTZ = timezone.get_current_timezone()
        if tz is None or tz == localTZ:
            localDt = getAwareDatetime(date, time, tz, timeDefault)
        else:
            # create in event's time zone
            eventDt = getAwareDatetime(date, time, tz, timeDefault)
            # convert to local time zone
            localDt = eventDt.astimezone(localTZ)
            if time is None:
                localDt = getAwareDatetime(localDt.date(), None, localTZ, timeDefault)
        return localDt | [ "def", "getLocalDatetime", "(", "date", ",", "time", ",", "tz", "=", "None", ",", "timeDefault", "=", "dt", ".", "time", ".", "max", ")", ":", "localTZ", "=", "timezone", ".", "get_current_timezone", "(", ")", "if", "tz", "is", "None", "or", "tz", "==", "localTZ", ":", "localDt", "=", "getAwareDatetime", "(", "date", ",", "time", ",", "tz", ",", "timeDefault", ")", "else", ":", "# create in event's time zone", "eventDt", "=", "getAwareDatetime", "(", "date", ",", "time", ",", "tz", ",", "timeDefault", ")", "# convert to local time zone", "localDt", "=", "eventDt", ".", "astimezone", "(", "localTZ", ")", "if", "time", "is", "None", ":", "localDt", "=", "getAwareDatetime", "(", "localDt", ".", "date", "(", ")", ",", "None", ",", "localTZ", ",", "timeDefault", ")", "return", "localDt" ] | Get a datetime in the local timezone from date and optionally time | [ "Get", "a", "datetime", "in", "the", "local", "timezone", "from", "date", "and", "optionally", "time" ] | 316283140ca5171a68ad3170a5964fdc89be0b56 | https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/utils/telltime.py#L36-L50 |
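The interesting branch is the cross-timezone one; a sketch assuming the usual Django/pytz setup (timezone names illustrative):

    import datetime as dt
    import pytz
    from django.utils import timezone
    timezone.activate(pytz.timezone('Pacific/Auckland'))
    # 10pm on 31 Jan in New York is already the afternoon of 1 Feb in Auckland
    getLocalDatetime(dt.date(2019, 1, 31), dt.time(22, 0), pytz.timezone('US/Eastern'))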
6,495 | linuxsoftware/ls.joyous | ls/joyous/utils/telltime.py | getAwareDatetime | def getAwareDatetime(date, time, tz, timeDefault=dt.time.max):
        """
        Get a datetime in the given timezone from date and optionally time.
        If time is not given it will default to timeDefault if that is given
        or if not then to the end of the day.
        """
        if time is None:
            time = timeDefault
        datetime = dt.datetime.combine(date, time)
        # arbitrary rule to handle DST transitions:
        # if daylight savings causes an error then use standard time
        datetime = timezone.make_aware(datetime, tz, is_dst=False)
        return datetime | python | def getAwareDatetime(date, time, tz, timeDefault=dt.time.max):
        """
        Get a datetime in the given timezone from date and optionally time.
        If time is not given it will default to timeDefault if that is given
        or if not then to the end of the day.
        """
        if time is None:
            time = timeDefault
        datetime = dt.datetime.combine(date, time)
        # arbitrary rule to handle DST transitions:
        # if daylight savings causes an error then use standard time
        datetime = timezone.make_aware(datetime, tz, is_dst=False)
        return datetime | [ "def", "getAwareDatetime", "(", "date", ",", "time", ",", "tz", ",", "timeDefault", "=", "dt", ".", "time", ".", "max", ")", ":", "if", "time", "is", "None", ":", "time", "=", "timeDefault", "datetime", "=", "dt", ".", "datetime", ".", "combine", "(", "date", ",", "time", ")", "# arbitrary rule to handle DST transitions:", "# if daylight savings causes an error then use standard time", "datetime", "=", "timezone", ".", "make_aware", "(", "datetime", ",", "tz", ",", "is_dst", "=", "False", ")", "return", "datetime" ] | Get a datetime in the given timezone from date and optionally time.
        If time is not given it will default to timeDefault if that is given
        or if not then to the end of the day. | [ "Get", "a", "datetime", "in", "the", "given", "timezone", "from", "date", "and", "optionally", "time", ".", "If", "time", "is", "not", "given", "it", "will", "default", "to", "timeDefault", "if", "that", "is", "given", "or", "if", "not", "then", "to", "the", "end", "of", "the", "day", "." ] | 316283140ca5171a68ad3170a5964fdc89be0b56 | https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/utils/telltime.py#L52-L64 |
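The `is_dst=False` choice only matters for wall-clock times that are ambiguous (or missing) around a DST transition; a sketch:

    import datetime as dt
    import pytz
    tz = pytz.timezone('US/Eastern')
    # 01:30 on 2018-11-04 occurs twice; is_dst=False resolves it to standard time (EST)
    getAwareDatetime(dt.date(2018, 11, 4), dt.time(1, 30), tz)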
6,496 | linuxsoftware/ls.joyous | ls/joyous/utils/weeks.py | _iso_num_weeks | def _iso_num_weeks(iso_year):
        "Get the number of ISO-weeks in this year"
        year_start = _iso_year_start(iso_year)
        next_year_start = _iso_year_start(iso_year+1)
        year_num_weeks = ((next_year_start - year_start).days) // 7
        return year_num_weeks | python | def _iso_num_weeks(iso_year):
        "Get the number of ISO-weeks in this year"
        year_start = _iso_year_start(iso_year)
        next_year_start = _iso_year_start(iso_year+1)
        year_num_weeks = ((next_year_start - year_start).days) // 7
        return year_num_weeks | [ "def", "_iso_num_weeks", "(", "iso_year", ")", ":", "year_start", "=", "_iso_year_start", "(", "iso_year", ")", "next_year_start", "=", "_iso_year_start", "(", "iso_year", "+", "1", ")", "year_num_weeks", "=", "(", "(", "next_year_start", "-", "year_start", ")", ".", "days", ")", "//", "7", "return", "year_num_weeks" ] | Get the number of ISO-weeks in this year | [ "Get", "the", "number", "of", "ISO", "-", "weeks", "in", "this", "year" ] | 316283140ca5171a68ad3170a5964fdc89be0b56 | https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/utils/weeks.py#L31-L36 |
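For instance, assuming the usual `_iso_year_start` recipe defined earlier in this module:

    _iso_num_weeks(2015)  # -> 53, a long ISO year (1 Jan 2015 fell on a Thursday)
    _iso_num_weeks(2016)  # -> 52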
6,497 | linuxsoftware/ls.joyous | ls/joyous/utils/weeks.py | _iso_info | def _iso_info(iso_year, iso_week):
        "Give all the iso info we need from one calculation"
        prev_year_start = _iso_year_start(iso_year-1)
        year_start = _iso_year_start(iso_year)
        next_year_start = _iso_year_start(iso_year+1)
        first_day = year_start + dt.timedelta(weeks=iso_week-1)
        last_day = first_day + dt.timedelta(days=6)
        prev_year_num_weeks = ((year_start - prev_year_start).days) // 7
        year_num_weeks = ((next_year_start - year_start).days) // 7
        return (first_day, last_day, prev_year_num_weeks, year_num_weeks) | python | def _iso_info(iso_year, iso_week):
        "Give all the iso info we need from one calculation"
        prev_year_start = _iso_year_start(iso_year-1)
        year_start = _iso_year_start(iso_year)
        next_year_start = _iso_year_start(iso_year+1)
        first_day = year_start + dt.timedelta(weeks=iso_week-1)
        last_day = first_day + dt.timedelta(days=6)
        prev_year_num_weeks = ((year_start - prev_year_start).days) // 7
        year_num_weeks = ((next_year_start - year_start).days) // 7
        return (first_day, last_day, prev_year_num_weeks, year_num_weeks) | [ "def", "_iso_info", "(", "iso_year", ",", "iso_week", ")", ":", "prev_year_start", "=", "_iso_year_start", "(", "iso_year", "-", "1", ")", "year_start", "=", "_iso_year_start", "(", "iso_year", ")", "next_year_start", "=", "_iso_year_start", "(", "iso_year", "+", "1", ")", "first_day", "=", "year_start", "+", "dt", ".", "timedelta", "(", "weeks", "=", "iso_week", "-", "1", ")", "last_day", "=", "first_day", "+", "dt", ".", "timedelta", "(", "days", "=", "6", ")", "prev_year_num_weeks", "=", "(", "(", "year_start", "-", "prev_year_start", ")", ".", "days", ")", "//", "7", "year_num_weeks", "=", "(", "(", "next_year_start", "-", "year_start", ")", ".", "days", ")", "//", "7", "return", "(", "first_day", ",", "last_day", ",", "prev_year_num_weeks", ",", "year_num_weeks", ")" ] | Give all the iso info we need from one calculation | [ "Give", "all", "the", "iso", "info", "we", "need", "from", "one", "calculation" ] | 316283140ca5171a68ad3170a5964fdc89be0b56 | https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/utils/weeks.py#L38-L47 |
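A worked example of the returned tuple, under the same `_iso_year_start` assumption:

    _iso_info(2019, 1)
    # -> (dt.date(2018, 12, 31), dt.date(2019, 1, 6), 52, 52)
    # ISO week 1 of 2019 runs Mon 31 Dec 2018 to Sun 6 Jan 2019,
    # and both 2018 and 2019 contain 52 ISO weeks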
6,498 | linuxsoftware/ls.joyous | ls/joyous/utils/weeks.py | _iso_week_of_month | def _iso_week_of_month(date_value):
        "0-starting index which ISO-week in the month this date is"
        weekday_of_first = date_value.replace(day=1).weekday()
        return (date_value.day + weekday_of_first - 1) // 7 | python | def _iso_week_of_month(date_value):
        "0-starting index which ISO-week in the month this date is"
        weekday_of_first = date_value.replace(day=1).weekday()
        return (date_value.day + weekday_of_first - 1) // 7 | [ "def", "_iso_week_of_month", "(", "date_value", ")", ":", "weekday_of_first", "=", "date_value", ".", "replace", "(", "day", "=", "1", ")", ".", "weekday", "(", ")", "return", "(", "date_value", ".", "day", "+", "weekday_of_first", "-", "1", ")", "//", "7" ] | 0-starting index which ISO-week in the month this date is | [ "0", "-", "starting", "index", "which", "ISO", "-", "week", "in", "the", "month", "this", "date", "is" ] | 316283140ca5171a68ad3170a5964fdc89be0b56 | https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/utils/weeks.py#L53-L56 |
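For example:

    import datetime as dt
    _iso_week_of_month(dt.date(2019, 3, 1))   # -> 0 (1 March 2019 was a Friday)
    _iso_week_of_month(dt.date(2019, 3, 31))  # -> 4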
6,499 | linuxsoftware/ls.joyous | ls/joyous/utils/weeks.py | _ssweek_year_start | def _ssweek_year_start(ssweek_year):
        "The Gregorian calendar date of the first day of the given Sunday-starting-week year"
        fifth_jan = dt.date(ssweek_year, 1, 5)
        delta = dt.timedelta(fifth_jan.weekday()+1)
        return fifth_jan - delta | python | def _ssweek_year_start(ssweek_year):
        "The Gregorian calendar date of the first day of the given Sunday-starting-week year"
        fifth_jan = dt.date(ssweek_year, 1, 5)
        delta = dt.timedelta(fifth_jan.weekday()+1)
        return fifth_jan - delta | [ "def", "_ssweek_year_start", "(", "ssweek_year", ")", ":", "fifth_jan", "=", "dt", ".", "date", "(", "ssweek_year", ",", "1", ",", "5", ")", "delta", "=", "dt", ".", "timedelta", "(", "fifth_jan", ".", "weekday", "(", ")", "+", "1", ")", "return", "fifth_jan", "-", "delta" ] | The Gregorian calendar date of the first day of the given Sunday-starting-week year | [ "The", "Gregorian", "calendar", "date", "of", "the", "first", "day", "of", "the", "given", "Sunday", "-", "starting", "-", "week", "year" ] | 316283140ca5171a68ad3170a5964fdc89be0b56 | https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/utils/weeks.py#L60-L64 |
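For example:

    _ssweek_year_start(2019)  # -> dt.date(2018, 12, 30), a Sunday

The fifth-of-January anchor is an analogue of the ISO fourth-of-January trick, shifted for weeks that start on Sunday.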