Dataset columns (types and value ranges as reported by the dataset viewer):

  id                int32          0 to 252k
  repo              stringlengths  7 to 55
  path              stringlengths  4 to 127
  func_name         stringlengths  1 to 88
  original_string   stringlengths  75 to 19.8k
  language          stringclasses  1 value (python)
  code              stringlengths  75 to 19.8k
  code_tokens       sequence
  docstring         stringlengths  3 to 17.3k
  docstring_tokens  sequence
  sha               stringlengths  40 to 40
  url               stringlengths  87 to 242
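The rows below follow this schema, one value per field in the order listed. As a quick orientation, here is a minimal sketch of loading and inspecting one such record with the `datasets` library; the dataset name and split are assumptions (this dump is CodeSearchNet-style, but its exact source is not stated here):

    # Sketch only: "code_search_net"/"python" is an assumed source, not confirmed.
    from datasets import load_dataset

    ds = load_dataset("code_search_net", "python", split="train")
    row = ds[3700]              # the records below start at id 3,700
    print(row["func_name"])     # e.g. "Whooshee.create_index"
    print(row["docstring"])     # the natural-language summary
    print(row["code"])          # the function source, identical here to original_string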
3,700
bkabrda/flask-whooshee
flask_whooshee.py
Whooshee.create_index
def create_index(cls, app, wh):
    """Creates and opens an index for the given whoosheer and app. If the index
    already exists, it just opens it, otherwise it creates it first.

    :param app: The application instance.
    :param wh: The whoosheer instance for which a index should be created.
    """
    # TODO: do we really want/need to use camel casing?
    # everywhere else, there is just .lower()
    if app.extensions['whooshee']['memory_storage']:
        storage = RamStorage()
        index = storage.create_index(wh.schema)
        assert index
        return index
    else:
        index_path = os.path.join(app.extensions['whooshee']['index_path_root'],
                                  getattr(wh, 'index_subdir', cls.camel_to_snake(wh.__name__)))
        if whoosh.index.exists_in(index_path):
            index = whoosh.index.open_dir(index_path)
        else:
            if not os.path.exists(index_path):
                os.makedirs(index_path)
            index = whoosh.index.create_in(index_path, wh.schema)
        return index
python
[ "def", "create_index", "(", "cls", ",", "app", ",", "wh", ")", ":", "# TODO: do we really want/need to use camel casing?", "# everywhere else, there is just .lower()", "if", "app", ".", "extensions", "[", "'whooshee'", "]", "[", "'memory_storage'", "]", ":", "storage", "=", "RamStorage", "(", ")", "index", "=", "storage", ".", "create_index", "(", "wh", ".", "schema", ")", "assert", "index", "return", "index", "else", ":", "index_path", "=", "os", ".", "path", ".", "join", "(", "app", ".", "extensions", "[", "'whooshee'", "]", "[", "'index_path_root'", "]", ",", "getattr", "(", "wh", ",", "'index_subdir'", ",", "cls", ".", "camel_to_snake", "(", "wh", ".", "__name__", ")", ")", ")", "if", "whoosh", ".", "index", ".", "exists_in", "(", "index_path", ")", ":", "index", "=", "whoosh", ".", "index", ".", "open_dir", "(", "index_path", ")", "else", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "index_path", ")", ":", "os", ".", "makedirs", "(", "index_path", ")", "index", "=", "whoosh", ".", "index", ".", "create_in", "(", "index_path", ",", "wh", ".", "schema", ")", "return", "index" ]
Creates and opens an index for the given whoosheer and app. If the index
already exists, it just opens it, otherwise it creates it first.

:param app: The application instance.
:param wh: The whoosheer instance for which a index should be created.
[ "Creates", "and", "opens", "an", "index", "for", "the", "given", "whoosheer", "and", "app", ".", "If", "the", "index", "already", "exists", "it", "just", "opens", "it", "otherwise", "it", "creates", "it", "first", "." ]
773fc51ed53043bd5e92c65eadef5663845ae8c4
https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L375-L399
3,701
bkabrda/flask-whooshee
flask_whooshee.py
Whooshee.get_or_create_index
def get_or_create_index(cls, app, wh):
    """Gets a previously cached index or creates a new one for the given
    app and whoosheer.

    :param app: The application instance.
    :param wh: The whoosheer instance for which the index should be
        retrieved or created.
    """
    if wh in app.extensions['whooshee']['whoosheers_indexes']:
        return app.extensions['whooshee']['whoosheers_indexes'][wh]
    index = cls.create_index(app, wh)
    app.extensions['whooshee']['whoosheers_indexes'][wh] = index
    return index
python
[ "def", "get_or_create_index", "(", "cls", ",", "app", ",", "wh", ")", ":", "if", "wh", "in", "app", ".", "extensions", "[", "'whooshee'", "]", "[", "'whoosheers_indexes'", "]", ":", "return", "app", ".", "extensions", "[", "'whooshee'", "]", "[", "'whoosheers_indexes'", "]", "[", "wh", "]", "index", "=", "cls", ".", "create_index", "(", "app", ",", "wh", ")", "app", ".", "extensions", "[", "'whooshee'", "]", "[", "'whoosheers_indexes'", "]", "[", "wh", "]", "=", "index", "return", "index" ]
Gets a previously cached index or creates a new one for the given
app and whoosheer.

:param app: The application instance.
:param wh: The whoosheer instance for which the index should be
    retrieved or created.
[ "Gets", "a", "previously", "cached", "index", "or", "creates", "a", "new", "one", "for", "the", "given", "app", "and", "whoosheer", "." ]
773fc51ed53043bd5e92c65eadef5663845ae8c4
https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L410-L422
3,702
bkabrda/flask-whooshee
flask_whooshee.py
Whooshee.on_commit
def on_commit(self, changes):
    """Method that gets called when a model is changed. This serves
    to do the actual index writing.
    """
    if _get_config(self)['enable_indexing'] is False:
        return None

    for wh in self.whoosheers:
        if not wh.auto_update:
            continue
        writer = None
        for change in changes:
            if change[0].__class__ in wh.models:
                method_name = '{0}_{1}'.format(change[1], change[0].__class__.__name__.lower())
                method = getattr(wh, method_name, None)
                if method:
                    if not writer:
                        writer = type(self).get_or_create_index(_get_app(self), wh).\
                            writer(timeout=_get_config(self)['writer_timeout'])
                    method(writer, change[0])
        if writer:
            writer.commit()
python
[ "def", "on_commit", "(", "self", ",", "changes", ")", ":", "if", "_get_config", "(", "self", ")", "[", "'enable_indexing'", "]", "is", "False", ":", "return", "None", "for", "wh", "in", "self", ".", "whoosheers", ":", "if", "not", "wh", ".", "auto_update", ":", "continue", "writer", "=", "None", "for", "change", "in", "changes", ":", "if", "change", "[", "0", "]", ".", "__class__", "in", "wh", ".", "models", ":", "method_name", "=", "'{0}_{1}'", ".", "format", "(", "change", "[", "1", "]", ",", "change", "[", "0", "]", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", ")", "method", "=", "getattr", "(", "wh", ",", "method_name", ",", "None", ")", "if", "method", ":", "if", "not", "writer", ":", "writer", "=", "type", "(", "self", ")", ".", "get_or_create_index", "(", "_get_app", "(", "self", ")", ",", "wh", ")", ".", "writer", "(", "timeout", "=", "_get_config", "(", "self", ")", "[", "'writer_timeout'", "]", ")", "method", "(", "writer", ",", "change", "[", "0", "]", ")", "if", "writer", ":", "writer", ".", "commit", "(", ")" ]
Method that gets called when a model is changed. This serves to do the actual index writing.
[ "Method", "that", "gets", "called", "when", "a", "model", "is", "changed", ".", "This", "serves", "to", "do", "the", "actual", "index", "writing", "." ]
773fc51ed53043bd5e92c65eadef5663845ae8c4
https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L433-L454
3,703
bkabrda/flask-whooshee
flask_whooshee.py
Whooshee.reindex
def reindex(self):
    """Reindex all data

    This method retrieves all the data from the registered models and
    calls the ``update_<model>()`` function for every instance of such
    model.
    """
    for wh in self.whoosheers:
        index = type(self).get_or_create_index(_get_app(self), wh)
        writer = index.writer(timeout=_get_config(self)['writer_timeout'])
        for model in wh.models:
            method_name = "{0}_{1}".format(UPDATE_KWD, model.__name__.lower())
            for item in model.query.all():
                getattr(wh, method_name)(writer, item)
        writer.commit()
python
[ "def", "reindex", "(", "self", ")", ":", "for", "wh", "in", "self", ".", "whoosheers", ":", "index", "=", "type", "(", "self", ")", ".", "get_or_create_index", "(", "_get_app", "(", "self", ")", ",", "wh", ")", "writer", "=", "index", ".", "writer", "(", "timeout", "=", "_get_config", "(", "self", ")", "[", "'writer_timeout'", "]", ")", "for", "model", "in", "wh", ".", "models", ":", "method_name", "=", "\"{0}_{1}\"", ".", "format", "(", "UPDATE_KWD", ",", "model", ".", "__name__", ".", "lower", "(", ")", ")", "for", "item", "in", "model", ".", "query", ".", "all", "(", ")", ":", "getattr", "(", "wh", ",", "method_name", ")", "(", "writer", ",", "item", ")", "writer", ".", "commit", "(", ")" ]
Reindex all data

This method retrieves all the data from the registered models and
calls the ``update_<model>()`` function for every instance of such model.
[ "Reindex", "all", "data" ]
773fc51ed53043bd5e92c65eadef5663845ae8c4
https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L456-L470
3,704
spry-group/python-vultr
examples/basic_list.py
dump_info
def dump_info():
    '''Shows various details about the account & servers'''
    vultr = Vultr(API_KEY)

    try:
        logging.info('Listing account info:\n%s', dumps(
            vultr.account.info(), indent=2
        ))
        logging.info('Listing apps:\n%s', dumps(
            vultr.app.list(), indent=2
        ))
        logging.info('Listing backups:\n%s', dumps(
            vultr.backup.list(), indent=2
        ))
        logging.info('Listing DNS:\n%s', dumps(
            vultr.dns.list(), indent=2
        ))
        logging.info('Listing ISOs:\n%s', dumps(
            vultr.iso.list(), indent=2
        ))
        logging.info('Listing OSs:\n%s', dumps(
            vultr.os.list(), indent=2
        ))
        logging.info('Listing plans:\n%s', dumps(
            vultr.plans.list(), indent=2
        ))
        logging.info('Listing regions:\n%s', dumps(
            vultr.regions.list(), indent=2
        ))
        logging.info('Listing servers:\n%s', dumps(
            vultr.server.list(), indent=2
        ))
        logging.info('Listing snapshots:\n%s', dumps(
            vultr.snapshot.list(), indent=2
        ))
        logging.info('Listing SSH keys:\n%s', dumps(
            vultr.sshkey.list(), indent=2
        ))
        logging.info('Listing startup scripts:\n%s', dumps(
            vultr.startupscript.list(), indent=2
        ))
    except VultrError as ex:
        logging.error('VultrError: %s', ex)
python
[ "def", "dump_info", "(", ")", ":", "vultr", "=", "Vultr", "(", "API_KEY", ")", "try", ":", "logging", ".", "info", "(", "'Listing account info:\\n%s'", ",", "dumps", "(", "vultr", ".", "account", ".", "info", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing apps:\\n%s'", ",", "dumps", "(", "vultr", ".", "app", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing backups:\\n%s'", ",", "dumps", "(", "vultr", ".", "backup", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing DNS:\\n%s'", ",", "dumps", "(", "vultr", ".", "dns", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing ISOs:\\n%s'", ",", "dumps", "(", "vultr", ".", "iso", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing OSs:\\n%s'", ",", "dumps", "(", "vultr", ".", "os", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing plans:\\n%s'", ",", "dumps", "(", "vultr", ".", "plans", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing regions:\\n%s'", ",", "dumps", "(", "vultr", ".", "regions", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing servers:\\n%s'", ",", "dumps", "(", "vultr", ".", "server", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing snapshots:\\n%s'", ",", "dumps", "(", "vultr", ".", "snapshot", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing SSH keys:\\n%s'", ",", "dumps", "(", "vultr", ".", "sshkey", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "logging", ".", "info", "(", "'Listing startup scripts:\\n%s'", ",", "dumps", "(", "vultr", ".", "startupscript", ".", "list", "(", ")", ",", "indent", "=", "2", ")", ")", "except", "VultrError", "as", "ex", ":", "logging", ".", "error", "(", "'VultrError: %s'", ",", "ex", ")" ]
Shows various details about the account & servers
[ "Shows", "various", "details", "about", "the", "account", "&", "servers" ]
bad1448f1df7b5dba70fd3d11434f32580f0b850
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/examples/basic_list.py#L19-L72
3,705
spry-group/python-vultr
vultr/utils.py
update_params
def update_params(params, updates):
    '''Merges updates into params'''
    params = params.copy() if isinstance(params, dict) else dict()
    params.update(updates)
    return params
python
[ "def", "update_params", "(", "params", ",", "updates", ")", ":", "params", "=", "params", ".", "copy", "(", ")", "if", "isinstance", "(", "params", ",", "dict", ")", "else", "dict", "(", ")", "params", ".", "update", "(", "updates", ")", "return", "params" ]
Merges updates into params
[ "Merges", "updates", "into", "params" ]
bad1448f1df7b5dba70fd3d11434f32580f0b850
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L94-L98
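The record above is small enough to read at a glance, but two properties are worth spelling out: the input dict is copied rather than mutated, and a non-dict `params` is silently replaced by a fresh dict. A short usage sketch (the values are illustrative, not from the source):

    base = {'SUBID': '576965', 'label': 'web-1'}      # hypothetical request params
    merged = update_params(base, {'label': 'web-2'})
    print(merged)                                     # {'SUBID': '576965', 'label': 'web-2'}
    print(base)                                       # unchanged: {'SUBID': '576965', 'label': 'web-1'}
    print(update_params(None, {'a': 1}))              # non-dict params -> {'a': 1}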
3,706
spry-group/python-vultr
vultr/utils.py
VultrBase._request_get_helper
def _request_get_helper(self, url, params=None):
    '''API GET request helper'''
    if not isinstance(params, dict):
        params = dict()
    if self.api_key:
        params['api_key'] = self.api_key
    return requests.get(url, params=params, timeout=60)
python
[ "def", "_request_get_helper", "(", "self", ",", "url", ",", "params", "=", "None", ")", ":", "if", "not", "isinstance", "(", "params", ",", "dict", ")", ":", "params", "=", "dict", "(", ")", "if", "self", ".", "api_key", ":", "params", "[", "'api_key'", "]", "=", "self", ".", "api_key", "return", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "timeout", "=", "60", ")" ]
API GET request helper
[ "API", "GET", "request", "helper" ]
bad1448f1df7b5dba70fd3d11434f32580f0b850
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L26-L33
3,707
spry-group/python-vultr
vultr/utils.py
VultrBase._request_post_helper
def _request_post_helper(self, url, params=None):
    '''API POST helper'''
    if self.api_key:
        query = {'api_key': self.api_key}
        return requests.post(url, params=query, data=params, timeout=60)
python
[ "def", "_request_post_helper", "(", "self", ",", "url", ",", "params", "=", "None", ")", ":", "if", "self", ".", "api_key", ":", "query", "=", "{", "'api_key'", ":", "self", ".", "api_key", "}", "return", "requests", ".", "post", "(", "url", ",", "params", "=", "query", ",", "data", "=", "params", ",", "timeout", "=", "60", ")" ]
API POST helper
[ "API", "POST", "helper" ]
bad1448f1df7b5dba70fd3d11434f32580f0b850
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L35-L39
3,708
spry-group/python-vultr
vultr/utils.py
VultrBase._request_helper
def _request_helper(self, url, params, method):
    '''API request helper method'''
    try:
        if method == 'POST':
            return self._request_post_helper(url, params)
        elif method == 'GET':
            return self._request_get_helper(url, params)
        raise VultrError('Unsupported method %s' % method)
    except requests.RequestException as ex:
        raise RuntimeError(ex)
python
[ "def", "_request_helper", "(", "self", ",", "url", ",", "params", ",", "method", ")", ":", "try", ":", "if", "method", "==", "'POST'", ":", "return", "self", ".", "_request_post_helper", "(", "url", ",", "params", ")", "elif", "method", "==", "'GET'", ":", "return", "self", ".", "_request_get_helper", "(", "url", ",", "params", ")", "raise", "VultrError", "(", "'Unsupported method %s'", "%", "method", ")", "except", "requests", ".", "RequestException", "as", "ex", ":", "raise", "RuntimeError", "(", "ex", ")" ]
API request helper method
[ "API", "request", "helper", "method" ]
bad1448f1df7b5dba70fd3d11434f32580f0b850
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L41-L50
3,709
spry-group/python-vultr
examples/basic_haltRunning.py
halt_running
def halt_running():
    '''Halts all running servers'''
    vultr = Vultr(API_KEY)

    try:
        serverList = vultr.server.list()
        #logging.info('Listing servers:\n%s', dumps(
        #serverList, indent=2
        #))
    except VultrError as ex:
        logging.error('VultrError: %s', ex)

    for serverID in serverList:
        if serverList[serverID]['power_status'] == 'running':
            logging.info(serverList[serverID]['label'] + " will be gracefully shutdown.")
            vultr.server.halt(serverID)
python
[ "def", "halt_running", "(", ")", ":", "vultr", "=", "Vultr", "(", "API_KEY", ")", "try", ":", "serverList", "=", "vultr", ".", "server", ".", "list", "(", ")", "#logging.info('Listing servers:\\n%s', dumps(", "#serverList, indent=2", "#))", "except", "VultrError", "as", "ex", ":", "logging", ".", "error", "(", "'VultrError: %s'", ",", "ex", ")", "for", "serverID", "in", "serverList", ":", "if", "serverList", "[", "serverID", "]", "[", "'power_status'", "]", "==", "'running'", ":", "logging", ".", "info", "(", "serverList", "[", "serverID", "]", "[", "'label'", "]", "+", "\" will be gracefully shutdown.\"", ")", "vultr", ".", "server", ".", "halt", "(", "serverID", ")" ]
Halts all running servers
[ "Halts", "all", "running", "servers" ]
bad1448f1df7b5dba70fd3d11434f32580f0b850
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/examples/basic_haltRunning.py#L18-L33
3,710
inspirehep/refextract
refextract/references/tag.py
tag_arxiv
def tag_arxiv(line):
    """Tag arxiv report numbers

    We handle arXiv in 2 ways:
    * starting with arXiv:1022.1111
    * this format exactly 9999.9999

    We also format the output to the standard arxiv notation:
    * arXiv:2007.12.1111
    * arXiv:2007.12.1111v2
    """
    def tagger(match):
        groups = match.groupdict()
        if match.group('suffix'):
            groups['suffix'] = ' ' + groups['suffix']
        else:
            groups['suffix'] = ''
        return u'<cds.REPORTNUMBER>arXiv:%(year)s'\
               u'%(month)s.%(num)s%(suffix)s' \
               u'</cds.REPORTNUMBER>' % groups

    line = re_arxiv_5digits.sub(tagger, line)
    line = re_arxiv.sub(tagger, line)
    line = re_new_arxiv_5digits.sub(tagger, line)
    line = re_new_arxiv.sub(tagger, line)
    return line
python
[ "def", "tag_arxiv", "(", "line", ")", ":", "def", "tagger", "(", "match", ")", ":", "groups", "=", "match", ".", "groupdict", "(", ")", "if", "match", ".", "group", "(", "'suffix'", ")", ":", "groups", "[", "'suffix'", "]", "=", "' '", "+", "groups", "[", "'suffix'", "]", "else", ":", "groups", "[", "'suffix'", "]", "=", "''", "return", "u'<cds.REPORTNUMBER>arXiv:%(year)s'", "u'%(month)s.%(num)s%(suffix)s'", "u'</cds.REPORTNUMBER>'", "%", "groups", "line", "=", "re_arxiv_5digits", ".", "sub", "(", "tagger", ",", "line", ")", "line", "=", "re_arxiv", ".", "sub", "(", "tagger", ",", "line", ")", "line", "=", "re_new_arxiv_5digits", ".", "sub", "(", "tagger", ",", "line", ")", "line", "=", "re_new_arxiv", ".", "sub", "(", "tagger", ",", "line", ")", "return", "line" ]
Tag arxiv report numbers

We handle arXiv in 2 ways:
* starting with arXiv:1022.1111
* this format exactly 9999.9999

We also format the output to the standard arxiv notation:
* arXiv:2007.12.1111
* arXiv:2007.12.1111v2
[ "Tag", "arxiv", "report", "numbers" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L360-L384
3,711
inspirehep/refextract
refextract/references/tag.py
tag_arxiv_more
def tag_arxiv_more(line):
    """Tag old arxiv report numbers

    Either formats:
    * hep-th/1234567
    * arXiv:1022111 [hep-ph] which transforms to hep-ph/1022111
    """
    line = RE_ARXIV_CATCHUP.sub(ur"\g<suffix>/\g<year>\g<month>\g<num>", line)

    for report_re, report_repl in RE_OLD_ARXIV:
        report_number = report_repl + ur"/\g<num>"
        line = report_re.sub(
            u'<cds.REPORTNUMBER>' + report_number + u'</cds.REPORTNUMBER>',
            line
        )
    return line
python
[ "def", "tag_arxiv_more", "(", "line", ")", ":", "line", "=", "RE_ARXIV_CATCHUP", ".", "sub", "(", "ur\"\\g<suffix>/\\g<year>\\g<month>\\g<num>\"", ",", "line", ")", "for", "report_re", ",", "report_repl", "in", "RE_OLD_ARXIV", ":", "report_number", "=", "report_repl", "+", "ur\"/\\g<num>\"", "line", "=", "report_re", ".", "sub", "(", "u'<cds.REPORTNUMBER>'", "+", "report_number", "+", "u'</cds.REPORTNUMBER>'", ",", "line", ")", "return", "line" ]
Tag old arxiv report numbers

Either formats:
* hep-th/1234567
* arXiv:1022111 [hep-ph] which transforms to hep-ph/1022111
[ "Tag", "old", "arxiv", "report", "numbers" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L387-L402
3,712
inspirehep/refextract
refextract/references/tag.py
tag_pos_volume
def tag_pos_volume(line):
    """Tag POS volume number

    POS is journal that has special volume numbers
    e.g. PoS LAT2007 (2007) 369
    """
    def tagger(match):
        groups = match.groupdict()
        try:
            year = match.group('year')
        except IndexError:
            # Extract year from volume name
            # which should always include the year
            g = re.search(re_pos_year_num, match.group('volume_num'), re.UNICODE)
            year = g.group(0)

        if year:
            groups['year'] = ' <cds.YR>(%s)</cds.YR>' % year.strip().strip('()')
        else:
            groups['year'] = ''

        return '<cds.JOURNAL>PoS</cds.JOURNAL>' \
               ' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>' \
               '%(year)s' \
               ' <cds.PG>%(page)s</cds.PG>' % groups

    for p in re_pos:
        line = p.sub(tagger, line)

    return line
python
[ "def", "tag_pos_volume", "(", "line", ")", ":", "def", "tagger", "(", "match", ")", ":", "groups", "=", "match", ".", "groupdict", "(", ")", "try", ":", "year", "=", "match", ".", "group", "(", "'year'", ")", "except", "IndexError", ":", "# Extract year from volume name", "# which should always include the year", "g", "=", "re", ".", "search", "(", "re_pos_year_num", ",", "match", ".", "group", "(", "'volume_num'", ")", ",", "re", ".", "UNICODE", ")", "year", "=", "g", ".", "group", "(", "0", ")", "if", "year", ":", "groups", "[", "'year'", "]", "=", "' <cds.YR>(%s)</cds.YR>'", "%", "year", ".", "strip", "(", ")", ".", "strip", "(", "'()'", ")", "else", ":", "groups", "[", "'year'", "]", "=", "''", "return", "'<cds.JOURNAL>PoS</cds.JOURNAL>'", "' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>'", "'%(year)s'", "' <cds.PG>%(page)s</cds.PG>'", "%", "groups", "for", "p", "in", "re_pos", ":", "line", "=", "p", ".", "sub", "(", "tagger", ",", "line", ")", "return", "line" ]
Tag POS volume number

POS is journal that has special volume numbers
e.g. PoS LAT2007 (2007) 369
[ "Tag", "POS", "volume", "number" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L405-L436
3,713
inspirehep/refextract
refextract/references/tag.py
find_numeration_more
def find_numeration_more(line):
    """Look for other numeration in line."""
    # First, attempt to use marked-up titles
    patterns = (
        re_correct_numeration_2nd_try_ptn1,
        re_correct_numeration_2nd_try_ptn2,
        re_correct_numeration_2nd_try_ptn3,
        re_correct_numeration_2nd_try_ptn4,
    )
    for pattern in patterns:
        match = pattern.search(line)
        if match:
            info = match.groupdict()
            series = extract_series_from_volume(info['vol'])
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt']
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt2']
            return {'year': info.get('year', None),
                    'series': series,
                    'volume': info['vol_num'],
                    'page': info['page'] or info['jinst_page'],
                    'page_end': info['page_end'],
                    'len': len(info['aftertitle'])}

    return None
python
[ "def", "find_numeration_more", "(", "line", ")", ":", "# First, attempt to use marked-up titles", "patterns", "=", "(", "re_correct_numeration_2nd_try_ptn1", ",", "re_correct_numeration_2nd_try_ptn2", ",", "re_correct_numeration_2nd_try_ptn3", ",", "re_correct_numeration_2nd_try_ptn4", ",", ")", "for", "pattern", "in", "patterns", ":", "match", "=", "pattern", ".", "search", "(", "line", ")", "if", "match", ":", "info", "=", "match", ".", "groupdict", "(", ")", "series", "=", "extract_series_from_volume", "(", "info", "[", "'vol'", "]", ")", "if", "not", "info", "[", "'vol_num'", "]", ":", "info", "[", "'vol_num'", "]", "=", "info", "[", "'vol_num_alt'", "]", "if", "not", "info", "[", "'vol_num'", "]", ":", "info", "[", "'vol_num'", "]", "=", "info", "[", "'vol_num_alt2'", "]", "return", "{", "'year'", ":", "info", ".", "get", "(", "'year'", ",", "None", ")", ",", "'series'", ":", "series", ",", "'volume'", ":", "info", "[", "'vol_num'", "]", ",", "'page'", ":", "info", "[", "'page'", "]", "or", "info", "[", "'jinst_page'", "]", ",", "'page_end'", ":", "info", "[", "'page_end'", "]", ",", "'len'", ":", "len", "(", "info", "[", "'aftertitle'", "]", ")", "}", "return", "None" ]
Look for other numeration in line.
[ "Look", "for", "other", "numeration", "in", "line", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L456-L481
3,714
inspirehep/refextract
refextract/references/tag.py
identify_ibids
def identify_ibids(line):
    """Find IBIDs within the line, record their position and length,
    and replace them with underscores.

    @param line: (string) the working reference line
    @return: (tuple) containing 2 dictionaries and a string:
        Dictionary: matched IBID text: (Key: position of IBID in
            line; Value: matched IBID text)
        String: working line with matched IBIDs removed
    """
    ibid_match_txt = {}
    # Record details of each matched ibid:
    for m_ibid in re_ibid.finditer(line):
        ibid_match_txt[m_ibid.start()] = m_ibid.group(0)
        # Replace matched text in line with underscores:
        line = line[0:m_ibid.start()] + \
            "_" * len(m_ibid.group(0)) + \
            line[m_ibid.end():]

    return ibid_match_txt, line
python
[ "def", "identify_ibids", "(", "line", ")", ":", "ibid_match_txt", "=", "{", "}", "# Record details of each matched ibid:", "for", "m_ibid", "in", "re_ibid", ".", "finditer", "(", "line", ")", ":", "ibid_match_txt", "[", "m_ibid", ".", "start", "(", ")", "]", "=", "m_ibid", ".", "group", "(", "0", ")", "# Replace matched text in line with underscores:", "line", "=", "line", "[", "0", ":", "m_ibid", ".", "start", "(", ")", "]", "+", "\"_\"", "*", "len", "(", "m_ibid", ".", "group", "(", "0", ")", ")", "+", "line", "[", "m_ibid", ".", "end", "(", ")", ":", "]", "return", "ibid_match_txt", ",", "line" ]
Find IBIDs within the line, record their position and length,
and replace them with underscores.

@param line: (string) the working reference line
@return: (tuple) containing 2 dictionaries and a string:
    Dictionary: matched IBID text: (Key: position of IBID in line;
        Value: matched IBID text)
    String: working line with matched IBIDs removed
[ "Find", "IBIDs", "within", "the", "line", "record", "their", "position", "and", "length", "and", "replace", "them", "with", "underscores", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1052-L1070
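The masking trick above replaces each match with underscores of the same length, so the character offsets recorded in the dictionary stay valid for the modified line. `re_ibid` is defined elsewhere in refextract, so the pattern below is only an assumed stand-in for illustration:

    import re

    re_ibid = re.compile(r"[Ii]bid\.?")   # stand-in; the real re_ibid is more involved

    line = "Phys.Rev. D12 (1975) 100; Ibid. 140"
    matches = {m.start(): m.group(0) for m in re_ibid.finditer(line)}
    masked = re_ibid.sub(lambda m: "_" * len(m.group(0)), line)
    print(matches)   # {26: 'Ibid.'}
    print(masked)    # "Phys.Rev. D12 (1975) 100; _____ 140" -- same length as the input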
3,715
inspirehep/refextract
refextract/references/tag.py
find_numeration
def find_numeration(line):
    """Given a reference line, attempt to locate instances of citation
    'numeration' in the line.

    @param line: (string) the reference line.
    @return: (string) the reference line after numeration has been checked
        and possibly recognized/marked-up.
    """
    patterns = (
        # vol,page,year
        re_numeration_vol_page_yr,
        re_numeration_vol_nucphys_page_yr,
        re_numeration_nucphys_vol_page_yr,
        # With sub volume
        re_numeration_vol_subvol_nucphys_yr_page,
        re_numeration_vol_nucphys_yr_subvol_page,
        # vol,year,page
        re_numeration_vol_yr_page,
        re_numeration_nucphys_vol_yr_page,
        re_numeration_vol_nucphys_series_yr_page,
        # vol,page,year
        re_numeration_vol_series_nucphys_page_yr,
        re_numeration_vol_nucphys_series_page_yr,
        # year,vol,page
        re_numeration_yr_vol_page,
    )
    for pattern in patterns:
        match = pattern.match(line)
        if match:
            info = match.groupdict()
            series = info.get('series', None)
            if not series:
                series = extract_series_from_volume(info['vol'])
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt']
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt2']
            return {'year': info.get('year', None),
                    'series': series,
                    'volume': info['vol_num'],
                    'page': info['page'] or info['jinst_page'],
                    'page_end': info['page_end'],
                    'len': match.end()}

    return None
python
[ "def", "find_numeration", "(", "line", ")", ":", "patterns", "=", "(", "# vol,page,year", "re_numeration_vol_page_yr", ",", "re_numeration_vol_nucphys_page_yr", ",", "re_numeration_nucphys_vol_page_yr", ",", "# With sub volume", "re_numeration_vol_subvol_nucphys_yr_page", ",", "re_numeration_vol_nucphys_yr_subvol_page", ",", "# vol,year,page", "re_numeration_vol_yr_page", ",", "re_numeration_nucphys_vol_yr_page", ",", "re_numeration_vol_nucphys_series_yr_page", ",", "# vol,page,year", "re_numeration_vol_series_nucphys_page_yr", ",", "re_numeration_vol_nucphys_series_page_yr", ",", "# year,vol,page", "re_numeration_yr_vol_page", ",", ")", "for", "pattern", "in", "patterns", ":", "match", "=", "pattern", ".", "match", "(", "line", ")", "if", "match", ":", "info", "=", "match", ".", "groupdict", "(", ")", "series", "=", "info", ".", "get", "(", "'series'", ",", "None", ")", "if", "not", "series", ":", "series", "=", "extract_series_from_volume", "(", "info", "[", "'vol'", "]", ")", "if", "not", "info", "[", "'vol_num'", "]", ":", "info", "[", "'vol_num'", "]", "=", "info", "[", "'vol_num_alt'", "]", "if", "not", "info", "[", "'vol_num'", "]", ":", "info", "[", "'vol_num'", "]", "=", "info", "[", "'vol_num_alt2'", "]", "return", "{", "'year'", ":", "info", ".", "get", "(", "'year'", ",", "None", ")", ",", "'series'", ":", "series", ",", "'volume'", ":", "info", "[", "'vol_num'", "]", ",", "'page'", ":", "info", "[", "'page'", "]", "or", "info", "[", "'jinst_page'", "]", ",", "'page_end'", ":", "info", "[", "'page_end'", "]", ",", "'len'", ":", "match", ".", "end", "(", ")", "}", "return", "None" ]
Given a reference line, attempt to locate instances of citation
'numeration' in the line.

@param line: (string) the reference line.
@return: (string) the reference line after numeration has been checked
    and possibly recognized/marked-up.
[ "Given", "a", "reference", "line", "attempt", "to", "locate", "instances", "of", "citation", "numeration", "in", "the", "line", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1083-L1127
3,716
inspirehep/refextract
refextract/references/engine.py
remove_reference_line_marker
def remove_reference_line_marker(line):
    """Trim a reference line's 'marker' from the beginning of the line.

    @param line: (string) - the reference line.
    @return: (tuple) containing two strings:
        + The reference line's marker (or if there was not one, a 'space'
          character.
        + The reference line with it's marker removed from the beginning.
    """
    # Get patterns to identify reference-line marker patterns:
    marker_patterns = get_reference_line_numeration_marker_patterns()
    line = line.lstrip()

    marker_match = regex_match_list(line, marker_patterns)
    if marker_match is not None:
        # found a marker:
        marker_val = marker_match.group(u'mark')
        # trim the marker from the start of the line:
        line = line[marker_match.end():].lstrip()
    else:
        marker_val = u" "
    return (marker_val, line)
python
[ "def", "remove_reference_line_marker", "(", "line", ")", ":", "# Get patterns to identify reference-line marker patterns:", "marker_patterns", "=", "get_reference_line_numeration_marker_patterns", "(", ")", "line", "=", "line", ".", "lstrip", "(", ")", "marker_match", "=", "regex_match_list", "(", "line", ",", "marker_patterns", ")", "if", "marker_match", "is", "not", "None", ":", "# found a marker:", "marker_val", "=", "marker_match", ".", "group", "(", "u'mark'", ")", "# trim the marker from the start of the line:", "line", "=", "line", "[", "marker_match", ".", "end", "(", ")", ":", "]", ".", "lstrip", "(", ")", "else", ":", "marker_val", "=", "u\" \"", "return", "(", "marker_val", ",", "line", ")" ]
Trim a reference line's 'marker' from the beginning of the line.

@param line: (string) - the reference line.
@return: (tuple) containing two strings:
    + The reference line's marker (or if there was not one, a 'space'
      character.
    + The reference line with it's marker removed from the beginning.
[ "Trim", "a", "reference", "line", "s", "marker", "from", "the", "beginning", "of", "the", "line", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L92-L114
3,717
inspirehep/refextract
refextract/references/engine.py
roman2arabic
def roman2arabic(num):
    """Convert numbers from roman to arabic

    This function expects a string like XXII
    and outputs an integer
    """
    t = 0
    p = 0
    for r in num:
        n = 10 ** (205558 % ord(r) % 7) % 9995
        t += n - 2 * p % n
        p = n
    return t
python
[ "def", "roman2arabic", "(", "num", ")", ":", "t", "=", "0", "p", "=", "0", "for", "r", "in", "num", ":", "n", "=", "10", "**", "(", "205558", "%", "ord", "(", "r", ")", "%", "7", ")", "%", "9995", "t", "+=", "n", "-", "2", "*", "p", "%", "n", "p", "=", "n", "return", "t" ]
Convert numbers from roman to arabic

This function expects a string like XXII
and outputs an integer
[ "Convert", "numbers", "from", "roman", "to", "arabic" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L117-L129
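The arithmetic in the record above is deliberately terse. `10 ** (205558 % ord(r) % 7) % 9995` is a perfect-hash trick: the magic constant maps each roman letter's code point to an exponent 0..6, and the final `% 9995` turns 10**4 into 5, 10**5 into 50, and 10**6 into 500. The update `t += n - 2 * p % n` applies the subtractive rule: a smaller letter before a larger one was already added once, so it is subtracted twice. A small verification sketch:

    for letter, value in zip("IVXLCDM", (1, 5, 10, 50, 100, 500, 1000)):
        assert 10 ** (205558 % ord(letter) % 7) % 9995 == value

    assert roman2arabic("XIV") == 14        # X, then I (+1), then V minus twice I: 10 + 1 + 3
    assert roman2arabic("MCMXCIX") == 1999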
3,718
inspirehep/refextract
refextract/references/engine.py
format_report_number
def format_report_number(citation_elements):
    """Format report numbers that are missing a dash
    e.g. CERN-LCHH2003-01 to CERN-LHCC-2003-01
    """
    re_report = re.compile(ur'^(?P<name>[A-Z-]+)(?P<nums>[\d-]+)$', re.UNICODE)
    for el in citation_elements:
        if el['type'] == 'REPORTNUMBER':
            m = re_report.match(el['report_num'])
            if m:
                name = m.group('name')
                if not name.endswith('-'):
                    el['report_num'] = m.group('name') + '-' + m.group('nums')
    return citation_elements
python
[ "def", "format_report_number", "(", "citation_elements", ")", ":", "re_report", "=", "re", ".", "compile", "(", "ur'^(?P<name>[A-Z-]+)(?P<nums>[\\d-]+)$'", ",", "re", ".", "UNICODE", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'REPORTNUMBER'", ":", "m", "=", "re_report", ".", "match", "(", "el", "[", "'report_num'", "]", ")", "if", "m", ":", "name", "=", "m", ".", "group", "(", "'name'", ")", "if", "not", "name", ".", "endswith", "(", "'-'", ")", ":", "el", "[", "'report_num'", "]", "=", "m", ".", "group", "(", "'name'", ")", "+", "'-'", "+", "m", ".", "group", "(", "'nums'", ")", "return", "citation_elements" ]
Format report numbers that are missing a dash
e.g. CERN-LCHH2003-01 to CERN-LHCC-2003-01
[ "Format", "report", "numbers", "that", "are", "missing", "a", "dash" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L171-L184
3,719
inspirehep/refextract
refextract/references/engine.py
format_hep
def format_hep(citation_elements):
    """Format hep-th report numbers with a dash
    e.g. replaces hep-th-9711200 with hep-th/9711200
    """
    prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-', 'hep-ex-', 'hep-lat-',
                'math-ph-')
    for el in citation_elements:
        if el['type'] == 'REPORTNUMBER':
            for p in prefixes:
                if el['report_num'].startswith(p):
                    el['report_num'] = el['report_num'][:len(p) - 1] + '/' + \
                        el['report_num'][len(p):]
    return citation_elements
python
[ "def", "format_hep", "(", "citation_elements", ")", ":", "prefixes", "=", "(", "'astro-ph-'", ",", "'hep-th-'", ",", "'hep-ph-'", ",", "'hep-ex-'", ",", "'hep-lat-'", ",", "'math-ph-'", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'REPORTNUMBER'", ":", "for", "p", "in", "prefixes", ":", "if", "el", "[", "'report_num'", "]", ".", "startswith", "(", "p", ")", ":", "el", "[", "'report_num'", "]", "=", "el", "[", "'report_num'", "]", "[", ":", "len", "(", "p", ")", "-", "1", "]", "+", "'/'", "+", "el", "[", "'report_num'", "]", "[", "len", "(", "p", ")", ":", "]", "return", "citation_elements" ]
Format hep-th report numbers with a dash
e.g. replaces hep-th-9711200 with hep-th/9711200
[ "Format", "hep", "-", "th", "report", "numbers", "with", "a", "dash" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L187-L200
3,720
inspirehep/refextract
refextract/references/engine.py
look_for_books
def look_for_books(citation_elements, kbs):
    """Look for books in our kb

    Create book tags by using the authors and the title
    to find books in our knowledge base
    """
    title = None
    for el in citation_elements:
        if el['type'] == 'QUOTED':
            title = el
            break

    if title:
        normalized_title = title['title'].upper()
        if normalized_title in kbs['books']:
            line = kbs['books'][normalized_title]
            el = {'type': 'BOOK',
                  'misc_txt': '',
                  'authors': line[0],
                  'title': line[1],
                  'year': line[2].strip(';')}
            citation_elements.append(el)
            citation_elements.remove(title)

    return citation_elements
python
[ "def", "look_for_books", "(", "citation_elements", ",", "kbs", ")", ":", "title", "=", "None", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'QUOTED'", ":", "title", "=", "el", "break", "if", "title", ":", "normalized_title", "=", "title", "[", "'title'", "]", ".", "upper", "(", ")", "if", "normalized_title", "in", "kbs", "[", "'books'", "]", ":", "line", "=", "kbs", "[", "'books'", "]", "[", "normalized_title", "]", "el", "=", "{", "'type'", ":", "'BOOK'", ",", "'misc_txt'", ":", "''", ",", "'authors'", ":", "line", "[", "0", "]", ",", "'title'", ":", "line", "[", "1", "]", ",", "'year'", ":", "line", "[", "2", "]", ".", "strip", "(", "';'", ")", "}", "citation_elements", ".", "append", "(", "el", ")", "citation_elements", ".", "remove", "(", "title", ")", "return", "citation_elements" ]
Look for books in our kb

Create book tags by using the authors and the title
to find books in our knowledge base
[ "Look", "for", "books", "in", "our", "kb" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L215-L239
3,721
inspirehep/refextract
refextract/references/engine.py
split_volume_from_journal
def split_volume_from_journal(citation_elements):
    """Split volume from journal title

    We need this because sometimes the volume is attached to the journal
    title instead of the volume. In those cases we move it here from the
    title to the volume
    """
    for el in citation_elements:
        if el['type'] == 'JOURNAL' and ';' in el['title']:
            el['title'], series = el['title'].rsplit(';', 1)
            el['volume'] = series + el['volume']
    return citation_elements
python
[ "def", "split_volume_from_journal", "(", "citation_elements", ")", ":", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", "and", "';'", "in", "el", "[", "'title'", "]", ":", "el", "[", "'title'", "]", ",", "series", "=", "el", "[", "'title'", "]", ".", "rsplit", "(", "';'", ",", "1", ")", "el", "[", "'volume'", "]", "=", "series", "+", "el", "[", "'volume'", "]", "return", "citation_elements" ]
Split volume from journal title

We need this because sometimes the volume is attached to the journal
title instead of the volume. In those cases we move it here from the
title to the volume
[ "Split", "volume", "from", "journal", "title" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L242-L253
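A quick illustration of the record above, using a hypothetical citation element in which the series letter has been glued onto the journal title by an earlier tagging step:

    els = [{'type': 'JOURNAL', 'title': 'Nucl.Phys.;B', 'volume': '76'}]
    split_volume_from_journal(els)
    print(els[0]['title'], els[0]['volume'])   # Nucl.Phys. B76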
3,722
inspirehep/refextract
refextract/references/engine.py
remove_b_for_nucl_phys
def remove_b_for_nucl_phys(citation_elements):
    """Removes b from the volume of some journals

    Removes the B from the volume for Nucl.Phys.Proc.Suppl. because in
    INSPIRE that journal is handled differently.
    """
    for el in citation_elements:
        if el['type'] == 'JOURNAL' and el['title'] == 'Nucl.Phys.Proc.Suppl.' \
                and 'volume' in el \
                and (el['volume'].startswith('b') or el['volume'].startswith('B')):
            el['volume'] = el['volume'][1:]
    return citation_elements
python
[ "def", "remove_b_for_nucl_phys", "(", "citation_elements", ")", ":", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", "and", "el", "[", "'title'", "]", "==", "'Nucl.Phys.Proc.Suppl.'", "and", "'volume'", "in", "el", "and", "(", "el", "[", "'volume'", "]", ".", "startswith", "(", "'b'", ")", "or", "el", "[", "'volume'", "]", ".", "startswith", "(", "'B'", ")", ")", ":", "el", "[", "'volume'", "]", "=", "el", "[", "'volume'", "]", "[", "1", ":", "]", "return", "citation_elements" ]
Removes b from the volume of some journals

Removes the B from the volume for Nucl.Phys.Proc.Suppl. because in
INSPIRE that journal is handled differently.
[ "Removes", "b", "from", "the", "volume", "of", "some", "journals" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L256-L267
3,723
inspirehep/refextract
refextract/references/engine.py
mangle_volume
def mangle_volume(citation_elements):
    """Make sure the volume letter is before the volume number

    e.g. transforms 100B to B100
    """
    volume_re = re.compile(ur"(\d+)([A-Z])", re.U | re.I)
    for el in citation_elements:
        if el['type'] == 'JOURNAL':
            matches = volume_re.match(el['volume'])
            if matches:
                el['volume'] = matches.group(2) + matches.group(1)
    return citation_elements
python
[ "def", "mangle_volume", "(", "citation_elements", ")", ":", "volume_re", "=", "re", ".", "compile", "(", "ur\"(\\d+)([A-Z])\"", ",", "re", ".", "U", "|", "re", ".", "I", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", ":", "matches", "=", "volume_re", ".", "match", "(", "el", "[", "'volume'", "]", ")", "if", "matches", ":", "el", "[", "'volume'", "]", "=", "matches", ".", "group", "(", "2", ")", "+", "matches", ".", "group", "(", "1", ")", "return", "citation_elements" ]
Make sure the volume letter is before the volume number

e.g. transforms 100B to B100
[ "Make", "sure", "the", "volume", "letter", "is", "before", "the", "volume", "number" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L270-L282
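And a matching illustration for the record above: only JOURNAL elements are touched, and the regex simply moves a trailing volume letter to the front. The element list is hypothetical, and like the source itself this assumes Python 2 (the `ur""` literal is not valid Python 3):

    els = [{'type': 'JOURNAL', 'volume': '100B'},
           {'type': 'MISC', 'misc_txt': 'left alone'}]
    mangle_volume(els)
    print(els[0]['volume'])   # B100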
3,724
inspirehep/refextract
refextract/references/engine.py
split_citations
def split_citations(citation_elements):
    """Split a citation line in multiple citations

    We handle the case where the author has put 2 citations
    in the same line but split with ; or some other method.
    """
    splitted_citations = []
    new_elements = []
    current_recid = None
    current_doi = None

    def check_ibid(current_elements, trigger_el):
        for el in new_elements:
            if el['type'] == 'AUTH':
                return
        # Check for ibid
        if trigger_el.get('is_ibid', False):
            if splitted_citations:
                els = chain(reversed(current_elements),
                            reversed(splitted_citations[-1]))
            else:
                els = reversed(current_elements)
            for el in els:
                if el['type'] == 'AUTH':
                    new_elements.append(el.copy())
                    break

    def start_new_citation():
        """Start new citation"""
        splitted_citations.append(new_elements[:])
        del new_elements[:]

    for el in citation_elements:
        try:
            el_recid = el['recid']
        except KeyError:
            el_recid = None

        if current_recid and el_recid and current_recid == el_recid:
            # Do not start a new citation
            pass
        elif current_recid and el_recid and current_recid != el_recid \
                or current_doi and el['type'] == 'DOI' and \
                current_doi != el['doi_string']:
            start_new_citation()
            # Some authors may be found in the previous citation
            balance_authors(splitted_citations, new_elements)
        elif ';' in el['misc_txt']:
            misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
            if misc_txt:
                new_elements.append({'type': 'MISC',
                                     'misc_txt': misc_txt})
            start_new_citation()
            # In case el['recid'] is None, we want to reset it
            # because we are starting a new reference
            current_recid = el_recid
            while ';' in el['misc_txt']:
                misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
                if misc_txt:
                    new_elements.append({'type': 'MISC',
                                         'misc_txt': misc_txt})
                start_new_citation()
                current_recid = None

        if el_recid:
            current_recid = el_recid

        if el['type'] == 'DOI':
            current_doi = el['doi_string']

        check_ibid(new_elements, el)
        new_elements.append(el)

    splitted_citations.append(new_elements)

    return [el for el in splitted_citations if not empty_citation(el)]
python
def split_citations(citation_elements):
    """Split a citation line into multiple citations

    We handle the case where the author has put two citations on the
    same line, separated by ';' or some other delimiter.
    """
    splitted_citations = []
    new_elements = []
    current_recid = None
    current_doi = None

    def check_ibid(current_elements, trigger_el):
        for el in new_elements:
            if el['type'] == 'AUTH':
                return

        # Check for ibid
        if trigger_el.get('is_ibid', False):
            if splitted_citations:
                els = chain(reversed(current_elements),
                            reversed(splitted_citations[-1]))
            else:
                els = reversed(current_elements)
            for el in els:
                if el['type'] == 'AUTH':
                    new_elements.append(el.copy())
                    break

    def start_new_citation():
        """Start new citation"""
        splitted_citations.append(new_elements[:])
        del new_elements[:]

    for el in citation_elements:
        try:
            el_recid = el['recid']
        except KeyError:
            el_recid = None

        if current_recid and el_recid and current_recid == el_recid:
            # Do not start a new citation
            pass
        elif current_recid and el_recid and current_recid != el_recid \
                or current_doi and el['type'] == 'DOI' and \
                current_doi != el['doi_string']:
            start_new_citation()
            # Some authors may be found in the previous citation
            balance_authors(splitted_citations, new_elements)
        elif ';' in el['misc_txt']:
            misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
            if misc_txt:
                new_elements.append({'type': 'MISC',
                                     'misc_txt': misc_txt})
            start_new_citation()
            # In case el['recid'] is None, we want to reset it
            # because we are starting a new reference
            current_recid = el_recid
            while ';' in el['misc_txt']:
                misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
                if misc_txt:
                    new_elements.append({'type': 'MISC',
                                         'misc_txt': misc_txt})
                start_new_citation()
                current_recid = None

        if el_recid:
            current_recid = el_recid

        if el['type'] == 'DOI':
            current_doi = el['doi_string']

        check_ibid(new_elements, el)
        new_elements.append(el)

    splitted_citations.append(new_elements)

    return [el for el in splitted_citations if not empty_citation(el)]
[ "def", "split_citations", "(", "citation_elements", ")", ":", "splitted_citations", "=", "[", "]", "new_elements", "=", "[", "]", "current_recid", "=", "None", "current_doi", "=", "None", "def", "check_ibid", "(", "current_elements", ",", "trigger_el", ")", ":", "for", "el", "in", "new_elements", ":", "if", "el", "[", "'type'", "]", "==", "'AUTH'", ":", "return", "# Check for ibid", "if", "trigger_el", ".", "get", "(", "'is_ibid'", ",", "False", ")", ":", "if", "splitted_citations", ":", "els", "=", "chain", "(", "reversed", "(", "current_elements", ")", ",", "reversed", "(", "splitted_citations", "[", "-", "1", "]", ")", ")", "else", ":", "els", "=", "reversed", "(", "current_elements", ")", "for", "el", "in", "els", ":", "if", "el", "[", "'type'", "]", "==", "'AUTH'", ":", "new_elements", ".", "append", "(", "el", ".", "copy", "(", ")", ")", "break", "def", "start_new_citation", "(", ")", ":", "\"\"\"Start new citation\"\"\"", "splitted_citations", ".", "append", "(", "new_elements", "[", ":", "]", ")", "del", "new_elements", "[", ":", "]", "for", "el", "in", "citation_elements", ":", "try", ":", "el_recid", "=", "el", "[", "'recid'", "]", "except", "KeyError", ":", "el_recid", "=", "None", "if", "current_recid", "and", "el_recid", "and", "current_recid", "==", "el_recid", ":", "# Do not start a new citation", "pass", "elif", "current_recid", "and", "el_recid", "and", "current_recid", "!=", "el_recid", "or", "current_doi", "and", "el", "[", "'type'", "]", "==", "'DOI'", "and", "current_doi", "!=", "el", "[", "'doi_string'", "]", ":", "start_new_citation", "(", ")", "# Some authors may be found in the previous citation", "balance_authors", "(", "splitted_citations", ",", "new_elements", ")", "elif", "';'", "in", "el", "[", "'misc_txt'", "]", ":", "misc_txt", ",", "el", "[", "'misc_txt'", "]", "=", "el", "[", "'misc_txt'", "]", ".", "split", "(", "';'", ",", "1", ")", "if", "misc_txt", ":", "new_elements", ".", "append", "(", "{", "'type'", ":", "'MISC'", ",", "'misc_txt'", ":", "misc_txt", "}", ")", "start_new_citation", "(", ")", "# In case el['recid'] is None, we want to reset it", "# because we are starting a new reference", "current_recid", "=", "el_recid", "while", "';'", "in", "el", "[", "'misc_txt'", "]", ":", "misc_txt", ",", "el", "[", "'misc_txt'", "]", "=", "el", "[", "'misc_txt'", "]", ".", "split", "(", "';'", ",", "1", ")", "if", "misc_txt", ":", "new_elements", ".", "append", "(", "{", "'type'", ":", "'MISC'", ",", "'misc_txt'", ":", "misc_txt", "}", ")", "start_new_citation", "(", ")", "current_recid", "=", "None", "if", "el_recid", ":", "current_recid", "=", "el_recid", "if", "el", "[", "'type'", "]", "==", "'DOI'", ":", "current_doi", "=", "el", "[", "'doi_string'", "]", "check_ibid", "(", "new_elements", ",", "el", ")", "new_elements", ".", "append", "(", "el", ")", "splitted_citations", ".", "append", "(", "new_elements", ")", "return", "[", "el", "for", "el", "in", "splitted_citations", "if", "not", "empty_citation", "(", "el", ")", "]" ]
Split a citation line into multiple citations We handle the case where the author has put two citations on the same line, separated by ';' or some other delimiter.
[ "Split", "a", "citation", "line", "in", "multiple", "citations" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L307-L383
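A hedged sketch of the ';' splitting rule above; the element dicts are invented, and empty_citation/balance_authors are helpers from the same module:

elements = [
    {'type': 'JOURNAL', 'title': 'Phys. Lett.', 'misc_txt': ''},
    {'type': 'JOURNAL', 'title': 'Nucl. Phys.', 'misc_txt': '; '},
]
groups = split_citations(elements)
# The ';' accumulated before the second journal starts a new citation, so
# groups should be two single-element lists (assuming neither is "empty").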
3,725
inspirehep/refextract
refextract/references/engine.py
look_for_hdl
def look_for_hdl(citation_elements): """Looks for handle identifiers in the misc txt of the citation elements When finding an hdl, creates a new HDL element. @param citation_elements: (list) elements to process """ for el in list(citation_elements): matched_hdl = re_hdl.finditer(el['misc_txt']) for match in reversed(list(matched_hdl)): hdl_el = {'type': 'HDL', 'hdl_id': match.group('hdl_id'), 'misc_txt': el['misc_txt'][match.end():]} el['misc_txt'] = el['misc_txt'][0:match.start()] citation_elements.insert(citation_elements.index(el) + 1, hdl_el)
python
def look_for_hdl(citation_elements): """Looks for handle identifiers in the misc txt of the citation elements When finding an hdl, creates a new HDL element. @param citation_elements: (list) elements to process """ for el in list(citation_elements): matched_hdl = re_hdl.finditer(el['misc_txt']) for match in reversed(list(matched_hdl)): hdl_el = {'type': 'HDL', 'hdl_id': match.group('hdl_id'), 'misc_txt': el['misc_txt'][match.end():]} el['misc_txt'] = el['misc_txt'][0:match.start()] citation_elements.insert(citation_elements.index(el) + 1, hdl_el)
[ "def", "look_for_hdl", "(", "citation_elements", ")", ":", "for", "el", "in", "list", "(", "citation_elements", ")", ":", "matched_hdl", "=", "re_hdl", ".", "finditer", "(", "el", "[", "'misc_txt'", "]", ")", "for", "match", "in", "reversed", "(", "list", "(", "matched_hdl", ")", ")", ":", "hdl_el", "=", "{", "'type'", ":", "'HDL'", ",", "'hdl_id'", ":", "match", ".", "group", "(", "'hdl_id'", ")", ",", "'misc_txt'", ":", "el", "[", "'misc_txt'", "]", "[", "match", ".", "end", "(", ")", ":", "]", "}", "el", "[", "'misc_txt'", "]", "=", "el", "[", "'misc_txt'", "]", "[", "0", ":", "match", ".", "start", "(", ")", "]", "citation_elements", ".", "insert", "(", "citation_elements", ".", "index", "(", "el", ")", "+", "1", ",", "hdl_el", ")" ]
Looks for handle identifiers in the misc txt of the citation elements When finding an hdl, creates a new HDL element. @param citation_elements: (list) elements to process
[ "Looks", "for", "handle", "identifiers", "in", "the", "misc", "txt", "of", "the", "citation", "elements" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L596-L609
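A sketch under an assumed handle syntax; the real re_hdl is a module-level pattern defined elsewhere in refextract, so the stand-in below is purely illustrative and must live in the same namespace as the function for the sketch to run:

import re

# Hypothetical stand-in for the module's re_hdl ('hdl:<prefix>/<suffix>'):
re_hdl = re.compile(r'hdl:\s*(?P<hdl_id>[0-9.]+/\S+)')

el = {'type': 'MISC', 'misc_txt': u'preprint at hdl:10068/abc123'}
elements = [el]
look_for_hdl(elements)
# elements[1] == {'type': 'HDL', 'hdl_id': u'10068/abc123', 'misc_txt': u''}
# and the matched span has been cut out of elements[0]['misc_txt'].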
3,726
inspirehep/refextract
refextract/references/engine.py
look_for_hdl_urls
def look_for_hdl_urls(citation_elements): """Looks for handle identifiers that have already been identified as urls When finding an hdl, creates a new HDL element. @param citation_elements: (list) elements to process """ for el in citation_elements: if el['type'] == 'URL': match = re_hdl.match(el['url_string']) if match: el['type'] = 'HDL' el['hdl_id'] = match.group('hdl_id') del el['url_desc'] del el['url_string']
python
def look_for_hdl_urls(citation_elements): """Looks for handle identifiers that have already been identified as urls When finding an hdl, creates a new HDL element. @param citation_elements: (list) elements to process """ for el in citation_elements: if el['type'] == 'URL': match = re_hdl.match(el['url_string']) if match: el['type'] = 'HDL' el['hdl_id'] = match.group('hdl_id') del el['url_desc'] del el['url_string']
[ "def", "look_for_hdl_urls", "(", "citation_elements", ")", ":", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'URL'", ":", "match", "=", "re_hdl", ".", "match", "(", "el", "[", "'url_string'", "]", ")", "if", "match", ":", "el", "[", "'type'", "]", "=", "'HDL'", "el", "[", "'hdl_id'", "]", "=", "match", ".", "group", "(", "'hdl_id'", ")", "del", "el", "[", "'url_desc'", "]", "del", "el", "[", "'url_string'", "]" ]
Looks for handle identifiers that have already been identified as urls When finding an hdl, creates a new HDL element. @param citation_elements: (list) elements to process
[ "Looks", "for", "handle", "identifiers", "that", "have", "already", "been", "identified", "as", "urls" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L612-L625
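Continuing the same assumed handle syntax, a URL element that is really a handle is converted in place:

el = {'type': 'URL', 'url_string': u'hdl:10068/abc123', 'url_desc': u''}
look_for_hdl_urls([el])
# el == {'type': 'HDL', 'hdl_id': u'10068/abc123'}; both url_* keys are deleted.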
3,727
inspirehep/refextract
refextract/references/engine.py
parse_reference_line
def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None):
    """Parse one reference line

    @input a string representing a single reference bullet
    @output parsed references (a list of element objects)
    """
    # Strip the 'marker' (e.g. [1]) from this reference line:
    line_marker, ref_line = remove_reference_line_marker(ref_line)
    # Find DOI sections in citation
    ref_line, identified_dois = identify_and_tag_DOI(ref_line)
    # Identify and replace URLs in the line:
    ref_line, identified_urls = identify_and_tag_URLs(ref_line)
    # Tag <cds.JOURNAL>, etc.
    tagged_line, bad_titles_count = tag_reference_line(ref_line,
                                                       kbs,
                                                       bad_titles_count)

    # Debug print tagging (authors, titles, volumes, etc.)
    LOGGER.debug("tags %r", tagged_line)

    # Using the recorded information, create a MARC XML representation
    # of the rebuilt line:
    # At the same time, get stats of citations found in the reference line
    # (titles, urls, etc):
    citation_elements, line_marker, counts = \
        parse_tagged_reference_line(line_marker,
                                    tagged_line,
                                    identified_dois,
                                    identified_urls)

    # Transformations on elements
    split_volume_from_journal(citation_elements)
    format_volume(citation_elements)
    handle_special_journals(citation_elements, kbs)
    format_report_number(citation_elements)
    format_author_ed(citation_elements)
    look_for_books(citation_elements, kbs)
    format_hep(citation_elements)
    remove_b_for_nucl_phys(citation_elements)
    mangle_volume(citation_elements)
    arxiv_urls_to_report_numbers(citation_elements)
    look_for_hdl(citation_elements)
    look_for_hdl_urls(citation_elements)

    # Link references if desired
    if linker_callback:
        associate_recids(citation_elements, linker_callback)

    # Split the reference into multiple ones if needed
    splitted_citations = split_citations(citation_elements)

    # Look for implied ibids
    look_for_implied_ibids(splitted_citations)
    # Find year
    add_year_elements(splitted_citations)
    # Look for books in misc field
    look_for_undetected_books(splitted_citations, kbs)

    if linker_callback:
        # Link references with the newly added ibids/books information
        for citations in splitted_citations:
            associate_recids(citations, linker_callback)

    # FIXME: Needed?
    # Remove references with only misc text
    # splitted_citations = remove_invalid_references(splitted_citations)
    # Merge references with only misc text
    # splitted_citations = merge_invalid_references(splitted_citations)

    remove_duplicated_authors(splitted_citations)
    remove_duplicated_dois(splitted_citations)
    remove_duplicated_collaborations(splitted_citations)
    add_recid_elements(splitted_citations)

    # For debugging purposes
    print_citations(splitted_citations, line_marker)

    return splitted_citations, line_marker, counts, bad_titles_count
python
def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None):
    """Parse one reference line

    @input a string representing a single reference bullet
    @output parsed references (a list of element objects)
    """
    # Strip the 'marker' (e.g. [1]) from this reference line:
    line_marker, ref_line = remove_reference_line_marker(ref_line)
    # Find DOI sections in citation
    ref_line, identified_dois = identify_and_tag_DOI(ref_line)
    # Identify and replace URLs in the line:
    ref_line, identified_urls = identify_and_tag_URLs(ref_line)
    # Tag <cds.JOURNAL>, etc.
    tagged_line, bad_titles_count = tag_reference_line(ref_line,
                                                       kbs,
                                                       bad_titles_count)

    # Debug print tagging (authors, titles, volumes, etc.)
    LOGGER.debug("tags %r", tagged_line)

    # Using the recorded information, create a MARC XML representation
    # of the rebuilt line:
    # At the same time, get stats of citations found in the reference line
    # (titles, urls, etc):
    citation_elements, line_marker, counts = \
        parse_tagged_reference_line(line_marker,
                                    tagged_line,
                                    identified_dois,
                                    identified_urls)

    # Transformations on elements
    split_volume_from_journal(citation_elements)
    format_volume(citation_elements)
    handle_special_journals(citation_elements, kbs)
    format_report_number(citation_elements)
    format_author_ed(citation_elements)
    look_for_books(citation_elements, kbs)
    format_hep(citation_elements)
    remove_b_for_nucl_phys(citation_elements)
    mangle_volume(citation_elements)
    arxiv_urls_to_report_numbers(citation_elements)
    look_for_hdl(citation_elements)
    look_for_hdl_urls(citation_elements)

    # Link references if desired
    if linker_callback:
        associate_recids(citation_elements, linker_callback)

    # Split the reference into multiple ones if needed
    splitted_citations = split_citations(citation_elements)

    # Look for implied ibids
    look_for_implied_ibids(splitted_citations)
    # Find year
    add_year_elements(splitted_citations)
    # Look for books in misc field
    look_for_undetected_books(splitted_citations, kbs)

    if linker_callback:
        # Link references with the newly added ibids/books information
        for citations in splitted_citations:
            associate_recids(citations, linker_callback)

    # FIXME: Needed?
    # Remove references with only misc text
    # splitted_citations = remove_invalid_references(splitted_citations)
    # Merge references with only misc text
    # splitted_citations = merge_invalid_references(splitted_citations)

    remove_duplicated_authors(splitted_citations)
    remove_duplicated_dois(splitted_citations)
    remove_duplicated_collaborations(splitted_citations)
    add_recid_elements(splitted_citations)

    # For debugging purposes
    print_citations(splitted_citations, line_marker)

    return splitted_citations, line_marker, counts, bad_titles_count
[ "def", "parse_reference_line", "(", "ref_line", ",", "kbs", ",", "bad_titles_count", "=", "{", "}", ",", "linker_callback", "=", "None", ")", ":", "# Strip the 'marker' (e.g. [1]) from this reference line:", "line_marker", ",", "ref_line", "=", "remove_reference_line_marker", "(", "ref_line", ")", "# Find DOI sections in citation", "ref_line", ",", "identified_dois", "=", "identify_and_tag_DOI", "(", "ref_line", ")", "# Identify and replace URLs in the line:", "ref_line", ",", "identified_urls", "=", "identify_and_tag_URLs", "(", "ref_line", ")", "# Tag <cds.JOURNAL>, etc.", "tagged_line", ",", "bad_titles_count", "=", "tag_reference_line", "(", "ref_line", ",", "kbs", ",", "bad_titles_count", ")", "# Debug print tagging (authors, titles, volumes, etc.)", "LOGGER", ".", "debug", "(", "\"tags %r\"", ",", "tagged_line", ")", "# Using the recorded information, create a MARC XML representation", "# of the rebuilt line:", "# At the same time, get stats of citations found in the reference line", "# (titles, urls, etc):", "citation_elements", ",", "line_marker", ",", "counts", "=", "parse_tagged_reference_line", "(", "line_marker", ",", "tagged_line", ",", "identified_dois", ",", "identified_urls", ")", "# Transformations on elements", "split_volume_from_journal", "(", "citation_elements", ")", "format_volume", "(", "citation_elements", ")", "handle_special_journals", "(", "citation_elements", ",", "kbs", ")", "format_report_number", "(", "citation_elements", ")", "format_author_ed", "(", "citation_elements", ")", "look_for_books", "(", "citation_elements", ",", "kbs", ")", "format_hep", "(", "citation_elements", ")", "remove_b_for_nucl_phys", "(", "citation_elements", ")", "mangle_volume", "(", "citation_elements", ")", "arxiv_urls_to_report_numbers", "(", "citation_elements", ")", "look_for_hdl", "(", "citation_elements", ")", "look_for_hdl_urls", "(", "citation_elements", ")", "# Link references if desired", "if", "linker_callback", ":", "associate_recids", "(", "citation_elements", ",", "linker_callback", ")", "# Split the reference in multiple ones if needed", "splitted_citations", "=", "split_citations", "(", "citation_elements", ")", "# Look for implied ibids", "look_for_implied_ibids", "(", "splitted_citations", ")", "# Find year", "add_year_elements", "(", "splitted_citations", ")", "# Look for books in misc field", "look_for_undetected_books", "(", "splitted_citations", ",", "kbs", ")", "if", "linker_callback", ":", "# Link references with the newly added ibids/books information", "for", "citations", "in", "splitted_citations", ":", "associate_recids", "(", "citations", ",", "linker_callback", ")", "# FIXME: Needed?", "# Remove references with only misc text", "# splitted_citations = remove_invalid_references(splitted_citations)", "# Merge references with only misc text", "# splitted_citations = merge_invalid_references(splitted_citations)", "remove_duplicated_authors", "(", "splitted_citations", ")", "remove_duplicated_dois", "(", "splitted_citations", ")", "remove_duplicated_collaborations", "(", "splitted_citations", ")", "add_recid_elements", "(", "splitted_citations", ")", "# For debugging purposes", "print_citations", "(", "splitted_citations", ",", "line_marker", ")", "return", "splitted_citations", ",", "line_marker", ",", "counts", ",", "bad_titles_count" ]
Parse one reference line @input a string representing a single reference bullet @output parsed references (a list of element objects)
[ "Parse", "one", "reference", "line" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L639-L716
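A hedged end-to-end sketch; get_kbs is the same loader used by parse_references further down, and the assumption here is that it falls back to the bundled knowledge bases when called without arguments. The reference string is invented:

kbs = get_kbs()
citations, marker, counts, bad_titles = parse_reference_line(
    u'[1] J. Maldacena, Adv. Theor. Math. Phys. 2 (1998) 231.', kbs)
# citations: a list of element lists (one per split citation);
# marker: the stripped '[1]'; counts: tallies later fed to build_stats().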
3,728
inspirehep/refextract
refextract/references/engine.py
search_for_book_in_misc
def search_for_book_in_misc(citation, kbs): """Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc. """ citation_year = year_from_citation(citation) for citation_element in citation: LOGGER.debug(u"Searching for book title in: %s", citation_element['misc_txt']) for title in kbs['books']: startIndex = find_substring_ignore_special_chars(citation_element['misc_txt'], title) if startIndex != -1: line = kbs['books'][title.upper()] book_year = line[2].strip(';') book_authors = line[0] book_found = False if citation_year == book_year: # For now consider the citation as valid, we are using # an exact search, we don't need to check the authors # However, the code below will be useful if we decide # to introduce fuzzy matching. book_found = True for author in get_possible_author_names(citation): if find_substring_ignore_special_chars(book_authors, author) != -1: book_found = True for author in re.findall('[a-zA-Z]{4,}', book_authors): if find_substring_ignore_special_chars(citation_element['misc_txt'], author) != -1: book_found = True if book_found: LOGGER.debug(u"Book found: %s", title) book_element = {'type': 'BOOK', 'misc_txt': '', 'authors': book_authors, 'title': line[1], 'year': book_year} citation.append(book_element) citation_element['misc_txt'] = cut_substring_with_special_chars(citation_element['misc_txt'], title, startIndex) # Remove year from misc txt citation_element['misc_txt'] = remove_year(citation_element['misc_txt'], book_year) return True LOGGER.debug("Book not found!") return False
python
def search_for_book_in_misc(citation, kbs): """Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc. """ citation_year = year_from_citation(citation) for citation_element in citation: LOGGER.debug(u"Searching for book title in: %s", citation_element['misc_txt']) for title in kbs['books']: startIndex = find_substring_ignore_special_chars(citation_element['misc_txt'], title) if startIndex != -1: line = kbs['books'][title.upper()] book_year = line[2].strip(';') book_authors = line[0] book_found = False if citation_year == book_year: # For now consider the citation as valid, we are using # an exact search, we don't need to check the authors # However, the code below will be useful if we decide # to introduce fuzzy matching. book_found = True for author in get_possible_author_names(citation): if find_substring_ignore_special_chars(book_authors, author) != -1: book_found = True for author in re.findall('[a-zA-Z]{4,}', book_authors): if find_substring_ignore_special_chars(citation_element['misc_txt'], author) != -1: book_found = True if book_found: LOGGER.debug(u"Book found: %s", title) book_element = {'type': 'BOOK', 'misc_txt': '', 'authors': book_authors, 'title': line[1], 'year': book_year} citation.append(book_element) citation_element['misc_txt'] = cut_substring_with_special_chars(citation_element['misc_txt'], title, startIndex) # Remove year from misc txt citation_element['misc_txt'] = remove_year(citation_element['misc_txt'], book_year) return True LOGGER.debug("Book not found!") return False
[ "def", "search_for_book_in_misc", "(", "citation", ",", "kbs", ")", ":", "citation_year", "=", "year_from_citation", "(", "citation", ")", "for", "citation_element", "in", "citation", ":", "LOGGER", ".", "debug", "(", "u\"Searching for book title in: %s\"", ",", "citation_element", "[", "'misc_txt'", "]", ")", "for", "title", "in", "kbs", "[", "'books'", "]", ":", "startIndex", "=", "find_substring_ignore_special_chars", "(", "citation_element", "[", "'misc_txt'", "]", ",", "title", ")", "if", "startIndex", "!=", "-", "1", ":", "line", "=", "kbs", "[", "'books'", "]", "[", "title", ".", "upper", "(", ")", "]", "book_year", "=", "line", "[", "2", "]", ".", "strip", "(", "';'", ")", "book_authors", "=", "line", "[", "0", "]", "book_found", "=", "False", "if", "citation_year", "==", "book_year", ":", "# For now consider the citation as valid, we are using", "# an exact search, we don't need to check the authors", "# However, the code below will be useful if we decide", "# to introduce fuzzy matching.", "book_found", "=", "True", "for", "author", "in", "get_possible_author_names", "(", "citation", ")", ":", "if", "find_substring_ignore_special_chars", "(", "book_authors", ",", "author", ")", "!=", "-", "1", ":", "book_found", "=", "True", "for", "author", "in", "re", ".", "findall", "(", "'[a-zA-Z]{4,}'", ",", "book_authors", ")", ":", "if", "find_substring_ignore_special_chars", "(", "citation_element", "[", "'misc_txt'", "]", ",", "author", ")", "!=", "-", "1", ":", "book_found", "=", "True", "if", "book_found", ":", "LOGGER", ".", "debug", "(", "u\"Book found: %s\"", ",", "title", ")", "book_element", "=", "{", "'type'", ":", "'BOOK'", ",", "'misc_txt'", ":", "''", ",", "'authors'", ":", "book_authors", ",", "'title'", ":", "line", "[", "1", "]", ",", "'year'", ":", "book_year", "}", "citation", ".", "append", "(", "book_element", ")", "citation_element", "[", "'misc_txt'", "]", "=", "cut_substring_with_special_chars", "(", "citation_element", "[", "'misc_txt'", "]", ",", "title", ",", "startIndex", ")", "# Remove year from misc txt", "citation_element", "[", "'misc_txt'", "]", "=", "remove_year", "(", "citation_element", "[", "'misc_txt'", "]", ",", "book_year", ")", "return", "True", "LOGGER", ".", "debug", "(", "\"Book not found!\"", ")", "return", "False" ]
Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc.
[ "Searches", "for", "books", "in", "the", "misc_txt", "field", "if", "the", "citation", "is", "not", "recognized", "as", "anything", "like", "a", "journal", "book", "etc", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L736-L779
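A sketch of the knowledge-base shape this function appears to expect, inferred from the lookups above (uppercase title keys mapping to author/title/year triples); the entry and the matching behaviour of the find_substring_ignore_special_chars helper are assumptions:

kbs = {'books': {
    'GRAVITATION': ('Misner, Thorne, Wheeler', 'Gravitation', '1973;'),
}}
citation = [{'type': 'MISC',
             'misc_txt': u'Misner, Thorne, Wheeler, GRAVITATION, 1973'}]
if search_for_book_in_misc(citation, kbs):
    # a BOOK element was appended and the matched title cut from misc_txt
    pass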
3,729
inspirehep/refextract
refextract/references/engine.py
map_tag_to_subfield
def map_tag_to_subfield(tag_type, line, cur_misc_txt, dest):
    """Create a new reference element"""
    closing_tag = '</cds.%s>' % tag_type
    # find the closing tag for this element in the line:
    idx_closing_tag = line.find(closing_tag)
    # Sanity check - did we find a closing tag?
    if idx_closing_tag == -1:
        # no closing </cds.TAG> tag found - strip the opening tag and move
        # past this recognised element as it is unreliable:
        identified_citation_element = None
        line = line[len('<cds.%s>' % tag_type):]
    else:
        tag_content = line[:idx_closing_tag]
        identified_citation_element = {'type': tag_type,
                                       'misc_txt': cur_misc_txt,
                                       dest: tag_content}
        ending_tag_pos = idx_closing_tag + len(closing_tag)
        line = line[ending_tag_pos:]
        cur_misc_txt = u""

    return identified_citation_element, line, cur_misc_txt
python
def map_tag_to_subfield(tag_type, line, cur_misc_txt, dest):
    """Create a new reference element"""
    closing_tag = '</cds.%s>' % tag_type
    # find the closing tag for this element in the line:
    idx_closing_tag = line.find(closing_tag)
    # Sanity check - did we find a closing tag?
    if idx_closing_tag == -1:
        # no closing </cds.TAG> tag found - strip the opening tag and move
        # past this recognised element as it is unreliable:
        identified_citation_element = None
        line = line[len('<cds.%s>' % tag_type):]
    else:
        tag_content = line[:idx_closing_tag]
        identified_citation_element = {'type': tag_type,
                                       'misc_txt': cur_misc_txt,
                                       dest: tag_content}
        ending_tag_pos = idx_closing_tag + len(closing_tag)
        line = line[ending_tag_pos:]
        cur_misc_txt = u""

    return identified_citation_element, line, cur_misc_txt
[ "def", "map_tag_to_subfield", "(", "tag_type", ",", "line", ",", "cur_misc_txt", ",", "dest", ")", ":", "closing_tag", "=", "'</cds.%s>'", "%", "tag_type", "# extract the institutional report-number from the line:", "idx_closing_tag", "=", "line", ".", "find", "(", "closing_tag", ")", "# Sanity check - did we find a closing tag?", "if", "idx_closing_tag", "==", "-", "1", ":", "# no closing </cds.TAG> tag found - strip the opening tag and move past this", "# recognised reportnumber as it is unreliable:", "identified_citation_element", "=", "None", "line", "=", "line", "[", "len", "(", "'<cds.%s>'", "%", "tag_type", ")", ":", "]", "else", ":", "tag_content", "=", "line", "[", ":", "idx_closing_tag", "]", "identified_citation_element", "=", "{", "'type'", ":", "tag_type", ",", "'misc_txt'", ":", "cur_misc_txt", ",", "dest", ":", "tag_content", "}", "ending_tag_pos", "=", "idx_closing_tag", "+", "len", "(", "closing_tag", ")", "line", "=", "line", "[", "ending_tag_pos", ":", "]", "cur_misc_txt", "=", "u\"\"", "return", "identified_citation_element", ",", "line", ",", "cur_misc_txt" ]
Create a new reference element
[ "Create", "a", "new", "reference", "element" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1292-L1312
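A sketch of the calling convention, assuming the caller has already consumed the opening <cds.TAG> so the line starts with the tagged content; 'report_num' as the destination key is illustrative:

line = u'CERN-TH-2023-001</cds.REPORTNUMBER> , more text'
element, rest, misc = map_tag_to_subfield('REPORTNUMBER', line,
                                          u'some misc', 'report_num')
# element == {'type': 'REPORTNUMBER', 'misc_txt': u'some misc',
#             'report_num': u'CERN-TH-2023-001'}
# rest == u' , more text'; misc has been reset to u''.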
3,730
inspirehep/refextract
refextract/references/engine.py
remove_leading_garbage_lines_from_reference_section
def remove_leading_garbage_lines_from_reference_section(ref_sectn): """Sometimes, the first lines of the extracted references are completely blank or email addresses. These must be removed as they are not references. @param ref_sectn: (list) of strings - the reference section lines @return: (list) of strings - the reference section without leading blank lines or email addresses. """ p_email = re.compile(ur'^\s*e\-?mail', re.UNICODE) while ref_sectn and (ref_sectn[0].isspace() or p_email.match(ref_sectn[0])): ref_sectn.pop(0) return ref_sectn
python
def remove_leading_garbage_lines_from_reference_section(ref_sectn): """Sometimes, the first lines of the extracted references are completely blank or email addresses. These must be removed as they are not references. @param ref_sectn: (list) of strings - the reference section lines @return: (list) of strings - the reference section without leading blank lines or email addresses. """ p_email = re.compile(ur'^\s*e\-?mail', re.UNICODE) while ref_sectn and (ref_sectn[0].isspace() or p_email.match(ref_sectn[0])): ref_sectn.pop(0) return ref_sectn
[ "def", "remove_leading_garbage_lines_from_reference_section", "(", "ref_sectn", ")", ":", "p_email", "=", "re", ".", "compile", "(", "ur'^\\s*e\\-?mail'", ",", "re", ".", "UNICODE", ")", "while", "ref_sectn", "and", "(", "ref_sectn", "[", "0", "]", ".", "isspace", "(", ")", "or", "p_email", ".", "match", "(", "ref_sectn", "[", "0", "]", ")", ")", ":", "ref_sectn", ".", "pop", "(", "0", ")", "return", "ref_sectn" ]
Sometimes, the first lines of the extracted references are completely blank or email addresses. These must be removed as they are not references. @param ref_sectn: (list) of strings - the reference section lines @return: (list) of strings - the reference section without leading blank lines or email addresses.
[ "Sometimes", "the", "first", "lines", "of", "the", "extracted", "references", "are", "completely", "blank", "or", "email", "addresses", ".", "These", "must", "be", "removed", "as", "they", "are", "not", "references", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1364-L1375
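A minimal check with invented lines (note the e-mail pattern is lower-case only):

lines = [u'   ', u'e-mail: author@example.org', u'[1] First reference.']
remove_leading_garbage_lines_from_reference_section(lines)
# -> [u'[1] First reference.'] (the input list is also trimmed in place)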
3,731
inspirehep/refextract
refextract/references/engine.py
get_plaintext_document_body
def get_plaintext_document_body(fpath, keep_layout=False): """Given a file-path to a full-text, return a list of unicode strings whereby each string is a line of the fulltext. In the case of a plain-text document, this simply means reading the contents in from the file. In the case of a PDF however, this means converting the document to plaintext. It raises UnknownDocumentTypeError if the document is not a PDF or plain text. @param fpath: (string) - the path to the fulltext file @return: (list) of strings - each string being a line in the document. """ textbody = [] mime_type = magic.from_file(fpath, mime=True) if mime_type == "text/plain": with open(fpath, "r") as f: textbody = [line.decode("utf-8") for line in f.readlines()] elif mime_type == "application/pdf": textbody = convert_PDF_to_plaintext(fpath, keep_layout) else: raise UnknownDocumentTypeError(mime_type) return textbody
python
def get_plaintext_document_body(fpath, keep_layout=False): """Given a file-path to a full-text, return a list of unicode strings whereby each string is a line of the fulltext. In the case of a plain-text document, this simply means reading the contents in from the file. In the case of a PDF however, this means converting the document to plaintext. It raises UnknownDocumentTypeError if the document is not a PDF or plain text. @param fpath: (string) - the path to the fulltext file @return: (list) of strings - each string being a line in the document. """ textbody = [] mime_type = magic.from_file(fpath, mime=True) if mime_type == "text/plain": with open(fpath, "r") as f: textbody = [line.decode("utf-8") for line in f.readlines()] elif mime_type == "application/pdf": textbody = convert_PDF_to_plaintext(fpath, keep_layout) else: raise UnknownDocumentTypeError(mime_type) return textbody
[ "def", "get_plaintext_document_body", "(", "fpath", ",", "keep_layout", "=", "False", ")", ":", "textbody", "=", "[", "]", "mime_type", "=", "magic", ".", "from_file", "(", "fpath", ",", "mime", "=", "True", ")", "if", "mime_type", "==", "\"text/plain\"", ":", "with", "open", "(", "fpath", ",", "\"r\"", ")", "as", "f", ":", "textbody", "=", "[", "line", ".", "decode", "(", "\"utf-8\"", ")", "for", "line", "in", "f", ".", "readlines", "(", ")", "]", "elif", "mime_type", "==", "\"application/pdf\"", ":", "textbody", "=", "convert_PDF_to_plaintext", "(", "fpath", ",", "keep_layout", ")", "else", ":", "raise", "UnknownDocumentTypeError", "(", "mime_type", ")", "return", "textbody" ]
Given a file-path to a full-text, return a list of unicode strings whereby each string is a line of the fulltext. In the case of a plain-text document, this simply means reading the contents in from the file. In the case of a PDF however, this means converting the document to plaintext. It raises UnknownDocumentTypeError if the document is not a PDF or plain text. @param fpath: (string) - the path to the fulltext file @return: (list) of strings - each string being a line in the document.
[ "Given", "a", "file", "-", "path", "to", "a", "full", "-", "text", "return", "a", "list", "of", "unicode", "strings", "whereby", "each", "string", "is", "a", "line", "of", "the", "fulltext", ".", "In", "the", "case", "of", "a", "plain", "-", "text", "document", "this", "simply", "means", "reading", "the", "contents", "in", "from", "the", "file", ".", "In", "the", "case", "of", "a", "PDF", "however", "this", "means", "converting", "the", "document", "to", "plaintext", ".", "It", "raises", "UnknownDocumentTypeError", "if", "the", "document", "is", "not", "a", "PDF", "or", "plain", "text", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1384-L1408
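Usage sketch with a hypothetical path; UnknownDocumentTypeError is presumably importable from the same package:

try:
    lines = get_plaintext_document_body('/tmp/paper.pdf')  # hypothetical path
except UnknownDocumentTypeError as err:
    print('unsupported MIME type: %s' % err)
else:
    print('%d lines of text' % len(lines))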
3,732
inspirehep/refextract
refextract/references/engine.py
parse_references
def parse_references(reference_lines, recid=None, override_kbs_files=None, reference_format=u"{title} {volume} ({year}) {page}", linker_callback=None): """Parse a list of references Given a list of raw reference lines (list of strings), output a list of dictionaries containing the parsed references """ # RefExtract knowledge bases kbs = get_kbs(custom_kbs_files=override_kbs_files) # Identify journal titles, report numbers, URLs, DOIs, and authors... processed_references, counts, dummy_bad_titles_count = \ parse_references_elements(reference_lines, kbs, linker_callback) return (build_references(processed_references, reference_format), build_stats(counts))
python
def parse_references(reference_lines, recid=None, override_kbs_files=None, reference_format=u"{title} {volume} ({year}) {page}", linker_callback=None): """Parse a list of references Given a list of raw reference lines (list of strings), output a list of dictionaries containing the parsed references """ # RefExtract knowledge bases kbs = get_kbs(custom_kbs_files=override_kbs_files) # Identify journal titles, report numbers, URLs, DOIs, and authors... processed_references, counts, dummy_bad_titles_count = \ parse_references_elements(reference_lines, kbs, linker_callback) return (build_references(processed_references, reference_format), build_stats(counts))
[ "def", "parse_references", "(", "reference_lines", ",", "recid", "=", "None", ",", "override_kbs_files", "=", "None", ",", "reference_format", "=", "u\"{title} {volume} ({year}) {page}\"", ",", "linker_callback", "=", "None", ")", ":", "# RefExtract knowledge bases", "kbs", "=", "get_kbs", "(", "custom_kbs_files", "=", "override_kbs_files", ")", "# Identify journal titles, report numbers, URLs, DOIs, and authors...", "processed_references", ",", "counts", ",", "dummy_bad_titles_count", "=", "parse_references_elements", "(", "reference_lines", ",", "kbs", ",", "linker_callback", ")", "return", "(", "build_references", "(", "processed_references", ",", "reference_format", ")", ",", "build_stats", "(", "counts", ")", ")" ]
Parse a list of references Given a list of raw reference lines (list of strings), output a list of dictionaries containing the parsed references
[ "Parse", "a", "list", "of", "references" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1411-L1428
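The two-tuple return lends itself to direct unpacking; the reference line is invented and the default knowledge bases are assumed to be available:

references, stats = parse_references(
    [u'[1] J. Maldacena, Adv. Theor. Math. Phys. 2 (1998) 231.'])
# references: one entry per parsed citation, with journals rendered via the
# default u"{title} {volume} ({year}) {page}" format; stats: see build_stats.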
3,733
inspirehep/refextract
refextract/references/engine.py
build_stats
def build_stats(counts): """Return stats information from counts structure.""" stats = { 'status': 0, 'reportnum': counts['reportnum'], 'title': counts['title'], 'author': counts['auth_group'], 'url': counts['url'], 'doi': counts['doi'], 'misc': counts['misc'], } stats_str = "%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s" % stats stats["old_stats_str"] = stats_str stats["date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S") stats["version"] = version return stats
python
def build_stats(counts): """Return stats information from counts structure.""" stats = { 'status': 0, 'reportnum': counts['reportnum'], 'title': counts['title'], 'author': counts['auth_group'], 'url': counts['url'], 'doi': counts['doi'], 'misc': counts['misc'], } stats_str = "%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s" % stats stats["old_stats_str"] = stats_str stats["date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S") stats["version"] = version return stats
[ "def", "build_stats", "(", "counts", ")", ":", "stats", "=", "{", "'status'", ":", "0", ",", "'reportnum'", ":", "counts", "[", "'reportnum'", "]", ",", "'title'", ":", "counts", "[", "'title'", "]", ",", "'author'", ":", "counts", "[", "'auth_group'", "]", ",", "'url'", ":", "counts", "[", "'url'", "]", ",", "'doi'", ":", "counts", "[", "'doi'", "]", ",", "'misc'", ":", "counts", "[", "'misc'", "]", ",", "}", "stats_str", "=", "\"%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s\"", "%", "stats", "stats", "[", "\"old_stats_str\"", "]", "=", "stats_str", "stats", "[", "\"date\"", "]", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "stats", "[", "\"version\"", "]", "=", "version", "return", "stats" ]
Return stats information from counts structure.
[ "Return", "stats", "information", "from", "counts", "structure", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1431-L1446
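A quick check of the summary string with an invented counts dict; note the function reads the 'auth_group' key but re-exposes it as 'author' (version and datetime are module-level names):

counts = {'reportnum': 1, 'title': 2, 'auth_group': 2,
          'url': 0, 'doi': 1, 'misc': 3}
stats = build_stats(counts)
assert stats['old_stats_str'] == '0-1-2-2-0-1-3'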
3,734
inspirehep/refextract
refextract/documents/pdf.py
replace_undesirable_characters
def replace_undesirable_characters(line): """ Replace certain bad characters in a text line. @param line: (string) the text line in which bad characters are to be replaced. @return: (string) the text line after the bad characters have been replaced. """ # These are separate because we want a particular order for bad_string, replacement in UNDESIRABLE_STRING_REPLACEMENTS: line = line.replace(bad_string, replacement) for bad_char, replacement in iteritems(UNDESIRABLE_CHAR_REPLACEMENTS): line = line.replace(bad_char, replacement) return line
python
def replace_undesirable_characters(line): """ Replace certain bad characters in a text line. @param line: (string) the text line in which bad characters are to be replaced. @return: (string) the text line after the bad characters have been replaced. """ # These are separate because we want a particular order for bad_string, replacement in UNDESIRABLE_STRING_REPLACEMENTS: line = line.replace(bad_string, replacement) for bad_char, replacement in iteritems(UNDESIRABLE_CHAR_REPLACEMENTS): line = line.replace(bad_char, replacement) return line
[ "def", "replace_undesirable_characters", "(", "line", ")", ":", "# These are separate because we want a particular order", "for", "bad_string", ",", "replacement", "in", "UNDESIRABLE_STRING_REPLACEMENTS", ":", "line", "=", "line", ".", "replace", "(", "bad_string", ",", "replacement", ")", "for", "bad_char", ",", "replacement", "in", "iteritems", "(", "UNDESIRABLE_CHAR_REPLACEMENTS", ")", ":", "line", "=", "line", ".", "replace", "(", "bad_char", ",", "replacement", ")", "return", "line" ]
Replace certain bad characters in a text line. @param line: (string) the text line in which bad characters are to be replaced. @return: (string) the text line after the bad characters have been replaced.
[ "Replace", "certain", "bad", "characters", "in", "a", "text", "line", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/pdf.py#L434-L449
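Usage sketch; the two replacement tables are module-level constants, so the exact substitutions are only assumed here (e.g. curly quotes and non-breaking spaces mapped to plain ASCII):

clean = replace_undesirable_characters(u'\u201cbad\u201d\u00a0line')
# with the assumed tables this would yield u'"bad" line'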
3,735
inspirehep/refextract
refextract/documents/pdf.py
convert_PDF_to_plaintext
def convert_PDF_to_plaintext(fpath, keep_layout=False): """ Convert PDF to txt using pdftotext Take the path to a PDF file and run pdftotext for this file, capturing the output. @param fpath: (string) path to the PDF file @return: (list) of unicode strings (contents of the PDF file translated into plaintext; each string is a line in the document.) """ if not os.path.isfile(CFG_PATH_PDFTOTEXT): raise IOError('Missing pdftotext executable') if keep_layout: layout_option = "-layout" else: layout_option = "-raw" doclines = [] # Pattern to check for lines with a leading page-break character. # If this pattern is matched, we want to split the page-break into # its own line because we rely upon this for trying to strip headers # and footers, and for some other pattern matching. p_break_in_line = re.compile(ur'^\s*\f(.+)$', re.UNICODE) # build pdftotext command: cmd_pdftotext = [CFG_PATH_PDFTOTEXT, layout_option, "-q", "-enc", "UTF-8", fpath, "-"] LOGGER.debug(u"%s", ' '.join(cmd_pdftotext)) # open pipe to pdftotext: pipe_pdftotext = subprocess.Popen(cmd_pdftotext, stdout=subprocess.PIPE) # read back results: for docline in pipe_pdftotext.stdout: unicodeline = docline.decode("utf-8") # Check for a page-break in this line: m_break_in_line = p_break_in_line.match(unicodeline) if m_break_in_line is None: # There was no page-break in this line. Just add the line: doclines.append(unicodeline) else: # If there was a page-break character in the same line as some # text, split it out into its own line so that we can later # try to find headers and footers: doclines.append(u"\f") doclines.append(m_break_in_line.group(1)) LOGGER.debug(u"convert_PDF_to_plaintext found: %s lines of text", len(doclines)) return doclines
python
def convert_PDF_to_plaintext(fpath, keep_layout=False): """ Convert PDF to txt using pdftotext Take the path to a PDF file and run pdftotext for this file, capturing the output. @param fpath: (string) path to the PDF file @return: (list) of unicode strings (contents of the PDF file translated into plaintext; each string is a line in the document.) """ if not os.path.isfile(CFG_PATH_PDFTOTEXT): raise IOError('Missing pdftotext executable') if keep_layout: layout_option = "-layout" else: layout_option = "-raw" doclines = [] # Pattern to check for lines with a leading page-break character. # If this pattern is matched, we want to split the page-break into # its own line because we rely upon this for trying to strip headers # and footers, and for some other pattern matching. p_break_in_line = re.compile(ur'^\s*\f(.+)$', re.UNICODE) # build pdftotext command: cmd_pdftotext = [CFG_PATH_PDFTOTEXT, layout_option, "-q", "-enc", "UTF-8", fpath, "-"] LOGGER.debug(u"%s", ' '.join(cmd_pdftotext)) # open pipe to pdftotext: pipe_pdftotext = subprocess.Popen(cmd_pdftotext, stdout=subprocess.PIPE) # read back results: for docline in pipe_pdftotext.stdout: unicodeline = docline.decode("utf-8") # Check for a page-break in this line: m_break_in_line = p_break_in_line.match(unicodeline) if m_break_in_line is None: # There was no page-break in this line. Just add the line: doclines.append(unicodeline) else: # If there was a page-break character in the same line as some # text, split it out into its own line so that we can later # try to find headers and footers: doclines.append(u"\f") doclines.append(m_break_in_line.group(1)) LOGGER.debug(u"convert_PDF_to_plaintext found: %s lines of text", len(doclines)) return doclines
[ "def", "convert_PDF_to_plaintext", "(", "fpath", ",", "keep_layout", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "CFG_PATH_PDFTOTEXT", ")", ":", "raise", "IOError", "(", "'Missing pdftotext executable'", ")", "if", "keep_layout", ":", "layout_option", "=", "\"-layout\"", "else", ":", "layout_option", "=", "\"-raw\"", "doclines", "=", "[", "]", "# Pattern to check for lines with a leading page-break character.", "# If this pattern is matched, we want to split the page-break into", "# its own line because we rely upon this for trying to strip headers", "# and footers, and for some other pattern matching.", "p_break_in_line", "=", "re", ".", "compile", "(", "ur'^\\s*\\f(.+)$'", ",", "re", ".", "UNICODE", ")", "# build pdftotext command:", "cmd_pdftotext", "=", "[", "CFG_PATH_PDFTOTEXT", ",", "layout_option", ",", "\"-q\"", ",", "\"-enc\"", ",", "\"UTF-8\"", ",", "fpath", ",", "\"-\"", "]", "LOGGER", ".", "debug", "(", "u\"%s\"", ",", "' '", ".", "join", "(", "cmd_pdftotext", ")", ")", "# open pipe to pdftotext:", "pipe_pdftotext", "=", "subprocess", ".", "Popen", "(", "cmd_pdftotext", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "# read back results:", "for", "docline", "in", "pipe_pdftotext", ".", "stdout", ":", "unicodeline", "=", "docline", ".", "decode", "(", "\"utf-8\"", ")", "# Check for a page-break in this line:", "m_break_in_line", "=", "p_break_in_line", ".", "match", "(", "unicodeline", ")", "if", "m_break_in_line", "is", "None", ":", "# There was no page-break in this line. Just add the line:", "doclines", ".", "append", "(", "unicodeline", ")", "else", ":", "# If there was a page-break character in the same line as some", "# text, split it out into its own line so that we can later", "# try to find headers and footers:", "doclines", ".", "append", "(", "u\"\\f\"", ")", "doclines", ".", "append", "(", "m_break_in_line", ".", "group", "(", "1", ")", ")", "LOGGER", ".", "debug", "(", "u\"convert_PDF_to_plaintext found: %s lines of text\"", ",", "len", "(", "doclines", ")", ")", "return", "doclines" ]
Convert PDF to txt using pdftotext Take the path to a PDF file and run pdftotext for this file, capturing the output. @param fpath: (string) path to the PDF file @return: (list) of unicode strings (contents of the PDF file translated into plaintext; each string is a line in the document.)
[ "Convert", "PDF", "to", "txt", "using", "pdftotext" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/pdf.py#L452-L499
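Usage sketch with a hypothetical path; keep_layout switches pdftotext between -layout and -raw, and page breaks come back as lines starting with u'\f':

doclines = convert_PDF_to_plaintext('/tmp/paper.pdf', keep_layout=True)
page_breaks = [i for i, l in enumerate(doclines) if l.startswith(u'\f')]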
3,736
inspirehep/refextract
refextract/authors/regexs.py
get_author_affiliation_numeration_str
def get_author_affiliation_numeration_str(punct=None): """The numeration which can be applied to author names. Numeration is sometimes found next to authors of papers. @return: (string), which can be compiled into a regex; identifies numeration next to an author name. """ # FIXME cater for start or end numeration (ie two puncs) # Number to look for, either general or specific re_number = r'(?:\d\d?)' re_chained_numbers = r"(?:(?:[,;]\s*%s\.?\s*))*" % re_number # Punctuation surrounding the number, either general or specific again if punct is None: re_punct = r"(?:[\{\(\[]?)" else: re_punct = re.escape(punct) # Generic number finder (MUST NOT INCLUDE NAMED GROUPS!!!) numeration_str = r""" (?:\s*(%(punct)s)\s* ## Left numeration punctuation (%(num)s\s* ## Core numeration item, either specific or generic %(num_chain)s ## Extra numeration, either generic or empty ) (?:(%(punct)s)) ## Right numeration punctuation )""" % {'num': re_number, 'num_chain': re_chained_numbers, 'punct': re_punct} return numeration_str
python
def get_author_affiliation_numeration_str(punct=None): """The numeration which can be applied to author names. Numeration is sometimes found next to authors of papers. @return: (string), which can be compiled into a regex; identifies numeration next to an author name. """ # FIXME cater for start or end numeration (ie two puncs) # Number to look for, either general or specific re_number = r'(?:\d\d?)' re_chained_numbers = r"(?:(?:[,;]\s*%s\.?\s*))*" % re_number # Punctuation surrounding the number, either general or specific again if punct is None: re_punct = r"(?:[\{\(\[]?)" else: re_punct = re.escape(punct) # Generic number finder (MUST NOT INCLUDE NAMED GROUPS!!!) numeration_str = r""" (?:\s*(%(punct)s)\s* ## Left numeration punctuation (%(num)s\s* ## Core numeration item, either specific or generic %(num_chain)s ## Extra numeration, either generic or empty ) (?:(%(punct)s)) ## Right numeration punctuation )""" % {'num': re_number, 'num_chain': re_chained_numbers, 'punct': re_punct} return numeration_str
[ "def", "get_author_affiliation_numeration_str", "(", "punct", "=", "None", ")", ":", "# FIXME cater for start or end numeration (ie two puncs)", "# Number to look for, either general or specific", "re_number", "=", "r'(?:\\d\\d?)'", "re_chained_numbers", "=", "r\"(?:(?:[,;]\\s*%s\\.?\\s*))*\"", "%", "re_number", "# Punctuation surrounding the number, either general or specific again", "if", "punct", "is", "None", ":", "re_punct", "=", "r\"(?:[\\{\\(\\[]?)\"", "else", ":", "re_punct", "=", "re", ".", "escape", "(", "punct", ")", "# Generic number finder (MUST NOT INCLUDE NAMED GROUPS!!!)", "numeration_str", "=", "r\"\"\"\n (?:\\s*(%(punct)s)\\s* ## Left numeration punctuation\n (%(num)s\\s* ## Core numeration item, either specific or generic\n %(num_chain)s ## Extra numeration, either generic or empty\n )\n (?:(%(punct)s)) ## Right numeration punctuation\n )\"\"\"", "%", "{", "'num'", ":", "re_number", ",", "'num_chain'", ":", "re_chained_numbers", ",", "'punct'", ":", "re_punct", "}", "return", "numeration_str" ]
The numeration which can be applied to author names. Numeration is sometimes found next to authors of papers. @return: (string), which can be compiled into a regex; identifies numeration next to an author name.
[ "The", "numeration", "which", "can", "be", "applied", "to", "author", "names", ".", "Numeration", "is", "sometimes", "found", "next", "to", "authors", "of", "papers", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/authors/regexs.py#L36-L64
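The returned snippet embeds layout whitespace and '##' comments, so it only behaves as intended when compiled in verbose mode; a sketch (the group index is positional and hedged):

import re

num_re = re.compile(get_author_affiliation_numeration_str(),
                    re.VERBOSE | re.UNICODE)
m = num_re.search(u'J. Bloggs (1, 2) and co.')
# when a match is found, m.group(2) holds the core numeration, here u'1, 2'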
3,737
inspirehep/refextract
refextract/authors/regexs.py
make_auth_regex_str
def make_auth_regex_str(etal, initial_surname_author=None, surname_initial_author=None):
    """
    Returns a regular expression to be used to identify groups of author names in a citation.
    This method contains patterns for default authors, so no arguments are needed
    for the most reliable form of matching.

    The returned author pattern is capable of:
    1. Identifying single authors, with at least one initial, of the form:
    'Initial. [surname prefix...] Surname'

    2. Identifying multiple authors, each with at least one initial, of the form:
    'Initial. [surname prefix...] Surname, [and] [Initial. [surname prefix...] Surname ... ]'

    ***(Note that a full stop, hyphen or apostrophe after each initial is
    absolutely vital in identifying authors for both of these above methods.
    Initials must also be uppercase.)***

    3. Capture 'et al' statements at the end of author groups (allows for
    authors with et al to be processed differently from 'standard' authors)

    4. Identifying a single author surname positioned before the phrase
    'et al', with no initials: 'Surname et al'

    5. Identifying two author surnames positioned before the phrase
    'et al', with no initials, but separated by 'and' or '&':
    'Surname [and|&] Surname et al'

    6. Identifying authors of the form:
    'Surname Initials, Initials Surname [Initials Surname]...'.
    Some authors choose to represent the most important cited author
    (in a list of authors) by listing first their surname, and then their
    initials. Since this form has few distinguishing characteristics which
    could be used to create a reliable pattern, at least one standard author
    must be present after it in order to improve the accuracy.

    7. Capture editor notation, which can take many forms,
    e.g. 'eds.', 'editors.', 'edited by', etc. Authors captured in this way
    can be treated as 'editor groups', and hence processed differently
    if needed from standard authors

    @param etal: (string) The regular expression used to identify
    'etal' notation
    @param initial_surname_author: (string) An optional argument, which
    replaces the default regex used to identify 'initial surname' authors
    @param surname_initial_author: (string) An optional argument, which
    replaces the default regex used to identify 'surname initial' authors

    @return: (string) The full author group identification regex, which will:
    - detect groups of authors in a range of formats, e.g.:
    C. Hayward, V van Edwards, M. J. Woodbridge, and L. Kelloggs et al.,
    - detect whether the author group has been marked up as editors of the doc.
    (therefore they will NOT be marked up as authors) e.g.:
    ed. C Hayward | (ed) V van Edwards | ed by, M. J. Woodbridge and V van Edwards
    | L. Kelloggs (editors) | M. Jackson (eds.) | ...
    - detect a maximum of two surnames only if the surname(s) is followed by
    'et al' (must be separated by 'and' if there are two), e.g.:
    Amaldi et al., | Hayward and Yellow et al.,
    """
    if not initial_surname_author:
        # Standard author, with a maximum of 6 initials, and a surname.
        # The initials MUST be uppercase, and MUST have at least a dot, hyphen
        # or apostrophe between them.
        initial_surname_author = get_initial_surname_author_pattern()

    if not surname_initial_author:
        # The author name of the form: 'surname initial(s)'
        # This is sometimes the representation of the first author found
        # inside an author group.
        # This author pattern is only used to find a maximum of ONE author
        # inside an author group.
        # Authors of this form MUST have either a comma after the initials,
        # or an 'and', which denotes the presence of other authors in the
        # author group.
        surname_initial_author = get_surname_initial_author_pattern()

    # Pattern used to locate a GROUP of author names in a reference
    # The format of an author can take many forms:
    # J. Bloggs, W.-H. Smith, D. De Samuel, G.L. Bayetian, C. Hayward et al.,
    # (the use of 'et. al' is a giveaway that the preceding
    # text was indeed an author name)
    # This will also match authors which seem to be labeled as editors
    # (with the phrase 'ed.'), in which case the author will be thrown away
    # later on.
    # The regex returned has around 100 named groups already (max), so any
    # new groups must be started using '?:'
    return ur"""
 (?:^|\s+|\()                          ## Must be the start of the line, or a space (or an opening bracket in very few cases)
 (?P<es>                               ## Look for editor notation before the author
  (?:(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditors?)((?:\.\s?)|(?:\.?\s)))                    ## 'eds?. ' | 'ed ' | 'ed.'
  |(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditions?)(?:(?:\.\s?)|(?:\.?\s))by(?:\s|([:,]\s)))  ## 'eds?. by, ' | 'ed. by: ' | 'ed by ' | 'ed. by '| 'ed by: '
  |(?:\(\s?([Ee][Dd]s?|[Ee]dited|[Ee]ditors?)(?:(?:\.\s?)|(?:\.?\s))?\)))             ## '( eds?. )' | '(ed.)' | '(ed )' | '( ed )' | '(ed)'
 )?

 ## **** (1) , one or two surnames which MUST end with 'et al' (e.g. Amaldi et al.,)
 (?P<author_names>
  (?:
   (?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)?         ## Initials
   [A-Z][^0-9_\.\s]{2,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+))      ## Surname
   (?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)?         ## Initials
   (?P<multi_surs>
    (?:(?:[Aa][Nn][Dd]|\&)\s+)                               ## Maybe 'and' or '&' tied with another name
    [A-Z][^0-9_\.\s]{3,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+))     ## More surnames
    (?:[A-Z](?:[ -][A-Z])?\s+)?                              ## with initials
   )?
   (?:                      # Look for editor notation after the author group...
    \s*[,\s]?\s*            # Possibly a comma/space
    %(ed)s
   )?
   (?P<et2>
    %(etal)s                ## et al, MUST BE PRESENT however, for this author form
   )
   (?:                      # Look for editor notation after the author group...
    \s*[,\s]?\s*            # Possibly a comma/space
    %(ed)s
   )?
  )
  |
  (?:                       ## **** (2) , The standard author form.. (e.g. J. Bloggs)
                            ## This author form can either start with a normal 'initial surname' author,
                            ## or it can begin with a single 'surname initial' author
   (?:                      ## The first author in the 'author group'
    %(i_s_author)s
    |
    (?P<sur_initial_auth>%(s_i_author)s)
   )
   (?P<multi_auth>
    (?:                     ## Then 0 or more author names
     \s*[,\s]\s*
     (?:
      %(i_s_author)s
      |
      %(s_i_author)s
     )
    )*
    (?:                     ## Maybe 'and' or '&' tied with another name
     (?:
      \s*[,\s]\s*           ## handle "J. Dan, and H. Pon"
      (?:[Aa][Nn][DdsS]|\&)
      \s+
     )
     (?P<mult_auth_sub>
      %(i_s_author)s
      |
      %(s_i_author)s
     )
    )?
   )
   (?P<et>                  # 'et al' need not be present for either of
    \s*[,\s]\s*
    %(etal)s                # 'initial surname' or 'surname initial' authors
   )?
  )
 )
 (?P<ee>
  \s*[,\s]\s*
  \(?
  (?:[Ee][Dd]s|[Ee]ditors)\.?
  \)?
  [\.\,]{0,2}
 )?

 # End of all author name patterns
 \)?                        # A possible closing bracket to finish the author group

 (?=[\s,.;:])               # Consolidate by checking we are not partially matching
                            # something else
 """ % {'etal': etal,
        'i_s_author': initial_surname_author,
        's_i_author': surname_initial_author,
        'ed': re_ed_notation}
python
def make_auth_regex_str(etal, initial_surname_author=None, surname_initial_author=None):
    """
    Returns a regular expression to be used to identify groups of author names in a citation.
    This method contains patterns for default authors, so no arguments are needed for the
    most reliable form of matching.

    The returned author pattern is capable of:
    1. Identifying single authors, with at least one initial, of the form:
    'Initial. [surname prefix...] Surname'

    2. Identifying multiple authors, each with at least one initial, of the form:
    'Initial. [surname prefix...] Surname, [and] [Initial. [surname prefix...] Surname ... ]'

    ***(Note that a full stop, hyphen or apostrophe after each initial is
    absolutely vital in identifying authors for both of these above methods.
    Initials must also be uppercase.)***

    3. Capture 'et al' statements at the end of author groups (allows for
    authors with et al to be processed differently from 'standard' authors)

    4. Identifying a single author surname positioned before the phrase
    'et al', with no initials: 'Surname et al'

    5. Identifying two author surnames positioned before the phrase
    'et al', with no initials, but separated by 'and' or '&':
    'Surname [and|&] Surname et al'

    6. Identifying authors of the form:
    'Surname Initials, Initials Surname [Initials Surname]...'. Some authors
    choose to represent the most important cited author (in a list of authors)
    by listing first their surname, and then their initials. Since this form
    has few distinguishing characteristics which could be used to create a
    reliable pattern, at least one standard author must be present after it
    in order to improve the accuracy.

    7. Capture editor notation, which can take many forms, e.g.
    'eds. editors. edited by. etc.'. Authors captured in this way can be
    treated as 'editor groups', and hence processed differently if needed
    from standard authors

    @param etal: (string) The regular expression used to identify 'etal' notation
    @param initial_surname_author: (string) An optional argument which replaces
    the default regex used to identify authors of the form 'initials surname'
    @param surname_initial_author: (string) An optional argument which replaces
    the default regex used to identify authors of the form 'surname initials'

    @return: (string) The full author group identification regex, which will:
    - detect groups of authors in a range of formats, e.g.:
      C. Hayward, V van Edwards, M. J. Woodbridge, and L. Kelloggs et al.,
    - detect whether the author group has been marked up as editors of the doc.
      (therefore they will NOT be marked up as authors) e.g.:
      ed. C Hayward | (ed) V van Edwards | ed by, M. J. Woodbridge and V van Edwards
      | L. Kelloggs (editors) | M. Jackson (eds.) | ...
    - detect a maximum of two surnames only if the surname(s) is followed by 'et al'
      (must be separated by 'and' if there are two), e.g.:
      Amaldi et al., | Hayward and Yellow et al.,
    """
    if not initial_surname_author:
        # Standard author, with a maximum of 6 initials, and a surname.
        # The Initials MUST be uppercase, and MUST have at least a dot, hyphen
        # or apostrophe between them.
        initial_surname_author = get_initial_surname_author_pattern()

    if not surname_initial_author:
        # The author name of the form: 'surname initial(s)'
        # This is sometimes the representation of the first author found inside an author group.
        # This author pattern is only used to find a maximum of ONE author inside an author group.
        # Authors of this form MUST have either a comma after the initials, or an 'and',
        # which denotes the presence of other authors in the author group.
        surname_initial_author = get_surname_initial_author_pattern()

    # Pattern used to locate a GROUP of author names in a reference
    # The format of an author can take many forms:
    # J. Bloggs, W.-H. Smith, D. De Samuel, G.L. Bayetian, C. Hayward et al.,
    # (the use of 'et. al' is a giveaway that the preceding
    # text was indeed an author name)
    # This will also match authors which seem to be labeled as editors (with the phrase 'ed.')
    # In which case, the author will be thrown away later on.
    # The regex returned has around 100 named groups already (max), so any new groups must be
    # started using '?:'
    return ur"""
    (?:^|\s+|\()              ## Must be the start of the line, or a space (or an opening bracket in very few cases)
    (?P<es>                   ## Look for editor notation before the author
     (?:(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditors?)((?:\.\s?)|(?:\.?\s)))                    ## 'eds?. ' | 'ed ' | 'ed.'
     |(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditions?)(?:(?:\.\s?)|(?:\.?\s))by(?:\s|([:,]\s)))  ## 'eds?. by, ' | 'ed. by: ' | 'ed by ' | 'ed. by '| 'ed by: '
     |(?:\(\s?([Ee][Dd]s?|[Ee]dited|[Ee]ditors?)(?:(?:\.\s?)|(?:\.?\s))?\)))             ## '( eds?. )' | '(ed.)' | '(ed )' | '( ed )' | '(ed)'
    )?

    ## **** (1) , one or two surnames which MUST end with 'et al' (e.g. Amaldi et al.,)
    (?P<author_names>
     (?:
      (?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)?        ## Initials
      [A-Z][^0-9_\.\s]{2,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+))     ## Surname
      (?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)?        ## Initials
      (?P<multi_surs>
       (?:(?:[Aa][Nn][Dd]|\&)\s+)                              ## Maybe 'and' or '&' tied with another name
       [A-Z][^0-9_\.\s]{3,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+))    ## More surnames
       (?:[A-Z](?:[ -][A-Z])?\s+)?                             ## with initials
      )?
      (?:                  # Look for editor notation after the author group...
       \s*[,\s]?\s*        # Possibly a comma/space
       %(ed)s
      )?
      (?P<et2>
       %(etal)s            ## et al, MUST BE PRESENT however, for this author form
      )
      (?:                  # Look for editor notation after the author group...
       \s*[,\s]?\s*        # Possibly a comma/space
       %(ed)s
      )?
     ) |

     (?:
      ## **** (2) , The standard author form.. (e.g. J. Bloggs)
      ## This author form can either start with a normal 'initial surname' author,
      ## or it can begin with a single 'surname initial' author

      (?:                  ## The first author in the 'author group'
       %(i_s_author)s |
       (?P<sur_initial_auth>%(s_i_author)s)
      )

      (?P<multi_auth>
       (?:                 ## Then 0 or more author names
        \s*[,\s]\s*
        (?:
         %(i_s_author)s | %(s_i_author)s
        )
       )*

       (?:                 ## Maybe 'and' or '&' tied with another name
        (?:
         \s*[,\s]\s*       ## handle "J. Dan, and H. Pon"
         (?:[Aa][Nn][DdsS]|\&)
         \s+
        )
        (?P<mult_auth_sub>
         %(i_s_author)s | %(s_i_author)s
        )
       )?
      )
      (?P<et>              # 'et al' need not be present for either of
       \s*[,\s]\s*
       %(etal)s            # 'initial surname' or 'surname initial' authors
      )?
     )
    )
    (?P<ee>
     \s*[,\s]\s*
     \(?
     (?:[Ee][Dd]s|[Ee]ditors)\.?
     \)?
     [\.\,]{0,2}
    )?
    # End of all author name patterns

    \)?                    # A possible closing bracket to finish the author group
    (?=[\s,.;:])           # Consolidate by checking we are not partially matching
                           # something else

    """ % {'etal': etal,
           'i_s_author': initial_surname_author,
           's_i_author': surname_initial_author,
           'ed': re_ed_notation}
[ "def", "make_auth_regex_str", "(", "etal", ",", "initial_surname_author", "=", "None", ",", "surname_initial_author", "=", "None", ")", ":", "if", "not", "initial_surname_author", ":", "# Standard author, with a maximum of 6 initials, and a surname.", "# The Initials MUST be uppercase, and MUST have at least a dot, hypen", "# or apostrophe between them.", "initial_surname_author", "=", "get_initial_surname_author_pattern", "(", ")", "if", "not", "surname_initial_author", ":", "# The author name of the form: 'surname initial(s)'", "# This is sometimes the represention of the first author found inside an author group.", "# This author pattern is only used to find a maximum of ONE author inside an author group.", "# Authors of this form MUST have either a comma after the initials, or an 'and',", "# which denotes the presence of other authors in the author group.", "surname_initial_author", "=", "get_surname_initial_author_pattern", "(", ")", "# Pattern used to locate a GROUP of author names in a reference", "# The format of an author can take many forms:", "# J. Bloggs, W.-H. Smith, D. De Samuel, G.L. Bayetian, C. Hayward et al.,", "# (the use of 'et. al' is a giveaway that the preceeding", "# text was indeed an author name)", "# This will also match authors which seem to be labeled as editors (with the phrase 'ed.')", "# In which case, the author will be thrown away later on.", "# The regex returned has around 100 named groups already (max), so any new groups must be", "# started using '?:'", "return", "ur\"\"\"\n (?:^|\\s+|\\() ## Must be the start of the line, or a space (or an opening bracket in very few cases)\n (?P<es> ## Look for editor notation before the author\n (?:(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditors?)((?:\\.\\s?)|(?:\\.?\\s))) ## 'eds?. ' | 'ed ' | 'ed.'\n |(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditions?)(?:(?:\\.\\s?)|(?:\\.?\\s))by(?:\\s|([:,]\\s))) ## 'eds?. by, ' | 'ed. by: ' | 'ed by ' | 'ed. by '| 'ed by: '\n |(?:\\(\\s?([Ee][Dd]s?|[Ee]dited|[Ee]ditors?)(?:(?:\\.\\s?)|(?:\\.?\\s))?\\))) ## '( eds?. )' | '(ed.)' | '(ed )' | '( ed )' | '(ed)'\n )?\n\n ## **** (1) , one or two surnames which MUST end with 'et al' (e.g. Amaldi et al.,)\n (?P<author_names>\n (?:\n (?:[A-Z](?:\\s*[.'’-]{1,2}\\s*[A-Z]){0,4}[.\\s]\\s*)? ## Initials\n [A-Z][^0-9_\\.\\s]{2,20}(?:(?:[,\\.]\\s*)|(?:[,\\.]?\\s+)) ## Surname\n (?:[A-Z](?:\\s*[.'’-]{1,2}\\s*[A-Z]){0,4}[.\\s]\\s*)? ## Initials\n (?P<multi_surs>\n (?:(?:[Aa][Nn][Dd]|\\&)\\s+) ## Maybe 'and' or '&' tied with another name\n [A-Z][^0-9_\\.\\s]{3,20}(?:(?:[,\\.]\\s*)|(?:[,\\.]?\\s+)) ## More surnames\n (?:[A-Z](?:[ -][A-Z])?\\s+)? ## with initials\n )?\n (?: # Look for editor notation after the author group...\n \\s*[,\\s]?\\s* # Eventually a coma/space\n %(ed)s\n )?\n (?P<et2>\n %(etal)s ## et al, MUST BE PRESENT however, for this author form\n )\n (?: # Look for editor notation after the author group...\n \\s*[,\\s]?\\s* # Eventually a coma/space\n %(ed)s\n )?\n ) |\n\n (?:\n ## **** (2) , The standard author form.. (e.g. J. Bloggs)\n ## This author form can either start with a normal 'initial surname' author,\n ## or it can begin with a single 'surname initial' author\n\n (?: ## The first author in the 'author group'\n %(i_s_author)s |\n (?P<sur_initial_auth>%(s_i_author)s)\n )\n\n (?P<multi_auth>\n (?: ## Then 0 or more author names\n \\s*[,\\s]\\s*\n (?:\n %(i_s_author)s | %(s_i_author)s\n )\n )*\n\n (?: ## Maybe 'and' or '&' tied with another name\n (?:\n \\s*[,\\s]\\s* ## handle \"J. Dan, and H. 
Pon\"\n (?:[Aa][Nn][DdsS]|\\&)\n \\s+\n )\n (?P<mult_auth_sub>\n %(i_s_author)s | %(s_i_author)s\n )\n )?\n )\n (?P<et> # 'et al' need not be present for either of\n \\s*[,\\s]\\s*\n %(etal)s # 'initial surname' or 'surname initial' authors\n )?\n )\n )\n (?P<ee>\n \\s*[,\\s]\\s*\n \\(?\n (?:[Ee][Dd]s|[Ee]ditors)\\.?\n \\)?\n [\\.\\,]{0,2}\n )?\n # End of all author name patterns\n\n \\)? # A possible closing bracket to finish the author group\n (?=[\\s,.;:]) # Consolidate by checking we are not partially matching\n # something else\n\n \"\"\"", "%", "{", "'etal'", ":", "etal", ",", "'i_s_author'", ":", "initial_surname_author", ",", "'s_i_author'", ":", "surname_initial_author", ",", "'ed'", ":", "re_ed_notation", "}" ]
Returns a regular expression to be used to identify groups of author names in a citation.
This method contains patterns for default authors, so no arguments are needed for the
most reliable form of matching.

The returned author pattern is capable of:
1. Identifying single authors, with at least one initial, of the form:
'Initial. [surname prefix...] Surname'

2. Identifying multiple authors, each with at least one initial, of the form:
'Initial. [surname prefix...] Surname, [and] [Initial. [surname prefix...] Surname ... ]'

***(Note that a full stop, hyphen or apostrophe after each initial is absolutely vital
in identifying authors for both of these above methods. Initials must also be uppercase.)***

3. Capture 'et al' statements at the end of author groups (allows for authors with et al
to be processed differently from 'standard' authors)

4. Identifying a single author surname positioned before the phrase 'et al',
with no initials: 'Surname et al'

5. Identifying two author surnames positioned before the phrase 'et al', with no initials,
but separated by 'and' or '&': 'Surname [and|&] Surname et al'

6. Identifying authors of the form: 'Surname Initials, Initials Surname [Initials Surname]...'.
Some authors choose to represent the most important cited author (in a list of authors)
by listing first their surname, and then their initials. Since this form has few
distinguishing characteristics which could be used to create a reliable pattern, at least
one standard author must be present after it in order to improve the accuracy.

7. Capture editor notation, which can take many forms, e.g. 'eds. editors. edited by. etc.'.
Authors captured in this way can be treated as 'editor groups', and hence processed
differently if needed from standard authors

@param etal: (string) The regular expression used to identify 'etal' notation
@param initial_surname_author: (string) An optional argument which replaces the default
regex used to identify authors of the form 'initials surname'
@param surname_initial_author: (string) An optional argument which replaces the default
regex used to identify authors of the form 'surname initials'

@return: (string) The full author group identification regex, which will:
- detect groups of authors in a range of formats, e.g.:
  C. Hayward, V van Edwards, M. J. Woodbridge, and L. Kelloggs et al.,
- detect whether the author group has been marked up as editors of the doc.
  (therefore they will NOT be marked up as authors) e.g.:
  ed. C Hayward | (ed) V van Edwards | ed by, M. J. Woodbridge and V van Edwards
  | L. Kelloggs (editors) | M. Jackson (eds.) | ...
- detect a maximum of two surnames only if the surname(s) is followed by 'et al'
  (must be separated by 'and' if there are two), e.g.:
  Amaldi et al., | Hayward and Yellow et al.,
[ "Returns", "a", "regular", "expression", "to", "be", "used", "to", "identify", "groups", "of", "author", "names", "in", "a", "citation", ".", "This", "method", "contains", "patterns", "for", "default", "authors", "so", "no", "arguments", "are", "needed", "for", "the", "most", "reliable", "form", "of", "matching", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/authors/regexs.py#L192-L350
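A minimal usage sketch (not part of the dataset record): the 'etal' pattern below is a simplified stand-in for the project's real one, and the compile flags are an assumption based on the whitespace and '##' comments inside the returned pattern string.

# -*- coding: utf-8 -*-
import re

from refextract.authors.regexs import make_auth_regex_str

# Simplified 'et al' pattern -- illustrative only, not the project's own.
etal = ur"[Ee][Tt]\.?\s+[Aa][Ll]\.?"
re_auth = re.compile(make_auth_regex_str(etal), re.VERBOSE | re.UNICODE)

m = re_auth.search(u" C. Hayward, V. van Edwards and M. J. Woodbridge, Some Title")
if m:
    print(m.group('author_names'))  # the matched author group, if any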
3,738
inspirehep/refextract
refextract/references/find.py
find_reference_section
def find_reference_section(docbody):
    """Search in document body for its reference section.

    More precisely, find the first line of the reference section. Effectively,
    the function starts at the end of a document and works backwards,
    line-by-line, looking for the title of a reference section. It stops when
    (if) it finds something that it considers to be the first line of a
    reference section.
    @param docbody: (list) of strings - the full document body.
    @return: (dictionary) :
        { 'start_line' : (integer) - index in docbody of 1st reference line,
          'title_string' : (string) - title of the reference section.
          'marker' : (string) - the marker of the first reference line,
          'marker_pattern' : (string) - regexp string used to find the marker,
          'title_marker_same_line' : (integer) - flag to indicate whether the
                                     reference section title was on the same
                                     line as the first reference line's marker
                                     or not. 1 if it was; 0 if not.
        }
        Much of this information is used by later functions to rebuild
        a reference section.
     -- OR --
        (None) - when the reference section could not be found.
    """
    ref_details = None
    title_patterns = get_reference_section_title_patterns()

    # Try to find refs section title:
    for title_pattern in title_patterns:
        # Look for title pattern in docbody
        for reversed_index, line in enumerate(reversed(docbody)):
            title_match = title_pattern.match(line)
            if title_match:
                title = title_match.group('title')
                index = len(docbody) - 1 - reversed_index
                temp_ref_details, found_title = find_numeration(docbody[index:index + 6], title)
                if temp_ref_details:
                    if ref_details and 'title' in ref_details and ref_details['title'] and not temp_ref_details['title']:
                        continue
                    if ref_details and 'marker' in ref_details and ref_details['marker'] and not temp_ref_details['marker']:
                        continue

                    ref_details = temp_ref_details
                    ref_details['start_line'] = index
                    ref_details['title_string'] = title

                if found_title:
                    break

        if ref_details:
            break

    return ref_details
python
def find_reference_section(docbody):
    """Search in document body for its reference section.

    More precisely, find the first line of the reference section. Effectively,
    the function starts at the end of a document and works backwards,
    line-by-line, looking for the title of a reference section. It stops when
    (if) it finds something that it considers to be the first line of a
    reference section.
    @param docbody: (list) of strings - the full document body.
    @return: (dictionary) :
        { 'start_line' : (integer) - index in docbody of 1st reference line,
          'title_string' : (string) - title of the reference section.
          'marker' : (string) - the marker of the first reference line,
          'marker_pattern' : (string) - regexp string used to find the marker,
          'title_marker_same_line' : (integer) - flag to indicate whether the
                                     reference section title was on the same
                                     line as the first reference line's marker
                                     or not. 1 if it was; 0 if not.
        }
        Much of this information is used by later functions to rebuild
        a reference section.
     -- OR --
        (None) - when the reference section could not be found.
    """
    ref_details = None
    title_patterns = get_reference_section_title_patterns()

    # Try to find refs section title:
    for title_pattern in title_patterns:
        # Look for title pattern in docbody
        for reversed_index, line in enumerate(reversed(docbody)):
            title_match = title_pattern.match(line)
            if title_match:
                title = title_match.group('title')
                index = len(docbody) - 1 - reversed_index
                temp_ref_details, found_title = find_numeration(docbody[index:index + 6], title)
                if temp_ref_details:
                    if ref_details and 'title' in ref_details and ref_details['title'] and not temp_ref_details['title']:
                        continue
                    if ref_details and 'marker' in ref_details and ref_details['marker'] and not temp_ref_details['marker']:
                        continue

                    ref_details = temp_ref_details
                    ref_details['start_line'] = index
                    ref_details['title_string'] = title

                if found_title:
                    break

        if ref_details:
            break

    return ref_details
[ "def", "find_reference_section", "(", "docbody", ")", ":", "ref_details", "=", "None", "title_patterns", "=", "get_reference_section_title_patterns", "(", ")", "# Try to find refs section title:", "for", "title_pattern", "in", "title_patterns", ":", "# Look for title pattern in docbody", "for", "reversed_index", ",", "line", "in", "enumerate", "(", "reversed", "(", "docbody", ")", ")", ":", "title_match", "=", "title_pattern", ".", "match", "(", "line", ")", "if", "title_match", ":", "title", "=", "title_match", ".", "group", "(", "'title'", ")", "index", "=", "len", "(", "docbody", ")", "-", "1", "-", "reversed_index", "temp_ref_details", ",", "found_title", "=", "find_numeration", "(", "docbody", "[", "index", ":", "index", "+", "6", "]", ",", "title", ")", "if", "temp_ref_details", ":", "if", "ref_details", "and", "'title'", "in", "ref_details", "and", "ref_details", "[", "'title'", "]", "and", "not", "temp_ref_details", "[", "'title'", "]", ":", "continue", "if", "ref_details", "and", "'marker'", "in", "ref_details", "and", "ref_details", "[", "'marker'", "]", "and", "not", "temp_ref_details", "[", "'marker'", "]", ":", "continue", "ref_details", "=", "temp_ref_details", "ref_details", "[", "'start_line'", "]", "=", "index", "ref_details", "[", "'title_string'", "]", "=", "title", "if", "found_title", ":", "break", "if", "ref_details", ":", "break", "return", "ref_details" ]
Search in document body for its reference section.

More precisely, find the first line of the reference section. Effectively,
the function starts at the end of a document and works backwards,
line-by-line, looking for the title of a reference section. It stops when
(if) it finds something that it considers to be the first line of a
reference section.
@param docbody: (list) of strings - the full document body.
@return: (dictionary) :
    { 'start_line' : (integer) - index in docbody of 1st reference line,
      'title_string' : (string) - title of the reference section.
      'marker' : (string) - the marker of the first reference line,
      'marker_pattern' : (string) - regexp string used to find the marker,
      'title_marker_same_line' : (integer) - flag to indicate whether the
                                 reference section title was on the same
                                 line as the first reference line's marker
                                 or not. 1 if it was; 0 if not.
    }
    Much of this information is used by later functions to rebuild
    a reference section.
 -- OR --
    (None) - when the reference section could not be found.
[ "Search", "in", "document", "body", "for", "its", "reference", "section", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/find.py#L45-L97
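A small driver sketch (inputs are illustrative, not from the record): feed the document body as a list of lines and read back the fields listed in the docstring.

from refextract.references.find import find_reference_section

docbody = [
    u"Some concluding paragraph of the paper.",
    u"References",
    u"[1] A. Author, J. Phys. 1 (2000) 1.",
    u"[2] B. Author, J. Phys. 2 (2001) 2.",
]
info = find_reference_section(docbody)
if info is not None:
    print(info['start_line'])    # index where the reference section starts
    print(info['title_string'])  # e.g. u'References'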
3,739
inspirehep/refextract
refextract/references/find.py
find_numeration
def find_numeration(docbody, title):
    """Find numeration pattern

    1st: try to find numeration in the title,
    e.g. References [4] Riotto...

    2nd: find the numeration alone in the line after the title,
    e.g. References 1 Riotto

    3rd: find the numeration in the following line,
    e.g. References [1] Riotto
    """
    ref_details, found_title = find_numeration_in_title(docbody, title)
    if not ref_details:
        ref_details, found_title = find_numeration_in_body(docbody)

    return ref_details, found_title
python
def find_numeration(docbody, title):
    """Find numeration pattern

    1st: try to find numeration in the title,
    e.g. References [4] Riotto...

    2nd: find the numeration alone in the line after the title,
    e.g. References 1 Riotto

    3rd: find the numeration in the following line,
    e.g. References [1] Riotto
    """
    ref_details, found_title = find_numeration_in_title(docbody, title)
    if not ref_details:
        ref_details, found_title = find_numeration_in_body(docbody)

    return ref_details, found_title
[ "def", "find_numeration", "(", "docbody", ",", "title", ")", ":", "ref_details", ",", "found_title", "=", "find_numeration_in_title", "(", "docbody", ",", "title", ")", "if", "not", "ref_details", ":", "ref_details", ",", "found_title", "=", "find_numeration_in_body", "(", "docbody", ")", "return", "ref_details", ",", "found_title" ]
Find numeration pattern

1st: try to find numeration in the title,
e.g. References [4] Riotto...

2nd: find the numeration alone in the line after the title,
e.g. References 1 Riotto

3rd: find the numeration in the following line,
e.g. References [1] Riotto
[ "Find", "numeration", "pattern" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/find.py#L181-L203
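A sketch of the fallback order described above, with toy inputs; here 'title' is the matched section-title string.

from refextract.references.find import find_numeration

# docbody starts at the candidate title line -- illustrative input.
docbody = [u"References", u"[1] Riotto, Some Title"]
ref_details, found_title = find_numeration(docbody, u"References")
if ref_details:
    print(ref_details.get('marker_pattern'))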
3,740
inspirehep/refextract
refextract/references/text.py
get_reference_lines
def get_reference_lines(docbody,
                        ref_sect_start_line,
                        ref_sect_end_line,
                        ref_sect_title,
                        ref_line_marker_ptn,
                        title_marker_same_line):
    """After the reference section of a document has been identified, and the
    first and last lines of the reference section have been recorded, this
    function is called to take the reference lines out of the document body.
    The document's reference lines are returned in a list of strings whereby
    each string is a reference line. Before this can be done however, the
    reference section is passed to another function that rebuilds any broken
    reference lines.
    @param docbody: (list) of strings - the entire document body.
    @param ref_sect_start_line: (integer) - the index in docbody of the first
     reference line.
    @param ref_sect_end_line: (integer) - the index in docbody of the last
     reference line.
    @param ref_sect_title: (string) - the title of the reference section
     (e.g. "References").
    @param ref_line_marker_ptn: (string) - the pattern used to match the
     marker for each reference line (e.g., could be used to match lines
     with markers of the form [1], [2], etc.)
    @param title_marker_same_line: (integer) - a flag to indicate whether or
     not the reference section title was on the same line as the first
     reference line's marker.
    @return: (list) of strings. Each string is a reference line, extracted
     from the document.
    """
    start_idx = ref_sect_start_line
    if title_marker_same_line:
        # Title on same line as 1st ref - take title out!
        title_start = docbody[start_idx].find(ref_sect_title)
        if title_start != -1:
            # Set the first line with no title
            docbody[start_idx] = docbody[start_idx][title_start + len(ref_sect_title):]
    elif ref_sect_title is not None:
        # Set the start of the reference section to be after the title line
        start_idx += 1

    if ref_sect_end_line is not None:
        ref_lines = docbody[start_idx:ref_sect_end_line + 1]
    else:
        ref_lines = docbody[start_idx:]

    if ref_sect_title:
        ref_lines = strip_footer(ref_lines, ref_sect_title)
    # Now rebuild reference lines:
    # (Go through each raw reference line, and format them into a set
    # of properly ordered lines based on markers)
    return rebuild_reference_lines(ref_lines, ref_line_marker_ptn)
python
def get_reference_lines(docbody,
                        ref_sect_start_line,
                        ref_sect_end_line,
                        ref_sect_title,
                        ref_line_marker_ptn,
                        title_marker_same_line):
    """After the reference section of a document has been identified, and the
    first and last lines of the reference section have been recorded, this
    function is called to take the reference lines out of the document body.
    The document's reference lines are returned in a list of strings whereby
    each string is a reference line. Before this can be done however, the
    reference section is passed to another function that rebuilds any broken
    reference lines.
    @param docbody: (list) of strings - the entire document body.
    @param ref_sect_start_line: (integer) - the index in docbody of the first
     reference line.
    @param ref_sect_end_line: (integer) - the index in docbody of the last
     reference line.
    @param ref_sect_title: (string) - the title of the reference section
     (e.g. "References").
    @param ref_line_marker_ptn: (string) - the pattern used to match the
     marker for each reference line (e.g., could be used to match lines
     with markers of the form [1], [2], etc.)
    @param title_marker_same_line: (integer) - a flag to indicate whether or
     not the reference section title was on the same line as the first
     reference line's marker.
    @return: (list) of strings. Each string is a reference line, extracted
     from the document.
    """
    start_idx = ref_sect_start_line
    if title_marker_same_line:
        # Title on same line as 1st ref - take title out!
        title_start = docbody[start_idx].find(ref_sect_title)
        if title_start != -1:
            # Set the first line with no title
            docbody[start_idx] = docbody[start_idx][title_start + len(ref_sect_title):]
    elif ref_sect_title is not None:
        # Set the start of the reference section to be after the title line
        start_idx += 1

    if ref_sect_end_line is not None:
        ref_lines = docbody[start_idx:ref_sect_end_line + 1]
    else:
        ref_lines = docbody[start_idx:]

    if ref_sect_title:
        ref_lines = strip_footer(ref_lines, ref_sect_title)
    # Now rebuild reference lines:
    # (Go through each raw reference line, and format them into a set
    # of properly ordered lines based on markers)
    return rebuild_reference_lines(ref_lines, ref_line_marker_ptn)
[ "def", "get_reference_lines", "(", "docbody", ",", "ref_sect_start_line", ",", "ref_sect_end_line", ",", "ref_sect_title", ",", "ref_line_marker_ptn", ",", "title_marker_same_line", ")", ":", "start_idx", "=", "ref_sect_start_line", "if", "title_marker_same_line", ":", "# Title on same line as 1st ref- take title out!", "title_start", "=", "docbody", "[", "start_idx", "]", ".", "find", "(", "ref_sect_title", ")", "if", "title_start", "!=", "-", "1", ":", "# Set the first line with no title", "docbody", "[", "start_idx", "]", "=", "docbody", "[", "start_idx", "]", "[", "title_start", "+", "len", "(", "ref_sect_title", ")", ":", "]", "elif", "ref_sect_title", "is", "not", "None", ":", "# Set the start of the reference section to be after the title line", "start_idx", "+=", "1", "if", "ref_sect_end_line", "is", "not", "None", ":", "ref_lines", "=", "docbody", "[", "start_idx", ":", "ref_sect_end_line", "+", "1", "]", "else", ":", "ref_lines", "=", "docbody", "[", "start_idx", ":", "]", "if", "ref_sect_title", ":", "ref_lines", "=", "strip_footer", "(", "ref_lines", ",", "ref_sect_title", ")", "# Now rebuild reference lines:", "# (Go through each raw reference line, and format them into a set", "# of properly ordered lines based on markers)", "return", "rebuild_reference_lines", "(", "ref_lines", ",", "ref_line_marker_ptn", ")" ]
After the reference section of a document has been identified, and the
first and last lines of the reference section have been recorded, this
function is called to take the reference lines out of the document body.
The document's reference lines are returned in a list of strings whereby
each string is a reference line. Before this can be done however, the
reference section is passed to another function that rebuilds any broken
reference lines.
@param docbody: (list) of strings - the entire document body.
@param ref_sect_start_line: (integer) - the index in docbody of the first
 reference line.
@param ref_sect_end_line: (integer) - the index in docbody of the last
 reference line.
@param ref_sect_title: (string) - the title of the reference section
 (e.g. "References").
@param ref_line_marker_ptn: (string) - the pattern used to match the
 marker for each reference line (e.g., could be used to match lines
 with markers of the form [1], [2], etc.)
@param title_marker_same_line: (integer) - a flag to indicate whether or
 not the reference section title was on the same line as the first
 reference line's marker.
@return: (list) of strings. Each string is a reference line, extracted
 from the document.
[ "After", "the", "reference", "section", "of", "a", "document", "has", "been", "identified", "and", "the", "first", "and", "last", "lines", "of", "the", "reference", "section", "have", "been", "recorded", "this", "function", "is", "called", "to", "take", "the", "reference", "lines", "out", "of", "the", "document", "body", ".", "The", "document", "s", "reference", "lines", "are", "returned", "in", "a", "list", "of", "strings", "whereby", "each", "string", "is", "a", "reference", "line", ".", "Before", "this", "can", "be", "done", "however", "the", "reference", "section", "is", "passed", "to", "another", "function", "that", "rebuilds", "any", "broken", "reference", "lines", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L91-L142
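The dictionary returned by find_reference_section (two records up) maps one-to-one onto these parameters; a wiring sketch, assuming the references run to the end of the document:

from refextract.references.find import find_reference_section
from refextract.references.text import get_reference_lines

docbody = [u"References", u"[1] A. Author, J. Phys. 1 (2000) 1."]
info = find_reference_section(docbody)
if info is not None:
    ref_lines = get_reference_lines(
        docbody,
        info['start_line'],
        len(docbody) - 1,               # assumed: no separate end line detected
        info['title_string'],
        info['marker_pattern'],
        info['title_marker_same_line'],
    )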
3,741
inspirehep/refextract
refextract/references/text.py
match_pagination
def match_pagination(ref_line):
    """Remove footer pagination from reference lines"""
    pattern = ur'\(?\[?(\d{1,4})\]?\)?\.?\s*$'
    re_footer = re.compile(pattern, re.UNICODE)
    match = re_footer.match(ref_line)
    if match:
        return int(match.group(1))
    return None
python
def match_pagination(ref_line):
    """Remove footer pagination from reference lines"""
    pattern = ur'\(?\[?(\d{1,4})\]?\)?\.?\s*$'
    re_footer = re.compile(pattern, re.UNICODE)
    match = re_footer.match(ref_line)
    if match:
        return int(match.group(1))
    return None
[ "def", "match_pagination", "(", "ref_line", ")", ":", "pattern", "=", "ur'\\(?\\[?(\\d{1,4})\\]?\\)?\\.?\\s*$'", "re_footer", "=", "re", ".", "compile", "(", "pattern", ",", "re", ".", "UNICODE", ")", "match", "=", "re_footer", ".", "match", "(", "ref_line", ")", "if", "match", ":", "return", "int", "(", "match", ".", "group", "(", "1", ")", ")", "return", "None" ]
Remove footer pagination from reference lines
[ "Remove", "footer", "pagination", "from", "references", "lines" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L145-L152
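Behaviour implied by the anchored pattern: only a line consisting solely of a (possibly bracketed) one-to-four digit number matches.

from refextract.references.text import match_pagination

print(match_pagination(u"[12]"))                     # 12
print(match_pagination(u"(3)."))                     # 3
print(match_pagination(u"[1] A. Author, J. Phys."))  # None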
3,742
inspirehep/refextract
refextract/references/text.py
strip_footer
def strip_footer(ref_lines, section_title):
    """Remove footer title from reference lines"""
    pattern = ur'\(?\[?\d{0,4}\]?\)?\.?\s*%s\s*$' % re.escape(section_title)
    re_footer = re.compile(pattern, re.UNICODE)
    return [l for l in ref_lines if not re_footer.match(l)]
python
def strip_footer(ref_lines, section_title):
    """Remove footer title from reference lines"""
    pattern = ur'\(?\[?\d{0,4}\]?\)?\.?\s*%s\s*$' % re.escape(section_title)
    re_footer = re.compile(pattern, re.UNICODE)
    return [l for l in ref_lines if not re_footer.match(l)]
[ "def", "strip_footer", "(", "ref_lines", ",", "section_title", ")", ":", "pattern", "=", "ur'\\(?\\[?\\d{0,4}\\]?\\)?\\.?\\s*%s\\s*$'", "%", "re", ".", "escape", "(", "section_title", ")", "re_footer", "=", "re", ".", "compile", "(", "pattern", ",", "re", ".", "UNICODE", ")", "return", "[", "l", "for", "l", "in", "ref_lines", "if", "not", "re_footer", ".", "match", "(", "l", ")", "]" ]
Remove footer title from reference lines
[ "Remove", "footer", "title", "from", "references", "lines" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L155-L159
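Companion sketch: a trailing repeat of the section title, optionally preceded by a page number, is treated as a running footer and dropped.

from refextract.references.text import strip_footer

ref_lines = [
    u"[1] A. Author, J. Phys. 1 (2000) 1.",
    u"12 References",    # page number + repeated section title: stripped
    u"[2] B. Author, J. Phys. 2 (2001) 2.",
]
print(strip_footer(ref_lines, u"References"))  # footer line removed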
3,743
inspirehep/refextract
refextract/references/api.py
extract_references_from_url
def extract_references_from_url(url, headers=None, chunk_size=1024, **kwargs):
    """Extract references from the pdf specified in the url.

    The first parameter is the URL of the file.
    It returns a list of parsed references.

    It raises FullTextNotAvailableError if the URL gives a 404, or
    UnknownDocumentTypeError if it is not a PDF or plain text.

    The standard reference format is: {title} {volume} ({year}) {page}.
    You can change that by passing the reference_format, e.g.:

    >>> extract_references_from_url(path, reference_format="{title},{volume},{page}")

    If you want to also link each reference to some other resource (like a
    record), you can provide a linker_callback function to be executed for
    every reference element found.

    To override KBs for journal names etc., use ``override_kbs_files``:

    >>> extract_references_from_url(path, override_kbs_files={'journals': 'my/path/to.kb'})
    """
    # Get temporary filepath to download to
    filename, filepath = mkstemp(
        suffix=u"_{0}".format(os.path.basename(url)),
    )
    # mkstemp returns an open OS-level file descriptor as its first value;
    # close it here, since the file is reopened for writing below.
    os.close(filename)

    try:
        req = requests.get(
            url=url,
            headers=headers,
            stream=True
        )
        req.raise_for_status()
        with open(filepath, 'wb') as f:
            for chunk in req.iter_content(chunk_size):
                f.write(chunk)
        references = extract_references_from_file(filepath, **kwargs)
    except requests.exceptions.HTTPError:
        raise FullTextNotAvailableError(u"URL not found: '{0}'".format(url)), None, sys.exc_info()[2]
    finally:
        os.remove(filepath)

    return references
python
def extract_references_from_url(url, headers=None, chunk_size=1024, **kwargs):
    """Extract references from the pdf specified in the url.

    The first parameter is the URL of the file.
    It returns a list of parsed references.

    It raises FullTextNotAvailableError if the URL gives a 404, or
    UnknownDocumentTypeError if it is not a PDF or plain text.

    The standard reference format is: {title} {volume} ({year}) {page}.
    You can change that by passing the reference_format, e.g.:

    >>> extract_references_from_url(path, reference_format="{title},{volume},{page}")

    If you want to also link each reference to some other resource (like a
    record), you can provide a linker_callback function to be executed for
    every reference element found.

    To override KBs for journal names etc., use ``override_kbs_files``:

    >>> extract_references_from_url(path, override_kbs_files={'journals': 'my/path/to.kb'})
    """
    # Get temporary filepath to download to
    filename, filepath = mkstemp(
        suffix=u"_{0}".format(os.path.basename(url)),
    )
    # mkstemp returns an open OS-level file descriptor as its first value;
    # close it here, since the file is reopened for writing below.
    os.close(filename)

    try:
        req = requests.get(
            url=url,
            headers=headers,
            stream=True
        )
        req.raise_for_status()
        with open(filepath, 'wb') as f:
            for chunk in req.iter_content(chunk_size):
                f.write(chunk)
        references = extract_references_from_file(filepath, **kwargs)
    except requests.exceptions.HTTPError:
        raise FullTextNotAvailableError(u"URL not found: '{0}'".format(url)), None, sys.exc_info()[2]
    finally:
        os.remove(filepath)

    return references
[ "def", "extract_references_from_url", "(", "url", ",", "headers", "=", "None", ",", "chunk_size", "=", "1024", ",", "*", "*", "kwargs", ")", ":", "# Get temporary filepath to download to", "filename", ",", "filepath", "=", "mkstemp", "(", "suffix", "=", "u\"_{0}\"", ".", "format", "(", "os", ".", "path", ".", "basename", "(", "url", ")", ")", ",", ")", "os", ".", "close", "(", "filename", ")", "try", ":", "req", "=", "requests", ".", "get", "(", "url", "=", "url", ",", "headers", "=", "headers", ",", "stream", "=", "True", ")", "req", ".", "raise_for_status", "(", ")", "with", "open", "(", "filepath", ",", "'wb'", ")", "as", "f", ":", "for", "chunk", "in", "req", ".", "iter_content", "(", "chunk_size", ")", ":", "f", ".", "write", "(", "chunk", ")", "references", "=", "extract_references_from_file", "(", "filepath", ",", "*", "*", "kwargs", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", ":", "raise", "FullTextNotAvailableError", "(", "u\"URL not found: '{0}'\"", ".", "format", "(", "url", ")", ")", ",", "None", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", "finally", ":", "os", ".", "remove", "(", "filepath", ")", "return", "references" ]
Extract references from the pdf specified in the url.

The first parameter is the URL of the file.
It returns a list of parsed references.

It raises FullTextNotAvailableError if the URL gives a 404, or
UnknownDocumentTypeError if it is not a PDF or plain text.

The standard reference format is: {title} {volume} ({year}) {page}.
You can change that by passing the reference_format, e.g.:

>>> extract_references_from_url(path, reference_format="{title},{volume},{page}")

If you want to also link each reference to some other resource (like a
record), you can provide a linker_callback function to be executed for
every reference element found.

To override KBs for journal names etc., use ``override_kbs_files``:

>>> extract_references_from_url(path, override_kbs_files={'journals': 'my/path/to.kb'})
[ "Extract", "references", "from", "the", "pdf", "specified", "in", "the", "url", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/api.py#L54-L99
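Top-level usage following the docstring; the URL is illustrative, and the flat import path assumes the function is re-exported by the package (otherwise import it from refextract.references.api).

from refextract import extract_references_from_url

references = extract_references_from_url(
    'https://arxiv.org/pdf/1503.07589',  # illustrative URL
    headers={'User-Agent': 'refextract-example'},
)
for ref in references:
    print(ref.get('raw_ref'))  # raw_ref is set per reference (see record 3,748)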
3,744
inspirehep/refextract
refextract/references/api.py
extract_references_from_file
def extract_references_from_file(path,
                                 recid=None,
                                 reference_format=u"{title} {volume} ({year}) {page}",
                                 linker_callback=None,
                                 override_kbs_files=None):
    """Extract references from a local pdf file.

    The first parameter is the path to the file.
    It returns a list of parsed references.

    It raises FullTextNotAvailableError if the file does not exist, or
    UnknownDocumentTypeError if it is not a PDF or plain text.

    The standard reference format is: {title} {volume} ({year}) {page}.
    You can change that by passing the reference_format, e.g.:

    >>> extract_references_from_file(path, reference_format=u"{title},{volume},{page}")

    If you want to also link each reference to some other resource (like a
    record), you can provide a linker_callback function to be executed for
    every reference element found.

    To override KBs for journal names etc., use ``override_kbs_files``:

    >>> extract_references_from_file(path, override_kbs_files={'journals': 'my/path/to.kb'})
    """
    if not os.path.isfile(path):
        raise FullTextNotAvailableError(u"File not found: '{0}'".format(path))

    docbody = get_plaintext_document_body(path)
    reflines, dummy, dummy = extract_references_from_fulltext(docbody)
    if not reflines:
        docbody = get_plaintext_document_body(path, keep_layout=True)
        reflines, dummy, dummy = extract_references_from_fulltext(docbody)

    parsed_refs, stats = parse_references(
        reflines,
        recid=recid,
        reference_format=reference_format,
        linker_callback=linker_callback,
        override_kbs_files=override_kbs_files,
    )

    if magic.from_file(path, mime=True) == "application/pdf":
        texkeys = extract_texkeys_from_pdf(path)
        if len(texkeys) == len(parsed_refs):
            parsed_refs = [dict(ref, texkey=[key]) for ref, key in izip(parsed_refs, texkeys)]

    return parsed_refs
python
def extract_references_from_file(path,
                                 recid=None,
                                 reference_format=u"{title} {volume} ({year}) {page}",
                                 linker_callback=None,
                                 override_kbs_files=None):
    """Extract references from a local pdf file.

    The first parameter is the path to the file.
    It returns a list of parsed references.

    It raises FullTextNotAvailableError if the file does not exist, or
    UnknownDocumentTypeError if it is not a PDF or plain text.

    The standard reference format is: {title} {volume} ({year}) {page}.
    You can change that by passing the reference_format, e.g.:

    >>> extract_references_from_file(path, reference_format=u"{title},{volume},{page}")

    If you want to also link each reference to some other resource (like a
    record), you can provide a linker_callback function to be executed for
    every reference element found.

    To override KBs for journal names etc., use ``override_kbs_files``:

    >>> extract_references_from_file(path, override_kbs_files={'journals': 'my/path/to.kb'})
    """
    if not os.path.isfile(path):
        raise FullTextNotAvailableError(u"File not found: '{0}'".format(path))

    docbody = get_plaintext_document_body(path)
    reflines, dummy, dummy = extract_references_from_fulltext(docbody)
    if not reflines:
        docbody = get_plaintext_document_body(path, keep_layout=True)
        reflines, dummy, dummy = extract_references_from_fulltext(docbody)

    parsed_refs, stats = parse_references(
        reflines,
        recid=recid,
        reference_format=reference_format,
        linker_callback=linker_callback,
        override_kbs_files=override_kbs_files,
    )

    if magic.from_file(path, mime=True) == "application/pdf":
        texkeys = extract_texkeys_from_pdf(path)
        if len(texkeys) == len(parsed_refs):
            parsed_refs = [dict(ref, texkey=[key]) for ref, key in izip(parsed_refs, texkeys)]

    return parsed_refs
[ "def", "extract_references_from_file", "(", "path", ",", "recid", "=", "None", ",", "reference_format", "=", "u\"{title} {volume} ({year}) {page}\"", ",", "linker_callback", "=", "None", ",", "override_kbs_files", "=", "None", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "raise", "FullTextNotAvailableError", "(", "u\"File not found: '{0}'\"", ".", "format", "(", "path", ")", ")", "docbody", "=", "get_plaintext_document_body", "(", "path", ")", "reflines", ",", "dummy", ",", "dummy", "=", "extract_references_from_fulltext", "(", "docbody", ")", "if", "not", "reflines", ":", "docbody", "=", "get_plaintext_document_body", "(", "path", ",", "keep_layout", "=", "True", ")", "reflines", ",", "dummy", ",", "dummy", "=", "extract_references_from_fulltext", "(", "docbody", ")", "parsed_refs", ",", "stats", "=", "parse_references", "(", "reflines", ",", "recid", "=", "recid", ",", "reference_format", "=", "reference_format", ",", "linker_callback", "=", "linker_callback", ",", "override_kbs_files", "=", "override_kbs_files", ",", ")", "if", "magic", ".", "from_file", "(", "path", ",", "mime", "=", "True", ")", "==", "\"application/pdf\"", ":", "texkeys", "=", "extract_texkeys_from_pdf", "(", "path", ")", "if", "len", "(", "texkeys", ")", "==", "len", "(", "parsed_refs", ")", ":", "parsed_refs", "=", "[", "dict", "(", "ref", ",", "texkey", "=", "[", "key", "]", ")", "for", "ref", ",", "key", "in", "izip", "(", "parsed_refs", ",", "texkeys", ")", "]", "return", "parsed_refs" ]
Extract references from a local pdf file.

The first parameter is the path to the file.
It returns a list of parsed references.

It raises FullTextNotAvailableError if the file does not exist, or
UnknownDocumentTypeError if it is not a PDF or plain text.

The standard reference format is: {title} {volume} ({year}) {page}.
You can change that by passing the reference_format, e.g.:

>>> extract_references_from_file(path, reference_format=u"{title},{volume},{page}")

If you want to also link each reference to some other resource (like a
record), you can provide a linker_callback function to be executed for
every reference element found.

To override KBs for journal names etc., use ``override_kbs_files``:

>>> extract_references_from_file(path, override_kbs_files={'journals': 'my/path/to.kb'})
[ "Extract", "references", "from", "a", "local", "pdf", "file", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/api.py#L102-L151
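The same call against a local file, exercising the overrides shown in the docstring (path and KB location are placeholders):

from refextract import extract_references_from_file

references = extract_references_from_file(
    'some-paper.pdf',                                  # placeholder path
    reference_format=u"{title},{volume},{page}",
    override_kbs_files={'journals': 'my/path/to.kb'},  # placeholder KB file
)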
3,745
inspirehep/refextract
refextract/references/api.py
extract_references_from_string
def extract_references_from_string(source,
                                   is_only_references=True,
                                   recid=None,
                                   reference_format="{title} {volume} ({year}) {page}",
                                   linker_callback=None,
                                   override_kbs_files=None):
    """Extract references from a raw string.

    The first parameter is the raw string to parse.
    It returns a list of parsed references.

    If the string does not only contain references, improve accuracy by
    specifying ``is_only_references=False``.

    The standard reference format is: {title} {volume} ({year}) {page}.
    You can change that by passing the reference_format, e.g.:

    >>> extract_references_from_string(path, reference_format="{title},{volume},{page}")

    If you want to also link each reference to some other resource (like a
    record), you can provide a linker_callback function to be executed for
    every reference element found.

    To override KBs for journal names etc., use ``override_kbs_files``:

    >>> extract_references_from_string(path, override_kbs_files={'journals': 'my/path/to.kb'})
    """
    docbody = source.split('\n')
    if not is_only_references:
        reflines, dummy, dummy = extract_references_from_fulltext(docbody)
    else:
        refs_info = get_reference_section_beginning(docbody)
        if not refs_info:
            refs_info, dummy = find_numeration_in_body(docbody)
            refs_info['start_line'] = 0
            refs_info['end_line'] = len(docbody) - 1

        reflines = rebuild_reference_lines(
            docbody, refs_info['marker_pattern'])
    parsed_refs, stats = parse_references(
        reflines,
        recid=recid,
        reference_format=reference_format,
        linker_callback=linker_callback,
        override_kbs_files=override_kbs_files,
    )
    return parsed_refs
python
def extract_references_from_string(source,
                                   is_only_references=True,
                                   recid=None,
                                   reference_format="{title} {volume} ({year}) {page}",
                                   linker_callback=None,
                                   override_kbs_files=None):
    """Extract references from a raw string.

    The first parameter is the raw string to parse.
    It returns a list of parsed references.

    If the string does not only contain references, improve accuracy by
    specifying ``is_only_references=False``.

    The standard reference format is: {title} {volume} ({year}) {page}.
    You can change that by passing the reference_format, e.g.:

    >>> extract_references_from_string(path, reference_format="{title},{volume},{page}")

    If you want to also link each reference to some other resource (like a
    record), you can provide a linker_callback function to be executed for
    every reference element found.

    To override KBs for journal names etc., use ``override_kbs_files``:

    >>> extract_references_from_string(path, override_kbs_files={'journals': 'my/path/to.kb'})
    """
    docbody = source.split('\n')
    if not is_only_references:
        reflines, dummy, dummy = extract_references_from_fulltext(docbody)
    else:
        refs_info = get_reference_section_beginning(docbody)
        if not refs_info:
            refs_info, dummy = find_numeration_in_body(docbody)
            refs_info['start_line'] = 0
            refs_info['end_line'] = len(docbody) - 1

        reflines = rebuild_reference_lines(
            docbody, refs_info['marker_pattern'])
    parsed_refs, stats = parse_references(
        reflines,
        recid=recid,
        reference_format=reference_format,
        linker_callback=linker_callback,
        override_kbs_files=override_kbs_files,
    )
    return parsed_refs
[ "def", "extract_references_from_string", "(", "source", ",", "is_only_references", "=", "True", ",", "recid", "=", "None", ",", "reference_format", "=", "\"{title} {volume} ({year}) {page}\"", ",", "linker_callback", "=", "None", ",", "override_kbs_files", "=", "None", ")", ":", "docbody", "=", "source", ".", "split", "(", "'\\n'", ")", "if", "not", "is_only_references", ":", "reflines", ",", "dummy", ",", "dummy", "=", "extract_references_from_fulltext", "(", "docbody", ")", "else", ":", "refs_info", "=", "get_reference_section_beginning", "(", "docbody", ")", "if", "not", "refs_info", ":", "refs_info", ",", "dummy", "=", "find_numeration_in_body", "(", "docbody", ")", "refs_info", "[", "'start_line'", "]", "=", "0", "refs_info", "[", "'end_line'", "]", "=", "len", "(", "docbody", ")", "-", "1", ",", "reflines", "=", "rebuild_reference_lines", "(", "docbody", ",", "refs_info", "[", "'marker_pattern'", "]", ")", "parsed_refs", ",", "stats", "=", "parse_references", "(", "reflines", ",", "recid", "=", "recid", ",", "reference_format", "=", "reference_format", ",", "linker_callback", "=", "linker_callback", ",", "override_kbs_files", "=", "override_kbs_files", ",", ")", "return", "parsed_refs" ]
Extract references from a raw string.

The first parameter is the raw string to parse.
It returns a list of parsed references.

If the string does not only contain references, improve accuracy by
specifying ``is_only_references=False``.

The standard reference format is: {title} {volume} ({year}) {page}.
You can change that by passing the reference_format, e.g.:

>>> extract_references_from_string(path, reference_format="{title},{volume},{page}")

If you want to also link each reference to some other resource (like a
record), you can provide a linker_callback function to be executed for
every reference element found.

To override KBs for journal names etc., use ``override_kbs_files``:

>>> extract_references_from_string(path, override_kbs_files={'journals': 'my/path/to.kb'})
[ "Extract", "references", "from", "a", "raw", "string", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/api.py#L154-L201
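Feeding raw reference text directly; with is_only_references left at its default of True, the input is assumed to contain only the reference section:

from refextract import extract_references_from_string

source = (
    u"[1] A. Einstein, Ann. Phys. 17 (1905) 891.\n"
    u"[2] P. A. M. Dirac, Proc. R. Soc. A 117 (1928) 610."
)
references = extract_references_from_string(source)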
3,746
inspirehep/refextract
refextract/references/api.py
extract_journal_reference
def extract_journal_reference(line, override_kbs_files=None):
    """Extract the journal reference from a string.

    Parses the line for specific journal information and returns the
    first element of type 'JOURNAL' found, if any.
    """
    kbs = get_kbs(custom_kbs_files=override_kbs_files)
    references, dummy_m, dummy_c, dummy_co = parse_reference_line(line, kbs)
    for elements in references:
        for el in elements:
            if el['type'] == 'JOURNAL':
                return el
python
def extract_journal_reference(line, override_kbs_files=None):
    """Extract the journal reference from a string.

    Parses the line for specific journal information and returns the
    first element of type 'JOURNAL' found, if any.
    """
    kbs = get_kbs(custom_kbs_files=override_kbs_files)
    references, dummy_m, dummy_c, dummy_co = parse_reference_line(line, kbs)
    for elements in references:
        for el in elements:
            if el['type'] == 'JOURNAL':
                return el
[ "def", "extract_journal_reference", "(", "line", ",", "override_kbs_files", "=", "None", ")", ":", "kbs", "=", "get_kbs", "(", "custom_kbs_files", "=", "override_kbs_files", ")", "references", ",", "dummy_m", ",", "dummy_c", ",", "dummy_co", "=", "parse_reference_line", "(", "line", ",", "kbs", ")", "for", "elements", "in", "references", ":", "for", "el", "in", "elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", ":", "return", "el" ]
Extract the journal reference from a string.

Parses the line for specific journal information and returns the
first element of type 'JOURNAL' found, if any.
[ "Extract", "the", "journal", "reference", "from", "string", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/api.py#L204-L216
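A one-line citation in, the first element tagged 'JOURNAL' out (the input string is illustrative):

from refextract import extract_journal_reference

element = extract_journal_reference(u"Phys. Rev. Lett. 116 (2016) 061102")
if element:
    print(element['type'])  # 'JOURNAL'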
3,747
inspirehep/refextract
refextract/references/record.py
build_references
def build_references(citations, reference_format=False):
    """Build a list of reference dictionaries from a references list."""
    # Now, run the method which will take as input:
    # 1. A list of lists of dictionaries, where each dictionary is a piece
    # of citation information corresponding to a tag in the citation.
    # 2. The line marker for this entire citation line (multiple citation
    # 'finds' inside a single citation will use the same marker value)
    # The resulting xml line will be a properly marked up form of the
    # citation. It will take into account authors to try and split up
    # references which should be read as two SEPARATE ones.
    return [c for citation_elements in citations
            for elements in citation_elements['elements']
            for c in build_reference_fields(elements,
                                            citation_elements['line_marker'],
                                            citation_elements['raw_ref'],
                                            reference_format)]
python
def build_references(citations, reference_format=False):
    """Build a list of reference dictionaries from a references list."""
    # Now, run the method which will take as input:
    # 1. A list of lists of dictionaries, where each dictionary is a piece
    # of citation information corresponding to a tag in the citation.
    # 2. The line marker for this entire citation line (multiple citation
    # 'finds' inside a single citation will use the same marker value)
    # The resulting xml line will be a properly marked up form of the
    # citation. It will take into account authors to try and split up
    # references which should be read as two SEPARATE ones.
    return [c for citation_elements in citations
            for elements in citation_elements['elements']
            for c in build_reference_fields(elements,
                                            citation_elements['line_marker'],
                                            citation_elements['raw_ref'],
                                            reference_format)]
[ "def", "build_references", "(", "citations", ",", "reference_format", "=", "False", ")", ":", "# Now, run the method which will take as input:", "# 1. A list of lists of dictionaries, where each dictionary is a piece", "# of citation information corresponding to a tag in the citation.", "# 2. The line marker for this entire citation line (mulitple citation", "# 'finds' inside a single citation will use the same marker value)", "# The resulting xml line will be a properly marked up form of the", "# citation. It will take into account authors to try and split up", "# references which should be read as two SEPARATE ones.", "return", "[", "c", "for", "citation_elements", "in", "citations", "for", "elements", "in", "citation_elements", "[", "'elements'", "]", "for", "c", "in", "build_reference_fields", "(", "elements", ",", "citation_elements", "[", "'line_marker'", "]", ",", "citation_elements", "[", "'raw_ref'", "]", ",", "reference_format", ")", "]" ]
Build a list of reference dictionaries from a references list.
[ "Build", "list", "of", "reference", "dictionaries", "from", "a", "references", "list" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/record.py#L31-L47
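The expected input shape, inferred from the comprehension above and from build_reference_fields in the next record; the element dictionaries here are illustrative, not a documented schema:

from refextract.references.record import build_references

citations = [{
    'line_marker': u'[1]',
    'raw_ref': u'[1] Some raw reference line',
    'elements': [[  # one inner list of tagged pieces per parsed citation
        {'type': 'DOI', 'doi_string': u'10.1000/example', 'misc_txt': u''},
    ]],
}]
fields = build_references(citations)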
3,748
inspirehep/refextract
refextract/references/record.py
build_reference_fields
def build_reference_fields(citation_elements, line_marker, raw_ref, reference_format):
    """Create the final representation of the reference information.

    @param citation_elements: (list) an ordered list of dictionary elements,
                              with each element corresponding to a found
                              piece of information from a reference line.
    @param line_marker: (string) The line marker for this single reference
                        line (e.g. [19])
    @param raw_ref: (string) The raw string of this line
    @return reference_fields: (list) A list of one dictionary containing the
                              reference elements
    """
    # Begin the datafield element
    current_field = create_reference_field(line_marker)
    current_field['raw_ref'] = [raw_ref]

    reference_fields = [current_field]

    for element in citation_elements:
        # Before going on to checking 'what' the next element is,
        # handle misc text and semi-colons
        # Multiple misc text subfields will be compressed later
        # This will also be the only part of the code that deals with MISC
        # tag_typed elements
        misc_txt = element['misc_txt']
        if misc_txt.strip("., [](){}"):
            misc_txt = misc_txt.lstrip('])} ,.').rstrip('[({ ,.')
            add_subfield(current_field, 'misc', misc_txt)

        # Now handle the type-dependent actions
        # JOURNAL
        if element['type'] == "JOURNAL":
            add_journal_subfield(current_field, element, reference_format)
        # REPORT NUMBER
        elif element['type'] == "REPORTNUMBER":
            add_subfield(current_field, 'reportnumber', element['report_num'])
        # URL
        elif element['type'] == "URL":
            if element['url_string'] == element['url_desc']:
                # Build the datafield for the URL segment of the reference
                # line:
                add_subfield(current_field, 'url', element['url_string'])
            # Else, in the case that the url string and the description differ
            # in some way, include them both
            else:
                add_subfield(current_field, 'url', element['url_string'])
                add_subfield(current_field, 'urldesc', element['url_desc'])
        # DOI
        elif element['type'] == "DOI":
            add_subfield(current_field, 'doi', 'doi:' + element['doi_string'])
        # HDL
        elif element['type'] == "HDL":
            add_subfield(current_field, 'hdl', 'hdl:' + element['hdl_id'])
        # AUTHOR
        elif element['type'] == "AUTH":
            value = element['auth_txt']
            if element['auth_type'] == 'incl':
                value = "(%s)" % value
            add_subfield(current_field, 'author', value)
        elif element['type'] == "QUOTED":
            add_subfield(current_field, 'title', element['title'])
        elif element['type'] == "ISBN":
            add_subfield(current_field, 'isbn', element['ISBN'])
        elif element['type'] == "BOOK":
            add_subfield(current_field, 'title', element['title'])
        elif element['type'] == "PUBLISHER":
            add_subfield(current_field, 'publisher', element['publisher'])
        elif element['type'] == "YEAR":
            add_subfield(current_field, 'year', element['year'])
        elif element['type'] == "COLLABORATION":
            add_subfield(current_field, 'collaboration', element['collaboration'])
        elif element['type'] == "RECID":
            add_subfield(current_field, 'recid', str(element['recid']))

    return reference_fields
python
def build_reference_fields(citation_elements, line_marker, raw_ref, reference_format): """Create the final representation of the reference information. @param citation_elements: (list) an ordered list of dictionary elements, with each element corresponding to a found piece of information from a reference line. @param line_marker: (string) The line marker for this single reference line (e.g. [19]) @param raw_ref: (string) The raw string of this line @return reference_fields: (list) A list of one dictionary containing the reference elements """ # Begin the datafield element current_field = create_reference_field(line_marker) current_field['raw_ref'] = [raw_ref] reference_fields = [current_field] for element in citation_elements: # Before going onto checking 'what' the next element is, # handle misc text and semi-colons # Multiple misc text subfields will be compressed later # This will also be the only part of the code that deals with MISC # tag_typed elements misc_txt = element['misc_txt'] if misc_txt.strip("., [](){}"): misc_txt = misc_txt.lstrip('])} ,.').rstrip('[({ ,.') add_subfield(current_field, 'misc', misc_txt) # Now handle the type dependent actions # JOURNAL if element['type'] == "JOURNAL": add_journal_subfield(current_field, element, reference_format) # REPORT NUMBER elif element['type'] == "REPORTNUMBER": add_subfield(current_field, 'reportnumber', element['report_num']) # URL elif element['type'] == "URL": if element['url_string'] == element['url_desc']: # Build the datafield for the URL segment of the reference # line: add_subfield(current_field, 'url', element['url_string']) # Else, in the case that the url string and the description differ # in some way, include them both else: add_subfield(current_field, 'url', element['url_string']) add_subfield(current_field, 'urldesc', element['url_desc']) # DOI elif element['type'] == "DOI": add_subfield(current_field, 'doi', 'doi:' + element['doi_string']) # HDL elif element['type'] == "HDL": add_subfield(current_field, 'hdl', 'hdl:' + element['hdl_id']) # AUTHOR elif element['type'] == "AUTH": value = element['auth_txt'] if element['auth_type'] == 'incl': value = "(%s)" % value add_subfield(current_field, 'author', value) elif element['type'] == "QUOTED": add_subfield(current_field, 'title', element['title']) elif element['type'] == "ISBN": add_subfield(current_field, 'isbn', element['ISBN']) elif element['type'] == "BOOK": add_subfield(current_field, 'title', element['title']) elif element['type'] == "PUBLISHER": add_subfield(current_field, 'publisher', element['publisher']) elif element['type'] == "YEAR": add_subfield(current_field, 'year', element['year']) elif element['type'] == "COLLABORATION": add_subfield(current_field, 'collaboration', element['collaboration']) elif element['type'] == "RECID": add_subfield(current_field, 'recid', str(element['recid'])) return reference_fields
[ "def", "build_reference_fields", "(", "citation_elements", ",", "line_marker", ",", "raw_ref", ",", "reference_format", ")", ":", "# Begin the datafield element", "current_field", "=", "create_reference_field", "(", "line_marker", ")", "current_field", "[", "'raw_ref'", "]", "=", "[", "raw_ref", "]", "reference_fields", "=", "[", "current_field", "]", "for", "element", "in", "citation_elements", ":", "# Before going onto checking 'what' the next element is,", "# handle misc text and semi-colons", "# Multiple misc text subfields will be compressed later", "# This will also be the only part of the code that deals with MISC", "# tag_typed elements", "misc_txt", "=", "element", "[", "'misc_txt'", "]", "if", "misc_txt", ".", "strip", "(", "\"., [](){}\"", ")", ":", "misc_txt", "=", "misc_txt", ".", "lstrip", "(", "'])} ,.'", ")", ".", "rstrip", "(", "'[({ ,.'", ")", "add_subfield", "(", "current_field", ",", "'misc'", ",", "misc_txt", ")", "# Now handle the type dependent actions", "# JOURNAL", "if", "element", "[", "'type'", "]", "==", "\"JOURNAL\"", ":", "add_journal_subfield", "(", "current_field", ",", "element", ",", "reference_format", ")", "# REPORT NUMBER", "elif", "element", "[", "'type'", "]", "==", "\"REPORTNUMBER\"", ":", "add_subfield", "(", "current_field", ",", "'reportnumber'", ",", "element", "[", "'report_num'", "]", ")", "# URL", "elif", "element", "[", "'type'", "]", "==", "\"URL\"", ":", "if", "element", "[", "'url_string'", "]", "==", "element", "[", "'url_desc'", "]", ":", "# Build the datafield for the URL segment of the reference", "# line:", "add_subfield", "(", "current_field", ",", "'url'", ",", "element", "[", "'url_string'", "]", ")", "# Else, in the case that the url string and the description differ", "# in some way, include them both", "else", ":", "add_subfield", "(", "current_field", ",", "'url'", ",", "element", "[", "'url_string'", "]", ")", "add_subfield", "(", "current_field", ",", "'urldesc'", ",", "element", "[", "'url_desc'", "]", ")", "# DOI", "elif", "element", "[", "'type'", "]", "==", "\"DOI\"", ":", "add_subfield", "(", "current_field", ",", "'doi'", ",", "'doi:'", "+", "element", "[", "'doi_string'", "]", ")", "# HDL", "elif", "element", "[", "'type'", "]", "==", "\"HDL\"", ":", "add_subfield", "(", "current_field", ",", "'hdl'", ",", "'hdl:'", "+", "element", "[", "'hdl_id'", "]", ")", "# AUTHOR", "elif", "element", "[", "'type'", "]", "==", "\"AUTH\"", ":", "value", "=", "element", "[", "'auth_txt'", "]", "if", "element", "[", "'auth_type'", "]", "==", "'incl'", ":", "value", "=", "\"(%s)\"", "%", "value", "add_subfield", "(", "current_field", ",", "'author'", ",", "value", ")", "elif", "element", "[", "'type'", "]", "==", "\"QUOTED\"", ":", "add_subfield", "(", "current_field", ",", "'title'", ",", "element", "[", "'title'", "]", ")", "elif", "element", "[", "'type'", "]", "==", "\"ISBN\"", ":", "add_subfield", "(", "current_field", ",", "'isbn'", ",", "element", "[", "'ISBN'", "]", ")", "elif", "element", "[", "'type'", "]", "==", "\"BOOK\"", ":", "add_subfield", "(", "current_field", ",", "'title'", ",", "element", "[", "'title'", "]", ")", "elif", "element", "[", "'type'", "]", "==", "\"PUBLISHER\"", ":", "add_subfield", "(", "current_field", ",", "'publisher'", ",", "element", "[", "'publisher'", "]", ")", "elif", "element", "[", "'type'", "]", "==", "\"YEAR\"", ":", "add_subfield", "(", "current_field", ",", "'year'", ",", "element", "[", "'year'", "]", ")", "elif", "element", "[", "'type'", "]", "==", "\"COLLABORATION\"", ":", 
"add_subfield", "(", "current_field", ",", "'collaboration'", ",", "element", "[", "'collaboration'", "]", ")", "elif", "element", "[", "'type'", "]", "==", "\"RECID\"", ":", "add_subfield", "(", "current_field", ",", "'recid'", ",", "str", "(", "element", "[", "'recid'", "]", ")", ")", "return", "reference_fields" ]
Create the final representation of the reference information. @param citation_elements: (list) an ordered list of dictionary elements, with each element corresponding to a found piece of information from a reference line. @param line_marker: (string) The line marker for this single reference line (e.g. [19]) @param raw_ref: (string) The raw string of this line @return reference_fields: (list) A list of one dictionary containing the reference elements
[ "Create", "the", "final", "representation", "of", "the", "reference", "information", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/record.py#L71-L161
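A runnable sketch of the accumulation pattern above. The helpers create_reference_field and add_subfield are defined elsewhere in record.py, so the stand-ins below are assumptions about their shape, not the module's actual code:

def create_reference_field(line_marker):
    # Assumed shape: a field dictionary keyed by subfield name.
    return {'linemarker': [line_marker]}

def add_subfield(field, name, value):
    # Assumed shape: subfield values accumulate as lists under their name.
    field.setdefault(name, []).append(value)

current_field = create_reference_field('[19]')
current_field['raw_ref'] = ['[19] J. Doe, hep-ph/9901234, p. 7.']
element = {'type': 'REPORTNUMBER', 'report_num': 'hep-ph/9901234',
           'misc_txt': ' J. Doe, '}

# Mirrors the misc-text cleanup and one branch of the type dispatch:
misc_txt = element['misc_txt']
if misc_txt.strip('., [](){}'):
    add_subfield(current_field, 'misc',
                 misc_txt.lstrip('])} ,.').rstrip('[({ ,.'))
if element['type'] == 'REPORTNUMBER':
    add_subfield(current_field, 'reportnumber', element['report_num'])
print(current_field)
# -> {'linemarker': ['[19]'], 'raw_ref': ['[19] J. Doe, hep-ph/9901234, p. 7.'],
#     'misc': ['J. Doe'], 'reportnumber': ['hep-ph/9901234']}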
3,749
inspirehep/refextract
refextract/references/pdf.py
extract_texkeys_from_pdf
def extract_texkeys_from_pdf(pdf_file): """ Extract the texkeys from the given PDF file This is done by looking up the named destinations in the PDF @param pdf_file: path to a PDF @return: list of all texkeys found in the PDF """ with open(pdf_file, 'rb') as pdf_stream: try: pdf = PdfFileReader(pdf_stream, strict=False) destinations = pdf.getNamedDestinations() except Exception: LOGGER.debug(u"PDF: Internal PyPDF2 error, no TeXkeys returned.") return [] # not all named destinations point to references refs = [dest for dest in destinations.iteritems() if re_reference_in_dest.match(dest[0])] try: if _destinations_in_two_columns(pdf, refs): LOGGER.debug(u"PDF: Using two-column layout") def sortfunc(dest_couple): return _destination_position(pdf, dest_couple[1]) else: LOGGER.debug(u"PDF: Using single-column layout") def sortfunc(dest_couple): (page, _, ypos, xpos) = _destination_position( pdf, dest_couple[1]) return (page, ypos, xpos) refs.sort(key=sortfunc) # extract the TeXkey from the named destination name return [re_reference_in_dest.match(destname).group(1) for (destname, _) in refs] except Exception: LOGGER.debug(u"PDF: Impossible to determine layout, no TeXkeys returned") return []
python
def extract_texkeys_from_pdf(pdf_file): """ Extract the texkeys from the given PDF file This is done by looking up the named destinations in the PDF @param pdf_file: path to a PDF @return: list of all texkeys found in the PDF """ with open(pdf_file, 'rb') as pdf_stream: try: pdf = PdfFileReader(pdf_stream, strict=False) destinations = pdf.getNamedDestinations() except Exception: LOGGER.debug(u"PDF: Internal PyPDF2 error, no TeXkeys returned.") return [] # not all named destinations point to references refs = [dest for dest in destinations.iteritems() if re_reference_in_dest.match(dest[0])] try: if _destinations_in_two_columns(pdf, refs): LOGGER.debug(u"PDF: Using two-column layout") def sortfunc(dest_couple): return _destination_position(pdf, dest_couple[1]) else: LOGGER.debug(u"PDF: Using single-column layout") def sortfunc(dest_couple): (page, _, ypos, xpos) = _destination_position( pdf, dest_couple[1]) return (page, ypos, xpos) refs.sort(key=sortfunc) # extract the TeXkey from the named destination name return [re_reference_in_dest.match(destname).group(1) for (destname, _) in refs] except Exception: LOGGER.debug(u"PDF: Impossible to determine layout, no TeXkeys returned") return []
[ "def", "extract_texkeys_from_pdf", "(", "pdf_file", ")", ":", "with", "open", "(", "pdf_file", ",", "'rb'", ")", "as", "pdf_stream", ":", "try", ":", "pdf", "=", "PdfFileReader", "(", "pdf_stream", ",", "strict", "=", "False", ")", "destinations", "=", "pdf", ".", "getNamedDestinations", "(", ")", "except", "Exception", ":", "LOGGER", ".", "debug", "(", "u\"PDF: Internal PyPDF2 error, no TeXkeys returned.\"", ")", "return", "[", "]", "# not all named destinations point to references", "refs", "=", "[", "dest", "for", "dest", "in", "destinations", ".", "iteritems", "(", ")", "if", "re_reference_in_dest", ".", "match", "(", "dest", "[", "0", "]", ")", "]", "try", ":", "if", "_destinations_in_two_columns", "(", "pdf", ",", "refs", ")", ":", "LOGGER", ".", "debug", "(", "u\"PDF: Using two-column layout\"", ")", "def", "sortfunc", "(", "dest_couple", ")", ":", "return", "_destination_position", "(", "pdf", ",", "dest_couple", "[", "1", "]", ")", "else", ":", "LOGGER", ".", "debug", "(", "u\"PDF: Using single-column layout\"", ")", "def", "sortfunc", "(", "dest_couple", ")", ":", "(", "page", ",", "_", ",", "ypos", ",", "xpos", ")", "=", "_destination_position", "(", "pdf", ",", "dest_couple", "[", "1", "]", ")", "return", "(", "page", ",", "ypos", ",", "xpos", ")", "refs", ".", "sort", "(", "key", "=", "sortfunc", ")", "# extract the TeXkey from the named destination name", "return", "[", "re_reference_in_dest", ".", "match", "(", "destname", ")", ".", "group", "(", "1", ")", "for", "(", "destname", ",", "_", ")", "in", "refs", "]", "except", "Exception", ":", "LOGGER", ".", "debug", "(", "u\"PDF: Impossible to determine layout, no TeXkeys returned\"", ")", "return", "[", "]" ]
Extract the texkeys from the given PDF file This is done by looking up the named destinations in the PDF @param pdf_file: path to a PDF @return: list of all texkeys found in the PDF
[ "Extract", "the", "texkeys", "from", "the", "given", "PDF", "file" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/pdf.py#L43-L84
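Usage is a single call. The path below is illustrative, and the function body (iteritems, PdfFileReader) assumes a Python 2 environment with PyPDF2 installed:

from refextract.references.pdf import extract_texkeys_from_pdf

# PDFs produced with hyperref typically carry 'cite.<texkey>' named
# destinations, which re_reference_in_dest picks up.
texkeys = extract_texkeys_from_pdf('some_paper.pdf')
print(texkeys)  # e.g. ['Weinberg:1967tq', ...], or [] on any PyPDF2 error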
3,750
inspirehep/refextract
refextract/references/regexs.py
get_reference_line_numeration_marker_patterns
def get_reference_line_numeration_marker_patterns(prefix=u''): """Return a list of compiled regex patterns used to search for the marker of a reference line in a full-text document. @param prefix: (string) the possible prefix to a reference line @return: (list) of compiled regex patterns. """ title = u"" if type(prefix) in (str, unicode): title = prefix g_name = u'(?P<mark>' g_close = u')' space = ur'\s*' patterns = [ # [1] space + title + g_name + ur'\[\s*(?P<marknum>\d+)\s*\]' + g_close, # [<letters and numbers] space + title + g_name + ur'\[\s*[a-zA-Z:-]+\+?\s?(\d{1,4}[A-Za-z:-]?)?\s*\]' + g_close, # {1} space + title + g_name + ur'\{\s*(?P<marknum>\d+)\s*\}' + g_close, # (1) space + title + g_name + ur'\<\s*(?P<marknum>\d+)\s*\>' + g_close, space + title + g_name + ur'\(\s*(?P<marknum>\d+)\s*\)' + g_close, space + title + g_name + ur'(?P<marknum>\d+)\s*\.(?!\d)' + g_close, space + title + g_name + ur'(?P<marknum>\d+)\s+' + g_close, space + title + g_name + ur'(?P<marknum>\d+)\s*\]' + g_close, # 1] space + title + g_name + ur'(?P<marknum>\d+)\s*\}' + g_close, # 1} space + title + g_name + ur'(?P<marknum>\d+)\s*\)' + g_close, # 1) space + title + g_name + ur'(?P<marknum>\d+)\s*\>' + g_close, # [1.1] space + title + g_name + ur'\[\s*\d+\.\d+\s*\]' + g_close, # [ ] space + title + g_name + ur'\[\s*\]' + g_close, # * space + title + g_name + ur'\*' + g_close, ] return [re.compile(p, re.I | re.UNICODE) for p in patterns]
python
def get_reference_line_numeration_marker_patterns(prefix=u''): """Return a list of compiled regex patterns used to search for the marker of a reference line in a full-text document. @param prefix: (string) the possible prefix to a reference line @return: (list) of compiled regex patterns. """ title = u"" if type(prefix) in (str, unicode): title = prefix g_name = u'(?P<mark>' g_close = u')' space = ur'\s*' patterns = [ # [1] space + title + g_name + ur'\[\s*(?P<marknum>\d+)\s*\]' + g_close, # [<letters and numbers] space + title + g_name + ur'\[\s*[a-zA-Z:-]+\+?\s?(\d{1,4}[A-Za-z:-]?)?\s*\]' + g_close, # {1} space + title + g_name + ur'\{\s*(?P<marknum>\d+)\s*\}' + g_close, # (1) space + title + g_name + ur'\<\s*(?P<marknum>\d+)\s*\>' + g_close, space + title + g_name + ur'\(\s*(?P<marknum>\d+)\s*\)' + g_close, space + title + g_name + ur'(?P<marknum>\d+)\s*\.(?!\d)' + g_close, space + title + g_name + ur'(?P<marknum>\d+)\s+' + g_close, space + title + g_name + ur'(?P<marknum>\d+)\s*\]' + g_close, # 1] space + title + g_name + ur'(?P<marknum>\d+)\s*\}' + g_close, # 1} space + title + g_name + ur'(?P<marknum>\d+)\s*\)' + g_close, # 1) space + title + g_name + ur'(?P<marknum>\d+)\s*\>' + g_close, # [1.1] space + title + g_name + ur'\[\s*\d+\.\d+\s*\]' + g_close, # [ ] space + title + g_name + ur'\[\s*\]' + g_close, # * space + title + g_name + ur'\*' + g_close, ] return [re.compile(p, re.I | re.UNICODE) for p in patterns]
[ "def", "get_reference_line_numeration_marker_patterns", "(", "prefix", "=", "u''", ")", ":", "title", "=", "u\"\"", "if", "type", "(", "prefix", ")", "in", "(", "str", ",", "unicode", ")", ":", "title", "=", "prefix", "g_name", "=", "u'(?P<mark>'", "g_close", "=", "u')'", "space", "=", "ur'\\s*'", "patterns", "=", "[", "# [1]", "space", "+", "title", "+", "g_name", "+", "ur'\\[\\s*(?P<marknum>\\d+)\\s*\\]'", "+", "g_close", ",", "# [<letters and numbers]", "space", "+", "title", "+", "g_name", "+", "ur'\\[\\s*[a-zA-Z:-]+\\+?\\s?(\\d{1,4}[A-Za-z:-]?)?\\s*\\]'", "+", "g_close", ",", "# {1}", "space", "+", "title", "+", "g_name", "+", "ur'\\{\\s*(?P<marknum>\\d+)\\s*\\}'", "+", "g_close", ",", "# (1)", "space", "+", "title", "+", "g_name", "+", "ur'\\<\\s*(?P<marknum>\\d+)\\s*\\>'", "+", "g_close", ",", "space", "+", "title", "+", "g_name", "+", "ur'\\(\\s*(?P<marknum>\\d+)\\s*\\)'", "+", "g_close", ",", "space", "+", "title", "+", "g_name", "+", "ur'(?P<marknum>\\d+)\\s*\\.(?!\\d)'", "+", "g_close", ",", "space", "+", "title", "+", "g_name", "+", "ur'(?P<marknum>\\d+)\\s+'", "+", "g_close", ",", "space", "+", "title", "+", "g_name", "+", "ur'(?P<marknum>\\d+)\\s*\\]'", "+", "g_close", ",", "# 1]", "space", "+", "title", "+", "g_name", "+", "ur'(?P<marknum>\\d+)\\s*\\}'", "+", "g_close", ",", "# 1}", "space", "+", "title", "+", "g_name", "+", "ur'(?P<marknum>\\d+)\\s*\\)'", "+", "g_close", ",", "# 1)", "space", "+", "title", "+", "g_name", "+", "ur'(?P<marknum>\\d+)\\s*\\>'", "+", "g_close", ",", "# [1.1]", "space", "+", "title", "+", "g_name", "+", "ur'\\[\\s*\\d+\\.\\d+\\s*\\]'", "+", "g_close", ",", "# [ ]", "space", "+", "title", "+", "g_name", "+", "ur'\\[\\s*\\]'", "+", "g_close", ",", "# *", "space", "+", "title", "+", "g_name", "+", "ur'\\*'", "+", "g_close", ",", "]", "return", "[", "re", ".", "compile", "(", "p", ",", "re", ".", "I", "|", "re", ".", "UNICODE", ")", "for", "p", "in", "patterns", "]" ]
Return a list of compiled regex patterns used to search for the marker of a reference line in a full-text document. @param prefix: (string) the possible prefix to a reference line @return: (list) of compiled regex patterns.
[ "Return", "a", "list", "of", "compiled", "regex", "patterns", "used", "to", "search", "for", "the", "marker", "of", "a", "reference", "line", "in", "a", "full", "-", "text", "document", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/regexs.py#L733-L772
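A Python 3 re-rendering of two of the patterns (the module itself uses Python 2 ur'' literals), showing how the named mark and marknum groups come back; the subset and the test lines are illustrative:

import re

g_name, g_close, space = r'(?P<mark>', r')', r'\s*'
patterns = [
    re.compile(space + g_name + r'\[\s*(?P<marknum>\d+)\s*\]' + g_close,
               re.I | re.UNICODE),   # [1]
    re.compile(space + g_name + r'\(\s*(?P<marknum>\d+)\s*\)' + g_close,
               re.I | re.UNICODE),   # (1)
]

for line in ['[12] A. Author, Some Journal 1 (2000) 1.',
             ' (3) B. Author, ibid.']:
    for ptn in patterns:
        m = ptn.match(line)
        if m:
            print(repr(m.group('mark')), '->', m.group('marknum'))
            break
# '[12]' -> 12
# '(3)' -> 3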
3,751
inspirehep/refextract
refextract/references/regexs.py
get_post_reference_section_title_patterns
def get_post_reference_section_title_patterns(): """Return a list of compiled regex patterns used to search for the title of the section after the reference section in a full-text document. @return: (list) of compiled regex patterns. """ compiled_patterns = [] thead = ur'^\s*([\{\(\<\[]?\s*(\w|\d)\s*[\)\}\>\.\-\]]?\s*)?' ttail = ur'(\s*\:\s*)?' numatn = ur'(\d+|\w\b|i{1,3}v?|vi{0,3})[\.\,]{0,2}\b' roman_numbers = ur'[LVIX]' patterns = [ # Section titles thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'appendix') + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'appendices') + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'acknowledgement') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'acknowledgment') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'table') + ur'\w?s?\d?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'figure') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'list of figure') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'annex') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'discussion') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'remercie') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'index') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'summary') + ur's?' + ttail, # Figure nums ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'figure') + numatn, ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'fig') + ur'\.\s*' + numatn, ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'fig') + ur'\.?\s*\d\w?\b', # Tables ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'table') + numatn, ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'tab') + ur'\.\s*' + numatn, ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'tab') + ur'\.?\s*\d\w?\b', # Other titles formats ur'^\s*' + roman_numbers + ur'\.?\s*[Cc]onclusion[\w\s]*$', ur'^\s*Appendix\s[A-Z]\s*\:\s*[a-zA-Z]+\s*', ] for p in patterns: compiled_patterns.append(re.compile(p, re.I | re.UNICODE)) return compiled_patterns
python
def get_post_reference_section_title_patterns(): """Return a list of compiled regex patterns used to search for the title of the section after the reference section in a full-text document. @return: (list) of compiled regex patterns. """ compiled_patterns = [] thead = ur'^\s*([\{\(\<\[]?\s*(\w|\d)\s*[\)\}\>\.\-\]]?\s*)?' ttail = ur'(\s*\:\s*)?' numatn = ur'(\d+|\w\b|i{1,3}v?|vi{0,3})[\.\,]{0,2}\b' roman_numbers = ur'[LVIX]' patterns = [ # Section titles thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'appendix') + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'appendices') + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'acknowledgement') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'acknowledgment') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'table') + ur'\w?s?\d?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'figure') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'list of figure') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'annex') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'discussion') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'remercie') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'index') + ur's?' + ttail, thead + _create_regex_pattern_add_optional_spaces_to_word_characters( u'summary') + ur's?' + ttail, # Figure nums ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'figure') + numatn, ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'fig') + ur'\.\s*' + numatn, ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'fig') + ur'\.?\s*\d\w?\b', # Tables ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'table') + numatn, ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'tab') + ur'\.\s*' + numatn, ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'tab') + ur'\.?\s*\d\w?\b', # Other titles formats ur'^\s*' + roman_numbers + ur'\.?\s*[Cc]onclusion[\w\s]*$', ur'^\s*Appendix\s[A-Z]\s*\:\s*[a-zA-Z]+\s*', ] for p in patterns: compiled_patterns.append(re.compile(p, re.I | re.UNICODE)) return compiled_patterns
[ "def", "get_post_reference_section_title_patterns", "(", ")", ":", "compiled_patterns", "=", "[", "]", "thead", "=", "ur'^\\s*([\\{\\(\\<\\[]?\\s*(\\w|\\d)\\s*[\\)\\}\\>\\.\\-\\]]?\\s*)?'", "ttail", "=", "ur'(\\s*\\:\\s*)?'", "numatn", "=", "ur'(\\d+|\\w\\b|i{1,3}v?|vi{0,3})[\\.\\,]{0,2}\\b'", "roman_numbers", "=", "ur'[LVIX]'", "patterns", "=", "[", "# Section titles", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'appendix'", ")", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'appendices'", ")", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'acknowledgement'", ")", "+", "ur's?'", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'acknowledgment'", ")", "+", "ur's?'", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'table'", ")", "+", "ur'\\w?s?\\d?'", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'figure'", ")", "+", "ur's?'", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'list of figure'", ")", "+", "ur's?'", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'annex'", ")", "+", "ur's?'", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'discussion'", ")", "+", "ur's?'", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'remercie'", ")", "+", "ur's?'", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'index'", ")", "+", "ur's?'", "+", "ttail", ",", "thead", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'summary'", ")", "+", "ur's?'", "+", "ttail", ",", "# Figure nums", "ur'^\\s*'", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'figure'", ")", "+", "numatn", ",", "ur'^\\s*'", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'fig'", ")", "+", "ur'\\.\\s*'", "+", "numatn", ",", "ur'^\\s*'", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'fig'", ")", "+", "ur'\\.?\\s*\\d\\w?\\b'", ",", "# Tables", "ur'^\\s*'", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'table'", ")", "+", "numatn", ",", "ur'^\\s*'", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'tab'", ")", "+", "ur'\\.\\s*'", "+", "numatn", ",", "ur'^\\s*'", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'tab'", ")", "+", "ur'\\.?\\s*\\d\\w?\\b'", ",", "# Other titles formats", "ur'^\\s*'", "+", "roman_numbers", "+", "ur'\\.?\\s*[Cc]onclusion[\\w\\s]*$'", ",", "ur'^\\s*Appendix\\s[A-Z]\\s*\\:\\s*[a-zA-Z]+\\s*'", ",", "]", "for", "p", "in", "patterns", ":", "compiled_patterns", ".", "append", "(", "re", ".", "compile", "(", "p", ",", "re", ".", "I", "|", "re", ".", "UNICODE", ")", ")", "return", "compiled_patterns" ]
Return a list of compiled regex patterns used to search for the title of the section after the reference section in a full-text document. @return: (list) of compiled regex patterns.
[ "Return", "a", "list", "of", "compiled", "regex", "patterns", "used", "to", "search", "for", "the", "title", "of", "the", "section", "after", "the", "reference", "section", "in", "a", "full", "-", "text", "document", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/regexs.py#L800-L871
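The helper _create_regex_pattern_add_optional_spaces_to_word_characters is defined elsewhere in regexs.py; judging from its name and use, it makes whitespace between a word's characters optional, so titles mangled by PDF-to-text conversion still match. A hypothetical reconstruction (an assumption, not the module's actual code) exercised against the 'appendix' pattern:

import re

def add_optional_spaces(word):
    # Assumed behaviour of the refextract helper: tolerate stray
    # whitespace between the characters of `word`.
    return r'\s*'.join(re.escape(ch) for ch in word)

thead = r'^\s*([\{\(\<\[]?\s*(\w|\d)\s*[\)\}\>\.\-\]]?\s*)?'
ttail = r'(\s*\:\s*)?'
ptn = re.compile(thead + add_optional_spaces('appendix') + ttail,
                 re.I | re.UNICODE)
print(bool(ptn.match('A. App endix:')))   # True - split letters still match
print(bool(ptn.match('References')))      # False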
3,752
inspirehep/refextract
refextract/references/regexs.py
get_post_reference_section_keyword_patterns
def get_post_reference_section_keyword_patterns(): """Return a list of compiled regex patterns used to search for various keywords that can often be found after, and therefore suggest the end of, a reference section in a full-text document. @return: (list) of compiled regex patterns. """ compiled_patterns = [] patterns = [u'(' + _create_regex_pattern_add_optional_spaces_to_word_characters(u'prepared') + ur'|' + _create_regex_pattern_add_optional_spaces_to_word_characters(u'created') + ur').*(AAS\s*)?\sLATEX', ur'AAS\s+?LATEX\s+?' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'macros') + u'v', ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'This paper has been produced using'), ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters(u'This article was processed by the author using Springer-Verlag') + u' LATEX'] for p in patterns: compiled_patterns.append(re.compile(p, re.I | re.UNICODE)) return compiled_patterns
python
def get_post_reference_section_keyword_patterns(): """Return a list of compiled regex patterns used to search for various keywords that can often be found after, and therefore suggest the end of, a reference section in a full-text document. @return: (list) of compiled regex patterns. """ compiled_patterns = [] patterns = [u'(' + _create_regex_pattern_add_optional_spaces_to_word_characters(u'prepared') + ur'|' + _create_regex_pattern_add_optional_spaces_to_word_characters(u'created') + ur').*(AAS\s*)?\sLATEX', ur'AAS\s+?LATEX\s+?' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'macros') + u'v', ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters( u'This paper has been produced using'), ur'^\s*' + _create_regex_pattern_add_optional_spaces_to_word_characters(u'This article was processed by the author using Springer-Verlag') + u' LATEX'] for p in patterns: compiled_patterns.append(re.compile(p, re.I | re.UNICODE)) return compiled_patterns
[ "def", "get_post_reference_section_keyword_patterns", "(", ")", ":", "compiled_patterns", "=", "[", "]", "patterns", "=", "[", "u'('", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'prepared'", ")", "+", "ur'|'", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'created'", ")", "+", "ur').*(AAS\\s*)?\\sLATEX'", ",", "ur'AAS\\s+?LATEX\\s+?'", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'macros'", ")", "+", "u'v'", ",", "ur'^\\s*'", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'This paper has been produced using'", ")", ",", "ur'^\\s*'", "+", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "u'This article was processed by the author using Springer-Verlag'", ")", "+", "u' LATEX'", "]", "for", "p", "in", "patterns", ":", "compiled_patterns", ".", "append", "(", "re", ".", "compile", "(", "p", ",", "re", ".", "I", "|", "re", ".", "UNICODE", ")", ")", "return", "compiled_patterns" ]
Return a list of compiled regex patterns used to search for various keywords that can often be found after, and therefore suggest the end of, a reference section in a full-text document. @return: (list) of compiled regex patterns.
[ "Return", "a", "list", "of", "compiled", "regex", "patterns", "used", "to", "search", "for", "various", "keywords", "that", "can", "often", "be", "found", "after", "and", "therefore", "suggest", "the", "end", "of", "a", "reference", "section", "in", "a", "full", "-", "text", "document", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/regexs.py#L874-L894
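With the same hypothetical helper reconstruction (still an assumption), the 'produced using' end-of-references cue can be checked directly; the test line is illustrative:

import re

def add_optional_spaces(word):  # hypothetical stand-in, as sketched above
    return r'\s*'.join(re.escape(ch) for ch in word)

end_ptn = re.compile(
    r'^\s*' + add_optional_spaces('This paper has been produced using'),
    re.I | re.UNICODE)
print(bool(end_ptn.match('This  paper has been produ ced using REVTeX')))  # True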
3,753
inspirehep/refextract
refextract/references/regexs.py
regex_match_list
def regex_match_list(line, patterns): """Given a list of COMPILED regex patters, perform the "re.match" operation on the line for every pattern. Break from searching at the first match, returning the match object. In the case that no patterns match, the None type will be returned. @param line: (unicode string) to be searched in. @param patterns: (list) of compiled regex patterns to search "line" with. @return: (None or an re.match object), depending upon whether one of the patterns matched within line or not. """ m = None for ptn in patterns: m = ptn.match(line) if m is not None: break return m
python
def regex_match_list(line, patterns): """Given a list of COMPILED regex patters, perform the "re.match" operation on the line for every pattern. Break from searching at the first match, returning the match object. In the case that no patterns match, the None type will be returned. @param line: (unicode string) to be searched in. @param patterns: (list) of compiled regex patterns to search "line" with. @return: (None or an re.match object), depending upon whether one of the patterns matched within line or not. """ m = None for ptn in patterns: m = ptn.match(line) if m is not None: break return m
[ "def", "regex_match_list", "(", "line", ",", "patterns", ")", ":", "m", "=", "None", "for", "ptn", "in", "patterns", ":", "m", "=", "ptn", ".", "match", "(", "line", ")", "if", "m", "is", "not", "None", ":", "break", "return", "m" ]
Given a list of COMPILED regex patterns, perform the "re.match" operation on the line for every pattern. Break from searching at the first match, returning the match object. In the case that no patterns match, the None type will be returned. @param line: (unicode string) to be searched in. @param patterns: (list) of compiled regex patterns to search "line" with. @return: (None or an re.match object), depending upon whether one of the patterns matched within line or not.
[ "Given", "a", "list", "of", "COMPILED", "regex", "patters", "perform", "the", "re", ".", "match", "operation", "on", "the", "line", "for", "every", "pattern", ".", "Break", "from", "searching", "at", "the", "first", "match", "returning", "the", "match", "object", ".", "In", "the", "case", "that", "no", "patterns", "match", "the", "None", "type", "will", "be", "returned", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/regexs.py#L897-L913
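regex_match_list has no external dependencies, so a Python 3 copy runs directly; the sample patterns and line are illustrative:

import re

def regex_match_list(line, patterns):
    m = None
    for ptn in patterns:
        m = ptn.match(line)
        if m is not None:
            break   # first-match-wins, as the docstring describes
    return m

patterns = [re.compile(r'\[(?P<num>\d+)\]'), re.compile(r'\((?P<num>\d+)\)')]
m = regex_match_list('(42) some reference text', patterns)
print(m.group('num') if m else None)   # '42' - the second pattern matched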
3,754
inspirehep/refextract
refextract/documents/text.py
get_url_repair_patterns
def get_url_repair_patterns(): """Initialise and return a list of precompiled regexp patterns that are used to try to re-assemble URLs that have been broken during a document's conversion to plain-text. @return: (list) of compiled re regexp patterns used for finding various broken URLs. """ file_types_list = [ ur'h\s*t\s*m', # htm ur'h\s*t\s*m\s*l', # html ur't\s*x\s*t' # txt ur'p\s*h\s*p' # php ur'a\s*s\s*p\s*' # asp ur'j\s*s\s*p', # jsp ur'p\s*y', # py (python) ur'p\s*l', # pl (perl) ur'x\s*m\s*l', # xml ur'j\s*p\s*g', # jpg ur'g\s*i\s*f' # gif ur'm\s*o\s*v' # mov ur's\s*w\s*f' # swf ur'p\s*d\s*f' # pdf ur'p\s*s' # ps ur'd\s*o\s*c', # doc ur't\s*e\s*x', # tex ur's\s*h\s*t\s*m\s*l', # shtml ] pattern_list = [ ur'(h\s*t\s*t\s*p\s*\:\s*\/\s*\/)', ur'(f\s*t\s*p\s*\:\s*\/\s*\/\s*)', ur'((http|ftp):\/\/\s*[\w\d])', ur'((http|ftp):\/\/([\w\d\s\._\-])+?\s*\/)', ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)+)', ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)*([\w\d\_\s\-]+\.\s?[\w\d]+))', ] pattern_list = [re.compile(p, re.I | re.UNICODE) for p in pattern_list] # some possible endings for URLs: p = ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*([\w\d\_\-]+\.%s))' for extension in file_types_list: p_url = re.compile(p % extension, re.I | re.UNICODE) pattern_list.append(p_url) # if url last thing in line, and only 10 letters max, concat them p_url = re.compile( r'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*\s*?([\w\d\_\.\-]\s?){1,10}\s*)$', re.I | re.UNICODE) pattern_list.append(p_url) return pattern_list
python
def get_url_repair_patterns(): """Initialise and return a list of precompiled regexp patterns that are used to try to re-assemble URLs that have been broken during a document's conversion to plain-text. @return: (list) of compiled re regexp patterns used for finding various broken URLs. """ file_types_list = [ ur'h\s*t\s*m', # htm ur'h\s*t\s*m\s*l', # html ur't\s*x\s*t' # txt ur'p\s*h\s*p' # php ur'a\s*s\s*p\s*' # asp ur'j\s*s\s*p', # jsp ur'p\s*y', # py (python) ur'p\s*l', # pl (perl) ur'x\s*m\s*l', # xml ur'j\s*p\s*g', # jpg ur'g\s*i\s*f' # gif ur'm\s*o\s*v' # mov ur's\s*w\s*f' # swf ur'p\s*d\s*f' # pdf ur'p\s*s' # ps ur'd\s*o\s*c', # doc ur't\s*e\s*x', # tex ur's\s*h\s*t\s*m\s*l', # shtml ] pattern_list = [ ur'(h\s*t\s*t\s*p\s*\:\s*\/\s*\/)', ur'(f\s*t\s*p\s*\:\s*\/\s*\/\s*)', ur'((http|ftp):\/\/\s*[\w\d])', ur'((http|ftp):\/\/([\w\d\s\._\-])+?\s*\/)', ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)+)', ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)*([\w\d\_\s\-]+\.\s?[\w\d]+))', ] pattern_list = [re.compile(p, re.I | re.UNICODE) for p in pattern_list] # some possible endings for URLs: p = ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*([\w\d\_\-]+\.%s))' for extension in file_types_list: p_url = re.compile(p % extension, re.I | re.UNICODE) pattern_list.append(p_url) # if url last thing in line, and only 10 letters max, concat them p_url = re.compile( r'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*\s*?([\w\d\_\.\-]\s?){1,10}\s*)$', re.I | re.UNICODE) pattern_list.append(p_url) return pattern_list
[ "def", "get_url_repair_patterns", "(", ")", ":", "file_types_list", "=", "[", "ur'h\\s*t\\s*m'", ",", "# htm", "ur'h\\s*t\\s*m\\s*l'", ",", "# html", "ur't\\s*x\\s*t'", "# txt", "ur'p\\s*h\\s*p'", "# php", "ur'a\\s*s\\s*p\\s*'", "# asp", "ur'j\\s*s\\s*p'", ",", "# jsp", "ur'p\\s*y'", ",", "# py (python)", "ur'p\\s*l'", ",", "# pl (perl)", "ur'x\\s*m\\s*l'", ",", "# xml", "ur'j\\s*p\\s*g'", ",", "# jpg", "ur'g\\s*i\\s*f'", "# gif", "ur'm\\s*o\\s*v'", "# mov", "ur's\\s*w\\s*f'", "# swf", "ur'p\\s*d\\s*f'", "# pdf", "ur'p\\s*s'", "# ps", "ur'd\\s*o\\s*c'", ",", "# doc", "ur't\\s*e\\s*x'", ",", "# tex", "ur's\\s*h\\s*t\\s*m\\s*l'", ",", "# shtml", "]", "pattern_list", "=", "[", "ur'(h\\s*t\\s*t\\s*p\\s*\\:\\s*\\/\\s*\\/)'", ",", "ur'(f\\s*t\\s*p\\s*\\:\\s*\\/\\s*\\/\\s*)'", ",", "ur'((http|ftp):\\/\\/\\s*[\\w\\d])'", ",", "ur'((http|ftp):\\/\\/([\\w\\d\\s\\._\\-])+?\\s*\\/)'", ",", "ur'((http|ftp):\\/\\/([\\w\\d\\_\\.\\-])+\\/(([\\w\\d\\_\\s\\.\\-])+?\\/)+)'", ",", "ur'((http|ftp):\\/\\/([\\w\\d\\_\\.\\-])+\\/(([\\w\\d\\_\\s\\.\\-])+?\\/)*([\\w\\d\\_\\s\\-]+\\.\\s?[\\w\\d]+))'", ",", "]", "pattern_list", "=", "[", "re", ".", "compile", "(", "p", ",", "re", ".", "I", "|", "re", ".", "UNICODE", ")", "for", "p", "in", "pattern_list", "]", "# some possible endings for URLs:", "p", "=", "ur'((http|ftp):\\/\\/([\\w\\d\\_\\.\\-])+\\/(([\\w\\d\\_\\.\\-])+?\\/)*([\\w\\d\\_\\-]+\\.%s))'", "for", "extension", "in", "file_types_list", ":", "p_url", "=", "re", ".", "compile", "(", "p", "%", "extension", ",", "re", ".", "I", "|", "re", ".", "UNICODE", ")", "pattern_list", ".", "append", "(", "p_url", ")", "# if url last thing in line, and only 10 letters max, concat them", "p_url", "=", "re", ".", "compile", "(", "r'((http|ftp):\\/\\/([\\w\\d\\_\\.\\-])+\\/(([\\w\\d\\_\\.\\-])+?\\/)*\\s*?([\\w\\d\\_\\.\\-]\\s?){1,10}\\s*)$'", ",", "re", ".", "I", "|", "re", ".", "UNICODE", ")", "pattern_list", ".", "append", "(", "p_url", ")", "return", "pattern_list" ]
Initialise and return a list of precompiled regexp patterns that are used to try to re-assemble URLs that have been broken during a document's conversion to plain-text. @return: (list) of compiled re regexp patterns used for finding various broken URLs.
[ "Initialise", "and", "return", "a", "list", "of", "precompiled", "regexp", "patterns", "that", "are", "used", "to", "try", "to", "re", "-", "assemble", "URLs", "that", "have", "been", "broken", "during", "a", "document", "s", "conversion", "to", "plain", "-", "text", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L45-L95
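One of the scheme patterns, re-rendered for Python 3, is enough to show the repair these patterns feed into (see repair_broken_urls below); the pattern choice and input are illustrative:

import re

p_scheme = re.compile(r'(h\s*t\s*t\s*p\s*\:\s*\/\s*\/)', re.I | re.UNICODE)

def _chop_spaces(m):
    # Same idea as the repair callback: delete spaces inside the match.
    return m.group(1).replace(' ', '')

line = 'see h t tp: / /example.org/paper.pdf for details'
print(p_scheme.sub(_chop_spaces, line))
# see http://example.org/paper.pdf for details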
3,755
inspirehep/refextract
refextract/documents/text.py
join_lines
def join_lines(line1, line2): """Join 2 lines of text >>> join_lines('abc', 'de') 'abcde' >>> join_lines('a-', 'b') 'ab' """ if line1 == u"": pass elif line1[-1] == u'-': # hyphenated word at the end of the # line - don't add in a space and remove hyphen line1 = line1[:-1] elif line1[-1] != u' ': # no space at the end of this # line, add in a space line1 = line1 + u' ' return line1 + line2
python
def join_lines(line1, line2): """Join 2 lines of text >>> join_lines('abc', 'de') 'abcde' >>> join_lines('a-', 'b') 'ab' """ if line1 == u"": pass elif line1[-1] == u'-': # hyphenated word at the end of the # line - don't add in a space and remove hyphen line1 = line1[:-1] elif line1[-1] != u' ': # no space at the end of this # line, add in a space line1 = line1 + u' ' return line1 + line2
[ "def", "join_lines", "(", "line1", ",", "line2", ")", ":", "if", "line1", "==", "u\"\"", ":", "pass", "elif", "line1", "[", "-", "1", "]", "==", "u'-'", ":", "# hyphenated word at the end of the", "# line - don't add in a space and remove hyphen", "line1", "=", "line1", "[", ":", "-", "1", "]", "elif", "line1", "[", "-", "1", "]", "!=", "u' '", ":", "# no space at the end of this", "# line, add in a space", "line1", "=", "line1", "+", "u' '", "return", "line1", "+", "line2" ]
Join 2 lines of text >>> join_lines('abc', 'de') 'abcde' >>> join_lines('a-', 'b') 'ab'
[ "Join", "2", "lines", "of", "text" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L102-L120
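The doctests above cover the hyphen and plain-join branches; a Python 3 copy exercising the remaining two branches as well:

def join_lines(line1, line2):
    if line1 == '':
        pass                     # nothing to join onto
    elif line1[-1] == '-':
        line1 = line1[:-1]       # rejoin a hyphenated word, no space
    elif line1[-1] != ' ':
        line1 = line1 + ' '      # insert the missing inter-line space
    return line1 + line2

print(join_lines('', 'start'))              # 'start'
print(join_lines('already spaced ', 'ok'))  # 'already spaced ok'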
3,756
inspirehep/refextract
refextract/documents/text.py
repair_broken_urls
def repair_broken_urls(line): """Attempt to repair broken URLs in a line of text. E.g.: remove spaces from the middle of a URL; something like that. @param line: (string) the line in which to check for broken URLs. @return: (string) the line after any broken URLs have been repaired. """ def _chop_spaces_in_url_match(m): """Suppresses spaces in a matched URL.""" return m.group(1).replace(" ", "") for ptn in re_list_url_repair_patterns: line = ptn.sub(_chop_spaces_in_url_match, line) return line
python
def repair_broken_urls(line): """Attempt to repair broken URLs in a line of text. E.g.: remove spaces from the middle of a URL; something like that. @param line: (string) the line in which to check for broken URLs. @return: (string) the line after any broken URLs have been repaired. """ def _chop_spaces_in_url_match(m): """Suppresses spaces in a matched URL.""" return m.group(1).replace(" ", "") for ptn in re_list_url_repair_patterns: line = ptn.sub(_chop_spaces_in_url_match, line) return line
[ "def", "repair_broken_urls", "(", "line", ")", ":", "def", "_chop_spaces_in_url_match", "(", "m", ")", ":", "\"\"\"Suppresses spaces in a matched URL.\"\"\"", "return", "m", ".", "group", "(", "1", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "for", "ptn", "in", "re_list_url_repair_patterns", ":", "line", "=", "ptn", ".", "sub", "(", "_chop_spaces_in_url_match", ",", "line", ")", "return", "line" ]
Attempt to repair broken URLs in a line of text. E.g.: remove spaces from the middle of a URL; something like that. @param line: (string) the line in which to check for broken URLs. @return: (string) the line after any broken URLs have been repaired.
[ "Attempt", "to", "repair", "broken", "URLs", "in", "a", "line", "of", "text", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L123-L136
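re_list_url_repair_patterns is module-level state built by get_url_repair_patterns above; a self-contained rendering with a single stand-in pattern from that list (illustrative, not the full set):

import re

re_list_url_repair_patterns = [
    re.compile(r'((http|ftp):\/\/([\w\d\s\._\-])+?\s*\/)', re.I | re.UNICODE),
]

def repair_broken_urls(line):
    def _chop_spaces_in_url_match(m):
        """Suppresses spaces in a matched URL."""
        return m.group(1).replace(' ', '')
    for ptn in re_list_url_repair_patterns:
        line = ptn.sub(_chop_spaces_in_url_match, line)
    return line

print(repair_broken_urls('read http://exa mple.org /refs for more'))
# read http://example.org/refs for more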
3,757
inspirehep/refextract
refextract/documents/text.py
remove_and_record_multiple_spaces_in_line
def remove_and_record_multiple_spaces_in_line(line): """For a given string, locate all ocurrences of multiple spaces together in the line, record the number of spaces found at each position, and replace them with a single space. @param line: (string) the text line to be processed for multiple spaces. @return: (tuple) countaining a dictionary and a string. The dictionary contains information about the number of spaces removed at given positions in the line. For example, if 3 spaces were removed from the line at index '22', the dictionary would be set as follows: { 22 : 3 } The string that is also returned in this tuple is the line after multiple-space ocurrences have replaced with single spaces. """ removed_spaces = {} # get a collection of match objects for all instances of # multiple-spaces found in the line: multispace_matches = re_group_captured_multiple_space.finditer(line) # record the number of spaces found at each match position: for multispace in multispace_matches: removed_spaces[multispace.start()] = \ (multispace.end() - multispace.start() - 1) # now remove the multiple-spaces from the line, replacing with a # single space at each position: line = re_group_captured_multiple_space.sub(u' ', line) return (removed_spaces, line)
python
def remove_and_record_multiple_spaces_in_line(line): """For a given string, locate all ocurrences of multiple spaces together in the line, record the number of spaces found at each position, and replace them with a single space. @param line: (string) the text line to be processed for multiple spaces. @return: (tuple) countaining a dictionary and a string. The dictionary contains information about the number of spaces removed at given positions in the line. For example, if 3 spaces were removed from the line at index '22', the dictionary would be set as follows: { 22 : 3 } The string that is also returned in this tuple is the line after multiple-space ocurrences have replaced with single spaces. """ removed_spaces = {} # get a collection of match objects for all instances of # multiple-spaces found in the line: multispace_matches = re_group_captured_multiple_space.finditer(line) # record the number of spaces found at each match position: for multispace in multispace_matches: removed_spaces[multispace.start()] = \ (multispace.end() - multispace.start() - 1) # now remove the multiple-spaces from the line, replacing with a # single space at each position: line = re_group_captured_multiple_space.sub(u' ', line) return (removed_spaces, line)
[ "def", "remove_and_record_multiple_spaces_in_line", "(", "line", ")", ":", "removed_spaces", "=", "{", "}", "# get a collection of match objects for all instances of", "# multiple-spaces found in the line:", "multispace_matches", "=", "re_group_captured_multiple_space", ".", "finditer", "(", "line", ")", "# record the number of spaces found at each match position:", "for", "multispace", "in", "multispace_matches", ":", "removed_spaces", "[", "multispace", ".", "start", "(", ")", "]", "=", "(", "multispace", ".", "end", "(", ")", "-", "multispace", ".", "start", "(", ")", "-", "1", ")", "# now remove the multiple-spaces from the line, replacing with a", "# single space at each position:", "line", "=", "re_group_captured_multiple_space", ".", "sub", "(", "u' '", ",", "line", ")", "return", "(", "removed_spaces", ",", "line", ")" ]
For a given string, locate all occurrences of multiple spaces together in the line, record the number of spaces found at each position, and replace them with a single space. @param line: (string) the text line to be processed for multiple spaces. @return: (tuple) containing a dictionary and a string. The dictionary contains information about the number of spaces removed at given positions in the line. For example, if 3 spaces were removed from the line at index '22', the dictionary would be set as follows: { 22 : 3 } The string that is also returned in this tuple is the line after multiple-space occurrences have been replaced with single spaces.
[ "For", "a", "given", "string", "locate", "all", "ocurrences", "of", "multiple", "spaces", "together", "in", "the", "line", "record", "the", "number", "of", "spaces", "found", "at", "each", "position", "and", "replace", "them", "with", "a", "single", "space", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L139-L164
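re_group_captured_multiple_space is precompiled elsewhere in the module; with a plausible stand-in that captures runs of two or more spaces (an assumed shape), the function runs as-is:

import re

# Stand-in for refextract's precompiled pattern (assumed shape):
re_group_captured_multiple_space = re.compile(r'( {2,})', re.UNICODE)

def remove_and_record_multiple_spaces_in_line(line):
    removed_spaces = {}
    for multispace in re_group_captured_multiple_space.finditer(line):
        removed_spaces[multispace.start()] = \
            multispace.end() - multispace.start() - 1
    line = re_group_captured_multiple_space.sub(' ', line)
    return removed_spaces, line

print(remove_and_record_multiple_spaces_in_line('a   b  c'))
# ({1: 2, 5: 1}, 'a b c')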
3,758
inspirehep/refextract
refextract/documents/text.py
remove_page_boundary_lines
def remove_page_boundary_lines(docbody): """Try to locate page breaks, headers and footers within a document body, and remove the array cells at which they are found. @param docbody: (list) of strings, each string being a line in the document's body. @return: (list) of strings. The document body, hopefully with page- breaks, headers and footers removed. Each string in the list once more represents a line in the document. """ number_head_lines = number_foot_lines = 0 # Make sure document not just full of whitespace: if not document_contains_text(docbody): # document contains only whitespace - cannot safely # strip headers/footers return docbody # Get list of index posns of pagebreaks in document: page_break_posns = get_page_break_positions(docbody) # Get num lines making up each header if poss: number_head_lines = get_number_header_lines(docbody, page_break_posns) # Get num lines making up each footer if poss: number_foot_lines = get_number_footer_lines(docbody, page_break_posns) # Remove pagebreaks,headers,footers: docbody = strip_headers_footers_pagebreaks(docbody, page_break_posns, number_head_lines, number_foot_lines) return docbody
python
def remove_page_boundary_lines(docbody): """Try to locate page breaks, headers and footers within a document body, and remove the array cells at which they are found. @param docbody: (list) of strings, each string being a line in the document's body. @return: (list) of strings. The document body, hopefully with page- breaks, headers and footers removed. Each string in the list once more represents a line in the document. """ number_head_lines = number_foot_lines = 0 # Make sure document not just full of whitespace: if not document_contains_text(docbody): # document contains only whitespace - cannot safely # strip headers/footers return docbody # Get list of index posns of pagebreaks in document: page_break_posns = get_page_break_positions(docbody) # Get num lines making up each header if poss: number_head_lines = get_number_header_lines(docbody, page_break_posns) # Get num lines making up each footer if poss: number_foot_lines = get_number_footer_lines(docbody, page_break_posns) # Remove pagebreaks,headers,footers: docbody = strip_headers_footers_pagebreaks(docbody, page_break_posns, number_head_lines, number_foot_lines) return docbody
[ "def", "remove_page_boundary_lines", "(", "docbody", ")", ":", "number_head_lines", "=", "number_foot_lines", "=", "0", "# Make sure document not just full of whitespace:", "if", "not", "document_contains_text", "(", "docbody", ")", ":", "# document contains only whitespace - cannot safely", "# strip headers/footers", "return", "docbody", "# Get list of index posns of pagebreaks in document:", "page_break_posns", "=", "get_page_break_positions", "(", "docbody", ")", "# Get num lines making up each header if poss:", "number_head_lines", "=", "get_number_header_lines", "(", "docbody", ",", "page_break_posns", ")", "# Get num lines making up each footer if poss:", "number_foot_lines", "=", "get_number_footer_lines", "(", "docbody", ",", "page_break_posns", ")", "# Remove pagebreaks,headers,footers:", "docbody", "=", "strip_headers_footers_pagebreaks", "(", "docbody", ",", "page_break_posns", ",", "number_head_lines", ",", "number_foot_lines", ")", "return", "docbody" ]
Try to locate page breaks, headers and footers within a document body, and remove the array cells at which they are found. @param docbody: (list) of strings, each string being a line in the document's body. @return: (list) of strings. The document body, hopefully with page- breaks, headers and footers removed. Each string in the list once more represents a line in the document.
[ "Try", "to", "locate", "page", "breaks", "headers", "and", "footers", "within", "a", "document", "body", "and", "remove", "the", "array", "cells", "at", "which", "they", "are", "found", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L186-L217
3,759
inspirehep/refextract
refextract/documents/text.py
get_page_break_positions
def get_page_break_positions(docbody): """Locate page breaks in the list of document lines and create a list positions in the document body list. @param docbody: (list) of strings - each string is a line in the document. @return: (list) of integer positions, whereby each integer represents the position (in the document body) of a page-break. """ page_break_posns = [] p_break = re.compile(ur'^\s*\f\s*$', re.UNICODE) num_document_lines = len(docbody) for i in xrange(num_document_lines): if p_break.match(docbody[i]) is not None: page_break_posns.append(i) return page_break_posns
python
def get_page_break_positions(docbody): """Locate page breaks in the list of document lines and create a list positions in the document body list. @param docbody: (list) of strings - each string is a line in the document. @return: (list) of integer positions, whereby each integer represents the position (in the document body) of a page-break. """ page_break_posns = [] p_break = re.compile(ur'^\s*\f\s*$', re.UNICODE) num_document_lines = len(docbody) for i in xrange(num_document_lines): if p_break.match(docbody[i]) is not None: page_break_posns.append(i) return page_break_posns
[ "def", "get_page_break_positions", "(", "docbody", ")", ":", "page_break_posns", "=", "[", "]", "p_break", "=", "re", ".", "compile", "(", "ur'^\\s*\\f\\s*$'", ",", "re", ".", "UNICODE", ")", "num_document_lines", "=", "len", "(", "docbody", ")", "for", "i", "in", "xrange", "(", "num_document_lines", ")", ":", "if", "p_break", ".", "match", "(", "docbody", "[", "i", "]", ")", "is", "not", "None", ":", "page_break_posns", ".", "append", "(", "i", ")", "return", "page_break_posns" ]
Locate page breaks in the list of document lines and create a list of positions in the document body list. @param docbody: (list) of strings - each string is a line in the document. @return: (list) of integer positions, whereby each integer represents the position (in the document body) of a page-break.
[ "Locate", "page", "breaks", "in", "the", "list", "of", "document", "lines", "and", "create", "a", "list", "positions", "in", "the", "document", "body", "list", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L237-L251
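A Python 3 rendering (the original relies on ur'' literals and xrange, both Python 2 only) behaves the same: one index per line consisting solely of a form feed and optional whitespace:

import re

def get_page_break_positions(docbody):
    p_break = re.compile(r'^\s*\f\s*$', re.UNICODE)
    return [i for i, line in enumerate(docbody) if p_break.match(line)]

doc = ['header', 'body text', '\f', 'header', 'more body', ' \f ']
print(get_page_break_positions(doc))   # [2, 5]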
3,760
inspirehep/refextract
refextract/documents/text.py
get_number_header_lines
def get_number_header_lines(docbody, page_break_posns): """Try to guess the number of header lines each page of a document has. The positions of the page breaks in the document are used to try to guess the number of header lines. @param docbody: (list) of strings - each string being a line in the document @param page_break_posns: (list) of integers - each integer is the position of a page break in the document. @return: (int) the number of lines that make up the header of each page. """ remaining_breaks = len(page_break_posns) - 1 num_header_lines = empty_line = 0 # pattern to search for a word in a line: p_wordSearch = re.compile(ur'([A-Za-z0-9-]+)', re.UNICODE) if remaining_breaks > 2: if remaining_breaks > 3: # Only check odd page headers next_head = 2 else: # Check headers on each page next_head = 1 keep_checking = 1 while keep_checking: cur_break = 1 if docbody[(page_break_posns[cur_break] + num_header_lines + 1)].isspace(): # this is a blank line empty_line = 1 if (page_break_posns[cur_break] + num_header_lines + 1) \ == (page_break_posns[(cur_break + 1)]): # Have reached next page-break: document has no # body - only head/footers! keep_checking = 0 grps_headLineWords = \ p_wordSearch.findall(docbody[(page_break_posns[cur_break] + num_header_lines + 1)]) cur_break = cur_break + next_head while (cur_break < remaining_breaks) and keep_checking: lineno = page_break_posns[cur_break] + num_header_lines + 1 if lineno >= len(docbody): keep_checking = 0 break grps_thisLineWords = \ p_wordSearch.findall(docbody[lineno]) if empty_line: if len(grps_thisLineWords) != 0: # This line should be empty, but isn't keep_checking = 0 else: if (len(grps_thisLineWords) == 0) or \ (len(grps_headLineWords) != len(grps_thisLineWords)): # Not same num 'words' as equivilent line # in 1st header: keep_checking = 0 else: keep_checking = \ check_boundary_lines_similar(grps_headLineWords, grps_thisLineWords) # Update cur_break for nxt line to check cur_break = cur_break + next_head if keep_checking: # Line is a header line: check next num_header_lines = num_header_lines + 1 empty_line = 0 return num_header_lines
python
def get_number_header_lines(docbody, page_break_posns): """Try to guess the number of header lines each page of a document has. The positions of the page breaks in the document are used to try to guess the number of header lines. @param docbody: (list) of strings - each string being a line in the document @param page_break_posns: (list) of integers - each integer is the position of a page break in the document. @return: (int) the number of lines that make up the header of each page. """ remaining_breaks = len(page_break_posns) - 1 num_header_lines = empty_line = 0 # pattern to search for a word in a line: p_wordSearch = re.compile(ur'([A-Za-z0-9-]+)', re.UNICODE) if remaining_breaks > 2: if remaining_breaks > 3: # Only check odd page headers next_head = 2 else: # Check headers on each page next_head = 1 keep_checking = 1 while keep_checking: cur_break = 1 if docbody[(page_break_posns[cur_break] + num_header_lines + 1)].isspace(): # this is a blank line empty_line = 1 if (page_break_posns[cur_break] + num_header_lines + 1) \ == (page_break_posns[(cur_break + 1)]): # Have reached next page-break: document has no # body - only head/footers! keep_checking = 0 grps_headLineWords = \ p_wordSearch.findall(docbody[(page_break_posns[cur_break] + num_header_lines + 1)]) cur_break = cur_break + next_head while (cur_break < remaining_breaks) and keep_checking: lineno = page_break_posns[cur_break] + num_header_lines + 1 if lineno >= len(docbody): keep_checking = 0 break grps_thisLineWords = \ p_wordSearch.findall(docbody[lineno]) if empty_line: if len(grps_thisLineWords) != 0: # This line should be empty, but isn't keep_checking = 0 else: if (len(grps_thisLineWords) == 0) or \ (len(grps_headLineWords) != len(grps_thisLineWords)): # Not same num 'words' as equivilent line # in 1st header: keep_checking = 0 else: keep_checking = \ check_boundary_lines_similar(grps_headLineWords, grps_thisLineWords) # Update cur_break for nxt line to check cur_break = cur_break + next_head if keep_checking: # Line is a header line: check next num_header_lines = num_header_lines + 1 empty_line = 0 return num_header_lines
[ "def", "get_number_header_lines", "(", "docbody", ",", "page_break_posns", ")", ":", "remaining_breaks", "=", "len", "(", "page_break_posns", ")", "-", "1", "num_header_lines", "=", "empty_line", "=", "0", "# pattern to search for a word in a line:", "p_wordSearch", "=", "re", ".", "compile", "(", "ur'([A-Za-z0-9-]+)'", ",", "re", ".", "UNICODE", ")", "if", "remaining_breaks", ">", "2", ":", "if", "remaining_breaks", ">", "3", ":", "# Only check odd page headers", "next_head", "=", "2", "else", ":", "# Check headers on each page", "next_head", "=", "1", "keep_checking", "=", "1", "while", "keep_checking", ":", "cur_break", "=", "1", "if", "docbody", "[", "(", "page_break_posns", "[", "cur_break", "]", "+", "num_header_lines", "+", "1", ")", "]", ".", "isspace", "(", ")", ":", "# this is a blank line", "empty_line", "=", "1", "if", "(", "page_break_posns", "[", "cur_break", "]", "+", "num_header_lines", "+", "1", ")", "==", "(", "page_break_posns", "[", "(", "cur_break", "+", "1", ")", "]", ")", ":", "# Have reached next page-break: document has no", "# body - only head/footers!", "keep_checking", "=", "0", "grps_headLineWords", "=", "p_wordSearch", ".", "findall", "(", "docbody", "[", "(", "page_break_posns", "[", "cur_break", "]", "+", "num_header_lines", "+", "1", ")", "]", ")", "cur_break", "=", "cur_break", "+", "next_head", "while", "(", "cur_break", "<", "remaining_breaks", ")", "and", "keep_checking", ":", "lineno", "=", "page_break_posns", "[", "cur_break", "]", "+", "num_header_lines", "+", "1", "if", "lineno", ">=", "len", "(", "docbody", ")", ":", "keep_checking", "=", "0", "break", "grps_thisLineWords", "=", "p_wordSearch", ".", "findall", "(", "docbody", "[", "lineno", "]", ")", "if", "empty_line", ":", "if", "len", "(", "grps_thisLineWords", ")", "!=", "0", ":", "# This line should be empty, but isn't", "keep_checking", "=", "0", "else", ":", "if", "(", "len", "(", "grps_thisLineWords", ")", "==", "0", ")", "or", "(", "len", "(", "grps_headLineWords", ")", "!=", "len", "(", "grps_thisLineWords", ")", ")", ":", "# Not same num 'words' as equivilent line", "# in 1st header:", "keep_checking", "=", "0", "else", ":", "keep_checking", "=", "check_boundary_lines_similar", "(", "grps_headLineWords", ",", "grps_thisLineWords", ")", "# Update cur_break for nxt line to check", "cur_break", "=", "cur_break", "+", "next_head", "if", "keep_checking", ":", "# Line is a header line: check next", "num_header_lines", "=", "num_header_lines", "+", "1", "empty_line", "=", "0", "return", "num_header_lines" ]
Try to guess the number of header lines each page of a document has. The positions of the page breaks in the document are used to try to guess the number of header lines. @param docbody: (list) of strings - each string being a line in the document @param page_break_posns: (list) of integers - each integer is the position of a page break in the document. @return: (int) the number of lines that make up the header of each page.
[ "Try", "to", "guess", "the", "number", "of", "header", "lines", "each", "page", "of", "a", "document", "has", ".", "The", "positions", "of", "the", "page", "breaks", "in", "the", "document", "are", "used", "to", "try", "to", "guess", "the", "number", "of", "header", "lines", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L254-L320
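The header-detection heuristic above compares the word groups found on the line following each page break. A minimal Python 3 sketch of that core idea (the sample docbody, form-feed markers, and the p_word name are hypothetical; the record's ur'' literal is Python 2 syntax, spelled as a plain raw string on Python 3):

```python
import re

# Hypothetical three-page document: "\f" marks a page break and each page
# opens with the same one-line running header.
docbody = [
    "\f", "Journal header 2017", "body text A",
    "\f", "Journal header 2018", "body text B",
    "\f", "Journal header 2019", "body text C",
]
page_break_posns = [i for i, line in enumerate(docbody) if line == "\f"]

p_word = re.compile(r'([A-Za-z0-9-]+)', re.UNICODE)  # py3 spelling of ur'...'
head_words = p_word.findall(docbody[page_break_posns[0] + 1])
for posn in page_break_posns[1:]:
    # A line is header-like when its word-group count matches page 1's header.
    print(len(p_word.findall(docbody[posn + 1])) == len(head_words))
```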
3,761
inspirehep/refextract
refextract/documents/text.py
get_number_footer_lines
def get_number_footer_lines(docbody, page_break_posns): """Try to guess the number of footer lines each page of a document has. The positions of the page breaks in the document are used to try to guess the number of footer lines. @param docbody: (list) of strings - each string being a line in the document @param page_break_posns: (list) of integers - each integer is the position of a page break in the document. @return: (int) the number of lines that make up the footer of each page. """ num_breaks = len(page_break_posns) num_footer_lines = 0 empty_line = 0 keep_checking = 1 p_wordSearch = re.compile(unicode(r'([A-Za-z0-9-]+)'), re.UNICODE) if num_breaks > 2: while keep_checking: cur_break = 1 if page_break_posns[cur_break] - num_footer_lines - 1 < 0 or \ page_break_posns[cur_break] - num_footer_lines - 1 > \ len(docbody) - 1: # Be sure that the docbody list boundary wasn't overstepped: break if docbody[(page_break_posns[cur_break] - num_footer_lines - 1)].isspace(): empty_line = 1 grps_headLineWords = \ p_wordSearch.findall(docbody[(page_break_posns[cur_break] - num_footer_lines - 1)]) cur_break = cur_break + 1 while (cur_break < num_breaks) and keep_checking: grps_thisLineWords = \ p_wordSearch.findall(docbody[(page_break_posns[cur_break] - num_footer_lines - 1)]) if empty_line: if len(grps_thisLineWords) != 0: # this line should be empty, but isn't keep_checking = 0 else: if (len(grps_thisLineWords) == 0) or \ (len(grps_headLineWords) != len(grps_thisLineWords)): # Not same num 'words' as equivilent line # in 1st footer: keep_checking = 0 else: keep_checking = \ check_boundary_lines_similar(grps_headLineWords, grps_thisLineWords) # Update cur_break for nxt line to check cur_break = cur_break + 1 if keep_checking: # Line is a footer line: check next num_footer_lines = num_footer_lines + 1 empty_line = 0 return num_footer_lines
python
def get_number_footer_lines(docbody, page_break_posns): """Try to guess the number of footer lines each page of a document has. The positions of the page breaks in the document are used to try to guess the number of footer lines. @param docbody: (list) of strings - each string being a line in the document @param page_break_posns: (list) of integers - each integer is the position of a page break in the document. @return: (int) the number of lines that make up the footer of each page. """ num_breaks = len(page_break_posns) num_footer_lines = 0 empty_line = 0 keep_checking = 1 p_wordSearch = re.compile(unicode(r'([A-Za-z0-9-]+)'), re.UNICODE) if num_breaks > 2: while keep_checking: cur_break = 1 if page_break_posns[cur_break] - num_footer_lines - 1 < 0 or \ page_break_posns[cur_break] - num_footer_lines - 1 > \ len(docbody) - 1: # Be sure that the docbody list boundary wasn't overstepped: break if docbody[(page_break_posns[cur_break] - num_footer_lines - 1)].isspace(): empty_line = 1 grps_headLineWords = \ p_wordSearch.findall(docbody[(page_break_posns[cur_break] - num_footer_lines - 1)]) cur_break = cur_break + 1 while (cur_break < num_breaks) and keep_checking: grps_thisLineWords = \ p_wordSearch.findall(docbody[(page_break_posns[cur_break] - num_footer_lines - 1)]) if empty_line: if len(grps_thisLineWords) != 0: # this line should be empty, but isn't keep_checking = 0 else: if (len(grps_thisLineWords) == 0) or \ (len(grps_headLineWords) != len(grps_thisLineWords)): # Not same num 'words' as equivilent line # in 1st footer: keep_checking = 0 else: keep_checking = \ check_boundary_lines_similar(grps_headLineWords, grps_thisLineWords) # Update cur_break for nxt line to check cur_break = cur_break + 1 if keep_checking: # Line is a footer line: check next num_footer_lines = num_footer_lines + 1 empty_line = 0 return num_footer_lines
[ "def", "get_number_footer_lines", "(", "docbody", ",", "page_break_posns", ")", ":", "num_breaks", "=", "len", "(", "page_break_posns", ")", "num_footer_lines", "=", "0", "empty_line", "=", "0", "keep_checking", "=", "1", "p_wordSearch", "=", "re", ".", "compile", "(", "unicode", "(", "r'([A-Za-z0-9-]+)'", ")", ",", "re", ".", "UNICODE", ")", "if", "num_breaks", ">", "2", ":", "while", "keep_checking", ":", "cur_break", "=", "1", "if", "page_break_posns", "[", "cur_break", "]", "-", "num_footer_lines", "-", "1", "<", "0", "or", "page_break_posns", "[", "cur_break", "]", "-", "num_footer_lines", "-", "1", ">", "len", "(", "docbody", ")", "-", "1", ":", "# Be sure that the docbody list boundary wasn't overstepped:", "break", "if", "docbody", "[", "(", "page_break_posns", "[", "cur_break", "]", "-", "num_footer_lines", "-", "1", ")", "]", ".", "isspace", "(", ")", ":", "empty_line", "=", "1", "grps_headLineWords", "=", "p_wordSearch", ".", "findall", "(", "docbody", "[", "(", "page_break_posns", "[", "cur_break", "]", "-", "num_footer_lines", "-", "1", ")", "]", ")", "cur_break", "=", "cur_break", "+", "1", "while", "(", "cur_break", "<", "num_breaks", ")", "and", "keep_checking", ":", "grps_thisLineWords", "=", "p_wordSearch", ".", "findall", "(", "docbody", "[", "(", "page_break_posns", "[", "cur_break", "]", "-", "num_footer_lines", "-", "1", ")", "]", ")", "if", "empty_line", ":", "if", "len", "(", "grps_thisLineWords", ")", "!=", "0", ":", "# this line should be empty, but isn't", "keep_checking", "=", "0", "else", ":", "if", "(", "len", "(", "grps_thisLineWords", ")", "==", "0", ")", "or", "(", "len", "(", "grps_headLineWords", ")", "!=", "len", "(", "grps_thisLineWords", ")", ")", ":", "# Not same num 'words' as equivilent line", "# in 1st footer:", "keep_checking", "=", "0", "else", ":", "keep_checking", "=", "check_boundary_lines_similar", "(", "grps_headLineWords", ",", "grps_thisLineWords", ")", "# Update cur_break for nxt line to check", "cur_break", "=", "cur_break", "+", "1", "if", "keep_checking", ":", "# Line is a footer line: check next", "num_footer_lines", "=", "num_footer_lines", "+", "1", "empty_line", "=", "0", "return", "num_footer_lines" ]
Try to guess the number of footer lines each page of a document has. The positions of the page breaks in the document are used to try to guess the number of footer lines. @param docbody: (list) of strings - each string being a line in the document @param page_break_posns: (list) of integers - each integer is the position of a page break in the document. @return: (int) the number of lines that make up the footer of each page.
[ "Try", "to", "guess", "the", "number", "of", "footer", "lines", "each", "page", "of", "a", "document", "has", ".", "The", "positions", "of", "the", "page", "breaks", "in", "the", "document", "are", "used", "to", "try", "to", "guess", "the", "number", "of", "footer", "lines", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L323-L377
3,762
inspirehep/refextract
refextract/documents/text.py
strip_headers_footers_pagebreaks
def strip_headers_footers_pagebreaks(docbody, page_break_posns, num_head_lines, num_foot_lines): """Remove page-break lines, header lines, and footer lines from the document. @param docbody: (list) of strings, whereby each string in the list is a line in the document. @param page_break_posns: (list) of integers, whereby each integer represents the index in docbody at which a page-break is found. @param num_head_lines: (int) the number of header lines each page in the document has. @param num_foot_lines: (int) the number of footer lines each page in the document has. @return: (list) of strings - the document body after the headers, footers, and page-break lines have been stripped from the list. """ num_breaks = len(page_break_posns) page_lens = [] for x in xrange(0, num_breaks): if x < num_breaks - 1: page_lens.append(page_break_posns[x + 1] - page_break_posns[x]) page_lens.sort() if (len(page_lens) > 0) and \ (num_head_lines + num_foot_lines + 1 < page_lens[0]): # Safe to chop hdrs & ftrs page_break_posns.reverse() first = 1 for i in xrange(0, len(page_break_posns)): # Unless this is the last page break, chop headers if not first: for dummy in xrange(1, num_head_lines + 1): docbody[page_break_posns[i] + 1:page_break_posns[i] + 2] = [] else: first = 0 # Chop page break itself docbody[page_break_posns[i]:page_break_posns[i] + 1] = [] # Chop footers (unless this is the first page break) if i != len(page_break_posns) - 1: for dummy in xrange(1, num_foot_lines + 1): docbody[page_break_posns[i] - num_foot_lines:page_break_posns[i] - num_foot_lines + 1] = [] return docbody
python
def strip_headers_footers_pagebreaks(docbody, page_break_posns, num_head_lines, num_foot_lines): """Remove page-break lines, header lines, and footer lines from the document. @param docbody: (list) of strings, whereby each string in the list is a line in the document. @param page_break_posns: (list) of integers, whereby each integer represents the index in docbody at which a page-break is found. @param num_head_lines: (int) the number of header lines each page in the document has. @param num_foot_lines: (int) the number of footer lines each page in the document has. @return: (list) of strings - the document body after the headers, footers, and page-break lines have been stripped from the list. """ num_breaks = len(page_break_posns) page_lens = [] for x in xrange(0, num_breaks): if x < num_breaks - 1: page_lens.append(page_break_posns[x + 1] - page_break_posns[x]) page_lens.sort() if (len(page_lens) > 0) and \ (num_head_lines + num_foot_lines + 1 < page_lens[0]): # Safe to chop hdrs & ftrs page_break_posns.reverse() first = 1 for i in xrange(0, len(page_break_posns)): # Unless this is the last page break, chop headers if not first: for dummy in xrange(1, num_head_lines + 1): docbody[page_break_posns[i] + 1:page_break_posns[i] + 2] = [] else: first = 0 # Chop page break itself docbody[page_break_posns[i]:page_break_posns[i] + 1] = [] # Chop footers (unless this is the first page break) if i != len(page_break_posns) - 1: for dummy in xrange(1, num_foot_lines + 1): docbody[page_break_posns[i] - num_foot_lines:page_break_posns[i] - num_foot_lines + 1] = [] return docbody
[ "def", "strip_headers_footers_pagebreaks", "(", "docbody", ",", "page_break_posns", ",", "num_head_lines", ",", "num_foot_lines", ")", ":", "num_breaks", "=", "len", "(", "page_break_posns", ")", "page_lens", "=", "[", "]", "for", "x", "in", "xrange", "(", "0", ",", "num_breaks", ")", ":", "if", "x", "<", "num_breaks", "-", "1", ":", "page_lens", ".", "append", "(", "page_break_posns", "[", "x", "+", "1", "]", "-", "page_break_posns", "[", "x", "]", ")", "page_lens", ".", "sort", "(", ")", "if", "(", "len", "(", "page_lens", ")", ">", "0", ")", "and", "(", "num_head_lines", "+", "num_foot_lines", "+", "1", "<", "page_lens", "[", "0", "]", ")", ":", "# Safe to chop hdrs & ftrs", "page_break_posns", ".", "reverse", "(", ")", "first", "=", "1", "for", "i", "in", "xrange", "(", "0", ",", "len", "(", "page_break_posns", ")", ")", ":", "# Unless this is the last page break, chop headers", "if", "not", "first", ":", "for", "dummy", "in", "xrange", "(", "1", ",", "num_head_lines", "+", "1", ")", ":", "docbody", "[", "page_break_posns", "[", "i", "]", "+", "1", ":", "page_break_posns", "[", "i", "]", "+", "2", "]", "=", "[", "]", "else", ":", "first", "=", "0", "# Chop page break itself", "docbody", "[", "page_break_posns", "[", "i", "]", ":", "page_break_posns", "[", "i", "]", "+", "1", "]", "=", "[", "]", "# Chop footers (unless this is the first page break)", "if", "i", "!=", "len", "(", "page_break_posns", ")", "-", "1", ":", "for", "dummy", "in", "xrange", "(", "1", ",", "num_foot_lines", "+", "1", ")", ":", "docbody", "[", "page_break_posns", "[", "i", "]", "-", "num_foot_lines", ":", "page_break_posns", "[", "i", "]", "-", "num_foot_lines", "+", "1", "]", "=", "[", "]", "return", "docbody" ]
Remove page-break lines, header lines, and footer lines from the document. @param docbody: (list) of strings, whereby each string in the list is a line in the document. @param page_break_posns: (list) of integers, whereby each integer represents the index in docbody at which a page-break is found. @param num_head_lines: (int) the number of header lines each page in the document has. @param num_foot_lines: (int) the number of footer lines each page in the document has. @return: (list) of strings - the document body after the headers, footers, and page-break lines have been stripped from the list.
[ "Remove", "page", "-", "break", "lines", "header", "lines", "and", "footer", "lines", "from", "the", "document", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L380-L424
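strip_headers_footers_pagebreaks deletes lines with empty-slice assignment and, by reversing page_break_posns first, works from the end of the list so the earlier break indices stay valid while lines are removed. A small standalone illustration of that idiom (the sample lines are invented):

```python
docbody = ["page-1 text", "footer", "\f", "header", "page-2 text"]

# Work at the highest indices first, exactly as the reversed loop does:
docbody[3:4] = []   # chop the header line that follows the break
docbody[2:3] = []   # chop the page-break line itself
docbody[1:2] = []   # chop the footer line that precedes it
print(docbody)      # ['page-1 text', 'page-2 text']
```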
3,763
inspirehep/refextract
refextract/documents/text.py
check_boundary_lines_similar
def check_boundary_lines_similar(l_1, l_2): """Compare two lists to see if their elements are roughly the same. @param l_1: (list) of strings. @param l_2: (list) of strings. @return: (int) 1/0. """ num_matches = 0 if (type(l_1) != list) or (type(l_2) != list) or (len(l_1) != len(l_2)): # these 'boundaries' are not similar return 0 num_elements = len(l_1) for i in xrange(0, num_elements): if l_1[i].isdigit() and l_2[i].isdigit(): # both lines are integers num_matches += 1 else: l1_str = l_1[i].lower() l2_str = l_2[i].lower() if (l1_str[0] == l2_str[0]) and \ (l1_str[len(l1_str) - 1] == l2_str[len(l2_str) - 1]): num_matches = num_matches + 1 if (len(l_1) == 0) or (float(num_matches) / float(len(l_1)) < 0.9): return 0 else: return 1
python
def check_boundary_lines_similar(l_1, l_2): """Compare two lists to see if their elements are roughly the same. @param l_1: (list) of strings. @param l_2: (list) of strings. @return: (int) 1/0. """ num_matches = 0 if (type(l_1) != list) or (type(l_2) != list) or (len(l_1) != len(l_2)): # these 'boundaries' are not similar return 0 num_elements = len(l_1) for i in xrange(0, num_elements): if l_1[i].isdigit() and l_2[i].isdigit(): # both lines are integers num_matches += 1 else: l1_str = l_1[i].lower() l2_str = l_2[i].lower() if (l1_str[0] == l2_str[0]) and \ (l1_str[len(l1_str) - 1] == l2_str[len(l2_str) - 1]): num_matches = num_matches + 1 if (len(l_1) == 0) or (float(num_matches) / float(len(l_1)) < 0.9): return 0 else: return 1
[ "def", "check_boundary_lines_similar", "(", "l_1", ",", "l_2", ")", ":", "num_matches", "=", "0", "if", "(", "type", "(", "l_1", ")", "!=", "list", ")", "or", "(", "type", "(", "l_2", ")", "!=", "list", ")", "or", "(", "len", "(", "l_1", ")", "!=", "len", "(", "l_2", ")", ")", ":", "# these 'boundaries' are not similar", "return", "0", "num_elements", "=", "len", "(", "l_1", ")", "for", "i", "in", "xrange", "(", "0", ",", "num_elements", ")", ":", "if", "l_1", "[", "i", "]", ".", "isdigit", "(", ")", "and", "l_2", "[", "i", "]", ".", "isdigit", "(", ")", ":", "# both lines are integers", "num_matches", "+=", "1", "else", ":", "l1_str", "=", "l_1", "[", "i", "]", ".", "lower", "(", ")", "l2_str", "=", "l_2", "[", "i", "]", ".", "lower", "(", ")", "if", "(", "l1_str", "[", "0", "]", "==", "l2_str", "[", "0", "]", ")", "and", "(", "l1_str", "[", "len", "(", "l1_str", ")", "-", "1", "]", "==", "l2_str", "[", "len", "(", "l2_str", ")", "-", "1", "]", ")", ":", "num_matches", "=", "num_matches", "+", "1", "if", "(", "len", "(", "l_1", ")", "==", "0", ")", "or", "(", "float", "(", "num_matches", ")", "/", "float", "(", "len", "(", "l_1", ")", ")", "<", "0.9", ")", ":", "return", "0", "else", ":", "return", "1" ]
Compare two lists to see if their elements are roughly the same. @param l_1: (list) of strings. @param l_2: (list) of strings. @return: (int) 1/0.
[ "Compare", "two", "lists", "to", "see", "if", "their", "elements", "are", "roughly", "the", "same", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L427-L452
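check_boundary_lines_similar uses xrange, so it only runs under Python 2. A Python 3 paraphrase of its heuristic, assuming the same rules: all-digit tokens (typically page numbers) always match, other tokens must agree on their first and last characters case-insensitively, and at least 90% of positions must match. The non-empty-token assumption holds because the word-group regex only produces non-empty strings:

```python
def boundary_words_similar(l_1, l_2, threshold=0.9):
    # Mismatched lengths or empty lists mean the boundaries are not similar.
    if len(l_1) != len(l_2) or not l_1:
        return False
    matches = sum(
        1 for a, b in zip(l_1, l_2)
        if (a.isdigit() and b.isdigit())
        or (a[0].lower() == b[0].lower() and a[-1].lower() == b[-1].lower())
    )
    return matches / len(l_1) >= threshold

print(boundary_words_similar(["Nucl", "Phys", "B", "17"], ["Nucl", "Phys", "B", "18"]))  # True
print(boundary_words_similar(["Nucl", "Phys", "B", "17"], ["Acta", "Phys", "C", "18"]))  # False
```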
3,764
inspirehep/refextract
refextract/references/kbs.py
make_cache_key
def make_cache_key(custom_kbs_files=None):
    """Create a cache key for kbs cache instances

    This function generates a unique key for a given set of arguments.

    The files dictionary is transformed like this:
    {'journal': '/var/journal.kb', 'books': '/var/books.kb'}
    to
    "journal=/var/journal.kb;books=/var/books.kb"

    Then _inspire is appended if we are an INSPIRE site.
    """
    if custom_kbs_files:
        serialized_args = ('%s=%s' % v for v in iteritems(custom_kbs_files))
        serialized_args = ';'.join(serialized_args)
    else:
        serialized_args = "default"
    cache_key = md5(serialized_args).digest()
    return cache_key
python
def make_cache_key(custom_kbs_files=None):
    """Create a cache key for kbs cache instances

    This function generates a unique key for a given set of arguments.

    The files dictionary is transformed like this:
    {'journal': '/var/journal.kb', 'books': '/var/books.kb'}
    to
    "journal=/var/journal.kb;books=/var/books.kb"

    Then _inspire is appended if we are an INSPIRE site.
    """
    if custom_kbs_files:
        serialized_args = ('%s=%s' % v for v in iteritems(custom_kbs_files))
        serialized_args = ';'.join(serialized_args)
    else:
        serialized_args = "default"
    cache_key = md5(serialized_args).digest()
    return cache_key
[ "def", "make_cache_key", "(", "custom_kbs_files", "=", "None", ")", ":", "if", "custom_kbs_files", ":", "serialized_args", "=", "(", "'%s=%s'", "%", "v", "for", "v", "in", "iteritems", "(", "custom_kbs_files", ")", ")", "serialized_args", "=", "';'", ".", "join", "(", "serialized_args", ")", "else", ":", "serialized_args", "=", "\"default\"", "cache_key", "=", "md5", "(", "serialized_args", ")", ".", "digest", "(", ")", "return", "cache_key" ]
Create a cache key for kbs cache instances

This function generates a unique key for a given set of arguments.

The files dictionary is transformed like this:
{'journal': '/var/journal.kb', 'books': '/var/books.kb'}
to
"journal=/var/journal.kb;books=/var/books.kb"

Then _inspire is appended if we are an INSPIRE site.
[ "Create", "cache", "key", "for", "kbs", "caches", "instances" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/kbs.py#L112-L130
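A sketch of the key derivation on Python 3 (the record's code targets Python 2: iteritems comes from py2 dicts or six, and md5() there accepts a str, whereas Python 3's hashlib requires bytes, hence the explicit encode):

```python
from hashlib import md5

custom_kbs_files = {"journal": "/var/journal.kb", "books": "/var/books.kb"}
# Reproduces the "key=value;key=value" serialization from the docstring:
serialized_args = ";".join("%s=%s" % item for item in custom_kbs_files.items())
cache_key = md5(serialized_args.encode("utf-8")).digest()
print(serialized_args)   # journal=/var/journal.kb;books=/var/books.kb
print(cache_key.hex())
```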
3,765
inspirehep/refextract
refextract/references/kbs.py
create_institute_numeration_group_regexp_pattern
def create_institute_numeration_group_regexp_pattern(patterns):
    """Using a list of regexp patterns for recognising numeration patterns
    for institute preprint references, ordered by length - longest to
    shortest - create a grouped 'OR' of these patterns, ready to be used
    in a bigger regexp.
    @param patterns: (list) of strings. All of the numeration regexp
    patterns for recognising an institute's preprint reference styles.
    @return: (string) a grouped 'OR' regexp pattern of the numeration
    patterns. E.g.:
        (?P<num>[12]\d{3} \d\d\d|\d\d \d\d\d|[A-Za-z] \d\d\d)
    """
    patterns_list = [institute_num_pattern_to_regex(p[1]) for p in patterns]
    grouped_numeration_pattern = u"(?P<numn>%s)" % u'|'.join(patterns_list)
    return grouped_numeration_pattern
python
def create_institute_numeration_group_regexp_pattern(patterns):
    """Using a list of regexp patterns for recognising numeration patterns
    for institute preprint references, ordered by length - longest to
    shortest - create a grouped 'OR' of these patterns, ready to be used
    in a bigger regexp.
    @param patterns: (list) of strings. All of the numeration regexp
    patterns for recognising an institute's preprint reference styles.
    @return: (string) a grouped 'OR' regexp pattern of the numeration
    patterns. E.g.:
        (?P<num>[12]\d{3} \d\d\d|\d\d \d\d\d|[A-Za-z] \d\d\d)
    """
    patterns_list = [institute_num_pattern_to_regex(p[1]) for p in patterns]
    grouped_numeration_pattern = u"(?P<numn>%s)" % u'|'.join(patterns_list)
    return grouped_numeration_pattern
[ "def", "create_institute_numeration_group_regexp_pattern", "(", "patterns", ")", ":", "patterns_list", "=", "[", "institute_num_pattern_to_regex", "(", "p", "[", "1", "]", ")", "for", "p", "in", "patterns", "]", "grouped_numeration_pattern", "=", "u\"(?P<numn>%s)\"", "%", "u'|'", ".", "join", "(", "patterns_list", ")", "return", "grouped_numeration_pattern" ]
Using a list of regexp patterns for recognising numeration patterns for institute preprint references, ordered by length - longest to shortest - create a grouped 'OR' of these patterns, ready to be used in a bigger regexp. @param patterns: (list) of strings. All of the numeration regexp patterns for recognising an institute's preprint reference styles. @return: (string) a grouped 'OR' regexp pattern of the numeration patterns. E.g.: (?P<num>[12]\d{3} \d\d\d|\d\d \d\d\d|[A-Za-z] \d\d\d)
[ "Using", "a", "list", "of", "regexp", "patterns", "for", "recognising", "numeration", "patterns", "for", "institute", "preprint", "references", "ordered", "by", "length", "-", "longest", "to", "shortest", "-", "create", "a", "grouped", "OR", "or", "of", "these", "patterns", "ready", "to", "be", "used", "in", "a", "bigger", "regexp", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/kbs.py#L161-L174
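Note that although the docstring describes patterns as a list of strings, the code indexes p[1], so it actually receives tuples (as produced by order_reportnum_patterns_bylen). A sketch of the grouped alternation, with a purely hypothetical stand-in for institute_num_pattern_to_regex, which is not shown in this record:

```python
import re

def fake_num_pattern_to_regex(pattern):
    # Hypothetical stand-in for institute_num_pattern_to_regex():
    # map '9' to a digit and 'a' to a letter.
    return pattern.replace("9", r"\d").replace("a", "[A-Za-z]")

# (length, pattern) tuples, longest first, so the alternation prefers
# the longest numeration match:
patterns = [(6, "99-999"), (5, "a-999"), (3, "999")]
alternation = u"(?P<numn>%s)" % u"|".join(
    fake_num_pattern_to_regex(p[1]) for p in patterns)
print(alternation)   # (?P<numn>\d\d-\d\d\d|[A-Za-z]-\d\d\d|\d\d\d)
print(re.search(alternation, "report CERN-TH 97-123").group("numn"))  # 97-123
```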
3,766
inspirehep/refextract
refextract/references/kbs.py
build_reportnum_kb
def build_reportnum_kb(fpath):
    """Given the path to a knowledge base file containing the details
    of institutes and the patterns that their preprint report
    numbering schemes take, create a dictionary of regexp search
    patterns to recognise these preprint references in reference
    lines, and a dictionary of replacements for non-standard preprint
    categories in these references.
    The knowledge base file should consist only of lines that take one
    of the following 3 formats:
    #####Institute Name####
    (the name of the institute to which the preprint reference patterns
    belong, e.g. '#####LANL#####', surrounded by 5 # on either side.)
    <pattern>
    (numeration patterns for an institute's preprints, surrounded by
    < and >.)
    seek-term --- replace-term
    (i.e. a seek phrase on the left hand side, a replace phrase on the
    right hand side, with the two phrases being separated by 3 hyphens.)
    E.g.: ASTRO PH ---astro-ph
    The left-hand side term is a non-standard version of the preprint
    reference category; the right-hand side term is the standard version.
    If the KB file cannot be read from, or an unexpected line is
    encountered in the KB, an error message is output to standard error
    and execution is halted with an error-code 0.
    @param fpath: (string) the path to the knowledge base file.
    @return: (tuple) containing 2 dictionaries. The first contains regexp
    search patterns used to identify preprint references in a line. This
    dictionary is keyed by a tuple containing the line number of the
    pattern in the KB and the non-standard category string.
    E.g.: (3, 'ASTRO PH').
    The second dictionary contains the standardised category string,
    and is keyed by the non-standard category string. E.g.: 'astro-ph'.
    """
    def _add_institute_preprint_patterns(preprint_classifications,
                                         preprint_numeration_ptns,
                                         preprint_reference_search_regexp_patterns,
                                         standardised_preprint_reference_categories,
                                         kb_line_num):
        """For a list of preprint category strings and preprint numeration
        patterns for a given institute, create the regexp patterns for
        each of the preprint types. Add the regexp patterns to the
        dictionary of search patterns
        (preprint_reference_search_regexp_patterns), keyed by the line
        number of the institute in the KB, and the preprint category
        search string. Also add the standardised preprint category string
        to another dictionary, keyed by the line number of its position
        in the KB and its non-standardised version.
        @param preprint_classifications: (list) of tuples whereby each tuple
        contains a preprint category search string and the line number of
        the name of institute to which it belongs in the KB.
        E.g.: (45, 'ASTRO PH').
        @param preprint_numeration_ptns: (list) of preprint reference
        numeration search patterns (strings)
        @param preprint_reference_search_regexp_patterns: (dictionary) of
        regexp patterns used to search in document lines.
        @param standardised_preprint_reference_categories: (dictionary)
        containing the standardised strings for preprint reference
        categories. (E.g. 'astro-ph'.)
        @param kb_line_num: (integer) - the line number in the KB at
        which a given institute name was found.
        @return: None
        """
        if preprint_classifications and preprint_numeration_ptns:
            # the previous institute had both numeration styles and categories
            # for preprint references.
            # build regexps and add them for this institute:
            # First, order the numeration styles by line-length, and build a
            # grouped regexp for recognising numeration:
            ordered_patterns = \
                order_reportnum_patterns_bylen(preprint_numeration_ptns)
            # create a grouped regexp for numeration part of
            # preprint reference:
            numeration_regexp = \
                create_institute_numeration_group_regexp_pattern(
                    ordered_patterns)
            # for each "classification" part of preprint references, create a
            # complete regex:
            # will be in the style "(categ)-(numatn1|numatn2|numatn3|...)"
            for classification in preprint_classifications:
                search_pattern_str = ur'(?:^|[^a-zA-Z0-9\/\.\-])([\[\(]?(?P<categ>' \
                    + classification[0].strip() + u')' \
                    + numeration_regexp + ur'[\]\)]?)'
                re_search_pattern = re.compile(search_pattern_str, re.UNICODE)
                preprint_reference_search_regexp_patterns[(kb_line_num,
                                                           classification[0])] =\
                    re_search_pattern
                standardised_preprint_reference_categories[(kb_line_num,
                                                            classification[0])] =\
                    classification[1]

    preprint_reference_search_regexp_patterns = {}   # a dictionary of patterns
                                                     # used to recognise
                                                     # categories of preprints
                                                     # as used by various
                                                     # institutes
    standardised_preprint_reference_categories = {}  # dictionary of
                                                     # standardised category
                                                     # strings for preprint cats
    current_institute_preprint_classifications = []  # list of tuples containing
                                                     # preprint categories in
                                                     # their raw & standardised
                                                     # forms, as read from KB
    current_institute_numerations = []               # list of preprint
                                                     # numeration patterns, as
                                                     # read from the KB

    # pattern to recognise an institute name line in the KB
    re_institute_name = re.compile(ur'^\*{5}\s*(.+)\s*\*{5}$', re.UNICODE)
    # pattern to recognise an institute preprint categ line in the KB
    re_preprint_classification = \
        re.compile(ur'^\s*(\w.*)\s*---\s*(\w.*)\s*$', re.UNICODE)
    # pattern to recognise a preprint numeration-style line in KB
    re_numeration_pattern = re.compile(ur'^\<(.+)\>$', re.UNICODE)

    kb_line_num = 0  # when making the dictionary of patterns, which is
                     # keyed by the category search string, this counter
                     # will ensure that patterns in the dictionary are not
                     # overwritten if 2 institutes have the same category
                     # styles.

    with file_resolving(fpath) as fh:
        for rawline in fh:
            if rawline.startswith('#'):
                continue
            kb_line_num += 1
            m_institute_name = re_institute_name.search(rawline)
            if m_institute_name:
                # This KB line is the name of an institute
                # append the last institute's pattern list to the list of
                # institutes:
                _add_institute_preprint_patterns(current_institute_preprint_classifications,
                                                 current_institute_numerations,
                                                 preprint_reference_search_regexp_patterns,
                                                 standardised_preprint_reference_categories,
                                                 kb_line_num)
                # Now start a new dictionary to contain the search patterns
                # for this institute:
                current_institute_preprint_classifications = []
                current_institute_numerations = []
                # move on to the next line
                continue
            m_preprint_classification = \
                re_preprint_classification.search(rawline)
            if m_preprint_classification:
                # This KB line contains a preprint classification for
                # the current institute
                try:
                    current_institute_preprint_classifications.append((m_preprint_classification.group(1),
                                                                       m_preprint_classification.group(2)))
                except (AttributeError, NameError):
                    # didn't match this line correctly - skip it
                    pass
                # move on to the next line
                continue
            m_numeration_pattern = re_numeration_pattern.search(rawline)
            if m_numeration_pattern:
                # This KB line contains a preprint item numeration pattern
                # for the current institute
                try:
                    current_institute_numerations.append(
                        m_numeration_pattern.group(1))
                except (AttributeError, NameError):
                    # didn't match the numeration pattern correctly - skip it
                    pass
                continue
    _add_institute_preprint_patterns(current_institute_preprint_classifications,
                                     current_institute_numerations,
                                     preprint_reference_search_regexp_patterns,
                                     standardised_preprint_reference_categories,
                                     kb_line_num)
    # return the preprint reference patterns and the replacement strings
    # for non-standard categ-strings:
    return (preprint_reference_search_regexp_patterns,
            standardised_preprint_reference_categories)
python
def build_reportnum_kb(fpath):
    """Given the path to a knowledge base file containing the details
    of institutes and the patterns that their preprint report
    numbering schemes take, create a dictionary of regexp search
    patterns to recognise these preprint references in reference
    lines, and a dictionary of replacements for non-standard preprint
    categories in these references.
    The knowledge base file should consist only of lines that take one
    of the following 3 formats:
    #####Institute Name####
    (the name of the institute to which the preprint reference patterns
    belong, e.g. '#####LANL#####', surrounded by 5 # on either side.)
    <pattern>
    (numeration patterns for an institute's preprints, surrounded by
    < and >.)
    seek-term --- replace-term
    (i.e. a seek phrase on the left hand side, a replace phrase on the
    right hand side, with the two phrases being separated by 3 hyphens.)
    E.g.: ASTRO PH ---astro-ph
    The left-hand side term is a non-standard version of the preprint
    reference category; the right-hand side term is the standard version.
    If the KB file cannot be read from, or an unexpected line is
    encountered in the KB, an error message is output to standard error
    and execution is halted with an error-code 0.
    @param fpath: (string) the path to the knowledge base file.
    @return: (tuple) containing 2 dictionaries. The first contains regexp
    search patterns used to identify preprint references in a line. This
    dictionary is keyed by a tuple containing the line number of the
    pattern in the KB and the non-standard category string.
    E.g.: (3, 'ASTRO PH').
    The second dictionary contains the standardised category string,
    and is keyed by the non-standard category string. E.g.: 'astro-ph'.
    """
    def _add_institute_preprint_patterns(preprint_classifications,
                                         preprint_numeration_ptns,
                                         preprint_reference_search_regexp_patterns,
                                         standardised_preprint_reference_categories,
                                         kb_line_num):
        """For a list of preprint category strings and preprint numeration
        patterns for a given institute, create the regexp patterns for
        each of the preprint types. Add the regexp patterns to the
        dictionary of search patterns
        (preprint_reference_search_regexp_patterns), keyed by the line
        number of the institute in the KB, and the preprint category
        search string. Also add the standardised preprint category string
        to another dictionary, keyed by the line number of its position
        in the KB and its non-standardised version.
        @param preprint_classifications: (list) of tuples whereby each tuple
        contains a preprint category search string and the line number of
        the name of institute to which it belongs in the KB.
        E.g.: (45, 'ASTRO PH').
        @param preprint_numeration_ptns: (list) of preprint reference
        numeration search patterns (strings)
        @param preprint_reference_search_regexp_patterns: (dictionary) of
        regexp patterns used to search in document lines.
        @param standardised_preprint_reference_categories: (dictionary)
        containing the standardised strings for preprint reference
        categories. (E.g. 'astro-ph'.)
        @param kb_line_num: (integer) - the line number in the KB at
        which a given institute name was found.
        @return: None
        """
        if preprint_classifications and preprint_numeration_ptns:
            # the previous institute had both numeration styles and categories
            # for preprint references.
            # build regexps and add them for this institute:
            # First, order the numeration styles by line-length, and build a
            # grouped regexp for recognising numeration:
            ordered_patterns = \
                order_reportnum_patterns_bylen(preprint_numeration_ptns)
            # create a grouped regexp for numeration part of
            # preprint reference:
            numeration_regexp = \
                create_institute_numeration_group_regexp_pattern(
                    ordered_patterns)
            # for each "classification" part of preprint references, create a
            # complete regex:
            # will be in the style "(categ)-(numatn1|numatn2|numatn3|...)"
            for classification in preprint_classifications:
                search_pattern_str = ur'(?:^|[^a-zA-Z0-9\/\.\-])([\[\(]?(?P<categ>' \
                    + classification[0].strip() + u')' \
                    + numeration_regexp + ur'[\]\)]?)'
                re_search_pattern = re.compile(search_pattern_str, re.UNICODE)
                preprint_reference_search_regexp_patterns[(kb_line_num,
                                                           classification[0])] =\
                    re_search_pattern
                standardised_preprint_reference_categories[(kb_line_num,
                                                            classification[0])] =\
                    classification[1]

    preprint_reference_search_regexp_patterns = {}   # a dictionary of patterns
                                                     # used to recognise
                                                     # categories of preprints
                                                     # as used by various
                                                     # institutes
    standardised_preprint_reference_categories = {}  # dictionary of
                                                     # standardised category
                                                     # strings for preprint cats
    current_institute_preprint_classifications = []  # list of tuples containing
                                                     # preprint categories in
                                                     # their raw & standardised
                                                     # forms, as read from KB
    current_institute_numerations = []               # list of preprint
                                                     # numeration patterns, as
                                                     # read from the KB

    # pattern to recognise an institute name line in the KB
    re_institute_name = re.compile(ur'^\*{5}\s*(.+)\s*\*{5}$', re.UNICODE)
    # pattern to recognise an institute preprint categ line in the KB
    re_preprint_classification = \
        re.compile(ur'^\s*(\w.*)\s*---\s*(\w.*)\s*$', re.UNICODE)
    # pattern to recognise a preprint numeration-style line in KB
    re_numeration_pattern = re.compile(ur'^\<(.+)\>$', re.UNICODE)

    kb_line_num = 0  # when making the dictionary of patterns, which is
                     # keyed by the category search string, this counter
                     # will ensure that patterns in the dictionary are not
                     # overwritten if 2 institutes have the same category
                     # styles.

    with file_resolving(fpath) as fh:
        for rawline in fh:
            if rawline.startswith('#'):
                continue
            kb_line_num += 1
            m_institute_name = re_institute_name.search(rawline)
            if m_institute_name:
                # This KB line is the name of an institute
                # append the last institute's pattern list to the list of
                # institutes:
                _add_institute_preprint_patterns(current_institute_preprint_classifications,
                                                 current_institute_numerations,
                                                 preprint_reference_search_regexp_patterns,
                                                 standardised_preprint_reference_categories,
                                                 kb_line_num)
                # Now start a new dictionary to contain the search patterns
                # for this institute:
                current_institute_preprint_classifications = []
                current_institute_numerations = []
                # move on to the next line
                continue
            m_preprint_classification = \
                re_preprint_classification.search(rawline)
            if m_preprint_classification:
                # This KB line contains a preprint classification for
                # the current institute
                try:
                    current_institute_preprint_classifications.append((m_preprint_classification.group(1),
                                                                       m_preprint_classification.group(2)))
                except (AttributeError, NameError):
                    # didn't match this line correctly - skip it
                    pass
                # move on to the next line
                continue
            m_numeration_pattern = re_numeration_pattern.search(rawline)
            if m_numeration_pattern:
                # This KB line contains a preprint item numeration pattern
                # for the current institute
                try:
                    current_institute_numerations.append(
                        m_numeration_pattern.group(1))
                except (AttributeError, NameError):
                    # didn't match the numeration pattern correctly - skip it
                    pass
                continue
    _add_institute_preprint_patterns(current_institute_preprint_classifications,
                                     current_institute_numerations,
                                     preprint_reference_search_regexp_patterns,
                                     standardised_preprint_reference_categories,
                                     kb_line_num)
    # return the preprint reference patterns and the replacement strings
    # for non-standard categ-strings:
    return (preprint_reference_search_regexp_patterns,
            standardised_preprint_reference_categories)
[ "def", "build_reportnum_kb", "(", "fpath", ")", ":", "def", "_add_institute_preprint_patterns", "(", "preprint_classifications", ",", "preprint_numeration_ptns", ",", "preprint_reference_search_regexp_patterns", ",", "standardised_preprint_reference_categories", ",", "kb_line_num", ")", ":", "\"\"\"For a list of preprint category strings and preprint numeration\n patterns for a given institute, create the regexp patterns for\n each of the preprint types. Add the regexp patterns to the\n dictionary of search patterns\n (preprint_reference_search_regexp_patterns), keyed by the line\n number of the institute in the KB, and the preprint category\n search string. Also add the standardised preprint category string\n to another dictionary, keyed by the line number of its position\n in the KB and its non-standardised version.\n @param preprint_classifications: (list) of tuples whereby each tuple\n contains a preprint category search string and the line number of\n the name of institute to which it belongs in the KB.\n E.g.: (45, 'ASTRO PH').\n @param preprint_numeration_ptns: (list) of preprint reference\n numeration search patterns (strings)\n @param preprint_reference_search_regexp_patterns: (dictionary) of\n regexp patterns used to search in document lines.\n @param standardised_preprint_reference_categories: (dictionary)\n containing the standardised strings for preprint reference\n categories. (E.g. 'astro-ph'.)\n @param kb_line_num: (integer) - the line number int the KB at\n which a given institute name was found.\n @return: None\n \"\"\"", "if", "preprint_classifications", "and", "preprint_numeration_ptns", ":", "# the previous institute had both numeration styles and categories", "# for preprint references.", "# build regexps and add them for this institute:", "# First, order the numeration styles by line-length, and build a", "# grouped regexp for recognising numeration:", "ordered_patterns", "=", "order_reportnum_patterns_bylen", "(", "preprint_numeration_ptns", ")", "# create a grouped regexp for numeration part of", "# preprint reference:", "numeration_regexp", "=", "create_institute_numeration_group_regexp_pattern", "(", "ordered_patterns", ")", "# for each \"classification\" part of preprint references, create a", "# complete regex:", "# will be in the style \"(categ)-(numatn1|numatn2|numatn3|...)\"", "for", "classification", "in", "preprint_classifications", ":", "search_pattern_str", "=", "ur'(?:^|[^a-zA-Z0-9\\/\\.\\-])([\\[\\(]?(?P<categ>'", "+", "classification", "[", "0", "]", ".", "strip", "(", ")", "+", "u')'", "+", "numeration_regexp", "+", "ur'[\\]\\)]?)'", "re_search_pattern", "=", "re", ".", "compile", "(", "search_pattern_str", ",", "re", ".", "UNICODE", ")", "preprint_reference_search_regexp_patterns", "[", "(", "kb_line_num", ",", "classification", "[", "0", "]", ")", "]", "=", "re_search_pattern", "standardised_preprint_reference_categories", "[", "(", "kb_line_num", ",", "classification", "[", "0", "]", ")", "]", "=", "classification", "[", "1", "]", "preprint_reference_search_regexp_patterns", "=", "{", "}", "# a dictionary of patterns", "# used to recognise", "# categories of preprints", "# as used by various", "# institutes", "standardised_preprint_reference_categories", "=", "{", "}", "# dictionary of", "# standardised category", "# strings for preprint cats", "current_institute_preprint_classifications", "=", "[", "]", "# list of tuples containing", "# preprint categories in", "# their raw & standardised", "# forms, as read from KB", 
"current_institute_numerations", "=", "[", "]", "# list of preprint", "# numeration patterns, as", "# read from the KB", "# pattern to recognise an institute name line in the KB", "re_institute_name", "=", "re", ".", "compile", "(", "ur'^\\*{5}\\s*(.+)\\s*\\*{5}$'", ",", "re", ".", "UNICODE", ")", "# pattern to recognise an institute preprint categ line in the KB", "re_preprint_classification", "=", "re", ".", "compile", "(", "ur'^\\s*(\\w.*)\\s*---\\s*(\\w.*)\\s*$'", ",", "re", ".", "UNICODE", ")", "# pattern to recognise a preprint numeration-style line in KB", "re_numeration_pattern", "=", "re", ".", "compile", "(", "ur'^\\<(.+)\\>$'", ",", "re", ".", "UNICODE", ")", "kb_line_num", "=", "0", "# when making the dictionary of patterns, which is", "# keyed by the category search string, this counter", "# will ensure that patterns in the dictionary are not", "# overwritten if 2 institutes have the same category", "# styles.", "with", "file_resolving", "(", "fpath", ")", "as", "fh", ":", "for", "rawline", "in", "fh", ":", "if", "rawline", ".", "startswith", "(", "'#'", ")", ":", "continue", "kb_line_num", "+=", "1", "m_institute_name", "=", "re_institute_name", ".", "search", "(", "rawline", ")", "if", "m_institute_name", ":", "# This KB line is the name of an institute", "# append the last institute's pattern list to the list of", "# institutes:", "_add_institute_preprint_patterns", "(", "current_institute_preprint_classifications", ",", "current_institute_numerations", ",", "preprint_reference_search_regexp_patterns", ",", "standardised_preprint_reference_categories", ",", "kb_line_num", ")", "# Now start a new dictionary to contain the search patterns", "# for this institute:", "current_institute_preprint_classifications", "=", "[", "]", "current_institute_numerations", "=", "[", "]", "# move on to the next line", "continue", "m_preprint_classification", "=", "re_preprint_classification", ".", "search", "(", "rawline", ")", "if", "m_preprint_classification", ":", "# This KB line contains a preprint classification for", "# the current institute", "try", ":", "current_institute_preprint_classifications", ".", "append", "(", "(", "m_preprint_classification", ".", "group", "(", "1", ")", ",", "m_preprint_classification", ".", "group", "(", "2", ")", ")", ")", "except", "(", "AttributeError", ",", "NameError", ")", ":", "# didn't match this line correctly - skip it", "pass", "# move on to the next line", "continue", "m_numeration_pattern", "=", "re_numeration_pattern", ".", "search", "(", "rawline", ")", "if", "m_numeration_pattern", ":", "# This KB line contains a preprint item numeration pattern", "# for the current institute", "try", ":", "current_institute_numerations", ".", "append", "(", "m_numeration_pattern", ".", "group", "(", "1", ")", ")", "except", "(", "AttributeError", ",", "NameError", ")", ":", "# didn't match the numeration pattern correctly - skip it", "pass", "continue", "_add_institute_preprint_patterns", "(", "current_institute_preprint_classifications", ",", "current_institute_numerations", ",", "preprint_reference_search_regexp_patterns", ",", "standardised_preprint_reference_categories", ",", "kb_line_num", ")", "# return the preprint reference patterns and the replacement strings", "# for non-standard categ-strings:", "return", "(", "preprint_reference_search_regexp_patterns", ",", "standardised_preprint_reference_categories", ")" ]
Given the path to a knowledge base file containing the details of institutes and the patterns that their preprint report numbering schemes take, create a dictionary of regexp search patterns to recognise these preprint references in reference lines, and a dictionary of replacements for non-standard preprint categories in these references. The knowledge base file should consist only of lines that take one of the following 3 formats: #####Institute Name#### (the name of the institute to which the preprint reference patterns belong, e.g. '#####LANL#####', surrounded by 5 # on either side.) <pattern> (numeration patterns for an institute's preprints, surrounded by < and >.) seek-term --- replace-term (i.e. a seek phrase on the left hand side, a replace phrase on the right hand side, with the two phrases being separated by 3 hyphens.) E.g.: ASTRO PH ---astro-ph The left-hand side term is a non-standard version of the preprint reference category; the right-hand side term is the standard version. If the KB file cannot be read from, or an unexpected line is encountered in the KB, an error message is output to standard error and execution is halted with an error-code 0. @param fpath: (string) the path to the knowledge base file. @return: (tuple) containing 2 dictionaries. The first contains regexp search patterns used to identify preprint references in a line. This dictionary is keyed by a tuple containing the line number of the pattern in the KB and the non-standard category string. E.g.: (3, 'ASTRO PH'). The second dictionary contains the standardised category string, and is keyed by the non-standard category string. E.g.: 'astro-ph'.
[ "Given", "the", "path", "to", "a", "knowledge", "base", "file", "containing", "the", "details", "of", "institutes", "and", "the", "patterns", "that", "their", "preprint", "report", "numbering", "schemes", "take", "create", "a", "dictionary", "of", "regexp", "search", "patterns", "to", "recognise", "these", "preprint", "references", "in", "reference", "lines", "and", "a", "dictionary", "of", "replacements", "for", "non", "-", "standard", "preprint", "categories", "in", "these", "references", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/kbs.py#L222-L415
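One detail worth noticing in this record: the outer docstring describes institute lines fenced by '#' characters, but the parser skips any line starting with '#' as a comment, and re_institute_name actually matches five asterisks on each side. A KB snippet that the parser shown here would accept might therefore look like this (the numeration patterns themselves are invented):

```
*****CERN*****
<yy-999>
<yyyy-999>
ASTRO PH        ---astro-ph
ATL PHYS INT    ---ATL-PHYS-INT
```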
3,767
inspirehep/refextract
refextract/references/kbs.py
_cmp_bystrlen_reverse
def _cmp_bystrlen_reverse(a, b): """A private "cmp" function to be used by the "sort" function of a list when ordering the titles found in a knowledge base by string- length - LONGEST -> SHORTEST. @param a: (string) @param b: (string) @return: (integer) - 0 if len(a) == len(b); 1 if len(a) < len(b); -1 if len(a) > len(b); """ if len(a) > len(b): return -1 elif len(a) < len(b): return 1 else: return 0
python
def _cmp_bystrlen_reverse(a, b): """A private "cmp" function to be used by the "sort" function of a list when ordering the titles found in a knowledge base by string- length - LONGEST -> SHORTEST. @param a: (string) @param b: (string) @return: (integer) - 0 if len(a) == len(b); 1 if len(a) < len(b); -1 if len(a) > len(b); """ if len(a) > len(b): return -1 elif len(a) < len(b): return 1 else: return 0
[ "def", "_cmp_bystrlen_reverse", "(", "a", ",", "b", ")", ":", "if", "len", "(", "a", ")", ">", "len", "(", "b", ")", ":", "return", "-", "1", "elif", "len", "(", "a", ")", "<", "len", "(", "b", ")", ":", "return", "1", "else", ":", "return", "0" ]
A private "cmp" function to be used by the "sort" function of a list when ordering the titles found in a knowledge base by string- length - LONGEST -> SHORTEST. @param a: (string) @param b: (string) @return: (integer) - 0 if len(a) == len(b); 1 if len(a) < len(b); -1 if len(a) > len(b);
[ "A", "private", "cmp", "function", "to", "be", "used", "by", "the", "sort", "function", "of", "a", "list", "when", "ordering", "the", "titles", "found", "in", "a", "knowledge", "base", "by", "string", "-", "length", "-", "LONGEST", "-", ">", "SHORTEST", "." ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/kbs.py#L418-L432
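cmp-style comparison functions are a Python 2 idiom; Python 3's sort dropped the cmp argument. The same longest-to-shortest ordering can be expressed directly with a key (or by wrapping the function above in functools.cmp_to_key):

```python
titles = ["Phys Rev", "Phys Rev Lett", "PRL"]
# Equivalent of sorting with _cmp_bystrlen_reverse on Python 3:
titles.sort(key=len, reverse=True)
print(titles)   # ['Phys Rev Lett', 'Phys Rev', 'PRL']
```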
3,768
inspirehep/refextract
refextract/references/kbs.py
build_special_journals_kb
def build_special_journals_kb(fpath): """Load special journals database from file Special journals are journals that have a volume which is not unique among different years. To keep the volume unique we are adding the year before the volume. """ journals = set() with file_resolving(fpath) as fh: for line in fh: # Skip commented lines if line.startswith('#'): continue # Skip empty line if not line.strip(): continue journals.add(line.strip()) return journals
python
def build_special_journals_kb(fpath): """Load special journals database from file Special journals are journals that have a volume which is not unique among different years. To keep the volume unique we are adding the year before the volume. """ journals = set() with file_resolving(fpath) as fh: for line in fh: # Skip commented lines if line.startswith('#'): continue # Skip empty line if not line.strip(): continue journals.add(line.strip()) return journals
[ "def", "build_special_journals_kb", "(", "fpath", ")", ":", "journals", "=", "set", "(", ")", "with", "file_resolving", "(", "fpath", ")", "as", "fh", ":", "for", "line", "in", "fh", ":", "# Skip commented lines", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "# Skip empty line", "if", "not", "line", ".", "strip", "(", ")", ":", "continue", "journals", ".", "add", "(", "line", ".", "strip", "(", ")", ")", "return", "journals" ]
Load special journals database from file Special journals are journals that have a volume which is not unique among different years. To keep the volume unique we are adding the year before the volume.
[ "Load", "special", "journals", "database", "from", "file" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/kbs.py#L435-L453
3,769
inspirehep/refextract
refextract/references/kbs.py
build_journals_re_kb
def build_journals_re_kb(fpath): """Load journals regexps knowledge base @see build_journals_kb """ def make_tuple(match): regexp = match.group('seek') repl = match.group('repl') return regexp, repl kb = [] with file_resolving(fpath) as fh: for rawline in fh: if rawline.startswith('#'): continue # Extract the seek->replace terms from this KB line: m_kb_line = re_kb_line.search(rawline) kb.append(make_tuple(m_kb_line)) return kb
python
def build_journals_re_kb(fpath): """Load journals regexps knowledge base @see build_journals_kb """ def make_tuple(match): regexp = match.group('seek') repl = match.group('repl') return regexp, repl kb = [] with file_resolving(fpath) as fh: for rawline in fh: if rawline.startswith('#'): continue # Extract the seek->replace terms from this KB line: m_kb_line = re_kb_line.search(rawline) kb.append(make_tuple(m_kb_line)) return kb
[ "def", "build_journals_re_kb", "(", "fpath", ")", ":", "def", "make_tuple", "(", "match", ")", ":", "regexp", "=", "match", ".", "group", "(", "'seek'", ")", "repl", "=", "match", ".", "group", "(", "'repl'", ")", "return", "regexp", ",", "repl", "kb", "=", "[", "]", "with", "file_resolving", "(", "fpath", ")", "as", "fh", ":", "for", "rawline", "in", "fh", ":", "if", "rawline", ".", "startswith", "(", "'#'", ")", ":", "continue", "# Extract the seek->replace terms from this KB line:", "m_kb_line", "=", "re_kb_line", ".", "search", "(", "rawline", ")", "kb", ".", "append", "(", "make_tuple", "(", "m_kb_line", ")", ")", "return", "kb" ]
Load journals regexps knowledge base @see build_journals_kb
[ "Load", "journals", "regexps", "knowledge", "base" ]
d70e3787be3c495a3a07d1517b53f81d51c788c7
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/kbs.py#L492-L512
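build_journals_re_kb depends on a module-level re_kb_line that this record does not show, and make_tuple assumes every non-comment line matches it; a non-matching line would leave m_kb_line as None and raise AttributeError. A plausible shape for such a pattern, given the seek---replace format used elsewhere in these KBs, might be:

```python
import re

# Hypothetical reconstruction of re_kb_line, which is defined elsewhere
# in kbs.py: a lazy seek term, three hyphens, then the replacement term.
re_kb_line = re.compile(r'^\s*(?P<seek>.+?)\s*---\s*(?P<repl>.+?)\s*$',
                        re.UNICODE)

m = re_kb_line.search("PHYSICAL REVIEW D ---Phys.Rev.D")
print(m.group("seek"), "->", m.group("repl"))   # PHYSICAL REVIEW D -> Phys.Rev.D
```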
3,770
brettcannon/gidgethub
gidgethub/sansio.py
_parse_content_type
def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]: """Tease out the content-type and character encoding. A default character encoding of UTF-8 is used, so the content-type must be used to determine if any decoding is necessary to begin with. """ if not content_type: return None, "utf-8" else: type_, parameters = cgi.parse_header(content_type) encoding = parameters.get("charset", "utf-8") return type_, encoding
python
def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]: """Tease out the content-type and character encoding. A default character encoding of UTF-8 is used, so the content-type must be used to determine if any decoding is necessary to begin with. """ if not content_type: return None, "utf-8" else: type_, parameters = cgi.parse_header(content_type) encoding = parameters.get("charset", "utf-8") return type_, encoding
[ "def", "_parse_content_type", "(", "content_type", ":", "Optional", "[", "str", "]", ")", "->", "Tuple", "[", "Optional", "[", "str", "]", ",", "str", "]", ":", "if", "not", "content_type", ":", "return", "None", ",", "\"utf-8\"", "else", ":", "type_", ",", "parameters", "=", "cgi", ".", "parse_header", "(", "content_type", ")", "encoding", "=", "parameters", ".", "get", "(", "\"charset\"", ",", "\"utf-8\"", ")", "return", "type_", ",", "encoding" ]
Tease out the content-type and character encoding. A default character encoding of UTF-8 is used, so the content-type must be used to determine if any decoding is necessary to begin with.
[ "Tease", "out", "the", "content", "-", "type", "and", "character", "encoding", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L24-L36
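A quick demonstration of the parsing this helper delegates to cgi.parse_header (note that the cgi module is deprecated since Python 3.11 and removed in 3.13, so newer code would need email.message or a third-party parser for the same job):

```python
import cgi

type_, params = cgi.parse_header("application/json; charset=ISO-8859-1")
print(type_, params.get("charset", "utf-8"))   # application/json ISO-8859-1

type_, params = cgi.parse_header("text/plain")
print(type_, params.get("charset", "utf-8"))   # text/plain utf-8
```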
3,771
brettcannon/gidgethub
gidgethub/sansio.py
_decode_body
def _decode_body(content_type: Optional[str], body: bytes,
                 *, strict: bool = False) -> Any:
    """Decode an HTTP body based on the specified content type.

    If 'strict' is true, then raise ValueError if the content type
    is not recognized. Otherwise simply return the body as a
    decoded string.
    """
    type_, encoding = _parse_content_type(content_type)
    if not len(body) or not content_type:
        return None
    decoded_body = body.decode(encoding)
    if type_ == "application/json":
        return json.loads(decoded_body)
    elif type_ == "application/x-www-form-urlencoded":
        return json.loads(urllib.parse.parse_qs(decoded_body)["payload"][0])
    elif strict:
        raise ValueError(f"unrecognized content type: {type_!r}")
    return decoded_body
python
def _decode_body(content_type: Optional[str], body: bytes, *, strict: bool = False) -> Any:
    """Decode an HTTP body based on the specified content type.

    If 'strict' is true, then raise ValueError if the content type is not
    recognized. Otherwise simply return the body as a decoded string.
    """
    type_, encoding = _parse_content_type(content_type)
    if not len(body) or not content_type:
        return None
    decoded_body = body.decode(encoding)
    if type_ == "application/json":
        return json.loads(decoded_body)
    elif type_ == "application/x-www-form-urlencoded":
        return json.loads(urllib.parse.parse_qs(decoded_body)["payload"][0])
    elif strict:
        raise ValueError(f"unrecognized content type: {type_!r}")
    return decoded_body
[ "def", "_decode_body", "(", "content_type", ":", "Optional", "[", "str", "]", ",", "body", ":", "bytes", ",", "*", ",", "strict", ":", "bool", "=", "False", ")", "->", "Any", ":", "type_", ",", "encoding", "=", "_parse_content_type", "(", "content_type", ")", "if", "not", "len", "(", "body", ")", "or", "not", "content_type", ":", "return", "None", "decoded_body", "=", "body", ".", "decode", "(", "encoding", ")", "if", "type_", "==", "\"application/json\"", ":", "return", "json", ".", "loads", "(", "decoded_body", ")", "elif", "type_", "==", "\"application/x-www-form-urlencoded\"", ":", "return", "json", ".", "loads", "(", "urllib", ".", "parse", ".", "parse_qs", "(", "decoded_body", ")", "[", "\"payload\"", "]", "[", "0", "]", ")", "elif", "strict", ":", "raise", "ValueError", "(", "f\"unrecognized content type: {type_!r}\"", ")", "return", "decoded_body" ]
Decode an HTTP body based on the specified content type. If 'strict' is true, then raise ValueError if the content type is not recognized. Otherwise simply return the body as a decoded string.
[ "Decode", "an", "HTTP", "body", "based", "on", "the", "specified", "content", "type", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L39-L57
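The two recognized content types reduce to standard-library calls; a standalone sketch of both branches:

    import json
    import urllib.parse

    # JSON branch.
    print(json.loads(b'{"action": "opened"}'.decode("utf-8")))

    # Form-urlencoded branch: GitHub puts the JSON document in a 'payload' field.
    form = "payload=%7B%22action%22%3A%20%22opened%22%7D"
    print(json.loads(urllib.parse.parse_qs(form)["payload"][0]))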
3,772
brettcannon/gidgethub
gidgethub/sansio.py
validate_event
def validate_event(payload: bytes, *, signature: str, secret: str) -> None: """Validate the signature of a webhook event.""" # https://developer.github.com/webhooks/securing/#validating-payloads-from-github signature_prefix = "sha1=" if not signature.startswith(signature_prefix): raise ValidationFailure("signature does not start with " f"{repr(signature_prefix)}") hmac_ = hmac.new(secret.encode("UTF-8"), msg=payload, digestmod="sha1") calculated_sig = signature_prefix + hmac_.hexdigest() if not hmac.compare_digest(signature, calculated_sig): raise ValidationFailure("payload's signature does not align " "with the secret")
python
def validate_event(payload: bytes, *, signature: str, secret: str) -> None: """Validate the signature of a webhook event.""" # https://developer.github.com/webhooks/securing/#validating-payloads-from-github signature_prefix = "sha1=" if not signature.startswith(signature_prefix): raise ValidationFailure("signature does not start with " f"{repr(signature_prefix)}") hmac_ = hmac.new(secret.encode("UTF-8"), msg=payload, digestmod="sha1") calculated_sig = signature_prefix + hmac_.hexdigest() if not hmac.compare_digest(signature, calculated_sig): raise ValidationFailure("payload's signature does not align " "with the secret")
[ "def", "validate_event", "(", "payload", ":", "bytes", ",", "*", ",", "signature", ":", "str", ",", "secret", ":", "str", ")", "->", "None", ":", "# https://developer.github.com/webhooks/securing/#validating-payloads-from-github", "signature_prefix", "=", "\"sha1=\"", "if", "not", "signature", ".", "startswith", "(", "signature_prefix", ")", ":", "raise", "ValidationFailure", "(", "\"signature does not start with \"", "f\"{repr(signature_prefix)}\"", ")", "hmac_", "=", "hmac", ".", "new", "(", "secret", ".", "encode", "(", "\"UTF-8\"", ")", ",", "msg", "=", "payload", ",", "digestmod", "=", "\"sha1\"", ")", "calculated_sig", "=", "signature_prefix", "+", "hmac_", ".", "hexdigest", "(", ")", "if", "not", "hmac", ".", "compare_digest", "(", "signature", ",", "calculated_sig", ")", ":", "raise", "ValidationFailure", "(", "\"payload's signature does not align \"", "\"with the secret\"", ")" ]
Validate the signature of a webhook event.
[ "Validate", "the", "signature", "of", "a", "webhook", "event", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L60-L71
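A round-trip sketch of the scheme being validated: GitHub signs the raw body with the webhook secret and sends "sha1=<hexdigest>". The secret and payload below are placeholders:

    import hmac

    secret = "placeholder-secret"
    payload = b'{"action": "opened"}'
    signature = "sha1=" + hmac.new(secret.encode("UTF-8"), msg=payload,
                                   digestmod="sha1").hexdigest()
    # validate_event(payload, signature=signature, secret=secret) passes;
    # a tampered payload or wrong secret raises ValidationFailure.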
3,773
brettcannon/gidgethub
gidgethub/sansio.py
accept_format
def accept_format(*, version: str = "v3", media: Optional[str] = None, json: bool = True) -> str: """Construct the specification of the format that a request should return. The version argument defaults to v3 of the GitHub API and is applicable to all requests. The media argument along with 'json' specifies what format the request should return, e.g. requesting the rendered HTML of a comment. Do note that not all of GitHub's API supports alternative formats. The default arguments of this function will always return the latest stable version of the GitHub API in the default format that this library is designed to support. """ # https://developer.github.com/v3/media/ # https://developer.github.com/v3/#current-version accept = f"application/vnd.github.{version}" if media is not None: accept += f".{media}" if json: accept += "+json" return accept
python
def accept_format(*, version: str = "v3", media: Optional[str] = None, json: bool = True) -> str: """Construct the specification of the format that a request should return. The version argument defaults to v3 of the GitHub API and is applicable to all requests. The media argument along with 'json' specifies what format the request should return, e.g. requesting the rendered HTML of a comment. Do note that not all of GitHub's API supports alternative formats. The default arguments of this function will always return the latest stable version of the GitHub API in the default format that this library is designed to support. """ # https://developer.github.com/v3/media/ # https://developer.github.com/v3/#current-version accept = f"application/vnd.github.{version}" if media is not None: accept += f".{media}" if json: accept += "+json" return accept
[ "def", "accept_format", "(", "*", ",", "version", ":", "str", "=", "\"v3\"", ",", "media", ":", "Optional", "[", "str", "]", "=", "None", ",", "json", ":", "bool", "=", "True", ")", "->", "str", ":", "# https://developer.github.com/v3/media/", "# https://developer.github.com/v3/#current-version", "accept", "=", "f\"application/vnd.github.{version}\"", "if", "media", "is", "not", "None", ":", "accept", "+=", "f\".{media}\"", "if", "json", ":", "accept", "+=", "\"+json\"", "return", "accept" ]
Construct the specification of the format that a request should return. The version argument defaults to v3 of the GitHub API and is applicable to all requests. The media argument along with 'json' specifies what format the request should return, e.g. requesting the rendered HTML of a comment. Do note that not all of GitHub's API supports alternative formats. The default arguments of this function will always return the latest stable version of the GitHub API in the default format that this library is designed to support.
[ "Construct", "the", "specification", "of", "the", "format", "that", "a", "request", "should", "return", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L125-L145
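What the string building above yields for a few argument combinations (assuming the function is imported from gidgethub.sansio):

    from gidgethub.sansio import accept_format

    assert accept_format() == "application/vnd.github.v3+json"
    assert accept_format(media="html") == "application/vnd.github.v3.html+json"
    assert accept_format(media="raw", json=False) == "application/vnd.github.v3.raw"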
3,774
brettcannon/gidgethub
gidgethub/sansio.py
create_headers
def create_headers(requester: str, *, accept: str = accept_format(), oauth_token: Optional[str] = None, jwt: Optional[str] = None) -> Dict[str, str]:
    """Create a dict representing GitHub-specific header fields.

    The user agent is set according to who the requester is. GitHub asks it be
    either a username or project name.

    The 'accept' argument corresponds to the 'accept' field and defaults to the
    default result of accept_format(). You should only need to change this value
    if you are using a different version of the API -- e.g. one that is under
    development -- or if you are looking for a different format return type,
    e.g. wanting the rendered HTML of a Markdown file.

    The 'oauth_token' allows making an authenticated request using a personal
    access token. This can be important if you need the expanded rate limit
    provided by an authenticated request.

    The 'jwt' allows authenticating as a GitHub App by passing in the
    bearer token.

    You can only supply one of oauth_token or jwt, not both.

    For consistency, all keys in the returned dict will be lowercased.
    """
    # user-agent: https://developer.github.com/v3/#user-agent-required
    # accept: https://developer.github.com/v3/#current-version
    # https://developer.github.com/v3/media/
    # authorization: https://developer.github.com/v3/#authentication
    # authenticating as a GitHub App: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
    if oauth_token is not None and jwt is not None:
        raise ValueError("Cannot pass both oauth_token and jwt.")
    headers = {"user-agent": requester, "accept": accept}
    if oauth_token is not None:
        headers["authorization"] = f"token {oauth_token}"
    elif jwt is not None:
        headers["authorization"] = f"bearer {jwt}"
    return headers
python
def create_headers(requester: str, *, accept: str = accept_format(), oauth_token: Optional[str] = None, jwt: Optional[str] = None) -> Dict[str, str]:
    """Create a dict representing GitHub-specific header fields.

    The user agent is set according to who the requester is. GitHub asks it be
    either a username or project name.

    The 'accept' argument corresponds to the 'accept' field and defaults to the
    default result of accept_format(). You should only need to change this value
    if you are using a different version of the API -- e.g. one that is under
    development -- or if you are looking for a different format return type,
    e.g. wanting the rendered HTML of a Markdown file.

    The 'oauth_token' allows making an authenticated request using a personal
    access token. This can be important if you need the expanded rate limit
    provided by an authenticated request.

    The 'jwt' allows authenticating as a GitHub App by passing in the
    bearer token.

    You can only supply one of oauth_token or jwt, not both.

    For consistency, all keys in the returned dict will be lowercased.
    """
    # user-agent: https://developer.github.com/v3/#user-agent-required
    # accept: https://developer.github.com/v3/#current-version
    # https://developer.github.com/v3/media/
    # authorization: https://developer.github.com/v3/#authentication
    # authenticating as a GitHub App: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
    if oauth_token is not None and jwt is not None:
        raise ValueError("Cannot pass both oauth_token and jwt.")
    headers = {"user-agent": requester, "accept": accept}
    if oauth_token is not None:
        headers["authorization"] = f"token {oauth_token}"
    elif jwt is not None:
        headers["authorization"] = f"bearer {jwt}"
    return headers
[ "def", "create_headers", "(", "requester", ":", "str", ",", "*", ",", "accept", ":", "str", "=", "accept_format", "(", ")", ",", "oauth_token", ":", "Optional", "[", "str", "]", "=", "None", ",", "jwt", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "# user-agent: https://developer.github.com/v3/#user-agent-required", "# accept: https://developer.github.com/v3/#current-version", "# https://developer.github.com/v3/media/", "# authorization: https://developer.github.com/v3/#authentication", "# authenticating as a GitHub App: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app", "if", "oauth_token", "is", "not", "None", "and", "jwt", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot pass both oauth_token and jwt.\"", ")", "headers", "=", "{", "\"user-agent\"", ":", "requester", ",", "\"accept\"", ":", "accept", "}", "if", "oauth_token", "is", "not", "None", ":", "headers", "[", "\"authorization\"", "]", "=", "f\"token {oauth_token}\"", "elif", "jwt", "is", "not", "None", ":", "headers", "[", "\"authorization\"", "]", "=", "f\"bearer {jwt}\"", "return", "headers" ]
Create a dict representing GitHub-specific header fields. The user agent is set according to who the requester is. GitHub asks it be either a username or project name. The 'accept' argument corresponds to the 'accept' field and defaults to the default result of accept_format(). You should only need to change this value if you are using a different version of the API -- e.g. one that is under development -- or if you are looking for a different format return type, e.g. wanting the rendered HTML of a Markdown file. The 'oauth_token' allows making an authenticated request using a personal access token. This can be important if you need the expanded rate limit provided by an authenticated request. The 'jwt' allows authenticating as a GitHub App by passing in the bearer token. You can only supply one of oauth_token or jwt, not both. For consistency, all keys in the returned dict will be lowercased.
[ "Create", "a", "dict", "representing", "GitHub", "-", "specific", "header", "fields", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L148-L186
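A usage sketch; the requester name and token are placeholders:

    from gidgethub.sansio import create_headers

    headers = create_headers("my-project", oauth_token="<personal-access-token>")
    # {'user-agent': 'my-project',
    #  'accept': 'application/vnd.github.v3+json',
    #  'authorization': 'token <personal-access-token>'}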
3,775
brettcannon/gidgethub
gidgethub/sansio.py
decipher_response
def decipher_response(status_code: int, headers: Mapping[str, str], body: bytes) -> Tuple[Any, Optional[RateLimit], Optional[str]]:
    """Decipher an HTTP response for a GitHub API request.

    The mapping providing the headers is expected to support lowercase keys.

    The parameters of this function correspond to the three main parts of an
    HTTP response: the status code, headers, and body. Assuming no errors
    which lead to an exception being raised, a 3-item tuple is returned. The
    first item is the decoded body (typically a JSON object, but possibly
    None or a string depending on the content type of the body). The second
    item is an instance of RateLimit based on what the response specified.

    The last item of the tuple is the URL from which to request the next part
    of the results. If there are no more results then None is returned. Do be
    aware that the URL can be a URI template and so may need to be expanded.

    If the status code is anything other than 200, 201, or 204, then an
    HTTPException is raised.
    """
    data = _decode_body(headers.get("content-type"), body)
    if status_code in {200, 201, 204}:
        return data, RateLimit.from_http(headers), _next_link(headers.get("link"))
    else:
        try:
            message = data["message"]
        except (TypeError, KeyError):
            message = None
        exc_type: Type[HTTPException]
        if status_code >= 500:
            exc_type = GitHubBroken
        elif status_code >= 400:
            exc_type = BadRequest
            if status_code == 403:
                rate_limit = RateLimit.from_http(headers)
                if rate_limit and not rate_limit.remaining:
                    raise RateLimitExceeded(rate_limit, message)
            elif status_code == 422:
                errors = data.get("errors", None)
                if errors:
                    fields = ", ".join(repr(e["field"]) for e in errors)
                    message = f"{message} for {fields}"
                else:
                    message = data["message"]
                raise InvalidField(errors, message)
        elif status_code >= 300:
            exc_type = RedirectionException
        else:
            exc_type = HTTPException
        status_code_enum = http.HTTPStatus(status_code)
        args: Union[Tuple[http.HTTPStatus, str], Tuple[http.HTTPStatus]]
        if message:
            args = status_code_enum, message
        else:
            args = status_code_enum,
        raise exc_type(*args)
python
def decipher_response(status_code: int, headers: Mapping[str, str], body: bytes) -> Tuple[Any, Optional[RateLimit], Optional[str]]:
    """Decipher an HTTP response for a GitHub API request.

    The mapping providing the headers is expected to support lowercase keys.

    The parameters of this function correspond to the three main parts of an
    HTTP response: the status code, headers, and body. Assuming no errors
    which lead to an exception being raised, a 3-item tuple is returned. The
    first item is the decoded body (typically a JSON object, but possibly
    None or a string depending on the content type of the body). The second
    item is an instance of RateLimit based on what the response specified.

    The last item of the tuple is the URL from which to request the next part
    of the results. If there are no more results then None is returned. Do be
    aware that the URL can be a URI template and so may need to be expanded.

    If the status code is anything other than 200, 201, or 204, then an
    HTTPException is raised.
    """
    data = _decode_body(headers.get("content-type"), body)
    if status_code in {200, 201, 204}:
        return data, RateLimit.from_http(headers), _next_link(headers.get("link"))
    else:
        try:
            message = data["message"]
        except (TypeError, KeyError):
            message = None
        exc_type: Type[HTTPException]
        if status_code >= 500:
            exc_type = GitHubBroken
        elif status_code >= 400:
            exc_type = BadRequest
            if status_code == 403:
                rate_limit = RateLimit.from_http(headers)
                if rate_limit and not rate_limit.remaining:
                    raise RateLimitExceeded(rate_limit, message)
            elif status_code == 422:
                errors = data.get("errors", None)
                if errors:
                    fields = ", ".join(repr(e["field"]) for e in errors)
                    message = f"{message} for {fields}"
                else:
                    message = data["message"]
                raise InvalidField(errors, message)
        elif status_code >= 300:
            exc_type = RedirectionException
        else:
            exc_type = HTTPException
        status_code_enum = http.HTTPStatus(status_code)
        args: Union[Tuple[http.HTTPStatus, str], Tuple[http.HTTPStatus]]
        if message:
            args = status_code_enum, message
        else:
            args = status_code_enum,
        raise exc_type(*args)
[ "def", "decipher_response", "(", "status_code", ":", "int", ",", "headers", ":", "Mapping", "[", "str", ",", "str", "]", ",", "body", ":", "bytes", ")", "->", "Tuple", "[", "Any", ",", "Optional", "[", "RateLimit", "]", ",", "Optional", "[", "str", "]", "]", ":", "data", "=", "_decode_body", "(", "headers", ".", "get", "(", "\"content-type\"", ")", ",", "body", ")", "if", "status_code", "in", "{", "200", ",", "201", ",", "204", "}", ":", "return", "data", ",", "RateLimit", ".", "from_http", "(", "headers", ")", ",", "_next_link", "(", "headers", ".", "get", "(", "\"link\"", ")", ")", "else", ":", "try", ":", "message", "=", "data", "[", "\"message\"", "]", "except", "(", "TypeError", ",", "KeyError", ")", ":", "message", "=", "None", "exc_type", ":", "Type", "[", "HTTPException", "]", "if", "status_code", ">=", "500", ":", "exc_type", "=", "GitHubBroken", "elif", "status_code", ">=", "400", ":", "exc_type", "=", "BadRequest", "if", "status_code", "==", "403", ":", "rate_limit", "=", "RateLimit", ".", "from_http", "(", "headers", ")", "if", "rate_limit", "and", "not", "rate_limit", ".", "remaining", ":", "raise", "RateLimitExceeded", "(", "rate_limit", ",", "message", ")", "elif", "status_code", "==", "422", ":", "errors", "=", "data", ".", "get", "(", "\"errors\"", ",", "None", ")", "if", "errors", ":", "fields", "=", "\", \"", ".", "join", "(", "repr", "(", "e", "[", "\"field\"", "]", ")", "for", "e", "in", "errors", ")", "message", "=", "f\"{message} for {fields}\"", "else", ":", "message", "=", "data", "[", "\"message\"", "]", "raise", "InvalidField", "(", "errors", ",", "message", ")", "elif", "status_code", ">=", "300", ":", "exc_type", "=", "RedirectionException", "else", ":", "exc_type", "=", "HTTPException", "status_code_enum", "=", "http", ".", "HTTPStatus", "(", "status_code", ")", "args", ":", "Union", "[", "Tuple", "[", "http", ".", "HTTPStatus", ",", "str", "]", ",", "Tuple", "[", "http", ".", "HTTPStatus", "]", "]", "if", "message", ":", "args", "=", "status_code_enum", ",", "message", "else", ":", "args", "=", "status_code_enum", ",", "raise", "exc_type", "(", "*", "args", ")" ]
Decipher an HTTP response for a GitHub API request. The mapping providing the headers is expected to support lowercase keys. The parameters of this function correspond to the three main parts of an HTTP response: the status code, headers, and body. Assuming no errors which lead to an exception being raised, a 3-item tuple is returned. The first item is the decoded body (typically a JSON object, but possibly None or a string depending on the content type of the body). The second item is an instance of RateLimit based on what the response specified. The last item of the tuple is the URL from which to request the next part of the results. If there are no more results then None is returned. Do be aware that the URL can be a URI template and so may need to be expanded. If the status code is anything other than 200, 201, or 204, then an HTTPException is raised.
[ "Decipher", "an", "HTTP", "response", "for", "a", "GitHub", "API", "request", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L269-L326
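A sketch of the success path, with hand-written lowercase headers (the values are illustrative):

    from gidgethub.sansio import decipher_response

    data, rate_limit, more = decipher_response(
        200,
        {"content-type": "application/json; charset=utf-8",
         "x-ratelimit-limit": "60",
         "x-ratelimit-remaining": "59",
         "x-ratelimit-reset": "1680000000"},
        b'{"id": 1}',
    )
    # data == {'id': 1}; rate_limit.remaining == 59; more is None (no 'link' header)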
3,776
brettcannon/gidgethub
gidgethub/sansio.py
format_url
def format_url(url: str, url_vars: Mapping[str, Any]) -> str: """Construct a URL for the GitHub API. The URL may be absolute or relative. In the latter case the appropriate domain will be added. This is to help when copying the relative URL directly from the GitHub developer documentation. The dict provided in url_vars is used in URI template formatting. """ url = urllib.parse.urljoin(DOMAIN, url) # Works even if 'url' is fully-qualified. expanded_url: str = uritemplate.expand(url, var_dict=url_vars) return expanded_url
python
def format_url(url: str, url_vars: Mapping[str, Any]) -> str: """Construct a URL for the GitHub API. The URL may be absolute or relative. In the latter case the appropriate domain will be added. This is to help when copying the relative URL directly from the GitHub developer documentation. The dict provided in url_vars is used in URI template formatting. """ url = urllib.parse.urljoin(DOMAIN, url) # Works even if 'url' is fully-qualified. expanded_url: str = uritemplate.expand(url, var_dict=url_vars) return expanded_url
[ "def", "format_url", "(", "url", ":", "str", ",", "url_vars", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "str", ":", "url", "=", "urllib", ".", "parse", ".", "urljoin", "(", "DOMAIN", ",", "url", ")", "# Works even if 'url' is fully-qualified.", "expanded_url", ":", "str", "=", "uritemplate", ".", "expand", "(", "url", ",", "var_dict", "=", "url_vars", ")", "return", "expanded_url" ]
Construct a URL for the GitHub API. The URL may be absolute or relative. In the latter case the appropriate domain will be added. This is to help when copying the relative URL directly from the GitHub developer documentation. The dict provided in url_vars is used in URI template formatting.
[ "Construct", "a", "URL", "for", "the", "GitHub", "API", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L331-L342
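A usage sketch, assuming DOMAIN is the public GitHub API root (https://api.github.com), as it is in gidgethub:

    from gidgethub.sansio import format_url

    url = format_url("/repos/{owner}/{repo}",
                     {"owner": "brettcannon", "repo": "gidgethub"})
    # -> 'https://api.github.com/repos/brettcannon/gidgethub'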
3,777
brettcannon/gidgethub
gidgethub/sansio.py
Event.from_http
def from_http(cls, headers: Mapping[str, str], body: bytes, *, secret: Optional[str] = None) -> "Event": """Construct an event from HTTP headers and JSON body data. The mapping providing the headers is expected to support lowercase keys. Since this method assumes the body of the HTTP request is JSON, a check is performed for a content-type of "application/json" (GitHub does support other content-types). If the content-type does not match, BadRequest is raised. If the appropriate headers are provided for event validation, then it will be performed unconditionally. Any failure in validation (including not providing a secret) will lead to ValidationFailure being raised. """ if "x-hub-signature" in headers: if secret is None: raise ValidationFailure("secret not provided") validate_event(body, signature=headers["x-hub-signature"], secret=secret) elif secret is not None: raise ValidationFailure("signature is missing") try: data = _decode_body(headers["content-type"], body, strict=True) except (KeyError, ValueError) as exc: raise BadRequest(http.HTTPStatus(415), "expected a content-type of " "'application/json' or " "'application/x-www-form-urlencoded'") from exc return cls(data, event=headers["x-github-event"], delivery_id=headers["x-github-delivery"])
python
def from_http(cls, headers: Mapping[str, str], body: bytes, *, secret: Optional[str] = None) -> "Event": """Construct an event from HTTP headers and JSON body data. The mapping providing the headers is expected to support lowercase keys. Since this method assumes the body of the HTTP request is JSON, a check is performed for a content-type of "application/json" (GitHub does support other content-types). If the content-type does not match, BadRequest is raised. If the appropriate headers are provided for event validation, then it will be performed unconditionally. Any failure in validation (including not providing a secret) will lead to ValidationFailure being raised. """ if "x-hub-signature" in headers: if secret is None: raise ValidationFailure("secret not provided") validate_event(body, signature=headers["x-hub-signature"], secret=secret) elif secret is not None: raise ValidationFailure("signature is missing") try: data = _decode_body(headers["content-type"], body, strict=True) except (KeyError, ValueError) as exc: raise BadRequest(http.HTTPStatus(415), "expected a content-type of " "'application/json' or " "'application/x-www-form-urlencoded'") from exc return cls(data, event=headers["x-github-event"], delivery_id=headers["x-github-delivery"])
[ "def", "from_http", "(", "cls", ",", "headers", ":", "Mapping", "[", "str", ",", "str", "]", ",", "body", ":", "bytes", ",", "*", ",", "secret", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "\"Event\"", ":", "if", "\"x-hub-signature\"", "in", "headers", ":", "if", "secret", "is", "None", ":", "raise", "ValidationFailure", "(", "\"secret not provided\"", ")", "validate_event", "(", "body", ",", "signature", "=", "headers", "[", "\"x-hub-signature\"", "]", ",", "secret", "=", "secret", ")", "elif", "secret", "is", "not", "None", ":", "raise", "ValidationFailure", "(", "\"signature is missing\"", ")", "try", ":", "data", "=", "_decode_body", "(", "headers", "[", "\"content-type\"", "]", ",", "body", ",", "strict", "=", "True", ")", "except", "(", "KeyError", ",", "ValueError", ")", "as", "exc", ":", "raise", "BadRequest", "(", "http", ".", "HTTPStatus", "(", "415", ")", ",", "\"expected a content-type of \"", "\"'application/json' or \"", "\"'application/x-www-form-urlencoded'\"", ")", "from", "exc", "return", "cls", "(", "data", ",", "event", "=", "headers", "[", "\"x-github-event\"", "]", ",", "delivery_id", "=", "headers", "[", "\"x-github-delivery\"", "]", ")" ]
Construct an event from HTTP headers and JSON body data. The mapping providing the headers is expected to support lowercase keys. Since this method assumes the body of the HTTP request is JSON, a check is performed for a content-type of "application/json" (GitHub does support other content-types). If the content-type does not match, BadRequest is raised. If the appropriate headers are provided for event validation, then it will be performed unconditionally. Any failure in validation (including not providing a secret) will lead to ValidationFailure being raised.
[ "Construct", "an", "event", "from", "HTTP", "headers", "and", "JSON", "body", "data", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L90-L122
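Constructing an Event from a webhook delivery; the header names follow the code above, the delivery id is a placeholder, and no secret is passed because this example delivery is unsigned:

    from gidgethub.sansio import Event

    headers = {"content-type": "application/json",
               "x-github-event": "issues",
               "x-github-delivery": "<delivery-uuid>"}
    event = Event.from_http(headers, b'{"action": "opened"}')
    # event.event == 'issues'; event.data['action'] == 'opened'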
3,778
brettcannon/gidgethub
gidgethub/sansio.py
RateLimit.from_http
def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]: """Gather rate limit information from HTTP headers. The mapping providing the headers is expected to support lowercase keys. Returns ``None`` if ratelimit info is not found in the headers. """ try: limit = int(headers["x-ratelimit-limit"]) remaining = int(headers["x-ratelimit-remaining"]) reset_epoch = float(headers["x-ratelimit-reset"]) except KeyError: return None else: return cls(limit=limit, remaining=remaining, reset_epoch=reset_epoch)
python
def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]: """Gather rate limit information from HTTP headers. The mapping providing the headers is expected to support lowercase keys. Returns ``None`` if ratelimit info is not found in the headers. """ try: limit = int(headers["x-ratelimit-limit"]) remaining = int(headers["x-ratelimit-remaining"]) reset_epoch = float(headers["x-ratelimit-reset"]) except KeyError: return None else: return cls(limit=limit, remaining=remaining, reset_epoch=reset_epoch)
[ "def", "from_http", "(", "cls", ",", "headers", ":", "Mapping", "[", "str", ",", "str", "]", ")", "->", "Optional", "[", "\"RateLimit\"", "]", ":", "try", ":", "limit", "=", "int", "(", "headers", "[", "\"x-ratelimit-limit\"", "]", ")", "remaining", "=", "int", "(", "headers", "[", "\"x-ratelimit-remaining\"", "]", ")", "reset_epoch", "=", "float", "(", "headers", "[", "\"x-ratelimit-reset\"", "]", ")", "except", "KeyError", ":", "return", "None", "else", ":", "return", "cls", "(", "limit", "=", "limit", ",", "remaining", "=", "remaining", ",", "reset_epoch", "=", "reset_epoch", ")" ]
Gather rate limit information from HTTP headers. The mapping providing the headers is expected to support lowercase keys. Returns ``None`` if ratelimit info is not found in the headers.
[ "Gather", "rate", "limit", "information", "from", "HTTP", "headers", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L237-L250
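The three headers this reads, and the None fallback:

    from gidgethub.sansio import RateLimit

    rl = RateLimit.from_http({"x-ratelimit-limit": "60",
                              "x-ratelimit-remaining": "59",
                              "x-ratelimit-reset": "1680000000"})
    # rl.limit == 60; rl.remaining == 59
    assert RateLimit.from_http({}) is None  # no rate limit headers present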
3,779
brettcannon/gidgethub
gidgethub/routing.py
Router.add
def add(self, func: AsyncCallback, event_type: str, **data_detail: Any) -> None:
    """Add a new route.

    After registering 'func' for the specified event_type, an optional
    data_detail may be provided. By providing an extra keyword argument,
    dispatching can occur based on a top-level key of the data in the event
    being dispatched.
    """
    if len(data_detail) > 1:
        raise TypeError("dispatching based on data details is only "
                        "supported up to one level deep; "
                        f"{len(data_detail)} levels specified")
    elif not data_detail:
        callbacks = self._shallow_routes.setdefault(event_type, [])
        callbacks.append(func)
    else:
        data_key, data_value = data_detail.popitem()
        data_details = self._deep_routes.setdefault(event_type, {})
        specific_detail = data_details.setdefault(data_key, {})
        callbacks = specific_detail.setdefault(data_value, [])
        callbacks.append(func)
python
def add(self, func: AsyncCallback, event_type: str, **data_detail: Any) -> None:
    """Add a new route.

    After registering 'func' for the specified event_type, an optional
    data_detail may be provided. By providing an extra keyword argument,
    dispatching can occur based on a top-level key of the data in the event
    being dispatched.
    """
    if len(data_detail) > 1:
        raise TypeError("dispatching based on data details is only "
                        "supported up to one level deep; "
                        f"{len(data_detail)} levels specified")
    elif not data_detail:
        callbacks = self._shallow_routes.setdefault(event_type, [])
        callbacks.append(func)
    else:
        data_key, data_value = data_detail.popitem()
        data_details = self._deep_routes.setdefault(event_type, {})
        specific_detail = data_details.setdefault(data_key, {})
        callbacks = specific_detail.setdefault(data_value, [])
        callbacks.append(func)
[ "def", "add", "(", "self", ",", "func", ":", "AsyncCallback", ",", "event_type", ":", "str", ",", "*", "*", "data_detail", ":", "Any", ")", "->", "None", ":", "if", "len", "(", "data_detail", ")", ">", "1", ":", "msg", "=", "(", ")", "raise", "TypeError", "(", "\"dispatching based on data details is only \"", "\"supported up to one level deep; \"", "f\"{len(data_detail)} levels specified\"", ")", "elif", "not", "data_detail", ":", "callbacks", "=", "self", ".", "_shallow_routes", ".", "setdefault", "(", "event_type", ",", "[", "]", ")", "callbacks", ".", "append", "(", "func", ")", "else", ":", "data_key", ",", "data_value", "=", "data_detail", ".", "popitem", "(", ")", "data_details", "=", "self", ".", "_deep_routes", ".", "setdefault", "(", "event_type", ",", "{", "}", ")", "specific_detail", "=", "data_details", ".", "setdefault", "(", "data_key", ",", "{", "}", ")", "callbacks", "=", "specific_detail", ".", "setdefault", "(", "data_value", ",", "[", "]", ")", "callbacks", ".", "append", "(", "func", ")" ]
Add a new route. After registering 'func' for the specified event_type, an optional data_detail may be provided. By providing an extra keyword argument, dispatching can occur based on a top-level key of the data in the event being dispatched.
[ "Add", "a", "new", "route", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/routing.py#L29-L50
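A registration sketch showing both shapes the method accepts: one shallow route (event type only) and one route keyed on a single top-level data field:

    from gidgethub import routing

    router = routing.Router()

    async def on_push(event):
        ...

    async def on_issue_opened(event):
        ...

    router.add(on_push, "push")                             # shallow route
    router.add(on_issue_opened, "issues", action="opened")  # one data detail
    # router.add(f, "issues", action="opened", state="open") raises TypeError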
3,780
brettcannon/gidgethub
gidgethub/abc.py
GitHubAPI._make_request
async def _make_request(self, method: str, url: str, url_vars: Dict[str, str], data: Any, accept: str, jwt: Opt[str] = None, oauth_token: Opt[str] = None, ) -> Tuple[bytes, Opt[str]]: """Construct and make an HTTP request.""" if oauth_token is not None and jwt is not None: raise ValueError("Cannot pass both oauth_token and jwt.") filled_url = sansio.format_url(url, url_vars) if jwt is not None: request_headers = sansio.create_headers( self.requester, accept=accept, jwt=jwt) elif oauth_token is not None: request_headers = sansio.create_headers( self.requester, accept=accept, oauth_token=oauth_token) else: # fallback to using oauth_token request_headers = sansio.create_headers( self.requester, accept=accept, oauth_token=self.oauth_token) cached = cacheable = False # Can't use None as a "no body" sentinel as it's a legitimate JSON type. if data == b"": body = b"" request_headers["content-length"] = "0" if method == "GET" and self._cache is not None: cacheable = True try: etag, last_modified, data, more = self._cache[filled_url] cached = True except KeyError: pass else: if etag is not None: request_headers["if-none-match"] = etag if last_modified is not None: request_headers["if-modified-since"] = last_modified else: charset = "utf-8" body = json.dumps(data).encode(charset) request_headers['content-type'] = f"application/json; charset={charset}" request_headers['content-length'] = str(len(body)) if self.rate_limit is not None: self.rate_limit.remaining -= 1 response = await self._request(method, filled_url, request_headers, body) if not (response[0] == 304 and cached): data, self.rate_limit, more = sansio.decipher_response(*response) has_cache_details = ("etag" in response[1] or "last-modified" in response[1]) if self._cache is not None and cacheable and has_cache_details: etag = response[1].get("etag") last_modified = response[1].get("last-modified") self._cache[filled_url] = etag, last_modified, data, more return data, more
python
async def _make_request(self, method: str, url: str, url_vars: Dict[str, str], data: Any, accept: str, jwt: Opt[str] = None, oauth_token: Opt[str] = None, ) -> Tuple[bytes, Opt[str]]: """Construct and make an HTTP request.""" if oauth_token is not None and jwt is not None: raise ValueError("Cannot pass both oauth_token and jwt.") filled_url = sansio.format_url(url, url_vars) if jwt is not None: request_headers = sansio.create_headers( self.requester, accept=accept, jwt=jwt) elif oauth_token is not None: request_headers = sansio.create_headers( self.requester, accept=accept, oauth_token=oauth_token) else: # fallback to using oauth_token request_headers = sansio.create_headers( self.requester, accept=accept, oauth_token=self.oauth_token) cached = cacheable = False # Can't use None as a "no body" sentinel as it's a legitimate JSON type. if data == b"": body = b"" request_headers["content-length"] = "0" if method == "GET" and self._cache is not None: cacheable = True try: etag, last_modified, data, more = self._cache[filled_url] cached = True except KeyError: pass else: if etag is not None: request_headers["if-none-match"] = etag if last_modified is not None: request_headers["if-modified-since"] = last_modified else: charset = "utf-8" body = json.dumps(data).encode(charset) request_headers['content-type'] = f"application/json; charset={charset}" request_headers['content-length'] = str(len(body)) if self.rate_limit is not None: self.rate_limit.remaining -= 1 response = await self._request(method, filled_url, request_headers, body) if not (response[0] == 304 and cached): data, self.rate_limit, more = sansio.decipher_response(*response) has_cache_details = ("etag" in response[1] or "last-modified" in response[1]) if self._cache is not None and cacheable and has_cache_details: etag = response[1].get("etag") last_modified = response[1].get("last-modified") self._cache[filled_url] = etag, last_modified, data, more return data, more
[ "async", "def", "_make_request", "(", "self", ",", "method", ":", "str", ",", "url", ":", "str", ",", "url_vars", ":", "Dict", "[", "str", ",", "str", "]", ",", "data", ":", "Any", ",", "accept", ":", "str", ",", "jwt", ":", "Opt", "[", "str", "]", "=", "None", ",", "oauth_token", ":", "Opt", "[", "str", "]", "=", "None", ",", ")", "->", "Tuple", "[", "bytes", ",", "Opt", "[", "str", "]", "]", ":", "if", "oauth_token", "is", "not", "None", "and", "jwt", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot pass both oauth_token and jwt.\"", ")", "filled_url", "=", "sansio", ".", "format_url", "(", "url", ",", "url_vars", ")", "if", "jwt", "is", "not", "None", ":", "request_headers", "=", "sansio", ".", "create_headers", "(", "self", ".", "requester", ",", "accept", "=", "accept", ",", "jwt", "=", "jwt", ")", "elif", "oauth_token", "is", "not", "None", ":", "request_headers", "=", "sansio", ".", "create_headers", "(", "self", ".", "requester", ",", "accept", "=", "accept", ",", "oauth_token", "=", "oauth_token", ")", "else", ":", "# fallback to using oauth_token", "request_headers", "=", "sansio", ".", "create_headers", "(", "self", ".", "requester", ",", "accept", "=", "accept", ",", "oauth_token", "=", "self", ".", "oauth_token", ")", "cached", "=", "cacheable", "=", "False", "# Can't use None as a \"no body\" sentinel as it's a legitimate JSON type.", "if", "data", "==", "b\"\"", ":", "body", "=", "b\"\"", "request_headers", "[", "\"content-length\"", "]", "=", "\"0\"", "if", "method", "==", "\"GET\"", "and", "self", ".", "_cache", "is", "not", "None", ":", "cacheable", "=", "True", "try", ":", "etag", ",", "last_modified", ",", "data", ",", "more", "=", "self", ".", "_cache", "[", "filled_url", "]", "cached", "=", "True", "except", "KeyError", ":", "pass", "else", ":", "if", "etag", "is", "not", "None", ":", "request_headers", "[", "\"if-none-match\"", "]", "=", "etag", "if", "last_modified", "is", "not", "None", ":", "request_headers", "[", "\"if-modified-since\"", "]", "=", "last_modified", "else", ":", "charset", "=", "\"utf-8\"", "body", "=", "json", ".", "dumps", "(", "data", ")", ".", "encode", "(", "charset", ")", "request_headers", "[", "'content-type'", "]", "=", "f\"application/json; charset={charset}\"", "request_headers", "[", "'content-length'", "]", "=", "str", "(", "len", "(", "body", ")", ")", "if", "self", ".", "rate_limit", "is", "not", "None", ":", "self", ".", "rate_limit", ".", "remaining", "-=", "1", "response", "=", "await", "self", ".", "_request", "(", "method", ",", "filled_url", ",", "request_headers", ",", "body", ")", "if", "not", "(", "response", "[", "0", "]", "==", "304", "and", "cached", ")", ":", "data", ",", "self", ".", "rate_limit", ",", "more", "=", "sansio", ".", "decipher_response", "(", "*", "response", ")", "has_cache_details", "=", "(", "\"etag\"", "in", "response", "[", "1", "]", "or", "\"last-modified\"", "in", "response", "[", "1", "]", ")", "if", "self", ".", "_cache", "is", "not", "None", "and", "cacheable", "and", "has_cache_details", ":", "etag", "=", "response", "[", "1", "]", ".", "get", "(", "\"etag\"", ")", "last_modified", "=", "response", "[", "1", "]", ".", "get", "(", "\"last-modified\"", ")", "self", ".", "_cache", "[", "filled_url", "]", "=", "etag", ",", "last_modified", ",", "data", ",", "more", "return", "data", ",", "more" ]
Construct and make an HTTP request.
[ "Construct", "and", "make", "an", "HTTP", "request", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/abc.py#L34-L89
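The conditional-request caching is the subtle part of the method above. An isolated, simplified sketch of the GET path only, where send and decode are stand-ins for the transport call and for sansio.decipher_response (not gidgethub's actual internals):

    def cached_get(url, cache, send, decode):
        # Send stored validators if we have a cached entry for this URL.
        headers = {}
        entry = cache.get(url)
        if entry:
            etag, last_modified, data, more = entry
            if etag is not None:
                headers["if-none-match"] = etag
            if last_modified is not None:
                headers["if-modified-since"] = last_modified
        status, resp_headers, body = send(url, headers)
        if status == 304 and entry:
            return entry[2], entry[3]  # serve the cached body and link
        data, more = decode(resp_headers, body)
        # Only cache responses that carry validators.
        if "etag" in resp_headers or "last-modified" in resp_headers:
            cache[url] = (resp_headers.get("etag"),
                          resp_headers.get("last-modified"), data, more)
        return data, more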
3,781
brettcannon/gidgethub
gidgethub/abc.py
GitHubAPI.getitem
async def getitem(self, url: str, url_vars: Dict[str, str] = {}, *, accept: str = sansio.accept_format(), jwt: Opt[str] = None, oauth_token: Opt[str] = None ) -> Any: """Send a GET request for a single item to the specified endpoint.""" data, _ = await self._make_request("GET", url, url_vars, b"", accept, jwt=jwt, oauth_token=oauth_token) return data
python
async def getitem(self, url: str, url_vars: Dict[str, str] = {}, *, accept: str = sansio.accept_format(), jwt: Opt[str] = None, oauth_token: Opt[str] = None ) -> Any: """Send a GET request for a single item to the specified endpoint.""" data, _ = await self._make_request("GET", url, url_vars, b"", accept, jwt=jwt, oauth_token=oauth_token) return data
[ "async", "def", "getitem", "(", "self", ",", "url", ":", "str", ",", "url_vars", ":", "Dict", "[", "str", ",", "str", "]", "=", "{", "}", ",", "*", ",", "accept", ":", "str", "=", "sansio", ".", "accept_format", "(", ")", ",", "jwt", ":", "Opt", "[", "str", "]", "=", "None", ",", "oauth_token", ":", "Opt", "[", "str", "]", "=", "None", ")", "->", "Any", ":", "data", ",", "_", "=", "await", "self", ".", "_make_request", "(", "\"GET\"", ",", "url", ",", "url_vars", ",", "b\"\"", ",", "accept", ",", "jwt", "=", "jwt", ",", "oauth_token", "=", "oauth_token", ")", "return", "data" ]
Send a GET request for a single item to the specified endpoint.
[ "Send", "a", "GET", "request", "for", "a", "single", "item", "to", "the", "specified", "endpoint", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/abc.py#L91-L100
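A usage sketch with the aiohttp-backed GitHubAPI implementation that ships with gidgethub; the requester name is a placeholder:

    import aiohttp
    from gidgethub.aiohttp import GitHubAPI

    async def show_description(owner: str, repo: str) -> None:
        async with aiohttp.ClientSession() as session:
            gh = GitHubAPI(session, "my-project")
            data = await gh.getitem("/repos/{owner}/{repo}",
                                    {"owner": owner, "repo": repo})
            print(data["description"])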
3,782
brettcannon/gidgethub
gidgethub/abc.py
GitHubAPI.getiter
async def getiter(self, url: str, url_vars: Dict[str, str] = {}, *, accept: str = sansio.accept_format(), jwt: Opt[str] = None, oauth_token: Opt[str] = None ) -> AsyncGenerator[Any, None]: """Return an async iterable for all the items at a specified endpoint.""" data, more = await self._make_request("GET", url, url_vars, b"", accept, jwt=jwt, oauth_token=oauth_token) if isinstance(data, dict) and "items" in data: data = data["items"] for item in data: yield item if more: # `yield from` is not supported in coroutines. async for item in self.getiter(more, url_vars, accept=accept, jwt=jwt, oauth_token=oauth_token): yield item
python
async def getiter(self, url: str, url_vars: Dict[str, str] = {}, *, accept: str = sansio.accept_format(), jwt: Opt[str] = None, oauth_token: Opt[str] = None ) -> AsyncGenerator[Any, None]: """Return an async iterable for all the items at a specified endpoint.""" data, more = await self._make_request("GET", url, url_vars, b"", accept, jwt=jwt, oauth_token=oauth_token) if isinstance(data, dict) and "items" in data: data = data["items"] for item in data: yield item if more: # `yield from` is not supported in coroutines. async for item in self.getiter(more, url_vars, accept=accept, jwt=jwt, oauth_token=oauth_token): yield item
[ "async", "def", "getiter", "(", "self", ",", "url", ":", "str", ",", "url_vars", ":", "Dict", "[", "str", ",", "str", "]", "=", "{", "}", ",", "*", ",", "accept", ":", "str", "=", "sansio", ".", "accept_format", "(", ")", ",", "jwt", ":", "Opt", "[", "str", "]", "=", "None", ",", "oauth_token", ":", "Opt", "[", "str", "]", "=", "None", ")", "->", "AsyncGenerator", "[", "Any", ",", "None", "]", ":", "data", ",", "more", "=", "await", "self", ".", "_make_request", "(", "\"GET\"", ",", "url", ",", "url_vars", ",", "b\"\"", ",", "accept", ",", "jwt", "=", "jwt", ",", "oauth_token", "=", "oauth_token", ")", "if", "isinstance", "(", "data", ",", "dict", ")", "and", "\"items\"", "in", "data", ":", "data", "=", "data", "[", "\"items\"", "]", "for", "item", "in", "data", ":", "yield", "item", "if", "more", ":", "# `yield from` is not supported in coroutines.", "async", "for", "item", "in", "self", ".", "getiter", "(", "more", ",", "url_vars", ",", "accept", "=", "accept", ",", "jwt", "=", "jwt", ",", "oauth_token", "=", "oauth_token", ")", ":", "yield", "item" ]
Return an async iterable for all the items at a specified endpoint.
[ "Return", "an", "async", "iterable", "for", "all", "the", "items", "at", "a", "specified", "endpoint", "." ]
24feb6c35bba3966c6cc9ec2896729578f6d7ccc
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/abc.py#L102-L120
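Because getiter follows the 'link' header recursively, callers simply iterate; a sketch, with gh constructed as in the getitem example above:

    async def list_issue_titles(gh) -> None:
        async for issue in gh.getiter("/repos/{owner}/{repo}/issues",
                                      {"owner": "brettcannon", "repo": "gidgethub"}):
            print(issue["title"])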
3,783
rkargon/pixelsorter
pixelsorter/images2gif.py
NeuQuant.quantize
def quantize(self, image): """ Use a kdtree to quickly find the closest palette colors for the pixels """ if get_cKDTree(): return self.quantize_with_scipy(image) else: print('Scipy not available, falling back to slower version.') return self.quantize_without_scipy(image)
python
def quantize(self, image): """ Use a kdtree to quickly find the closest palette colors for the pixels """ if get_cKDTree(): return self.quantize_with_scipy(image) else: print('Scipy not available, falling back to slower version.') return self.quantize_without_scipy(image)
[ "def", "quantize", "(", "self", ",", "image", ")", ":", "if", "get_cKDTree", "(", ")", ":", "return", "self", ".", "quantize_with_scipy", "(", "image", ")", "else", ":", "print", "(", "'Scipy not available, falling back to slower version.'", ")", "return", "self", ".", "quantize_without_scipy", "(", "image", ")" ]
Use a kdtree to quickly find the closest palette colors for the pixels
[ "Use", "a", "kdtree", "to", "quickly", "find", "the", "closest", "palette", "colors", "for", "the", "pixels" ]
0775d1e487fbcb023e411e1818ba3290b0e8665e
https://github.com/rkargon/pixelsorter/blob/0775d1e487fbcb023e411e1818ba3290b0e8665e/pixelsorter/images2gif.py#L1017-L1023
3,784
rkargon/pixelsorter
pixelsorter/images2gif.py
NeuQuant.inxsearch
def inxsearch(self, r, g, b): """Search for BGR values 0..255 and return colour index""" dists = (self.colormap[:, :3] - np.array([r, g, b])) a = np.argmin((dists * dists).sum(1)) return a
python
def inxsearch(self, r, g, b): """Search for BGR values 0..255 and return colour index""" dists = (self.colormap[:, :3] - np.array([r, g, b])) a = np.argmin((dists * dists).sum(1)) return a
[ "def", "inxsearch", "(", "self", ",", "r", ",", "g", ",", "b", ")", ":", "dists", "=", "(", "self", ".", "colormap", "[", ":", ",", ":", "3", "]", "-", "np", ".", "array", "(", "[", "r", ",", "g", ",", "b", "]", ")", ")", "a", "=", "np", ".", "argmin", "(", "(", "dists", "*", "dists", ")", ".", "sum", "(", "1", ")", ")", "return", "a" ]
Search for BGR values 0..255 and return colour index
[ "Search", "for", "BGR", "values", "0", "..", "255", "and", "return", "colour", "index" ]
0775d1e487fbcb023e411e1818ba3290b0e8665e
https://github.com/rkargon/pixelsorter/blob/0775d1e487fbcb023e411e1818ba3290b0e8665e/pixelsorter/images2gif.py#L1061-L1065
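The search is a plain squared-Euclidean argmin over the palette rows; a standalone sketch with a toy three-colour palette:

    import numpy as np

    colormap = np.array([[0, 0, 0, 0],      # black
                         [255, 0, 0, 1],    # red
                         [0, 255, 0, 2]])   # green; 4th column is the index slot
    dists = colormap[:, :3] - np.array([250, 10, 10])
    print(np.argmin((dists * dists).sum(1)))  # 1 -> nearest entry is red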
3,785
European-XFEL/karabo-bridge-py
karabo_bridge/cli/glimpse.py
gen_filename
def gen_filename(endpoint): """Generate a filename from endpoint with timestamp. return: str hostname_port_YearMonthDay_HourMinSecFrac.h5 """ now = datetime.now().strftime('%Y%m%d_%H%M%S%f')[:-4] base = endpoint.split('://', 1)[1] if base.startswith('localhost:'): base = gethostname().split('.')[0] + base[9:] base = base.replace(':', '_').replace('/', '_') return '{}_{}.h5'.format(base, now)
python
def gen_filename(endpoint): """Generate a filename from endpoint with timestamp. return: str hostname_port_YearMonthDay_HourMinSecFrac.h5 """ now = datetime.now().strftime('%Y%m%d_%H%M%S%f')[:-4] base = endpoint.split('://', 1)[1] if base.startswith('localhost:'): base = gethostname().split('.')[0] + base[9:] base = base.replace(':', '_').replace('/', '_') return '{}_{}.h5'.format(base, now)
[ "def", "gen_filename", "(", "endpoint", ")", ":", "now", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y%m%d_%H%M%S%f'", ")", "[", ":", "-", "4", "]", "base", "=", "endpoint", ".", "split", "(", "'://'", ",", "1", ")", "[", "1", "]", "if", "base", ".", "startswith", "(", "'localhost:'", ")", ":", "base", "=", "gethostname", "(", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "+", "base", "[", "9", ":", "]", "base", "=", "base", ".", "replace", "(", "':'", ",", "'_'", ")", ".", "replace", "(", "'/'", ",", "'_'", ")", "return", "'{}_{}.h5'", ".", "format", "(", "base", ",", "now", ")" ]
Generate a filename from endpoint with timestamp. return: str hostname_port_YearMonthDay_HourMinSecFrac.h5
[ "Generate", "a", "filename", "from", "endpoint", "with", "timestamp", "." ]
ca20d72b8beb0039649d10cb01d027db42efd91c
https://github.com/European-XFEL/karabo-bridge-py/blob/ca20d72b8beb0039649d10cb01d027db42efd91c/karabo_bridge/cli/glimpse.py#L15-L26
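What the naming scheme produces; the timestamps and hostname below are illustrative, and note that the [:-4] slice truncates %f to two fractional digits:

    # gen_filename('tcp://localhost:4545')
    #   -> 'myhost_4545_20190321_18301542.h5'   (gethostname() assumed 'myhost')
    # gen_filename('tcp://exflong42:1234')
    #   -> 'exflong42_1234_20190321_18301542.h5'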
3,786
European-XFEL/karabo-bridge-py
karabo_bridge/cli/glimpse.py
dict_to_hdf5
def dict_to_hdf5(dic, endpoint): """Dump a dict to an HDF5 file. """ filename = gen_filename(endpoint) with h5py.File(filename, 'w') as handler: walk_dict_to_hdf5(dic, handler) print('dumped to', filename)
python
def dict_to_hdf5(dic, endpoint): """Dump a dict to an HDF5 file. """ filename = gen_filename(endpoint) with h5py.File(filename, 'w') as handler: walk_dict_to_hdf5(dic, handler) print('dumped to', filename)
[ "def", "dict_to_hdf5", "(", "dic", ",", "endpoint", ")", ":", "filename", "=", "gen_filename", "(", "endpoint", ")", "with", "h5py", ".", "File", "(", "filename", ",", "'w'", ")", "as", "handler", ":", "walk_dict_to_hdf5", "(", "dic", ",", "handler", ")", "print", "(", "'dumped to'", ",", "filename", ")" ]
Dump a dict to an HDF5 file.
[ "Dump", "a", "dict", "to", "an", "HDF5", "file", "." ]
ca20d72b8beb0039649d10cb01d027db42efd91c
https://github.com/European-XFEL/karabo-bridge-py/blob/ca20d72b8beb0039649d10cb01d027db42efd91c/karabo_bridge/cli/glimpse.py#L29-L35
3,787
European-XFEL/karabo-bridge-py
karabo_bridge/cli/glimpse.py
hdf5_to_dict
def hdf5_to_dict(filepath, group='/'): """load the content of an hdf5 file to a dict. # TODO: how to split domain_type_dev : parameter : value ? """ if not h5py.is_hdf5(filepath): raise RuntimeError(filepath, 'is not a valid HDF5 file.') with h5py.File(filepath, 'r') as handler: dic = walk_hdf5_to_dict(handler[group]) return dic
python
def hdf5_to_dict(filepath, group='/'): """load the content of an hdf5 file to a dict. # TODO: how to split domain_type_dev : parameter : value ? """ if not h5py.is_hdf5(filepath): raise RuntimeError(filepath, 'is not a valid HDF5 file.') with h5py.File(filepath, 'r') as handler: dic = walk_hdf5_to_dict(handler[group]) return dic
[ "def", "hdf5_to_dict", "(", "filepath", ",", "group", "=", "'/'", ")", ":", "if", "not", "h5py", ".", "is_hdf5", "(", "filepath", ")", ":", "raise", "RuntimeError", "(", "filepath", ",", "'is not a valid HDF5 file.'", ")", "with", "h5py", ".", "File", "(", "filepath", ",", "'r'", ")", "as", "handler", ":", "dic", "=", "walk_hdf5_to_dict", "(", "handler", "[", "group", "]", ")", "return", "dic" ]
load the content of an hdf5 file to a dict. # TODO: how to split domain_type_dev : parameter : value ?
[ "load", "the", "content", "of", "an", "hdf5", "file", "to", "a", "dict", "." ]
ca20d72b8beb0039649d10cb01d027db42efd91c
https://github.com/European-XFEL/karabo-bridge-py/blob/ca20d72b8beb0039649d10cb01d027db42efd91c/karabo_bridge/cli/glimpse.py#L38-L48
3,788
European-XFEL/karabo-bridge-py
karabo_bridge/cli/glimpse.py
print_one_train
def print_one_train(client, verbosity=0): """Retrieve data for one train and print it. Returns the (data, metadata) dicts from the client. This is used by the -glimpse and -monitor command line tools. """ ts_before = time() data, meta = client.next() ts_after = time() if not data: print("Empty data") return train_id = list(meta.values())[0].get('timestamp.tid', 0) print("Train ID:", train_id, "--------------------------") delta = ts_after - ts_before print('Data from {} sources, REQ-REP took {:.2f} ms' .format(len(data), delta)) print() for i, (source, src_data) in enumerate(sorted(data.items()), start=1): src_metadata = meta.get(source, {}) tid = src_metadata.get('timestamp.tid', 0) print("Source {}: {!r} @ {}".format(i, source, tid)) try: ts = src_metadata['timestamp'] except KeyError: print("No timestamp") else: dt = strftime('%Y-%m-%d %H:%M:%S', localtime(ts)) delay = (ts_after - ts) * 1000 print('timestamp: {} ({}) | delay: {:.2f} ms' .format(dt, ts, delay)) if verbosity < 1: print("- data:", sorted(src_data)) print("- metadata:", sorted(src_metadata)) else: print('data:') pretty_print(src_data, verbosity=verbosity - 1) if src_metadata: print('metadata:') pretty_print(src_metadata) print() return data, meta
python
def print_one_train(client, verbosity=0): """Retrieve data for one train and print it. Returns the (data, metadata) dicts from the client. This is used by the -glimpse and -monitor command line tools. """ ts_before = time() data, meta = client.next() ts_after = time() if not data: print("Empty data") return train_id = list(meta.values())[0].get('timestamp.tid', 0) print("Train ID:", train_id, "--------------------------") delta = ts_after - ts_before print('Data from {} sources, REQ-REP took {:.2f} ms' .format(len(data), delta)) print() for i, (source, src_data) in enumerate(sorted(data.items()), start=1): src_metadata = meta.get(source, {}) tid = src_metadata.get('timestamp.tid', 0) print("Source {}: {!r} @ {}".format(i, source, tid)) try: ts = src_metadata['timestamp'] except KeyError: print("No timestamp") else: dt = strftime('%Y-%m-%d %H:%M:%S', localtime(ts)) delay = (ts_after - ts) * 1000 print('timestamp: {} ({}) | delay: {:.2f} ms' .format(dt, ts, delay)) if verbosity < 1: print("- data:", sorted(src_data)) print("- metadata:", sorted(src_metadata)) else: print('data:') pretty_print(src_data, verbosity=verbosity - 1) if src_metadata: print('metadata:') pretty_print(src_metadata) print() return data, meta
[ "def", "print_one_train", "(", "client", ",", "verbosity", "=", "0", ")", ":", "ts_before", "=", "time", "(", ")", "data", ",", "meta", "=", "client", ".", "next", "(", ")", "ts_after", "=", "time", "(", ")", "if", "not", "data", ":", "print", "(", "\"Empty data\"", ")", "return", "train_id", "=", "list", "(", "meta", ".", "values", "(", ")", ")", "[", "0", "]", ".", "get", "(", "'timestamp.tid'", ",", "0", ")", "print", "(", "\"Train ID:\"", ",", "train_id", ",", "\"--------------------------\"", ")", "delta", "=", "ts_after", "-", "ts_before", "print", "(", "'Data from {} sources, REQ-REP took {:.2f} ms'", ".", "format", "(", "len", "(", "data", ")", ",", "delta", ")", ")", "print", "(", ")", "for", "i", ",", "(", "source", ",", "src_data", ")", "in", "enumerate", "(", "sorted", "(", "data", ".", "items", "(", ")", ")", ",", "start", "=", "1", ")", ":", "src_metadata", "=", "meta", ".", "get", "(", "source", ",", "{", "}", ")", "tid", "=", "src_metadata", ".", "get", "(", "'timestamp.tid'", ",", "0", ")", "print", "(", "\"Source {}: {!r} @ {}\"", ".", "format", "(", "i", ",", "source", ",", "tid", ")", ")", "try", ":", "ts", "=", "src_metadata", "[", "'timestamp'", "]", "except", "KeyError", ":", "print", "(", "\"No timestamp\"", ")", "else", ":", "dt", "=", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ",", "localtime", "(", "ts", ")", ")", "delay", "=", "(", "ts_after", "-", "ts", ")", "*", "1000", "print", "(", "'timestamp: {} ({}) | delay: {:.2f} ms'", ".", "format", "(", "dt", ",", "ts", ",", "delay", ")", ")", "if", "verbosity", "<", "1", ":", "print", "(", "\"- data:\"", ",", "sorted", "(", "src_data", ")", ")", "print", "(", "\"- metadata:\"", ",", "sorted", "(", "src_metadata", ")", ")", "else", ":", "print", "(", "'data:'", ")", "pretty_print", "(", "src_data", ",", "verbosity", "=", "verbosity", "-", "1", ")", "if", "src_metadata", ":", "print", "(", "'metadata:'", ")", "pretty_print", "(", "src_metadata", ")", "print", "(", ")", "return", "data", ",", "meta" ]
Retrieve data for one train and print it. Returns the (data, metadata) dicts from the client. This is used by the -glimpse and -monitor command line tools.
[ "Retrieve", "data", "for", "one", "train", "and", "print", "it", "." ]
ca20d72b8beb0039649d10cb01d027db42efd91c
https://github.com/European-XFEL/karabo-bridge-py/blob/ca20d72b8beb0039649d10cb01d027db42efd91c/karabo_bridge/cli/glimpse.py#L92-L140
3,789
European-XFEL/karabo-bridge-py
karabo_bridge/cli/glimpse.py
pretty_print
def pretty_print(d, ind='', verbosity=0): """Pretty print a data dictionary from the bridge client """ assert isinstance(d, dict) for k, v in sorted(d.items()): str_base = '{} - [{}] {}'.format(ind, type(v).__name__, k) if isinstance(v, dict): print(str_base.replace('-', '+', 1)) pretty_print(v, ind=ind+' ', verbosity=verbosity) continue elif isinstance(v, np.ndarray): node = '{}, {}, {}'.format(str_base, v.dtype, v.shape) if verbosity >= 2: node += '\n{}'.format(v) elif isinstance(v, Sequence): if v and isinstance(v, (list, tuple)): itemtype = ' of ' + type(v[0]).__name__ pos = str_base.find(']') str_base = str_base[:pos] + itemtype + str_base[pos:] node = '{}, {}'.format(str_base, v) if verbosity < 1 and len(node) > 80: node = node[:77] + '...' else: node = '{}, {}'.format(str_base, v) print(node)
python
def pretty_print(d, ind='', verbosity=0): """Pretty print a data dictionary from the bridge client """ assert isinstance(d, dict) for k, v in sorted(d.items()): str_base = '{} - [{}] {}'.format(ind, type(v).__name__, k) if isinstance(v, dict): print(str_base.replace('-', '+', 1)) pretty_print(v, ind=ind+' ', verbosity=verbosity) continue elif isinstance(v, np.ndarray): node = '{}, {}, {}'.format(str_base, v.dtype, v.shape) if verbosity >= 2: node += '\n{}'.format(v) elif isinstance(v, Sequence): if v and isinstance(v, (list, tuple)): itemtype = ' of ' + type(v[0]).__name__ pos = str_base.find(']') str_base = str_base[:pos] + itemtype + str_base[pos:] node = '{}, {}'.format(str_base, v) if verbosity < 1 and len(node) > 80: node = node[:77] + '...' else: node = '{}, {}'.format(str_base, v) print(node)
[ "def", "pretty_print", "(", "d", ",", "ind", "=", "''", ",", "verbosity", "=", "0", ")", ":", "assert", "isinstance", "(", "d", ",", "dict", ")", "for", "k", ",", "v", "in", "sorted", "(", "d", ".", "items", "(", ")", ")", ":", "str_base", "=", "'{} - [{}] {}'", ".", "format", "(", "ind", ",", "type", "(", "v", ")", ".", "__name__", ",", "k", ")", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "print", "(", "str_base", ".", "replace", "(", "'-'", ",", "'+'", ",", "1", ")", ")", "pretty_print", "(", "v", ",", "ind", "=", "ind", "+", "' '", ",", "verbosity", "=", "verbosity", ")", "continue", "elif", "isinstance", "(", "v", ",", "np", ".", "ndarray", ")", ":", "node", "=", "'{}, {}, {}'", ".", "format", "(", "str_base", ",", "v", ".", "dtype", ",", "v", ".", "shape", ")", "if", "verbosity", ">=", "2", ":", "node", "+=", "'\\n{}'", ".", "format", "(", "v", ")", "elif", "isinstance", "(", "v", ",", "Sequence", ")", ":", "if", "v", "and", "isinstance", "(", "v", ",", "(", "list", ",", "tuple", ")", ")", ":", "itemtype", "=", "' of '", "+", "type", "(", "v", "[", "0", "]", ")", ".", "__name__", "pos", "=", "str_base", ".", "find", "(", "']'", ")", "str_base", "=", "str_base", "[", ":", "pos", "]", "+", "itemtype", "+", "str_base", "[", "pos", ":", "]", "node", "=", "'{}, {}'", ".", "format", "(", "str_base", ",", "v", ")", "if", "verbosity", "<", "1", "and", "len", "(", "node", ")", ">", "80", ":", "node", "=", "node", "[", ":", "77", "]", "+", "'...'", "else", ":", "node", "=", "'{}, {}'", ".", "format", "(", "str_base", ",", "v", ")", "print", "(", "node", ")" ]
Pretty print a data dictionary from the bridge client
[ "Pretty", "print", "a", "data", "dictionary", "from", "the", "bridge", "client" ]
ca20d72b8beb0039649d10cb01d027db42efd91c
https://github.com/European-XFEL/karabo-bridge-py/blob/ca20d72b8beb0039649d10cb01d027db42efd91c/karabo_bridge/cli/glimpse.py#L143-L168
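A short sketch of calling pretty_print on a nested payload, assuming it is importable from the module path in this record; the sample dictionary below is made up for illustration:

import numpy as np
from karabo_bridge.cli.glimpse import pretty_print

# Hypothetical bridge-style payload: a nested dict, an array, and a list.
sample = {
    'SPB_DET_AGIPD1M-1/DET/detector': {
        'image.data': np.zeros((16, 512, 128), dtype=np.float32),
        'pulses': [0, 1, 2],
    },
}
pretty_print(sample)               # one line per key, nested dicts indented
pretty_print(sample, verbosity=2)  # also prints the array contents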
3,790
European-XFEL/karabo-bridge-py
karabo_bridge/simulation.py
start_gen
def start_gen(port, ser='msgpack', version='2.2', detector='AGIPD',
              raw=False, nsources=1, datagen='random', *, debug=True):
    """Karabo bridge server simulation.

    Simulate a Karabo Bridge server and send random data from a detector,
    either AGIPD or LPD.

    Parameters
    ----------
    port: str
        The port on which the server is bound.
    ser: str, optional
        The serialization algorithm, default is msgpack.
    version: str, optional
        The container version of the serialized data.
    detector: str, optional
        The data format to send, default is AGIPD detector.
    raw: bool, optional
        Generate raw data output if True, else CORRECTED. Default is False.
    nsources: int, optional
        Number of sources.
    datagen: string, optional
        Generator function used to generate detector data. Default is random.
    """
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.setsockopt(zmq.LINGER, 0)
    socket.bind('tcp://*:{}'.format(port))

    if ser != 'msgpack':
        raise ValueError("Unknown serialisation format %s" % ser)
    serialize = partial(msgpack.dumps, use_bin_type=True)

    det = Detector.getDetector(detector, raw=raw, gen=datagen)
    generator = generate(det, nsources)

    print('Simulated Karabo-bridge server started on:\ntcp://{}:{}'.format(
          uname().nodename, port))
    t_prev = time()
    n = 0
    try:
        while True:
            msg = socket.recv()
            if msg == b'next':
                train = next(generator)
                msg = containize(train, ser, serialize, version)
                socket.send_multipart(msg, copy=False)
                if debug:
                    print('Server : emitted train:',
                          train[1][list(train[1].keys())[0]]['timestamp.tid'])
                n += 1
                if n % TIMING_INTERVAL == 0:
                    t_now = time()
                    print('Sent {} trains in {:.2f} seconds ({:.2f} Hz)'
                          ''.format(TIMING_INTERVAL, t_now - t_prev,
                                    TIMING_INTERVAL / (t_now - t_prev)))
                    t_prev = t_now
            else:
                print('wrong request')
                break
    except KeyboardInterrupt:
        print('\nStopped.')
    finally:
        socket.close()
        context.destroy()
python
def start_gen(port, ser='msgpack', version='2.2', detector='AGIPD',
              raw=False, nsources=1, datagen='random', *, debug=True):
    """Karabo bridge server simulation.

    Simulate a Karabo Bridge server and send random data from a detector,
    either AGIPD or LPD.

    Parameters
    ----------
    port: str
        The port on which the server is bound.
    ser: str, optional
        The serialization algorithm, default is msgpack.
    version: str, optional
        The container version of the serialized data.
    detector: str, optional
        The data format to send, default is AGIPD detector.
    raw: bool, optional
        Generate raw data output if True, else CORRECTED. Default is False.
    nsources: int, optional
        Number of sources.
    datagen: string, optional
        Generator function used to generate detector data. Default is random.
    """
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.setsockopt(zmq.LINGER, 0)
    socket.bind('tcp://*:{}'.format(port))

    if ser != 'msgpack':
        raise ValueError("Unknown serialisation format %s" % ser)
    serialize = partial(msgpack.dumps, use_bin_type=True)

    det = Detector.getDetector(detector, raw=raw, gen=datagen)
    generator = generate(det, nsources)

    print('Simulated Karabo-bridge server started on:\ntcp://{}:{}'.format(
          uname().nodename, port))
    t_prev = time()
    n = 0
    try:
        while True:
            msg = socket.recv()
            if msg == b'next':
                train = next(generator)
                msg = containize(train, ser, serialize, version)
                socket.send_multipart(msg, copy=False)
                if debug:
                    print('Server : emitted train:',
                          train[1][list(train[1].keys())[0]]['timestamp.tid'])
                n += 1
                if n % TIMING_INTERVAL == 0:
                    t_now = time()
                    print('Sent {} trains in {:.2f} seconds ({:.2f} Hz)'
                          ''.format(TIMING_INTERVAL, t_now - t_prev,
                                    TIMING_INTERVAL / (t_now - t_prev)))
                    t_prev = t_now
            else:
                print('wrong request')
                break
    except KeyboardInterrupt:
        print('\nStopped.')
    finally:
        socket.close()
        context.destroy()
[ "def", "start_gen", "(", "port", ",", "ser", "=", "'msgpack'", ",", "version", "=", "'2.2'", ",", "detector", "=", "'AGIPD'", ",", "raw", "=", "False", ",", "nsources", "=", "1", ",", "datagen", "=", "'random'", ",", "*", ",", "debug", "=", "True", ")", ":", "context", "=", "zmq", ".", "Context", "(", ")", "socket", "=", "context", ".", "socket", "(", "zmq", ".", "REP", ")", "socket", ".", "setsockopt", "(", "zmq", ".", "LINGER", ",", "0", ")", "socket", ".", "bind", "(", "'tcp://*:{}'", ".", "format", "(", "port", ")", ")", "if", "ser", "!=", "'msgpack'", ":", "raise", "ValueError", "(", "\"Unknown serialisation format %s\"", "%", "ser", ")", "serialize", "=", "partial", "(", "msgpack", ".", "dumps", ",", "use_bin_type", "=", "True", ")", "det", "=", "Detector", ".", "getDetector", "(", "detector", ",", "raw", "=", "raw", ",", "gen", "=", "datagen", ")", "generator", "=", "generate", "(", "det", ",", "nsources", ")", "print", "(", "'Simulated Karabo-bridge server started on:\\ntcp://{}:{}'", ".", "format", "(", "uname", "(", ")", ".", "nodename", ",", "port", ")", ")", "t_prev", "=", "time", "(", ")", "n", "=", "0", "try", ":", "while", "True", ":", "msg", "=", "socket", ".", "recv", "(", ")", "if", "msg", "==", "b'next'", ":", "train", "=", "next", "(", "generator", ")", "msg", "=", "containize", "(", "train", ",", "ser", ",", "serialize", ",", "version", ")", "socket", ".", "send_multipart", "(", "msg", ",", "copy", "=", "False", ")", "if", "debug", ":", "print", "(", "'Server : emitted train:'", ",", "train", "[", "1", "]", "[", "list", "(", "train", "[", "1", "]", ".", "keys", "(", ")", ")", "[", "0", "]", "]", "[", "'timestamp.tid'", "]", ")", "n", "+=", "1", "if", "n", "%", "TIMING_INTERVAL", "==", "0", ":", "t_now", "=", "time", "(", ")", "print", "(", "'Sent {} trains in {:.2f} seconds ({:.2f} Hz)'", "''", ".", "format", "(", "TIMING_INTERVAL", ",", "t_now", "-", "t_prev", ",", "TIMING_INTERVAL", "/", "(", "t_now", "-", "t_prev", ")", ")", ")", "t_prev", "=", "t_now", "else", ":", "print", "(", "'wrong request'", ")", "break", "except", "KeyboardInterrupt", ":", "print", "(", "'\\nStopped.'", ")", "finally", ":", "socket", ".", "close", "(", ")", "context", ".", "destroy", "(", ")" ]
Karabo bridge server simulation.

    Simulate a Karabo Bridge server and send random data from a detector,
    either AGIPD or LPD.

    Parameters
    ----------
    port: str
        The port on which the server is bound.
    ser: str, optional
        The serialization algorithm, default is msgpack.
    version: str, optional
        The container version of the serialized data.
    detector: str, optional
        The data format to send, default is AGIPD detector.
    raw: bool, optional
        Generate raw data output if True, else CORRECTED. Default is False.
    nsources: int, optional
        Number of sources.
    datagen: string, optional
        Generator function used to generate detector data. Default is random.
[ "Karabo", "bridge", "server", "simulation", "." ]
ca20d72b8beb0039649d10cb01d027db42efd91c
https://github.com/European-XFEL/karabo-bridge-py/blob/ca20d72b8beb0039649d10cb01d027db42efd91c/karabo_bridge/simulation.py#L262-L328
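A minimal sketch of running the simulated server, assuming the function is importable from the module path in this record; the port is arbitrary:

from karabo_bridge.simulation import start_gen

# Serves simulated AGIPD trains over a REP socket until interrupted (Ctrl-C).
start_gen('4545')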
3,791
European-XFEL/karabo-bridge-py
karabo_bridge/client.py
Client.next
def next(self): """Request next data container. This function call is blocking. Returns ------- data : dict The data for this train, keyed by source name. meta : dict The metadata for this train, keyed by source name. This dictionary is populated for protocol version 1.0 and 2.2. For other protocol versions, metadata information is available in `data` dict. Raises ------ TimeoutError If timeout is reached before receiving data. """ if self._pattern == zmq.REQ and not self._recv_ready: self._socket.send(b'next') self._recv_ready = True try: msg = self._socket.recv_multipart(copy=False) except zmq.error.Again: raise TimeoutError( 'No data received from {} in the last {} ms'.format( self._socket.getsockopt_string(zmq.LAST_ENDPOINT), self._socket.getsockopt(zmq.RCVTIMEO))) self._recv_ready = False return self._deserialize(msg)
python
def next(self): """Request next data container. This function call is blocking. Returns ------- data : dict The data for this train, keyed by source name. meta : dict The metadata for this train, keyed by source name. This dictionary is populated for protocol version 1.0 and 2.2. For other protocol versions, metadata information is available in `data` dict. Raises ------ TimeoutError If timeout is reached before receiving data. """ if self._pattern == zmq.REQ and not self._recv_ready: self._socket.send(b'next') self._recv_ready = True try: msg = self._socket.recv_multipart(copy=False) except zmq.error.Again: raise TimeoutError( 'No data received from {} in the last {} ms'.format( self._socket.getsockopt_string(zmq.LAST_ENDPOINT), self._socket.getsockopt(zmq.RCVTIMEO))) self._recv_ready = False return self._deserialize(msg)
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_pattern", "==", "zmq", ".", "REQ", "and", "not", "self", ".", "_recv_ready", ":", "self", ".", "_socket", ".", "send", "(", "b'next'", ")", "self", ".", "_recv_ready", "=", "True", "try", ":", "msg", "=", "self", ".", "_socket", ".", "recv_multipart", "(", "copy", "=", "False", ")", "except", "zmq", ".", "error", ".", "Again", ":", "raise", "TimeoutError", "(", "'No data received from {} in the last {} ms'", ".", "format", "(", "self", ".", "_socket", ".", "getsockopt_string", "(", "zmq", ".", "LAST_ENDPOINT", ")", ",", "self", ".", "_socket", ".", "getsockopt", "(", "zmq", ".", "RCVTIMEO", ")", ")", ")", "self", ".", "_recv_ready", "=", "False", "return", "self", ".", "_deserialize", "(", "msg", ")" ]
Request next data container. This function call is blocking. Returns ------- data : dict The data for this train, keyed by source name. meta : dict The metadata for this train, keyed by source name. This dictionary is populated for protocol version 1.0 and 2.2. For other protocol versions, metadata information is available in `data` dict. Raises ------ TimeoutError If timeout is reached before receiving data.
[ "Request", "next", "data", "container", "." ]
ca20d72b8beb0039649d10cb01d027db42efd91c
https://github.com/European-XFEL/karabo-bridge-py/blob/ca20d72b8beb0039649d10cb01d027db42efd91c/karabo_bridge/client.py#L90-L122
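A usage sketch for the blocking next() call, with a placeholder endpoint; per the docstring, the TimeoutError branch only fires if the client was constructed with a receive timeout, otherwise the call blocks until a train arrives:

from karabo_bridge import Client

client = Client('tcp://localhost:4545')  # placeholder endpoint
try:
    data, meta = client.next()  # blocks until a train arrives
    print('train has {} sources'.format(len(data)))
except TimeoutError as exc:
    print(exc)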
3,792
materialsvirtuallab/monty
monty/io.py
zopen
def zopen(filename, *args, **kwargs): """ This function wraps around the bz2, gzip and standard python's open function to deal intelligently with bzipped, gzipped or standard text files. Args: filename (str/Path): filename or pathlib.Path. \*args: Standard args for python open(..). E.g., 'r' for read, 'w' for write. \*\*kwargs: Standard kwargs for python open(..). Returns: File-like object. Supports with context. """ if Path is not None and isinstance(filename, Path): filename = str(filename) name, ext = os.path.splitext(filename) ext = ext.upper() if ext == ".BZ2": if PY_VERSION[0] >= 3: return bz2.open(filename, *args, **kwargs) else: args = list(args) if len(args) > 0: args[0] = "".join([c for c in args[0] if c != "t"]) if "mode" in kwargs: kwargs["mode"] = "".join([c for c in kwargs["mode"] if c != "t"]) return bz2.BZ2File(filename, *args, **kwargs) elif ext in (".GZ", ".Z"): return gzip.open(filename, *args, **kwargs) else: return io.open(filename, *args, **kwargs)
python
def zopen(filename, *args, **kwargs): """ This function wraps around the bz2, gzip and standard python's open function to deal intelligently with bzipped, gzipped or standard text files. Args: filename (str/Path): filename or pathlib.Path. \*args: Standard args for python open(..). E.g., 'r' for read, 'w' for write. \*\*kwargs: Standard kwargs for python open(..). Returns: File-like object. Supports with context. """ if Path is not None and isinstance(filename, Path): filename = str(filename) name, ext = os.path.splitext(filename) ext = ext.upper() if ext == ".BZ2": if PY_VERSION[0] >= 3: return bz2.open(filename, *args, **kwargs) else: args = list(args) if len(args) > 0: args[0] = "".join([c for c in args[0] if c != "t"]) if "mode" in kwargs: kwargs["mode"] = "".join([c for c in kwargs["mode"] if c != "t"]) return bz2.BZ2File(filename, *args, **kwargs) elif ext in (".GZ", ".Z"): return gzip.open(filename, *args, **kwargs) else: return io.open(filename, *args, **kwargs)
[ "def", "zopen", "(", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "Path", "is", "not", "None", "and", "isinstance", "(", "filename", ",", "Path", ")", ":", "filename", "=", "str", "(", "filename", ")", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "ext", "=", "ext", ".", "upper", "(", ")", "if", "ext", "==", "\".BZ2\"", ":", "if", "PY_VERSION", "[", "0", "]", ">=", "3", ":", "return", "bz2", ".", "open", "(", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "args", "=", "list", "(", "args", ")", "if", "len", "(", "args", ")", ">", "0", ":", "args", "[", "0", "]", "=", "\"\"", ".", "join", "(", "[", "c", "for", "c", "in", "args", "[", "0", "]", "if", "c", "!=", "\"t\"", "]", ")", "if", "\"mode\"", "in", "kwargs", ":", "kwargs", "[", "\"mode\"", "]", "=", "\"\"", ".", "join", "(", "[", "c", "for", "c", "in", "kwargs", "[", "\"mode\"", "]", "if", "c", "!=", "\"t\"", "]", ")", "return", "bz2", ".", "BZ2File", "(", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "ext", "in", "(", "\".GZ\"", ",", "\".Z\"", ")", ":", "return", "gzip", ".", "open", "(", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "io", ".", "open", "(", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
This function wraps around the bz2, gzip and standard python's open function to deal intelligently with bzipped, gzipped or standard text files. Args: filename (str/Path): filename or pathlib.Path. \*args: Standard args for python open(..). E.g., 'r' for read, 'w' for write. \*\*kwargs: Standard kwargs for python open(..). Returns: File-like object. Supports with context.
[ "This", "function", "wraps", "around", "the", "bz2", "gzip", "and", "standard", "python", "s", "open", "function", "to", "deal", "intelligently", "with", "bzipped", "gzipped", "or", "standard", "text", "files", "." ]
d99d6f3c68372d83489d28ff515566c93cd569e2
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/io.py#L38-L72
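Since zopen dispatches purely on the file extension, the same read/write calls work for plain, gzipped, or bzipped text. A small sketch under Python 3, with a hypothetical file name:

from monty.io import zopen

# The '.gz' suffix alone selects gzip; '.bz2' or no suffix work the same way.
with zopen('example.txt.gz', 'wt') as f:
    f.write('hello\n')
with zopen('example.txt.gz', 'rt') as f:
    print(f.read())  # -> hello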
3,793
materialsvirtuallab/monty
monty/io.py
reverse_readline
def reverse_readline(m_file, blk_size=4096, max_mem=4000000): """ Generator method to read a file line-by-line, but backwards. This allows one to efficiently get data at the end of a file. Based on code by Peter Astrand <[email protected]>, using modifications by Raymond Hettinger and Kevin German. http://code.activestate.com/recipes/439045-read-a-text-file-backwards -yet-another-implementat/ Reads file forwards and reverses in memory for files smaller than the max_mem parameter, or for gzip files where reverse seeks are not supported. Files larger than max_mem are dynamically read backwards. Args: m_file (File): File stream to read (backwards) blk_size (int): The buffer size. Defaults to 4096. max_mem (int): The maximum amount of memory to involve in this operation. This is used to determine when to reverse a file in-memory versus seeking portions of a file. For bz2 files, this sets the maximum block size. Returns: Generator that returns lines from the file. Similar behavior to the file.readline() method, except the lines are returned from the back of the file. """ # Check if the file stream is a bit stream or not is_text = isinstance(m_file, io.TextIOWrapper) try: file_size = os.path.getsize(m_file.name) except AttributeError: # Bz2 files do not have name attribute. Just set file_size to above # max_mem for now. file_size = max_mem + 1 # If the file size is within our desired RAM use, just reverse it in memory # GZip files must use this method because there is no way to negative seek if file_size < max_mem or isinstance(m_file, gzip.GzipFile): for line in reversed(m_file.readlines()): yield line.rstrip() else: if isinstance(m_file, bz2.BZ2File): # for bz2 files, seeks are expensive. It is therefore in our best # interest to maximize the blk_size within limits of desired RAM # use. blk_size = min(max_mem, file_size) buf = "" m_file.seek(0, 2) if is_text: lastchar = m_file.read(1) else: lastchar = m_file.read(1).decode("utf-8") trailing_newline = (lastchar == "\n") while 1: newline_pos = buf.rfind("\n") pos = m_file.tell() if newline_pos != -1: # Found a newline line = buf[newline_pos + 1:] buf = buf[:newline_pos] if pos or newline_pos or trailing_newline: line += "\n" yield line elif pos: # Need to fill buffer toread = min(blk_size, pos) m_file.seek(pos - toread, 0) if is_text: buf = m_file.read(toread) + buf else: buf = m_file.read(toread).decode("utf-8") + buf m_file.seek(pos - toread, 0) if pos == toread: buf = "\n" + buf else: # Start-of-file return
python
def reverse_readline(m_file, blk_size=4096, max_mem=4000000): """ Generator method to read a file line-by-line, but backwards. This allows one to efficiently get data at the end of a file. Based on code by Peter Astrand <[email protected]>, using modifications by Raymond Hettinger and Kevin German. http://code.activestate.com/recipes/439045-read-a-text-file-backwards -yet-another-implementat/ Reads file forwards and reverses in memory for files smaller than the max_mem parameter, or for gzip files where reverse seeks are not supported. Files larger than max_mem are dynamically read backwards. Args: m_file (File): File stream to read (backwards) blk_size (int): The buffer size. Defaults to 4096. max_mem (int): The maximum amount of memory to involve in this operation. This is used to determine when to reverse a file in-memory versus seeking portions of a file. For bz2 files, this sets the maximum block size. Returns: Generator that returns lines from the file. Similar behavior to the file.readline() method, except the lines are returned from the back of the file. """ # Check if the file stream is a bit stream or not is_text = isinstance(m_file, io.TextIOWrapper) try: file_size = os.path.getsize(m_file.name) except AttributeError: # Bz2 files do not have name attribute. Just set file_size to above # max_mem for now. file_size = max_mem + 1 # If the file size is within our desired RAM use, just reverse it in memory # GZip files must use this method because there is no way to negative seek if file_size < max_mem or isinstance(m_file, gzip.GzipFile): for line in reversed(m_file.readlines()): yield line.rstrip() else: if isinstance(m_file, bz2.BZ2File): # for bz2 files, seeks are expensive. It is therefore in our best # interest to maximize the blk_size within limits of desired RAM # use. blk_size = min(max_mem, file_size) buf = "" m_file.seek(0, 2) if is_text: lastchar = m_file.read(1) else: lastchar = m_file.read(1).decode("utf-8") trailing_newline = (lastchar == "\n") while 1: newline_pos = buf.rfind("\n") pos = m_file.tell() if newline_pos != -1: # Found a newline line = buf[newline_pos + 1:] buf = buf[:newline_pos] if pos or newline_pos or trailing_newline: line += "\n" yield line elif pos: # Need to fill buffer toread = min(blk_size, pos) m_file.seek(pos - toread, 0) if is_text: buf = m_file.read(toread) + buf else: buf = m_file.read(toread).decode("utf-8") + buf m_file.seek(pos - toread, 0) if pos == toread: buf = "\n" + buf else: # Start-of-file return
[ "def", "reverse_readline", "(", "m_file", ",", "blk_size", "=", "4096", ",", "max_mem", "=", "4000000", ")", ":", "# Check if the file stream is a bit stream or not", "is_text", "=", "isinstance", "(", "m_file", ",", "io", ".", "TextIOWrapper", ")", "try", ":", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "m_file", ".", "name", ")", "except", "AttributeError", ":", "# Bz2 files do not have name attribute. Just set file_size to above", "# max_mem for now.", "file_size", "=", "max_mem", "+", "1", "# If the file size is within our desired RAM use, just reverse it in memory", "# GZip files must use this method because there is no way to negative seek", "if", "file_size", "<", "max_mem", "or", "isinstance", "(", "m_file", ",", "gzip", ".", "GzipFile", ")", ":", "for", "line", "in", "reversed", "(", "m_file", ".", "readlines", "(", ")", ")", ":", "yield", "line", ".", "rstrip", "(", ")", "else", ":", "if", "isinstance", "(", "m_file", ",", "bz2", ".", "BZ2File", ")", ":", "# for bz2 files, seeks are expensive. It is therefore in our best", "# interest to maximize the blk_size within limits of desired RAM", "# use.", "blk_size", "=", "min", "(", "max_mem", ",", "file_size", ")", "buf", "=", "\"\"", "m_file", ".", "seek", "(", "0", ",", "2", ")", "if", "is_text", ":", "lastchar", "=", "m_file", ".", "read", "(", "1", ")", "else", ":", "lastchar", "=", "m_file", ".", "read", "(", "1", ")", ".", "decode", "(", "\"utf-8\"", ")", "trailing_newline", "=", "(", "lastchar", "==", "\"\\n\"", ")", "while", "1", ":", "newline_pos", "=", "buf", ".", "rfind", "(", "\"\\n\"", ")", "pos", "=", "m_file", ".", "tell", "(", ")", "if", "newline_pos", "!=", "-", "1", ":", "# Found a newline", "line", "=", "buf", "[", "newline_pos", "+", "1", ":", "]", "buf", "=", "buf", "[", ":", "newline_pos", "]", "if", "pos", "or", "newline_pos", "or", "trailing_newline", ":", "line", "+=", "\"\\n\"", "yield", "line", "elif", "pos", ":", "# Need to fill buffer", "toread", "=", "min", "(", "blk_size", ",", "pos", ")", "m_file", ".", "seek", "(", "pos", "-", "toread", ",", "0", ")", "if", "is_text", ":", "buf", "=", "m_file", ".", "read", "(", "toread", ")", "+", "buf", "else", ":", "buf", "=", "m_file", ".", "read", "(", "toread", ")", ".", "decode", "(", "\"utf-8\"", ")", "+", "buf", "m_file", ".", "seek", "(", "pos", "-", "toread", ",", "0", ")", "if", "pos", "==", "toread", ":", "buf", "=", "\"\\n\"", "+", "buf", "else", ":", "# Start-of-file", "return" ]
Generator method to read a file line-by-line, but backwards. This allows one to efficiently get data at the end of a file. Based on code by Peter Astrand <[email protected]>, using modifications by Raymond Hettinger and Kevin German. http://code.activestate.com/recipes/439045-read-a-text-file-backwards -yet-another-implementat/ Reads file forwards and reverses in memory for files smaller than the max_mem parameter, or for gzip files where reverse seeks are not supported. Files larger than max_mem are dynamically read backwards. Args: m_file (File): File stream to read (backwards) blk_size (int): The buffer size. Defaults to 4096. max_mem (int): The maximum amount of memory to involve in this operation. This is used to determine when to reverse a file in-memory versus seeking portions of a file. For bz2 files, this sets the maximum block size. Returns: Generator that returns lines from the file. Similar behavior to the file.readline() method, except the lines are returned from the back of the file.
[ "Generator", "method", "to", "read", "a", "file", "line", "-", "by", "-", "line", "but", "backwards", ".", "This", "allows", "one", "to", "efficiently", "get", "data", "at", "the", "end", "of", "a", "file", "." ]
d99d6f3c68372d83489d28ff515566c93cd569e2
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/io.py#L105-L187
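A sketch of tailing a hypothetical log file with the generator above; for files under max_mem it simply reverses the lines in memory, so the observable behaviour is the same either way:

from monty.io import reverse_readline

with open('run.log') as f:  # hypothetical file
    for i, line in enumerate(reverse_readline(f)):
        print(line.rstrip())
        if i == 2:  # stop after the last three lines
            break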
3,794
materialsvirtuallab/monty
monty/io.py
get_open_fds
def get_open_fds():
    """
    Return the number of open file descriptors for the current process.

    .. warning:: Will only work on UNIX-like operating systems.
    """
    pid = os.getpid()

    procs = subprocess.check_output(["lsof", '-w', '-Ff', "-p", str(pid)])
    procs = procs.decode("utf-8")

    return len([s for s in procs.split('\n')
                if s and s[0] == 'f' and s[1:].isdigit()])
python
def get_open_fds():
    """
    Return the number of open file descriptors for the current process.

    .. warning:: Will only work on UNIX-like operating systems.
    """
    pid = os.getpid()

    procs = subprocess.check_output(["lsof", '-w', '-Ff', "-p", str(pid)])
    procs = procs.decode("utf-8")

    return len([s for s in procs.split('\n')
                if s and s[0] == 'f' and s[1:].isdigit()])
[ "def", "get_open_fds", "(", ")", ":", "pid", "=", "os", ".", "getpid", "(", ")", "procs", "=", "subprocess", ".", "check_output", "(", "[", "\"lsof\"", ",", "'-w'", ",", "'-Ff'", ",", "\"-p\"", ",", "str", "(", "pid", ")", "]", ")", "procs", "=", "procs", ".", "decode", "(", "\"utf-8\"", ")", "return", "len", "(", "[", "s", "for", "s", "in", "procs", ".", "split", "(", "'\\n'", ")", "if", "s", "and", "s", "[", "0", "]", "==", "'f'", "and", "s", "[", "1", ":", "]", ".", "isdigit", "(", ")", "]", ")" ]
Return the number of open file descriptors for the current process.

.. warning:: Will only work on UNIX-like operating systems.
[ "Return", "the", "number", "of", "open", "file", "descriptors", "for", "current", "process" ]
d99d6f3c68372d83489d28ff515566c93cd569e2
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/io.py#L287-L298
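A quick check that the count moves when a file is opened; this assumes a UNIX-like system with lsof on the PATH, as the warning in the docstring says:

import tempfile
from monty.io import get_open_fds

before = get_open_fds()
with tempfile.TemporaryFile() as tmp:
    # One extra descriptor is open while tmp exists.
    print(get_open_fds() - before)  # typically 1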
3,795
materialsvirtuallab/monty
monty/io.py
FileLock.acquire
def acquire(self):
    """
    Acquire the lock, if possible. If the lock is in use, it checks again
    every `delay` seconds. It does this until it either gets the lock or
    exceeds `timeout` number of seconds, in which case it throws an
    exception.
    """
    start_time = time.time()
    while True:
        try:
            self.fd = os.open(self.lockfile,
                              os.O_CREAT | os.O_EXCL | os.O_RDWR)
            break
        except (OSError,) as e:
            if e.errno != errno.EEXIST:
                raise
            if (time.time() - start_time) >= self.timeout:
                raise FileLockException("%s: Timeout occurred." %
                                        self.lockfile)
            time.sleep(self.delay)
    self.is_locked = True
python
def acquire(self):
    """
    Acquire the lock, if possible. If the lock is in use, it checks again
    every `delay` seconds. It does this until it either gets the lock or
    exceeds `timeout` number of seconds, in which case it throws an
    exception.
    """
    start_time = time.time()
    while True:
        try:
            self.fd = os.open(self.lockfile,
                              os.O_CREAT | os.O_EXCL | os.O_RDWR)
            break
        except (OSError,) as e:
            if e.errno != errno.EEXIST:
                raise
            if (time.time() - start_time) >= self.timeout:
                raise FileLockException("%s: Timeout occurred." %
                                        self.lockfile)
            time.sleep(self.delay)
    self.is_locked = True
[ "def", "acquire", "(", "self", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "while", "True", ":", "try", ":", "self", ".", "fd", "=", "os", ".", "open", "(", "self", ".", "lockfile", ",", "os", ".", "O_CREAT", "|", "os", ".", "O_EXCL", "|", "os", ".", "O_RDWR", ")", "break", "except", "(", "OSError", ",", ")", "as", "e", ":", "if", "e", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise", "if", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", ">=", "self", ".", "timeout", ":", "raise", "FileLockException", "(", "\"%s: Timeout occured.\"", "%", "self", ".", "lockfile", ")", "time", ".", "sleep", "(", "self", ".", "delay", ")", "self", ".", "is_locked", "=", "True" ]
Acquire the lock, if possible. If the lock is in use, it checks again
        every `delay` seconds. It does this until it either gets the lock or
        exceeds `timeout` number of seconds, in which case it throws an
        exception.
[ "Acquire", "the", "lock", "if", "possible", ".", "If", "the", "lock", "is", "in", "use", "it", "check", "again", "every", "delay", "seconds", ".", "It", "does", "this", "until", "it", "either", "gets", "the", "lock", "or", "exceeds", "timeout", "number", "of", "seconds", "in", "which", "case", "it", "throws", "an", "exception", "." ]
d99d6f3c68372d83489d28ff515566c93cd569e2
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/io.py#L229-L250
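A sketch of explicit acquire/release around a critical section, assuming the constructor takes the protected file name plus optional timeout and delay arguments (the file name here is hypothetical):

from monty.io import FileLock

lock = FileLock('shared.dat', timeout=10, delay=0.05)
lock.acquire()  # retries every `delay` s, raises after `timeout` s
try:
    pass  # exclusive access to shared.dat goes here
finally:
    lock.release()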
3,796
materialsvirtuallab/monty
monty/io.py
FileLock.release
def release(self): """ Get rid of the lock by deleting the lockfile. When working in a `with` statement, this gets automatically called at the end. """ if self.is_locked: os.close(self.fd) os.unlink(self.lockfile) self.is_locked = False
python
def release(self): """ Get rid of the lock by deleting the lockfile. When working in a `with` statement, this gets automatically called at the end. """ if self.is_locked: os.close(self.fd) os.unlink(self.lockfile) self.is_locked = False
[ "def", "release", "(", "self", ")", ":", "if", "self", ".", "is_locked", ":", "os", ".", "close", "(", "self", ".", "fd", ")", "os", ".", "unlink", "(", "self", ".", "lockfile", ")", "self", ".", "is_locked", "=", "False" ]
Get rid of the lock by deleting the lockfile. When working in a `with` statement, this gets automatically called at the end.
[ "Get", "rid", "of", "the", "lock", "by", "deleting", "the", "lockfile", ".", "When", "working", "in", "a", "with", "statement", "this", "gets", "automatically", "called", "at", "the", "end", "." ]
d99d6f3c68372d83489d28ff515566c93cd569e2
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/io.py#L252-L260
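The release docstring notes that a `with` statement calls it automatically; the equivalent context-manager form of the previous sketch:

from monty.io import FileLock

with FileLock('shared.dat'):  # hypothetical file name
    pass  # the lockfile is deleted on exit, even if the body raises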
3,797
materialsvirtuallab/monty
monty/fnmatch.py
WildCard.filter
def filter(self, names): """ Returns a list with the names matching the pattern. """ names = list_strings(names) fnames = [] for f in names: for pat in self.pats: if fnmatch.fnmatch(f, pat): fnames.append(f) return fnames
python
def filter(self, names): """ Returns a list with the names matching the pattern. """ names = list_strings(names) fnames = [] for f in names: for pat in self.pats: if fnmatch.fnmatch(f, pat): fnames.append(f) return fnames
[ "def", "filter", "(", "self", ",", "names", ")", ":", "names", "=", "list_strings", "(", "names", ")", "fnames", "=", "[", "]", "for", "f", "in", "names", ":", "for", "pat", "in", "self", ".", "pats", ":", "if", "fnmatch", ".", "fnmatch", "(", "f", ",", "pat", ")", ":", "fnames", ".", "append", "(", "f", ")", "return", "fnames" ]
Returns a list with the names matching the pattern.
[ "Returns", "a", "list", "with", "the", "names", "matching", "the", "pattern", "." ]
d99d6f3c68372d83489d28ff515566c93cd569e2
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/fnmatch.py#L41-L53
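A filter sketch, assuming WildCard is built from a '|'-separated pattern string (the constructor convention used in monty.fnmatch):

from monty.fnmatch import WildCard

wc = WildCard('*.txt|*.rst')
print(wc.filter(['a.txt', 'b.py', 'c.rst']))  # -> ['a.txt', 'c.rst']

Note the nested loop in the source: a name that matches several patterns is appended once per matching pattern.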
3,798
materialsvirtuallab/monty
monty/fnmatch.py
WildCard.match
def match(self, name): """ Returns True if name matches one of the patterns. """ for pat in self.pats: if fnmatch.fnmatch(name, pat): return True return False
python
def match(self, name): """ Returns True if name matches one of the patterns. """ for pat in self.pats: if fnmatch.fnmatch(name, pat): return True return False
[ "def", "match", "(", "self", ",", "name", ")", ":", "for", "pat", "in", "self", ".", "pats", ":", "if", "fnmatch", ".", "fnmatch", "(", "name", ",", "pat", ")", ":", "return", "True", "return", "False" ]
Returns True if name matches one of the patterns.
[ "Returns", "True", "if", "name", "matches", "one", "of", "the", "patterns", "." ]
d99d6f3c68372d83489d28ff515566c93cd569e2
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/fnmatch.py#L55-L63
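And the boolean counterpart, under the same '|'-separated constructor assumption:

from monty.fnmatch import WildCard

wc = WildCard('*.txt|*.gz')
print(wc.match('data.txt'))   # True: the first pattern matches
print(wc.match('data.json'))  # False: no pattern matches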
3,799
materialsvirtuallab/monty
monty/dev.py
deprecated
def deprecated(replacement=None, message=None): """ Decorator to mark classes or functions as deprecated, with a possible replacement. Args: replacement (callable): A replacement class or method. message (str): A warning message to be displayed. Returns: Original function, but with a warning to use the updated class. """ def wrap(old): def wrapped(*args, **kwargs): msg = "%s is deprecated" % old.__name__ if replacement is not None: if isinstance(replacement, property): r = replacement.fget elif isinstance(replacement, (classmethod, staticmethod)): r = replacement.__func__ else: r = replacement msg += "; use %s in %s instead." % (r.__name__, r.__module__) if message is not None: msg += "\n" + message warnings.simplefilter('default') warnings.warn(msg, DeprecationWarning, stacklevel=2) return old(*args, **kwargs) return wrapped return wrap
python
def deprecated(replacement=None, message=None): """ Decorator to mark classes or functions as deprecated, with a possible replacement. Args: replacement (callable): A replacement class or method. message (str): A warning message to be displayed. Returns: Original function, but with a warning to use the updated class. """ def wrap(old): def wrapped(*args, **kwargs): msg = "%s is deprecated" % old.__name__ if replacement is not None: if isinstance(replacement, property): r = replacement.fget elif isinstance(replacement, (classmethod, staticmethod)): r = replacement.__func__ else: r = replacement msg += "; use %s in %s instead." % (r.__name__, r.__module__) if message is not None: msg += "\n" + message warnings.simplefilter('default') warnings.warn(msg, DeprecationWarning, stacklevel=2) return old(*args, **kwargs) return wrapped return wrap
[ "def", "deprecated", "(", "replacement", "=", "None", ",", "message", "=", "None", ")", ":", "def", "wrap", "(", "old", ")", ":", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "msg", "=", "\"%s is deprecated\"", "%", "old", ".", "__name__", "if", "replacement", "is", "not", "None", ":", "if", "isinstance", "(", "replacement", ",", "property", ")", ":", "r", "=", "replacement", ".", "fget", "elif", "isinstance", "(", "replacement", ",", "(", "classmethod", ",", "staticmethod", ")", ")", ":", "r", "=", "replacement", ".", "__func__", "else", ":", "r", "=", "replacement", "msg", "+=", "\"; use %s in %s instead.\"", "%", "(", "r", ".", "__name__", ",", "r", ".", "__module__", ")", "if", "message", "is", "not", "None", ":", "msg", "+=", "\"\\n\"", "+", "message", "warnings", ".", "simplefilter", "(", "'default'", ")", "warnings", ".", "warn", "(", "msg", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "return", "old", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped", "return", "wrap" ]
Decorator to mark classes or functions as deprecated, with a possible replacement. Args: replacement (callable): A replacement class or method. message (str): A warning message to be displayed. Returns: Original function, but with a warning to use the updated class.
[ "Decorator", "to", "mark", "classes", "or", "functions", "as", "deprecated", "with", "a", "possible", "replacement", "." ]
d99d6f3c68372d83489d28ff515566c93cd569e2
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/dev.py#L26-L58
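A sketch of the decorator in use; both functions and the message are made up for illustration:

from monty.dev import deprecated

def new_function():
    return 42

@deprecated(replacement=new_function, message='Made-up migration note.')
def old_function():
    return 42

# Emits a DeprecationWarning naming new_function and its module,
# followed by the custom message, then calls through to old_function.
old_function()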