Dataset schema (column: type, observed min-max):
- repository_name: string, length 5 to 67
- func_path_in_repository: string, length 4 to 234
- func_name: string, length 0 to 314
- whole_func_string: string, length 52 to 3.87M
- language: string, 6 classes
- func_code_string: string, length 39 to 1.84M
- func_code_tokens: list, length 15 to 672k
- func_documentation_string: string, length 1 to 47.2k
- func_documentation_tokens: list, length 1 to 3.92k
- split_name: string, 1 class
- func_code_url: string, length 85 to 339
spry-group/python-vultr
vultr/v1_firewall.py
VultrFirewall.group_list
def group_list(self, params=None):
    '''/v1/firewall/group_list
    GET - account
    List all firewall groups on the current account.

    Link: https://www.vultr.com/api/#firewall_group_list
    '''
    params = params if params else dict()
    return self.request('/v1/firewall/group_list', params, 'GET')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_firewall.py#L10-L18
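Each row here documents one thin endpoint wrapper from the python-vultr client. A minimal usage sketch follows; the `firewall` attribute name on the client is an assumption, mirroring the vultr.account / vultr.server sub-clients used in the dump_info example further down:

# Hedged sketch: assumes the Vultr client exposes VultrFirewall as `vultr.firewall`.
from vultr import Vultr

vultr = Vultr('EXAMPLE_API_KEY')      # your Vultr API key
groups = vultr.firewall.group_list()  # GET /v1/firewall/group_list
print(groups)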
spry-group/python-vultr
vultr/v1_account.py
VultrAccount.info
def info(self, params=None):
    '''/v1/account/info
    GET - account
    Retrieve information about the current account

    Link: https://www.vultr.com/api/#account_info
    '''
    params = params if params else dict()
    return self.request('/v1/account/info', params, 'GET')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_account.py#L10-L18
spry-group/python-vultr
examples/basic_list.py
dump_info
def dump_info():
    '''Shows various details about the account & servers'''
    vultr = Vultr(API_KEY)
    try:
        logging.info('Listing account info:\n%s', dumps(
            vultr.account.info(), indent=2
        ))
        logging.info('Listing apps:\n%s', dumps(
            vultr.app.list(), indent=2
        ))
        logging.info('Listing backups:\n%s', dumps(
            vultr.backup.list(), indent=2
        ))
        logging.info('Listing DNS:\n%s', dumps(
            vultr.dns.list(), indent=2
        ))
        logging.info('Listing ISOs:\n%s', dumps(
            vultr.iso.list(), indent=2
        ))
        logging.info('Listing OSs:\n%s', dumps(
            vultr.os.list(), indent=2
        ))
        logging.info('Listing plans:\n%s', dumps(
            vultr.plans.list(), indent=2
        ))
        logging.info('Listing regions:\n%s', dumps(
            vultr.regions.list(), indent=2
        ))
        logging.info('Listing servers:\n%s', dumps(
            vultr.server.list(), indent=2
        ))
        logging.info('Listing snapshots:\n%s', dumps(
            vultr.snapshot.list(), indent=2
        ))
        logging.info('Listing SSH keys:\n%s', dumps(
            vultr.sshkey.list(), indent=2
        ))
        logging.info('Listing startup scripts:\n%s', dumps(
            vultr.startupscript.list(), indent=2
        ))
    except VultrError as ex:
        logging.error('VultrError: %s', ex)
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/examples/basic_list.py#L19-L72
spry-group/python-vultr
vultr/v1_regions.py
VultrRegions.availability
def availability(self, dcid, params=None):
    '''/v1/regions/availability
    GET - public
    Retrieve a list of the VPSPLANIDs currently available in this
    location. If your account has special plans available, you will
    need to pass your api_key in order to see them. For all other
    accounts, the API key is optional.

    Link: https://www.vultr.com/api/#regions_region_available
    '''
    params = update_params(params, {'DCID': dcid})
    return self.request('/v1/regions/availability', params, 'GET')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_regions.py#L10-L21
spry-group/python-vultr
vultr/utils.py
update_params
def update_params(params, updates):
    '''Merges updates into params'''
    params = params.copy() if isinstance(params, dict) else dict()
    params.update(updates)
    return params
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L94-L98
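update_params is the small helper the endpoint wrappers lean on: it copies the caller's dict (or starts fresh if none was given) and lets the required API fields win on key collisions. A self-contained sketch of that behaviour, repeating the function exactly as listed above; the SUBID value is made up for illustration:

def update_params(params, updates):
    '''Merges updates into params'''
    params = params.copy() if isinstance(params, dict) else dict()
    params.update(updates)
    return params

user = {'label': 'web-1'}
merged = update_params(user, {'SUBID': 576965})   # 576965 is an example SUBID
print(merged)  # {'label': 'web-1', 'SUBID': 576965}
print(user)    # {'label': 'web-1'} -- the caller's dict is not mutated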
spry-group/python-vultr
vultr/utils.py
VultrBase.set_requests_per_second
def set_requests_per_second(self, req_per_second):
    '''Adjusts the request/second at run-time'''
    self.req_per_second = req_per_second
    self.req_duration = 1 / self.req_per_second
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L21-L24
spry-group/python-vultr
vultr/utils.py
VultrBase._request_get_helper
def _request_get_helper(self, url, params=None):
    '''API GET request helper'''
    if not isinstance(params, dict):
        params = dict()
    if self.api_key:
        params['api_key'] = self.api_key
    return requests.get(url, params=params, timeout=60)
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L26-L33
spry-group/python-vultr
vultr/utils.py
VultrBase._request_post_helper
def _request_post_helper(self, url, params=None):
    '''API POST helper'''
    # Guard the unauthenticated case: the original only assigned `query`
    # when an API key was set, which raised NameError otherwise.
    query = {'api_key': self.api_key} if self.api_key else dict()
    return requests.post(url, params=query, data=params, timeout=60)
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L35-L39
spry-group/python-vultr
vultr/utils.py
VultrBase._request_helper
def _request_helper(self, url, params, method):
    '''API request helper method'''
    try:
        if method == 'POST':
            return self._request_post_helper(url, params)
        elif method == 'GET':
            return self._request_get_helper(url, params)
        raise VultrError('Unsupported method %s' % method)
    except requests.RequestException as ex:
        raise RuntimeError(ex)
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L41-L50
spry-group/python-vultr
vultr/utils.py
VultrBase.request
def request(self, path, params=None, method='GET'):
    '''API request / call method'''
    _start = time.time()
    if not path.startswith('/'):
        path = '/' + path
    resp = self._request_helper(self.api_endpoint + path, params, method)
    if resp.status_code != 200:
        if resp.status_code == 400:
            raise VultrError('Invalid API location. Check the URL that' +
                             ' you are using')
        elif resp.status_code == 403:
            raise VultrError('Invalid or missing API key. Check that' +
                             ' your API key is present and matches' +
                             ' your assigned key')
        elif resp.status_code == 405:
            raise VultrError('Invalid HTTP method. Check that the' +
                             ' method (POST|GET) matches what the' +
                             ' documentation indicates')
        elif resp.status_code == 412:
            raise VultrError('Request failed. Check the response body ' +
                             'for a more detailed description. Body: \n' +
                             resp.text)
        elif resp.status_code == 500:
            raise VultrError('Internal server error. Try again at a' +
                             ' later time')
        elif resp.status_code == 503:
            raise VultrError('Rate limit hit. API requests are limited' +
                             ' to an average of 1/s. Try your request' +
                             ' again later.')
    # very simplistic synchronous rate limiting;
    _elapsed = time.time() - _start
    if _elapsed < self.req_duration:
        time.sleep(self.req_duration - _elapsed)
    # return an empty json object if the API doesn't respond with a value.
    return resp.json() if resp.text else json_module.loads('{}')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L52-L91
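The throttle at the end of request is the counterpart of set_requests_per_second: req_duration = 1 / req_per_second is the minimum window per call, and whatever is left of that window after the HTTP round-trip is slept off. A standalone sketch of the same pattern, assuming the default of one request per second:

import time

REQ_PER_SECOND = 1                 # assumed default rate
REQ_DURATION = 1 / REQ_PER_SECOND  # minimum seconds per request

def throttled(call):
    '''Run call(), then sleep off the rest of the per-request window.'''
    start = time.time()
    result = call()
    elapsed = time.time() - start
    if elapsed < REQ_DURATION:
        time.sleep(REQ_DURATION - elapsed)
    return result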
spry-group/python-vultr
vultr/v1_snapshot.py
VultrSnapshot.create
def create(self, subid, params=None):
    '''/v1/snapshot/create
    POST - account
    Create a snapshot from an existing virtual machine.
    The virtual machine does not need to be stopped.

    Link: https://www.vultr.com/api/#snapshot_create
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/snapshot/create', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_snapshot.py#L10-L19
spry-group/python-vultr
vultr/v1_snapshot.py
VultrSnapshot.destroy
def destroy(self, snapshotid, params=None):
    '''/v1/snapshot/destroy
    POST - account
    Destroy (delete) a snapshot. There is no going back from this call.

    Link: https://www.vultr.com/api/#snapshot_destroy
    '''
    params = update_params(params, {'SNAPSHOTID': snapshotid})
    return self.request('/v1/snapshot/destroy', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_snapshot.py#L21-L30
spry-group/python-vultr
vultr/v1_server_ipv4.py
VultrServerIPv4.destroy
def destroy(self, subid, ipaddr, params=None):
    '''/v1/server/destroy_ipv4
    POST - account
    Removes a secondary IPv4 address from a server. Your server will
    be hard-restarted. We suggest halting the machine gracefully
    before removing IPs.

    Link: https://www.vultr.com/api/#server_destroy_ipv4
    '''
    params = update_params(params, {
        'SUBID': subid,
        'ip': ipaddr
    })
    return self.request('/v1/server/destroy_ipv4', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server_ipv4.py#L23-L36
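The destroy docstring warns that removing a secondary IPv4 address hard-restarts the server and suggests halting gracefully first. A hedged sketch of that order of operations; the `server_ipv4` attribute name, the example SUBID/IP, and the grace period are all assumptions, and a truly graceful stop would be a shutdown from inside the guest:

import time
from vultr import Vultr

vultr = Vultr('EXAMPLE_API_KEY')
vultr.server.halt(576965)           # stop the machine first (hard power off per its docstring)
time.sleep(30)                      # assumed grace period
vultr.server_ipv4.destroy(576965, '203.0.113.10')  # then remove the secondary IP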
spry-group/python-vultr
vultr/v1_server_ipv4.py
VultrServerIPv4.list
def list(self, subid, params=None):
    '''/v1/server/list_ipv4
    GET - account
    List the IPv4 information of a virtual machine. IP information
    is only available for virtual machines in the "active" state.

    Link: https://www.vultr.com/api/#server_list_ipv4
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/list_ipv4', params, 'GET')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server_ipv4.py#L38-L47
spry-group/python-vultr
vultr/v1_server_ipv4.py
VultrServerIPv4.reverse_default
def reverse_default(self, subid, ipaddr, params=None):
    '''/v1/server/reverse_default_ipv4
    POST - account
    Set a reverse DNS entry for an IPv4 address of a virtual machine
    to the original setting. Upon success, DNS changes may take 6-12
    hours to become active.

    Link: https://www.vultr.com/api/#server_reverse_default_ipv4
    '''
    params = update_params(params, {
        'SUBID': subid,
        'ip': ipaddr
    })
    return self.request('/v1/server/reverse_default_ipv4', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server_ipv4.py#L49-L62
spry-group/python-vultr
vultr/v1_server_ipv4.py
VultrServerIPv4.reverse_set
def reverse_set(self, subid, ipaddr, entry, params=None):
    '''/v1/server/reverse_set_ipv4
    POST - account
    Set a reverse DNS entry for an IPv4 address of a virtual machine.
    Upon success, DNS changes may take 6-12 hours to become active.

    Link: https://www.vultr.com/api/#server_reverse_set_ipv4
    '''
    params = update_params(params, {
        'SUBID': subid,
        'ip': ipaddr,
        'entry': entry
    })
    return self.request('/v1/server/reverse_set_ipv4', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server_ipv4.py#L64-L77
spry-group/python-vultr
vultr/v1_server.py
VultrServer.bandwidth
def bandwidth(self, subid, params=None):
    '''/v1/server/bandwidth
    GET - account
    Get the bandwidth used by a virtual machine

    Link: https://www.vultr.com/api/#server_bandwidth
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/bandwidth', params, 'GET')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L14-L22
spry-group/python-vultr
vultr/v1_server.py
VultrServer.create
def create(self, dcid, vpsplanid, osid, params=None):
    '''/v1/server/create
    POST - account
    Create a new virtual machine. You will start being billed for
    this immediately. The response only contains the SUBID for the
    new machine. You should use v1/server/list to poll and wait for
    the machine to be created (as this does not happen instantly).

    Link: https://www.vultr.com/api/#server_create
    '''
    params = update_params(params, {
        'DCID': dcid,
        'VPSPLANID': vpsplanid,
        'OSID': osid
    })
    return self.request('/v1/server/create', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L24-L39
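create's docstring notes that the response only carries the new SUBID and that v1/server/list should be polled until the machine actually exists. A sketch of that create-then-poll loop; the example IDs, the SUBID keying of the list payload, and the 'status' field name are assumptions:

import time
from vultr import Vultr

vultr = Vultr('EXAMPLE_API_KEY')
resp = vultr.server.create(dcid=1, vpsplanid=201, osid=127)  # example IDs
subid = resp['SUBID']

while True:
    servers = vultr.server.list()                  # poll /v1/server/list
    server = servers.get(str(subid)) or servers.get(subid)
    if server and server.get('status') == 'active':  # field name assumed
        break
    time.sleep(5)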
spry-group/python-vultr
vultr/v1_server.py
VultrServer.get_user_data
def get_user_data(self, subid, params=None):
    '''/v1/server/get_user_data
    GET - account
    Retrieves the (base64 encoded) user-data for this subscription.

    Link: https://www.vultr.com/api/#server_get_user_data
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/get_user_data', params, 'GET')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L53-L61
spry-group/python-vultr
vultr/v1_server.py
VultrServer.halt
def halt(self, subid, params=None):
    '''/v1/server/halt
    POST - account
    Halt a virtual machine. This is a hard power off (basically,
    unplugging the machine). The data on the machine will not be
    modified, and you will still be billed for the machine. To
    completely delete a machine, see v1/server/destroy

    Link: https://www.vultr.com/api/#server_halt
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/halt', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L63-L74
spry-group/python-vultr
vultr/v1_server.py
VultrServer.label_set
def label_set(self, subid, label, params=None):
    '''/v1/server/label_set
    POST - account
    Set the label of a virtual machine.

    Link: https://www.vultr.com/api/#server_label_set
    '''
    params = update_params(params, {
        'SUBID': subid,
        'label': label
    })
    return self.request('/v1/server/label_set', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L76-L87
spry-group/python-vultr
vultr/v1_server.py
VultrServer.neighbors
def neighbors(self, subid, params=None):
    '''v1/server/neighbors
    GET - account
    Determine what other subscriptions are hosted on the same
    physical host as a given subscription.

    Link: https://www.vultr.com/api/#server_neighbors
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/neighbors', params, 'GET')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L107-L116
spry-group/python-vultr
vultr/v1_server.py
VultrServer.os_change
def os_change(self, subid, osid, params=None):
    '''/v1/server/os_change
    POST - account
    Changes the operating system of a virtual machine. All data
    will be permanently lost.

    Link: https://www.vultr.com/api/#server_os_change
    '''
    params = update_params(params, {
        'SUBID': subid,
        'OSID': osid
    })
    return self.request('/v1/server/os_change', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L118-L130
spry-group/python-vultr
vultr/v1_server.py
VultrServer.os_change_list
def os_change_list(self, subid, params=None):
    '''/v1/server/os_change_list
    GET - account
    Retrieves a list of operating systems to which this server can
    be changed.

    Link: https://www.vultr.com/api/#server_os_change_list
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/os_change_list', params, 'GET')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L132-L141
spry-group/python-vultr
vultr/v1_server.py
VultrServer.reboot
def reboot(self, subid, params=None):
    '''/v1/server/reboot
    POST - account
    Reboot a virtual machine. This is a hard reboot (basically,
    unplugging the machine).

    Link: https://www.vultr.com/api/#server_reboot
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/reboot', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L143-L152
spry-group/python-vultr
vultr/v1_server.py
VultrServer.reinstall
def reinstall(self, subid, params=None):
    '''/v1/server/reinstall
    POST - account
    Reinstall the operating system on a virtual machine. All data
    will be permanently lost, but the IP address will remain the
    same. There is no going back from this call.

    Link: https://www.vultr.com/api/#server_reinstall
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/reinstall', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L154-L164
spry-group/python-vultr
vultr/v1_server.py
VultrServer.restore_backup
def restore_backup(self, subid, backupid, params=None):
    '''/v1/server/restore_backup
    POST - account
    Restore the specified backup to the virtual machine. Any data
    already on the virtual machine will be lost.

    Link: https://www.vultr.com/api/#server_restore_backup
    '''
    params = update_params(params, {
        'SUBID': subid,
        'BACKUPID': backupid
    })
    return self.request('/v1/server/restore_backup', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L166-L178
spry-group/python-vultr
vultr/v1_server.py
VultrServer.restore_snapshot
def restore_snapshot(self, subid, snapshotid, params=None):
    '''/v1/server/restore_snapshot
    POST - account
    Restore the specified snapshot to the virtual machine. Any data
    already on the virtual machine will be lost.

    Link: https://www.vultr.com/api/#server_restore_snapshot
    '''
    params = update_params(params, {
        'SUBID': subid,
        'SNAPSHOTID': snapshotid
    })
    return self.request('/v1/server/restore_snapshot', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L180-L192
spry-group/python-vultr
vultr/v1_server.py
VultrServer.set_user_data
def set_user_data(self, subid, userdata, params=None):
    '''/v1/server/set_user_data
    POST - account
    Sets the cloud-init user-data (base64) for this subscription.
    Note that user-data is not supported on every operating system,
    and is generally only provided on instance startup.

    Link: https://www.vultr.com/api/#server_set_user_data
    '''
    params = update_params(params, {
        'SUBID': subid,
        'userdata': userdata
    })
    return self.request('/v1/server/set_user_data', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L194-L207
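get_user_data and set_user_data exchange base64-encoded payloads per their docstrings, so the caller does the encoding. A short sketch; the SUBID and cloud-config payload are examples, and the `vultr.server` attribute path follows the dump_info usage above:

import base64
from vultr import Vultr

vultr = Vultr('EXAMPLE_API_KEY')
cloud_init = '#cloud-config\npackages:\n  - nginx\n'
encoded = base64.b64encode(cloud_init.encode('utf-8')).decode('ascii')
vultr.server.set_user_data(576965, encoded)  # example SUBID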
spry-group/python-vultr
vultr/v1_server.py
VultrServer.start
def start(self, subid, params=None):
    '''/v1/server/start
    POST - account
    Start a virtual machine. If the machine is already running, it
    will be restarted.

    Link: https://www.vultr.com/api/#server_start
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/start', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L209-L218
spry-group/python-vultr
vultr/v1_server.py
VultrServer.upgrade_plan
def upgrade_plan(self, subid, vpsplanid, params=None):
    '''/v1/server/upgrade_plan
    POST - account
    Upgrade the plan of a virtual machine. The virtual machine will
    be rebooted upon a successful upgrade.

    Link: https://www.vultr.com/api/#server_upgrade_plan
    '''
    params = update_params(params, {
        'SUBID': subid,
        'VPSPLANID': vpsplanid
    })
    return self.request('/v1/server/upgrade_plan', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L220-L232
spry-group/python-vultr
vultr/v1_server.py
VultrServer.upgrade_plan_list
def upgrade_plan_list(self, subid, params=None):
    '''/v1/server/upgrade_plan_list
    GET - account
    Retrieve a list of the VPSPLANIDs for which a virtual machine
    can be upgraded. An empty response array means that there are
    currently no upgrades available.

    Link: https://www.vultr.com/api/#server_upgrade_plan_list
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/upgrade_plan_list', params, 'GET')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L234-L244
spry-group/python-vultr
vultr/v1_sshkey.py
VultrSSHKey.create
def create(self, name, ssh_key, params=None):
    '''/v1/sshkey/create
    POST - account
    Create a new SSH Key

    Link: https://www.vultr.com/api/#sshkey_create
    '''
    params = update_params(params, {
        'name': name,
        'ssh_key': ssh_key
    })
    return self.request('/v1/sshkey/create', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_sshkey.py#L10-L21
spry-group/python-vultr
vultr/v1_sshkey.py
VultrSSHKey.destroy
def destroy(self, sshkeyid, params=None):
    '''/v1/sshkey/destroy
    POST - account
    Remove an SSH key. Note that this will not remove the key from
    any machines that already have it.

    Link: https://www.vultr.com/api/#sshkey_destroy
    '''
    params = update_params(params, {'SSHKEYID': sshkeyid})
    return self.request('/v1/sshkey/destroy', params, 'POST')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_sshkey.py#L23-L32
spry-group/python-vultr
vultr/v1_sshkey.py
VultrSSHKey.list
def list(self, params=None):
    '''/v1/sshkey/list
    GET - account
    List all the SSH keys on the current account

    Link: https://www.vultr.com/api/#sshkey_list
    '''
    params = params if params else dict()
    return self.request('/v1/sshkey/list', params, 'GET')
python
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_sshkey.py#L34-L42
spry-group/python-vultr
vultr/v1_sshkey.py
VultrSSHKey.update
def update(self, sshkeyid, params=None):
    '''/v1/sshkey/update
    POST - account
    Update an existing SSH Key. Note that this will only update
    newly installed machines. The key will not be updated on any
    existing machines.

    Link: https://www.vultr.com/api/#sshkey_update
    '''
    params = update_params(params, {'SSHKEYID': sshkeyid})
    return self.request('/v1/sshkey/update', params, 'POST')
python
def update(self, sshkeyid, params=None): params = update_params(params, {'SSHKEYID': sshkeyid}) return self.request('/v1/sshkey/update', params, 'POST')
[ "def", "update", "(", "self", ",", "sshkeyid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SSHKEYID'", ":", "sshkeyid", "}", ")", "return", "self", ".", "request", "(", "'/v1/sshkey/update'", ",", "params", ",", "'POST'", ")" ]
/v1/sshkey/update
POST - account
Update an existing SSH Key. Note that this will only update newly
installed machines. The key will not be updated on any existing
machines.

Link: https://www.vultr.com/api/#sshkey_update
[ "/", "v1", "/", "sshkey", "/", "update", "POST", "-", "account", "Update", "an", "existing", "SSH", "Key", ".", "Note", "that", "this", "will", "only", "update", "newly", "installed", "machines", ".", "The", "key", "will", "not", "be", "updated", "on", "any", "existing", "machines", "." ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_sshkey.py#L44-L54
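A minimal usage sketch tying the SSH-key wrappers above together. Illustrative only: it assumes the methods are exposed as vultr.sshkey on a Vultr instance (following the accessor pattern visible in the halt_running example below), that API_KEY holds a valid key, and that the 'name' params field mirrors the v1 API's sshkey/update parameters.

from vultr import Vultr

vultr = Vultr(API_KEY)          # API_KEY assumed to be defined elsewhere

keys = vultr.sshkey.list()      # response assumed keyed by SSHKEYID
sshkeyid = next(iter(keys))     # pick an arbitrary key for the demo

vultr.sshkey.update(sshkeyid, {'name': 'renamed-key'})  # rename the key
vultr.sshkey.destroy(sshkeyid)                          # then remove it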
spry-group/python-vultr
vultr/v1_iso.py
VultrISO.create_from_url
def create_from_url(self, url, params=None):
    ''' /v1/iso/create_from_url
    POST - account
    Create a new ISO image on the current account. The ISO image will
    be downloaded from a given URL. Download status can be checked
    with the v1/iso/list call.

    Link: https://www.vultr.com/api/#iso_create_from_url
    '''
    params = update_params(params, {
        'url': url,
    })
    return self.request('/v1/iso/create_from_url', params, 'POST')
python
def create_from_url(self, url, params=None):
    params = update_params(params, {
        'url': url,
    })
    return self.request('/v1/iso/create_from_url', params, 'POST')
[ "def", "create_from_url", "(", "self", ",", "url", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'url'", ":", "url", ",", "}", ")", "return", "self", ".", "request", "(", "'/v1/iso/create_from_url'", ",", "params", ",", "'POST'", ")" ]
/v1/iso/create_from_url
POST - account
Create a new ISO image on the current account. The ISO image will be
downloaded from a given URL. Download status can be checked with the
v1/iso/list call.

Link: https://www.vultr.com/api/#iso_create_from_url
[ "/", "vi", "/", "iso", "/", "create_from_url", "POST", "-", "account", "Create", "a", "new", "ISO", "image", "on", "the", "current", "account", ".", "The", "ISO", "image", "will", "be", "downloaded", "from", "a", "given", "URL", ".", "Download", "status", "can", "be", "checked", "with", "the", "v1", "/", "iso", "/", "list", "call", "." ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_iso.py#L20-L32
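A short, hypothetical sketch of pulling a remote image into the account with create_from_url. The URL is a placeholder, and the dict-keyed response shape of iso.list() is an assumption.

from vultr import Vultr

vultr = Vultr(API_KEY)

# Start the server-side download (placeholder URL)
vultr.iso.create_from_url('https://example.com/install.iso')

# Poll the ISO list to watch the download status
for isoid, iso in vultr.iso.list().items():
    print(isoid, iso.get('status'))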
spry-group/python-vultr
vultr/v1_server_ipv6.py
VultrServerIPv6.list_ipv6
def list_ipv6(self, subid, params=None):
    ''' /v1/server/list_ipv6
    GET - account
    List the IPv6 information of a virtual machine. IP information is
    only available for virtual machines in the "active" state. If the
    virtual machine does not have IPv6 enabled, then an empty array
    is returned.

    Link: https://www.vultr.com/api/#server_list_ipv6
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/list_ipv6', params, 'GET')
python
def list_ipv6(self, subid, params=None):
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/list_ipv6', params, 'GET')
[ "def", "list_ipv6", "(", "self", ",", "subid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/list_ipv6'", ",", "params", ",", "'GET'", ")" ]
/v1/server/list_ipv6
GET - account
List the IPv6 information of a virtual machine. IP information is only
available for virtual machines in the "active" state. If the virtual
machine does not have IPv6 enabled, then an empty array is returned.

Link: https://www.vultr.com/api/#server_list_ipv6
[ "/", "v1", "/", "server", "/", "list_ipv6", "GET", "-", "account", "List", "the", "IPv6", "information", "of", "a", "virtual", "machine", ".", "IP", "information", "is", "only", "available", "for", "virtual", "machines", "in", "the", "active", "state", ".", "If", "the", "virtual", "machine", "does", "not", "have", "IPv6", "enabled", "then", "an", "empty", "array", "is", "returned", "." ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server_ipv6.py#L10-L20
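A sketch of checking the IPv6 details of one instance. The SUBID is made up, and the accessor path (vultr.server.list_ipv6) is an assumption inferred from the module layout, not confirmed by this dump.

from vultr import Vultr

vultr = Vultr(API_KEY)

info = vultr.server.list_ipv6('576965')   # '576965' is an illustrative SUBID
if not info:
    print('IPv6 is not enabled on this instance')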
spry-group/python-vultr
vultr/v1_server_ipv6.py
VultrServerIPv6.reverse_delete_ipv6
def reverse_delete_ipv6(self, subid, ipaddr, params=None):
    ''' /v1/server/reverse_delete_ipv6
    POST - account
    Remove a reverse DNS entry for an IPv6 address of a virtual
    machine. Upon success, DNS changes may take 6-12 hours to become
    active.

    Link: https://www.vultr.com/api/#server_reverse_delete_ipv6
    '''
    params = update_params(params, {
        'SUBID': subid,
        'ip': ipaddr
    })
    return self.request('/v1/server/reverse_delete_ipv6', params, 'POST')
python
def reverse_delete_ipv6(self, subid, ipaddr, params=None):
    params = update_params(params, {
        'SUBID': subid,
        'ip': ipaddr
    })
    return self.request('/v1/server/reverse_delete_ipv6', params, 'POST')
[ "def", "reverse_delete_ipv6", "(", "self", ",", "subid", ",", "ipaddr", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", ",", "'ip'", ":", "ipaddr", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/reverse_delete_ipv6'", ",", "params", ",", "'POST'", ")" ]
/v1/server/reverse_delete_ipv6
POST - account
Remove a reverse DNS entry for an IPv6 address of a virtual machine.
Upon success, DNS changes may take 6-12 hours to become active.

Link: https://www.vultr.com/api/#server_reverse_delete_ipv6
[ "/", "v1", "/", "server", "/", "reverse_delete_ipv6", "POST", "-", "account", "Remove", "a", "reverse", "DNS", "entry", "for", "an", "IPv6", "address", "of", "a", "virtual", "machine", ".", "Upon", "success", "DNS", "changes", "may", "take", "6", "-", "12", "hours", "to", "become", "active", "." ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server_ipv6.py#L22-L34
spry-group/python-vultr
vultr/v1_server_ipv6.py
VultrServerIPv6.reverse_list_ipv6
def reverse_list_ipv6(self, subid, params=None):
    ''' /v1/server/reverse_list_ipv6
    GET - account
    List the IPv6 reverse DNS entries of a virtual machine. Reverse
    DNS entries are only available for virtual machines in the
    "active" state. If the virtual machine does not have IPv6
    enabled, then an empty array is returned.

    Link: https://www.vultr.com/api/#server_reverse_list_ipv6
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/reverse_list_ipv6', params, 'GET')
python
def reverse_list_ipv6(self, subid, params=None):
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/reverse_list_ipv6', params, 'GET')
[ "def", "reverse_list_ipv6", "(", "self", ",", "subid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/reverse_list_ipv6'", ",", "params", ",", "'GET'", ")" ]
/v1/server/reverse_list_ipv6
GET - account
List the IPv6 reverse DNS entries of a virtual machine. Reverse DNS
entries are only available for virtual machines in the "active" state.
If the virtual machine does not have IPv6 enabled, then an empty array
is returned.

Link: https://www.vultr.com/api/#server_reverse_list_ipv6
[ "/", "v1", "/", "server", "/", "reverse_list_ipv6", "GET", "-", "account", "List", "the", "IPv6", "reverse", "DNS", "entries", "of", "a", "virtual", "machine", ".", "Reverse", "DNS", "entries", "are", "only", "available", "for", "virtual", "machines", "in", "the", "active", "state", ".", "If", "the", "virtual", "machine", "does", "not", "have", "IPv6", "enabled", "then", "an", "empty", "array", "is", "returned", "." ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server_ipv6.py#L36-L47
spry-group/python-vultr
vultr/v1_server_ipv6.py
VultrServerIPv6.reverse_set_ipv6
def reverse_set_ipv6(self, subid, ipaddr, entry, params=None):
    ''' /v1/server/reverse_set_ipv6
    POST - account
    Set a reverse DNS entry for an IPv6 address of a virtual machine.
    Upon success, DNS changes may take 6-12 hours to become active.

    Link: https://www.vultr.com/api/#server_reverse_set_ipv6
    '''
    params = update_params(params, {
        'SUBID': subid,
        'ip': ipaddr,
        'entry': entry
    })
    return self.request('/v1/server/reverse_set_ipv6', params, 'POST')
python
def reverse_set_ipv6(self, subid, ipaddr, entry, params=None):
    params = update_params(params, {
        'SUBID': subid,
        'ip': ipaddr,
        'entry': entry
    })
    return self.request('/v1/server/reverse_set_ipv6', params, 'POST')
[ "def", "reverse_set_ipv6", "(", "self", ",", "subid", ",", "ipaddr", ",", "entry", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", ",", "'ip'", ":", "ipaddr", ",", "'entry'", ":", "entry", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/reverse_set_ipv6'", ",", "params", ",", "'POST'", ")" ]
/v1/server/reverse_set_ipv6
POST - account
Set a reverse DNS entry for an IPv6 address of a virtual machine. Upon
success, DNS changes may take 6-12 hours to become active.

Link: https://www.vultr.com/api/#server_reverse_set_ipv6
[ "/", "v1", "/", "server", "/", "reverse_set_ipv6", "POST", "-", "account", "Set", "a", "reverse", "DNS", "entry", "for", "an", "IPv6", "address", "of", "a", "virtual", "machine", ".", "Upon", "success", "DNS", "changes", "may", "take", "6", "-", "12", "hours", "to", "become", "active", "." ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server_ipv6.py#L49-L62
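Tying the three IPv6 reverse-DNS calls together in one hedged sketch. The SUBID and address are illustrative, and the accessor path is assumed as above; per the docstrings, changes may take 6-12 hours to become active.

from vultr import Vultr

vultr = Vultr(API_KEY)

subid, addr = '576965', '2001:db8::1'     # illustrative values

vultr.server.reverse_set_ipv6(subid, addr, 'host.example.com')  # set PTR
print(vultr.server.reverse_list_ipv6(subid))                    # inspect it
vultr.server.reverse_delete_ipv6(subid, addr)                   # remove it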
spry-group/python-vultr
vultr/v1_startupscript.py
VultrStartupScript.create
def create(self, name, script, params=None):
    ''' /v1/startupscript/create
    POST - account
    Create a startup script

    Link: https://www.vultr.com/api/#startupscript_create
    '''
    params = update_params(params, {
        'name': name,
        'script': script
    })
    return self.request('/v1/startupscript/create', params, 'POST')
python
def create(self, name, script, params=None):
    params = update_params(params, {
        'name': name,
        'script': script
    })
    return self.request('/v1/startupscript/create', params, 'POST')
[ "def", "create", "(", "self", ",", "name", ",", "script", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'name'", ":", "name", ",", "'script'", ":", "script", "}", ")", "return", "self", ".", "request", "(", "'/v1/startupscript/create'", ",", "params", ",", "'POST'", ")" ]
/v1/startupscript/create
POST - account
Create a startup script

Link: https://www.vultr.com/api/#startupscript_create
[ "/", "v1", "/", "startupscript", "/", "create", "POST", "-", "account", "Create", "a", "startup", "script" ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_startupscript.py#L10-L21
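Since create takes the script body inline, registering a boot script is a two-liner. A minimal sketch (assumes the vultr.startupscript accessor, and that the response carries the new SCRIPTID as in the v1 API):

from vultr import Vultr

vultr = Vultr(API_KEY)

script = '#!/bin/sh\necho "hello from boot" > /root/hello.txt'
result = vultr.startupscript.create('hello-script', script)
print(result)  # assumed to contain the new SCRIPTID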
spry-group/python-vultr
vultr/v1_startupscript.py
VultrStartupScript.destroy
def destroy(self, scriptid, params=None):
    ''' /v1/startupscript/destroy
    POST - account
    Remove a startup script

    Link: https://www.vultr.com/api/#startupscript_destroy
    '''
    params = update_params(params, {'SCRIPTID': scriptid})
    return self.request('/v1/startupscript/destroy', params, 'POST')
python
def destroy(self, scriptid, params=None):
    params = update_params(params, {'SCRIPTID': scriptid})
    return self.request('/v1/startupscript/destroy', params, 'POST')
[ "def", "destroy", "(", "self", ",", "scriptid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SCRIPTID'", ":", "scriptid", "}", ")", "return", "self", ".", "request", "(", "'/v1/startupscript/destroy'", ",", "params", ",", "'POST'", ")" ]
/v1/startupscript/destroy
POST - account
Remove a startup script

Link: https://www.vultr.com/api/#startupscript_destroy
[ "/", "v1", "/", "startupscript", "/", "destroy", "POST", "-", "account", "Remove", "a", "startup", "script" ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_startupscript.py#L23-L31
spry-group/python-vultr
vultr/v1_startupscript.py
VultrStartupScript.update
def update(self, scriptid, params=None):
    ''' /v1/startupscript/update
    POST - account
    Update an existing startup script

    Link: https://www.vultr.com/api/#startupscript_update
    '''
    params = update_params(params, {'SCRIPTID': scriptid})
    return self.request('/v1/startupscript/update', params, 'POST')
python
def update(self, scriptid, params=None):
    params = update_params(params, {'SCRIPTID': scriptid})
    return self.request('/v1/startupscript/update', params, 'POST')
[ "def", "update", "(", "self", ",", "scriptid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SCRIPTID'", ":", "scriptid", "}", ")", "return", "self", ".", "request", "(", "'/v1/startupscript/update'", ",", "params", ",", "'POST'", ")" ]
/v1/startupscript/update
POST - account
Update an existing startup script

Link: https://www.vultr.com/api/#startupscript_update
[ "/", "v1", "/", "startupscript", "/", "update", "POST", "-", "account", "Update", "an", "existing", "startup", "script" ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_startupscript.py#L46-L54
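Updating and removing a script only needs its SCRIPTID. A sketch, assuming the 'script' params field mirrors the v1 API's update parameters:

from vultr import Vultr

vultr = Vultr(API_KEY)

scriptid = '5'   # illustrative SCRIPTID, as returned by create/list
vultr.startupscript.update(scriptid, {'script': '#!/bin/sh\necho updated'})
vultr.startupscript.destroy(scriptid)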
spry-group/python-vultr
examples/basic_haltRunning.py
halt_running
def halt_running():
    '''Halts all running servers'''
    vultr = Vultr(API_KEY)

    try:
        serverList = vultr.server.list()
        #logging.info('Listing servers:\n%s', dumps(
            #serverList, indent=2
        #))
    except VultrError as ex:
        logging.error('VultrError: %s', ex)

    for serverID in serverList:
        if serverList[serverID]['power_status'] == 'running':
            logging.info(serverList[serverID]['label']
                         + " will be gracefully shutdown.")
            vultr.server.halt(serverID)
python
def halt_running():
    vultr = Vultr(API_KEY)

    try:
        serverList = vultr.server.list()
    except VultrError as ex:
        logging.error('VultrError: %s', ex)

    for serverID in serverList:
        if serverList[serverID]['power_status'] == 'running':
            logging.info(serverList[serverID]['label']
                         + " will be gracefully shutdown.")
            vultr.server.halt(serverID)
[ "def", "halt_running", "(", ")", ":", "vultr", "=", "Vultr", "(", "API_KEY", ")", "try", ":", "serverList", "=", "vultr", ".", "server", ".", "list", "(", ")", "#logging.info('Listing servers:\\n%s', dumps(", "#serverList, indent=2", "#))", "except", "VultrError", "as", "ex", ":", "logging", ".", "error", "(", "'VultrError: %s'", ",", "ex", ")", "for", "serverID", "in", "serverList", ":", "if", "serverList", "[", "serverID", "]", "[", "'power_status'", "]", "==", "'running'", ":", "logging", ".", "info", "(", "serverList", "[", "serverID", "]", "[", "'label'", "]", "+", "\" will be gracefully shutdown.\"", ")", "vultr", ".", "server", ".", "halt", "(", "serverID", ")" ]
Halts all running servers
[ "Halts", "all", "running", "servers" ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/examples/basic_haltRunning.py#L18-L33
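Note that halt_running references serverList outside the try block, so if the API call fails it logs the error and then raises NameError in the for loop. A defensive variant (a sketch, not part of the repository; import path assumed from the examples' usage):

import logging

from vultr import Vultr, VultrError

def halt_running_safe(api_key):
    '''Variant of halt_running that bails out cleanly on API errors.'''
    vultr = Vultr(api_key)
    try:
        server_list = vultr.server.list()
    except VultrError as ex:
        logging.error('VultrError: %s', ex)
        return  # nothing to iterate over

    for server_id, server in server_list.items():
        if server['power_status'] == 'running':
            logging.info('%s will be gracefully shutdown.', server['label'])
            vultr.server.halt(server_id)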
spry-group/python-vultr
vultr/v1_reservedip.py
VultrReservedIP.create
def create(self, dcid, ip_type, params=None):
    ''' /v1/reservedip/create
    POST - account
    Create a new reserved IP. Reserved IPs can only be used within the
    same datacenter for which they were created.

    Link: https://www.vultr.com/api/#reservedip_create
    '''
    params = update_params(params, {
        'DCID': dcid,
        'ip_type': ip_type
    })
    return self.request('/v1/reservedip/create', params, 'POST')
python
def create(self, dcid, ip_type, params=None):
    params = update_params(params, {
        'DCID': dcid,
        'ip_type': ip_type
    })
    return self.request('/v1/reservedip/create', params, 'POST')
[ "def", "create", "(", "self", ",", "dcid", ",", "ip_type", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'DCID'", ":", "dcid", ",", "'ip_type'", ":", "ip_type", "}", ")", "return", "self", ".", "request", "(", "'/v1/reservedip/create'", ",", "params", ",", "'POST'", ")" ]
/v1/reservedip/create
POST - account
Create a new reserved IP. Reserved IPs can only be used within the same
datacenter for which they were created.

Link: https://www.vultr.com/api/#reservedip_create
[ "/", "v1", "/", "reservedip", "/", "create", "POST", "-", "account", "Create", "a", "new", "reserved", "IP", ".", "Reserved", "IPs", "can", "only", "be", "used", "within", "the", "same", "datacenter", "for", "which", "they", "were", "created", "." ]
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_reservedip.py#L10-L22
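A hedged sketch of reserving an IPv4 address: the DCID value is illustrative, 'v4' follows the v1 API's ip_type values, and the reservedip accessor name is assumed from the module path.

from vultr import Vultr

vultr = Vultr(API_KEY)

# Reserve an IPv4 address in datacenter 1 (illustrative DCID)
vultr.reservedip.create('1', 'v4')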
inspirehep/refextract
refextract/references/tag.py
process_reference_line
def process_reference_line(working_line, journals_matches, pprint_repnum_len,
                           pprint_repnum_matchtext, publishers_matches,
                           removed_spaces, standardised_titles, kbs):
    """After the phase of identifying and tagging citation instances in a
    reference line, this function is called to go through the line and the
    collected information about the recognised citations, and to transform
    the line into a string of MARC XML in which the recognised citations
    are grouped under various datafields and subfields, depending upon
    their type.
    @param line_marker: (string) - this is the marker for this reference
    line (e.g. [1]).
    @param working_line: (string) - this is the line before the
    punctuation was stripped. At this stage, it has not been capitalised,
    and neither TITLES nor REPORT NUMBERS have been stripped from it.
    However, any recognised numeration and/or URLs have been tagged with
    <cds.YYYY> tags. The working_line could, for example, look something
    like this:
    [1] CDS <cds.URL description="http //invenio-software.org/">
    http //invenio-software.org/</cds.URL>.
    @param found_title_len: (dictionary) - the lengths of the title
    citations that have been recognised in the line. Keyed by the index
    within the line of each match.
    @param found_title_matchtext: (dictionary) - The text that was found
    for each matched title citation in the line. Keyed by the index
    within the line of each match.
    @param pprint_repnum_len: (dictionary) - the lengths of the matched
    institutional preprint report number citations found within the line.
    Keyed by the index within the line of each match.
    @param pprint_repnum_matchtext: (dictionary) - The matched text for
    each matched institutional report number. Keyed by the index within
    the line of each match.
    @param identified_dois (list) - The list of dois inside the citation
    @identified_urls: (list) - contains 2-cell tuples, each of which
    represents an identified URL and its description string. The list
    takes the order in which the URLs were identified in the line
    (i.e. first-found, second-found, etc).
    @param removed_spaces: (dictionary) - The number of spaces removed
    from the various positions in the line. Keyed by the index of the
    position within the line at which the spaces were removed.
    @param standardised_titles: (dictionary) - The standardised journal
    titles, keyed by the non-standard version of those titles.
    @return: (tuple) of 5 components:
      ( string  -> a MARC XML-ized reference line.
        integer -> number of fields of miscellaneous text marked-up
                   for the line.
        integer -> number of title citations marked-up for the line.
        integer -> number of institutional report-number citations
                   marked-up for the line.
        integer -> number of URL citations marked-up for the record.
        integer -> number of DOI's found for the record
        integer -> number of author groups found
      )
    """
    if len(journals_matches) + len(pprint_repnum_len) + len(publishers_matches) == 0:
        # no TITLE or REPORT-NUMBER citations were found within this line,
        # use the raw line: (This 'raw' line could still be tagged with
        # recognised URLs or numeration.)
        tagged_line = working_line
    else:
        # TITLE and/or REPORT-NUMBER citations were found in this line,
        # build a new version of the working-line in which the standard
        # versions of the REPORT-NUMBERs and TITLEs are tagged:
        startpos = 0          # First cell of the reference line...
        previous_match = {}   # previously matched TITLE within line (used
                              # for replacement of IBIDs.
        replacement_types = {}
        journals_keys = journals_matches.keys()
        journals_keys.sort()
        reports_keys = pprint_repnum_matchtext.keys()
        reports_keys.sort()
        publishers_keys = publishers_matches.keys()
        publishers_keys.sort()
        spaces_keys = removed_spaces.keys()
        spaces_keys.sort()
        replacement_types = get_replacement_types(journals_keys,
                                                  reports_keys,
                                                  publishers_keys)
        replacement_locations = replacement_types.keys()
        replacement_locations.sort()

        tagged_line = u""  # This is to be the new 'working-line'. It will
                           # contain the tagged TITLEs and REPORT-NUMBERs,
                           # as well as any previously tagged URLs and
                           # numeration components.
        # begin:
        for replacement_index in replacement_locations:
            # first, factor in any stripped spaces before this 'replacement'
            true_replacement_index, extras = \
                account_for_stripped_whitespace(spaces_keys,
                                                removed_spaces,
                                                replacement_types,
                                                pprint_repnum_len,
                                                journals_matches,
                                                replacement_index)
            if replacement_types[replacement_index] == u"journal":
                # Add a tagged periodical TITLE into the line:
                rebuilt_chunk, startpos, previous_match = \
                    add_tagged_journal(
                        reading_line=working_line,
                        journal_info=journals_matches[replacement_index],
                        previous_match=previous_match,
                        startpos=startpos,
                        true_replacement_index=true_replacement_index,
                        extras=extras,
                        standardised_titles=standardised_titles)
                tagged_line += rebuilt_chunk
            elif replacement_types[replacement_index] == u"reportnumber":
                # Add a tagged institutional preprint REPORT-NUMBER
                # into the line:
                rebuilt_chunk, startpos = \
                    add_tagged_report_number(
                        reading_line=working_line,
                        len_reportnum=pprint_repnum_len[replacement_index],
                        reportnum=pprint_repnum_matchtext[replacement_index],
                        startpos=startpos,
                        true_replacement_index=true_replacement_index,
                        extras=extras
                    )
                tagged_line += rebuilt_chunk
            elif replacement_types[replacement_index] == u"publisher":
                rebuilt_chunk, startpos = \
                    add_tagged_publisher(
                        reading_line=working_line,
                        matched_publisher=publishers_matches[
                            replacement_index],
                        startpos=startpos,
                        true_replacement_index=true_replacement_index,
                        extras=extras,
                        kb_publishers=kbs['publishers']
                    )
                tagged_line += rebuilt_chunk

        # add the remainder of the original working-line into the rebuilt line:
        tagged_line += working_line[startpos:]

    # we have all the numeration
    # we can make sure there's no space between the volume
    # letter and the volume number
    # e.g. B 20 -> B20
    tagged_line = wash_volume_tag(tagged_line)

    # Try to find any authors in the line
    tagged_line = identify_and_tag_authors(tagged_line, kbs['authors'])
    # Try to find any collaboration in the line
    tagged_line = identify_and_tag_collaborations(tagged_line,
                                                  kbs['collaborations'])

    return tagged_line.replace('\n', '')
python
def process_reference_line(working_line, journals_matches, pprint_repnum_len,
                           pprint_repnum_matchtext, publishers_matches,
                           removed_spaces, standardised_titles, kbs):
    if len(journals_matches) + len(pprint_repnum_len) + len(publishers_matches) == 0:
        tagged_line = working_line
    else:
        startpos = 0
        previous_match = {}
        replacement_types = {}
        journals_keys = journals_matches.keys()
        journals_keys.sort()
        reports_keys = pprint_repnum_matchtext.keys()
        reports_keys.sort()
        publishers_keys = publishers_matches.keys()
        publishers_keys.sort()
        spaces_keys = removed_spaces.keys()
        spaces_keys.sort()
        replacement_types = get_replacement_types(journals_keys,
                                                  reports_keys,
                                                  publishers_keys)
        replacement_locations = replacement_types.keys()
        replacement_locations.sort()

        tagged_line = u""

        for replacement_index in replacement_locations:
            true_replacement_index, extras = \
                account_for_stripped_whitespace(spaces_keys,
                                                removed_spaces,
                                                replacement_types,
                                                pprint_repnum_len,
                                                journals_matches,
                                                replacement_index)
            if replacement_types[replacement_index] == u"journal":
                rebuilt_chunk, startpos, previous_match = \
                    add_tagged_journal(
                        reading_line=working_line,
                        journal_info=journals_matches[replacement_index],
                        previous_match=previous_match,
                        startpos=startpos,
                        true_replacement_index=true_replacement_index,
                        extras=extras,
                        standardised_titles=standardised_titles)
                tagged_line += rebuilt_chunk
            elif replacement_types[replacement_index] == u"reportnumber":
                rebuilt_chunk, startpos = \
                    add_tagged_report_number(
                        reading_line=working_line,
                        len_reportnum=pprint_repnum_len[replacement_index],
                        reportnum=pprint_repnum_matchtext[replacement_index],
                        startpos=startpos,
                        true_replacement_index=true_replacement_index,
                        extras=extras
                    )
                tagged_line += rebuilt_chunk
            elif replacement_types[replacement_index] == u"publisher":
                rebuilt_chunk, startpos = \
                    add_tagged_publisher(
                        reading_line=working_line,
                        matched_publisher=publishers_matches[
                            replacement_index],
                        startpos=startpos,
                        true_replacement_index=true_replacement_index,
                        extras=extras,
                        kb_publishers=kbs['publishers']
                    )
                tagged_line += rebuilt_chunk

        tagged_line += working_line[startpos:]

    tagged_line = wash_volume_tag(tagged_line)
    tagged_line = identify_and_tag_authors(tagged_line, kbs['authors'])
    tagged_line = identify_and_tag_collaborations(tagged_line,
                                                  kbs['collaborations'])

    return tagged_line.replace('\n', '')
[ "def", "process_reference_line", "(", "working_line", ",", "journals_matches", ",", "pprint_repnum_len", ",", "pprint_repnum_matchtext", ",", "publishers_matches", ",", "removed_spaces", ",", "standardised_titles", ",", "kbs", ")", ":", "if", "len", "(", "journals_matches", ")", "+", "len", "(", "pprint_repnum_len", ")", "+", "len", "(", "publishers_matches", ")", "==", "0", ":", "# no TITLE or REPORT-NUMBER citations were found within this line,", "# use the raw line: (This 'raw' line could still be tagged with", "# recognised URLs or numeration.)", "tagged_line", "=", "working_line", "else", ":", "# TITLE and/or REPORT-NUMBER citations were found in this line,", "# build a new version of the working-line in which the standard", "# versions of the REPORT-NUMBERs and TITLEs are tagged:", "startpos", "=", "0", "# First cell of the reference line...", "previous_match", "=", "{", "}", "# previously matched TITLE within line (used", "# for replacement of IBIDs.", "replacement_types", "=", "{", "}", "journals_keys", "=", "journals_matches", ".", "keys", "(", ")", "journals_keys", ".", "sort", "(", ")", "reports_keys", "=", "pprint_repnum_matchtext", ".", "keys", "(", ")", "reports_keys", ".", "sort", "(", ")", "publishers_keys", "=", "publishers_matches", ".", "keys", "(", ")", "publishers_keys", ".", "sort", "(", ")", "spaces_keys", "=", "removed_spaces", ".", "keys", "(", ")", "spaces_keys", ".", "sort", "(", ")", "replacement_types", "=", "get_replacement_types", "(", "journals_keys", ",", "reports_keys", ",", "publishers_keys", ")", "replacement_locations", "=", "replacement_types", ".", "keys", "(", ")", "replacement_locations", ".", "sort", "(", ")", "tagged_line", "=", "u\"\"", "# This is to be the new 'working-line'. It will", "# contain the tagged TITLEs and REPORT-NUMBERs,", "# as well as any previously tagged URLs and", "# numeration components.", "# begin:", "for", "replacement_index", "in", "replacement_locations", ":", "# first, factor in any stripped spaces before this 'replacement'", "true_replacement_index", ",", "extras", "=", "account_for_stripped_whitespace", "(", "spaces_keys", ",", "removed_spaces", ",", "replacement_types", ",", "pprint_repnum_len", ",", "journals_matches", ",", "replacement_index", ")", "if", "replacement_types", "[", "replacement_index", "]", "==", "u\"journal\"", ":", "# Add a tagged periodical TITLE into the line:", "rebuilt_chunk", ",", "startpos", ",", "previous_match", "=", "add_tagged_journal", "(", "reading_line", "=", "working_line", ",", "journal_info", "=", "journals_matches", "[", "replacement_index", "]", ",", "previous_match", "=", "previous_match", ",", "startpos", "=", "startpos", ",", "true_replacement_index", "=", "true_replacement_index", ",", "extras", "=", "extras", ",", "standardised_titles", "=", "standardised_titles", ")", "tagged_line", "+=", "rebuilt_chunk", "elif", "replacement_types", "[", "replacement_index", "]", "==", "u\"reportnumber\"", ":", "# Add a tagged institutional preprint REPORT-NUMBER", "# into the line:", "rebuilt_chunk", ",", "startpos", "=", "add_tagged_report_number", "(", "reading_line", "=", "working_line", ",", "len_reportnum", "=", "pprint_repnum_len", "[", "replacement_index", "]", ",", "reportnum", "=", "pprint_repnum_matchtext", "[", "replacement_index", "]", ",", "startpos", "=", "startpos", ",", "true_replacement_index", "=", "true_replacement_index", ",", "extras", "=", "extras", ")", "tagged_line", "+=", "rebuilt_chunk", "elif", "replacement_types", "[", "replacement_index", "]", "==", 
"u\"publisher\"", ":", "rebuilt_chunk", ",", "startpos", "=", "add_tagged_publisher", "(", "reading_line", "=", "working_line", ",", "matched_publisher", "=", "publishers_matches", "[", "replacement_index", "]", ",", "startpos", "=", "startpos", ",", "true_replacement_index", "=", "true_replacement_index", ",", "extras", "=", "extras", ",", "kb_publishers", "=", "kbs", "[", "'publishers'", "]", ")", "tagged_line", "+=", "rebuilt_chunk", "# add the remainder of the original working-line into the rebuilt line:", "tagged_line", "+=", "working_line", "[", "startpos", ":", "]", "# we have all the numeration", "# we can make sure there's no space between the volume", "# letter and the volume number", "# e.g. B 20 -> B20", "tagged_line", "=", "wash_volume_tag", "(", "tagged_line", ")", "# Try to find any authors in the line", "tagged_line", "=", "identify_and_tag_authors", "(", "tagged_line", ",", "kbs", "[", "'authors'", "]", ")", "# Try to find any collaboration in the line", "tagged_line", "=", "identify_and_tag_collaborations", "(", "tagged_line", ",", "kbs", "[", "'collaborations'", "]", ")", "return", "tagged_line", ".", "replace", "(", "'\\n'", ",", "''", ")" ]
After the phase of identifying and tagging citation instances in a
reference line, this function is called to go through the line and the
collected information about the recognised citations, and to transform
the line into a string of MARC XML in which the recognised citations are
grouped under various datafields and subfields, depending upon their
type.

@param line_marker: (string) - this is the marker for this reference
line (e.g. [1]).
@param working_line: (string) - this is the line before the punctuation
was stripped. At this stage, it has not been capitalised, and neither
TITLES nor REPORT NUMBERS have been stripped from it. However, any
recognised numeration and/or URLs have been tagged with <cds.YYYY> tags.
The working_line could, for example, look something like this:
[1] CDS <cds.URL description="http //invenio-software.org/">
http //invenio-software.org/</cds.URL>.
@param found_title_len: (dictionary) - the lengths of the title
citations that have been recognised in the line. Keyed by the index
within the line of each match.
@param found_title_matchtext: (dictionary) - The text that was found for
each matched title citation in the line. Keyed by the index within the
line of each match.
@param pprint_repnum_len: (dictionary) - the lengths of the matched
institutional preprint report number citations found within the line.
Keyed by the index within the line of each match.
@param pprint_repnum_matchtext: (dictionary) - The matched text for each
matched institutional report number. Keyed by the index within the line
of each match.
@param identified_dois (list) - The list of dois inside the citation
@identified_urls: (list) - contains 2-cell tuples, each of which
represents an identified URL and its description string. The list takes
the order in which the URLs were identified in the line
(i.e. first-found, second-found, etc).
@param removed_spaces: (dictionary) - The number of spaces removed from
the various positions in the line. Keyed by the index of the position
within the line at which the spaces were removed.
@param standardised_titles: (dictionary) - The standardised journal
titles, keyed by the non-standard version of those titles.
@return: (tuple) of 5 components:
  ( string  -> a MARC XML-ized reference line.
    integer -> number of fields of miscellaneous text marked-up for
               the line.
    integer -> number of title citations marked-up for the line.
    integer -> number of institutional report-number citations
               marked-up for the line.
    integer -> number of URL citations marked-up for the record.
    integer -> number of DOI's found for the record
    integer -> number of author groups found
  )
[ "After", "the", "phase", "of", "identifying", "and", "tagging", "citation", "instances", "in", "a", "reference", "line", "this", "function", "is", "called", "to", "go", "through", "the", "line", "and", "the", "collected", "information", "about", "the", "recognised", "citations", "and", "to", "transform", "the", "line", "into", "a", "string", "of", "MARC", "XML", "in", "which", "the", "recognised", "citations", "are", "grouped", "under", "various", "datafields", "and", "subfields", "depending", "upon", "their", "type", ".", "@param", "line_marker", ":", "(", "string", ")", "-", "this", "is", "the", "marker", "for", "this", "reference", "line", "(", "e", ".", "g", ".", "[", "1", "]", ")", ".", "@param", "working_line", ":", "(", "string", ")", "-", "this", "is", "the", "line", "before", "the", "punctuation", "was", "stripped", ".", "At", "this", "stage", "it", "has", "not", "been", "capitalised", "and", "neither", "TITLES", "nor", "REPORT", "NUMBERS", "have", "been", "stripped", "from", "it", ".", "However", "any", "recognised", "numeration", "and", "/", "or", "URLs", "have", "been", "tagged", "with", "<cds", ".", "YYYY", ">", "tags", ".", "The", "working_line", "could", "for", "example", "look", "something", "like", "this", ":", "[", "1", "]", "CDS", "<cds", ".", "URL", "description", "=", "http", "//", "invenio", "-", "software", ".", "org", "/", ">", "http", "//", "invenio", "-", "software", ".", "org", "/", "<", "/", "cds", ".", "URL", ">", ".", "@param", "found_title_len", ":", "(", "dictionary", ")", "-", "the", "lengths", "of", "the", "title", "citations", "that", "have", "been", "recognised", "in", "the", "line", ".", "Keyed", "by", "the", "index", "within", "the", "line", "of", "each", "match", ".", "@param", "found_title_matchtext", ":", "(", "dictionary", ")", "-", "The", "text", "that", "was", "found", "for", "each", "matched", "title", "citation", "in", "the", "line", ".", "Keyed", "by", "the", "index", "within", "the", "line", "of", "each", "match", ".", "@param", "pprint_repnum_len", ":", "(", "dictionary", ")", "-", "the", "lengths", "of", "the", "matched", "institutional", "preprint", "report", "number", "citations", "found", "within", "the", "line", ".", "Keyed", "by", "the", "index", "within", "the", "line", "of", "each", "match", ".", "@param", "pprint_repnum_matchtext", ":", "(", "dictionary", ")", "-", "The", "matched", "text", "for", "each", "matched", "institutional", "report", "number", ".", "Keyed", "by", "the", "index", "within", "the", "line", "of", "each", "match", ".", "@param", "identified_dois", "(", "list", ")", "-", "The", "list", "of", "dois", "inside", "the", "citation", "@identified_urls", ":", "(", "list", ")", "-", "contains", "2", "-", "cell", "tuples", "each", "of", "which", "represents", "an", "idenitfied", "URL", "and", "its", "description", "string", ".", "The", "list", "takes", "the", "order", "in", "which", "the", "URLs", "were", "identified", "in", "the", "line", "(", "i", ".", "e", ".", "first", "-", "found", "second", "-", "found", "etc", ")", ".", "@param", "removed_spaces", ":", "(", "dictionary", ")", "-", "The", "number", "of", "spaces", "removed", "from", "the", "various", "positions", "in", "the", "line", ".", "Keyed", "by", "the", "index", "of", "the", "position", "within", "the", "line", "at", "which", "the", "spaces", "were", "removed", ".", "@param", "standardised_titles", ":", "(", "dictionary", ")", "-", "The", "standardised", "journal", "titles", "keyed", "by", "the", "non", "-", "standard", "version", "of", "those", "titles", 
".", "@return", ":", "(", "tuple", ")", "of", "5", "components", ":", "(", "string", "-", ">", "a", "MARC", "XML", "-", "ized", "reference", "line", ".", "integer", "-", ">", "number", "of", "fields", "of", "miscellaneous", "text", "marked", "-", "up", "for", "the", "line", ".", "integer", "-", ">", "number", "of", "title", "citations", "marked", "-", "up", "for", "the", "line", ".", "integer", "-", ">", "number", "of", "institutional", "report", "-", "number", "citations", "marked", "-", "up", "for", "the", "line", ".", "integer", "-", ">", "number", "of", "URL", "citations", "marked", "-", "up", "for", "the", "record", ".", "integer", "-", ">", "number", "of", "DOI", "s", "found", "for", "the", "record", "integer", "-", ">", "number", "of", "author", "groups", "found", ")" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L182-L338
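The heart of process_reference_line is a rebuild loop: walk the match positions in ascending order, copy the untouched text up to each match, emit the tag, and advance a startpos cursor past the matched span. A simplified, self-contained stand-in for that pattern (not the refextract implementation):

def rebuild_line(line, matches):
    '''matches: {index: (length, replacement)} keyed by position in line.'''
    out, startpos = '', 0
    for idx in sorted(matches):
        length, repl = matches[idx]
        out += line[startpos:idx] + repl   # untouched text, then the tag
        startpos = idx + length            # jump past the matched span
    return out + line[startpos:]           # append the remainder

print(rebuild_line('see Phys. Rev. D 12 (1975) 30',
                   {4: (10, '<cds.JOURNAL>Phys. Rev.</cds.JOURNAL>')}))
# -> see <cds.JOURNAL>Phys. Rev.</cds.JOURNAL> D 12 (1975) 30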
inspirehep/refextract
refextract/references/tag.py
tag_arxiv
def tag_arxiv(line):
    """Tag arxiv report numbers

    We handle arXiv in 2 ways:
    * starting with arXiv:1022.1111
    * this format exactly 9999.9999

    We also format the output to the standard arxiv notation:
    * arXiv:2007.12.1111
    * arXiv:2007.12.1111v2
    """
    def tagger(match):
        groups = match.groupdict()
        if match.group('suffix'):
            groups['suffix'] = ' ' + groups['suffix']
        else:
            groups['suffix'] = ''
        return u'<cds.REPORTNUMBER>arXiv:%(year)s' \
               u'%(month)s.%(num)s%(suffix)s' \
               u'</cds.REPORTNUMBER>' % groups

    line = re_arxiv_5digits.sub(tagger, line)
    line = re_arxiv.sub(tagger, line)
    line = re_new_arxiv_5digits.sub(tagger, line)
    line = re_new_arxiv.sub(tagger, line)
    return line
python
def tag_arxiv(line):
    def tagger(match):
        groups = match.groupdict()
        if match.group('suffix'):
            groups['suffix'] = ' ' + groups['suffix']
        else:
            groups['suffix'] = ''
        return u'<cds.REPORTNUMBER>arXiv:%(year)s' \
               u'%(month)s.%(num)s%(suffix)s' \
               u'</cds.REPORTNUMBER>' % groups

    line = re_arxiv_5digits.sub(tagger, line)
    line = re_arxiv.sub(tagger, line)
    line = re_new_arxiv_5digits.sub(tagger, line)
    line = re_new_arxiv.sub(tagger, line)
    return line
[ "def", "tag_arxiv", "(", "line", ")", ":", "def", "tagger", "(", "match", ")", ":", "groups", "=", "match", ".", "groupdict", "(", ")", "if", "match", ".", "group", "(", "'suffix'", ")", ":", "groups", "[", "'suffix'", "]", "=", "' '", "+", "groups", "[", "'suffix'", "]", "else", ":", "groups", "[", "'suffix'", "]", "=", "''", "return", "u'<cds.REPORTNUMBER>arXiv:%(year)s'", "u'%(month)s.%(num)s%(suffix)s'", "u'</cds.REPORTNUMBER>'", "%", "groups", "line", "=", "re_arxiv_5digits", ".", "sub", "(", "tagger", ",", "line", ")", "line", "=", "re_arxiv", ".", "sub", "(", "tagger", ",", "line", ")", "line", "=", "re_new_arxiv_5digits", ".", "sub", "(", "tagger", ",", "line", ")", "line", "=", "re_new_arxiv", ".", "sub", "(", "tagger", ",", "line", ")", "return", "line" ]
Tag arxiv report numbers

We handle arXiv in 2 ways:
* starting with arXiv:1022.1111
* this format exactly 9999.9999

We also format the output to the standard arxiv notation:
* arXiv:2007.12.1111
* arXiv:2007.12.1111v2
[ "Tag", "arxiv", "report", "numbers" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L360-L384
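The tagger above rewrites whatever the (unshown) re_arxiv* patterns capture. The rewriting step itself can be demonstrated with an illustrative pattern for new-style identifiers; the pattern here is a stand-in of mine, not the library's:

import re

# Hypothetical stand-in for the library's compiled patterns:
RE_NEW_STYLE = re.compile(
    r'arXiv:(?P<year>\d{2})(?P<month>\d{2})\.(?P<num>\d{4,5})(?P<suffix>v\d+)?',
    re.IGNORECASE)

def tag(line):
    def tagger(match):
        groups = match.groupdict()
        groups['suffix'] = ' ' + groups['suffix'] if groups['suffix'] else ''
        return ('<cds.REPORTNUMBER>arXiv:%(year)s%(month)s.%(num)s%(suffix)s'
                '</cds.REPORTNUMBER>') % groups
    return RE_NEW_STYLE.sub(tagger, line)

print(tag('see arXiv:2007.12345v2 for details'))
# -> see <cds.REPORTNUMBER>arXiv:2007.12345 v2</cds.REPORTNUMBER> for details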
inspirehep/refextract
refextract/references/tag.py
tag_arxiv_more
def tag_arxiv_more(line):
    """Tag old arxiv report numbers

    Either formats:
    * hep-th/1234567
    * arXiv:1022111 [hep-ph] which transforms to hep-ph/1022111
    """
    line = RE_ARXIV_CATCHUP.sub(ur"\g<suffix>/\g<year>\g<month>\g<num>",
                                line)

    for report_re, report_repl in RE_OLD_ARXIV:
        report_number = report_repl + ur"/\g<num>"
        line = report_re.sub(
            u'<cds.REPORTNUMBER>' + report_number + u'</cds.REPORTNUMBER>',
            line
        )
    return line
python
def tag_arxiv_more(line):
    line = RE_ARXIV_CATCHUP.sub(ur"\g<suffix>/\g<year>\g<month>\g<num>",
                                line)

    for report_re, report_repl in RE_OLD_ARXIV:
        report_number = report_repl + ur"/\g<num>"
        line = report_re.sub(
            u'<cds.REPORTNUMBER>' + report_number + u'</cds.REPORTNUMBER>',
            line
        )
    return line
[ "def", "tag_arxiv_more", "(", "line", ")", ":", "line", "=", "RE_ARXIV_CATCHUP", ".", "sub", "(", "ur\"\\g<suffix>/\\g<year>\\g<month>\\g<num>\"", ",", "line", ")", "for", "report_re", ",", "report_repl", "in", "RE_OLD_ARXIV", ":", "report_number", "=", "report_repl", "+", "ur\"/\\g<num>\"", "line", "=", "report_re", ".", "sub", "(", "u'<cds.REPORTNUMBER>'", "+", "report_number", "+", "u'</cds.REPORTNUMBER>'", ",", "line", ")", "return", "line" ]
Tag old arxiv report numbers

Either formats:
* hep-th/1234567
* arXiv:1022111 [hep-ph] which transforms to hep-ph/1022111
[ "Tag", "old", "arxiv", "report", "numbers" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L387-L402
inspirehep/refextract
refextract/references/tag.py
tag_pos_volume
def tag_pos_volume(line):
    """Tag POS volume number

    POS is journal that has special volume numbers
    e.g. PoS LAT2007 (2007) 369
    """
    def tagger(match):
        groups = match.groupdict()
        try:
            year = match.group('year')
        except IndexError:
            # Extract year from volume name
            # which should always include the year
            g = re.search(re_pos_year_num, match.group('volume_num'),
                          re.UNICODE)
            year = g.group(0)

        if year:
            groups['year'] = ' <cds.YR>(%s)</cds.YR>' \
                             % year.strip().strip('()')
        else:
            groups['year'] = ''

        return '<cds.JOURNAL>PoS</cds.JOURNAL>' \
               ' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>' \
               '%(year)s' \
               ' <cds.PG>%(page)s</cds.PG>' % groups

    for p in re_pos:
        line = p.sub(tagger, line)

    return line
python
def tag_pos_volume(line):
    def tagger(match):
        groups = match.groupdict()
        try:
            year = match.group('year')
        except IndexError:
            g = re.search(re_pos_year_num, match.group('volume_num'),
                          re.UNICODE)
            year = g.group(0)

        if year:
            groups['year'] = ' <cds.YR>(%s)</cds.YR>' \
                             % year.strip().strip('()')
        else:
            groups['year'] = ''

        return '<cds.JOURNAL>PoS</cds.JOURNAL>' \
               ' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>' \
               '%(year)s' \
               ' <cds.PG>%(page)s</cds.PG>' % groups

    for p in re_pos:
        line = p.sub(tagger, line)

    return line
[ "def", "tag_pos_volume", "(", "line", ")", ":", "def", "tagger", "(", "match", ")", ":", "groups", "=", "match", ".", "groupdict", "(", ")", "try", ":", "year", "=", "match", ".", "group", "(", "'year'", ")", "except", "IndexError", ":", "# Extract year from volume name", "# which should always include the year", "g", "=", "re", ".", "search", "(", "re_pos_year_num", ",", "match", ".", "group", "(", "'volume_num'", ")", ",", "re", ".", "UNICODE", ")", "year", "=", "g", ".", "group", "(", "0", ")", "if", "year", ":", "groups", "[", "'year'", "]", "=", "' <cds.YR>(%s)</cds.YR>'", "%", "year", ".", "strip", "(", ")", ".", "strip", "(", "'()'", ")", "else", ":", "groups", "[", "'year'", "]", "=", "''", "return", "'<cds.JOURNAL>PoS</cds.JOURNAL>'", "' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>'", "'%(year)s'", "' <cds.PG>%(page)s</cds.PG>'", "%", "groups", "for", "p", "in", "re_pos", ":", "line", "=", "p", ".", "sub", "(", "tagger", ",", "line", ")", "return", "line" ]
Tag POS volume number

POS is journal that has special volume numbers
e.g. PoS LAT2007 (2007) 369
[ "Tag", "POS", "volume", "number" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L405-L436
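When a PoS reference omits an explicit year, tag_pos_volume falls back to digging it out of the volume name. The fallback idea in isolation (re_pos_year_num itself is not shown here, so this pattern is only a guess at its intent):

import re

volume_num = 'LAT2007'                    # e.g. from 'PoS LAT2007 (2007) 369'
m = re.search(r'(19|20)\d{2}', volume_num)
year = m.group(0) if m else None
print(year)  # -> 2007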
inspirehep/refextract
refextract/references/tag.py
find_numeration_more
def find_numeration_more(line):
    """Look for other numeration in line."""
    # First, attempt to use marked-up titles
    patterns = (
        re_correct_numeration_2nd_try_ptn1,
        re_correct_numeration_2nd_try_ptn2,
        re_correct_numeration_2nd_try_ptn3,
        re_correct_numeration_2nd_try_ptn4,
    )
    for pattern in patterns:
        match = pattern.search(line)
        if match:
            info = match.groupdict()
            series = extract_series_from_volume(info['vol'])
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt']
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt2']
            return {'year': info.get('year', None),
                    'series': series,
                    'volume': info['vol_num'],
                    'page': info['page'] or info['jinst_page'],
                    'page_end': info['page_end'],
                    'len': len(info['aftertitle'])}

    return None
python
def find_numeration_more(line):
    patterns = (
        re_correct_numeration_2nd_try_ptn1,
        re_correct_numeration_2nd_try_ptn2,
        re_correct_numeration_2nd_try_ptn3,
        re_correct_numeration_2nd_try_ptn4,
    )
    for pattern in patterns:
        match = pattern.search(line)
        if match:
            info = match.groupdict()
            series = extract_series_from_volume(info['vol'])
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt']
            if not info['vol_num']:
                info['vol_num'] = info['vol_num_alt2']
            return {'year': info.get('year', None),
                    'series': series,
                    'volume': info['vol_num'],
                    'page': info['page'] or info['jinst_page'],
                    'page_end': info['page_end'],
                    'len': len(info['aftertitle'])}

    return None
[ "def", "find_numeration_more", "(", "line", ")", ":", "# First, attempt to use marked-up titles", "patterns", "=", "(", "re_correct_numeration_2nd_try_ptn1", ",", "re_correct_numeration_2nd_try_ptn2", ",", "re_correct_numeration_2nd_try_ptn3", ",", "re_correct_numeration_2nd_try_ptn4", ",", ")", "for", "pattern", "in", "patterns", ":", "match", "=", "pattern", ".", "search", "(", "line", ")", "if", "match", ":", "info", "=", "match", ".", "groupdict", "(", ")", "series", "=", "extract_series_from_volume", "(", "info", "[", "'vol'", "]", ")", "if", "not", "info", "[", "'vol_num'", "]", ":", "info", "[", "'vol_num'", "]", "=", "info", "[", "'vol_num_alt'", "]", "if", "not", "info", "[", "'vol_num'", "]", ":", "info", "[", "'vol_num'", "]", "=", "info", "[", "'vol_num_alt2'", "]", "return", "{", "'year'", ":", "info", ".", "get", "(", "'year'", ",", "None", ")", ",", "'series'", ":", "series", ",", "'volume'", ":", "info", "[", "'vol_num'", "]", ",", "'page'", ":", "info", "[", "'page'", "]", "or", "info", "[", "'jinst_page'", "]", ",", "'page_end'", ":", "info", "[", "'page_end'", "]", ",", "'len'", ":", "len", "(", "info", "[", "'aftertitle'", "]", ")", "}", "return", "None" ]
Look for other numeration in line.
[ "Look", "for", "other", "numeration", "in", "line", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L456-L481
inspirehep/refextract
refextract/references/tag.py
add_tagged_report_number
def add_tagged_report_number(reading_line, len_reportnum, reportnum,
                             startpos, true_replacement_index, extras):
    """In rebuilding the line, add an identified institutional
    REPORT-NUMBER (standardised and tagged) into the line.
    @param reading_line: (string) The reference line before
    capitalization was performed, and before REPORT-NUMBERs and TITLEs
    were stripped out.
    @param len_reportnum: (integer) the length of the matched
    REPORT-NUMBER.
    @param reportnum: (string) the replacement text for the matched
    REPORT-NUMBER.
    @param startpos: (integer) the pointer to the next position in the
    reading-line from which to start rebuilding.
    @param true_replacement_index: (integer) the replacement index of the
    matched REPORT-NUMBER in the reading-line, with stripped punctuation
    and whitespace accounted for.
    @param extras: (integer) extras to be added into the replacement
    index.
    @return: (tuple) containing a string (the rebuilt line segment) and
    an integer (the next 'startpos' in the reading-line).
    """
    rebuilt_line = u""  # The segment of the line that's being rebuilt to
                        # include the tagged & standardised REPORT-NUMBER

    # Fill rebuilt_line with the contents of the reading_line up to the point
    # of the institutional REPORT-NUMBER. However, stop 1 character before the
    # replacement index of this REPORT-NUMBER to allow for removal of braces,
    # if necessary:
    if (true_replacement_index - startpos - 1) >= 0:
        rebuilt_line += reading_line[startpos:true_replacement_index - 1]
    else:
        rebuilt_line += reading_line[startpos:true_replacement_index]

    # Add the tagged REPORT-NUMBER into the rebuilt-line segment:
    rebuilt_line += u"<cds.REPORTNUMBER>%(reportnum)s</cds.REPORTNUMBER>" \
                    % {'reportnum': reportnum}

    # Move the pointer in the reading-line past the current match:
    startpos = true_replacement_index + len_reportnum + extras

    # Move past closing brace for report number (if there was one):
    try:
        if reading_line[startpos] in (u"]", u")"):
            startpos += 1
    except IndexError:
        # moved past end of line - ignore
        pass

    # return the rebuilt-line segment and the pointer to the next position in
    # the reading-line from which to start rebuilding up to the next match:
    return rebuilt_line, startpos
python
def add_tagged_report_number(reading_line, len_reportnum, reportnum,
                             startpos, true_replacement_index, extras):
    rebuilt_line = u""

    if (true_replacement_index - startpos - 1) >= 0:
        rebuilt_line += reading_line[startpos:true_replacement_index - 1]
    else:
        rebuilt_line += reading_line[startpos:true_replacement_index]

    rebuilt_line += u"<cds.REPORTNUMBER>%(reportnum)s</cds.REPORTNUMBER>" \
                    % {'reportnum': reportnum}

    startpos = true_replacement_index + len_reportnum + extras

    try:
        if reading_line[startpos] in (u"]", u")"):
            startpos += 1
    except IndexError:
        pass

    return rebuilt_line, startpos
[ "def", "add_tagged_report_number", "(", "reading_line", ",", "len_reportnum", ",", "reportnum", ",", "startpos", ",", "true_replacement_index", ",", "extras", ")", ":", "rebuilt_line", "=", "u\"\"", "# The segment of the line that's being rebuilt to", "# include the tagged & standardised REPORT-NUMBER", "# Fill rebuilt_line with the contents of the reading_line up to the point", "# of the institutional REPORT-NUMBER. However, stop 1 character before the", "# replacement index of this REPORT-NUMBER to allow for removal of braces,", "# if necessary:", "if", "(", "true_replacement_index", "-", "startpos", "-", "1", ")", ">=", "0", ":", "rebuilt_line", "+=", "reading_line", "[", "startpos", ":", "true_replacement_index", "-", "1", "]", "else", ":", "rebuilt_line", "+=", "reading_line", "[", "startpos", ":", "true_replacement_index", "]", "# Add the tagged REPORT-NUMBER into the rebuilt-line segment:", "rebuilt_line", "+=", "u\"<cds.REPORTNUMBER>%(reportnum)s</cds.REPORTNUMBER>\"", "%", "{", "'reportnum'", ":", "reportnum", "}", "# Move the pointer in the reading-line past the current match:", "startpos", "=", "true_replacement_index", "+", "len_reportnum", "+", "extras", "# Move past closing brace for report number (if there was one):", "try", ":", "if", "reading_line", "[", "startpos", "]", "in", "(", "u\"]\"", ",", "u\")\"", ")", ":", "startpos", "+=", "1", "except", "IndexError", ":", "# moved past end of line - ignore", "pass", "# return the rebuilt-line segment and the pointer to the next position in", "# the reading-line from which to start rebuilding up to the next match:", "return", "rebuilt_line", ",", "startpos" ]
In rebuilding the line, add an identified institutional REPORT-NUMBER
(standardised and tagged) into the line.

@param reading_line: (string) The reference line before capitalization
was performed, and before REPORT-NUMBERs and TITLEs were stripped out.
@param len_reportnum: (integer) the length of the matched REPORT-NUMBER.
@param reportnum: (string) the replacement text for the matched
REPORT-NUMBER.
@param startpos: (integer) the pointer to the next position in the
reading-line from which to start rebuilding.
@param true_replacement_index: (integer) the replacement index of the
matched REPORT-NUMBER in the reading-line, with stripped punctuation and
whitespace accounted for.
@param extras: (integer) extras to be added into the replacement index.
@return: (tuple) containing a string (the rebuilt line segment) and an
integer (the next 'startpos' in the reading-line).
[ "In", "rebuilding", "the", "line", "add", "an", "identified", "institutional", "REPORT", "-", "NUMBER", "(", "standardised", "and", "tagged", ")", "into", "the", "line", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L484-L535
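The off-by-one slice in add_tagged_report_number exists to drop a brace wrapping the report number, with the matching close brace skipped afterwards. A toy trace with made-up values:

line = 'preprint [CERN-TH-4036/84] shows ...'
startpos, idx, length, extras = 0, 10, 15, 0   # idx points at 'C', past '['

rebuilt = line[startpos:idx - 1]               # stop early: drops the '['
rebuilt += '<cds.REPORTNUMBER>CERN-TH-4036/84</cds.REPORTNUMBER>'
startpos = idx + length + extras               # now points at the ']'
if startpos < len(line) and line[startpos] in (']', ')'):
    startpos += 1                              # skip it, as the function does
print(rebuilt + line[startpos:])
# -> preprint <cds.REPORTNUMBER>CERN-TH-4036/84</cds.REPORTNUMBER> shows ...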
inspirehep/refextract
refextract/references/tag.py
add_tagged_journal
def add_tagged_journal(reading_line, journal_info, previous_match, startpos,
                       true_replacement_index, extras,
                       standardised_titles):
    """In rebuilding the line, add an identified periodical TITLE
    (standardised and tagged) into the line.
    @param reading_line: (string) The reference line before
    capitalization was performed, and before REPORT-NUMBERs and TITLEs
    were stripped out.
    @param len_title: (integer) the length of the matched TITLE.
    @param matched_title: (string) the matched TITLE text.
    @param previous_match: (dict) the previous periodical TITLE citation
    to have been matched in the current reference line. It is used when
    replacing an IBID instance in the line.
    @param startpos: (integer) the pointer to the next position in the
    reading-line from which to start rebuilding.
    @param true_replacement_index: (integer) the replacement index of the
    matched TITLE in the reading-line, with stripped punctuation and
    whitespace accounted for.
    @param extras: (integer) extras to be added into the replacement
    index.
    @param standardised_titles: (dictionary) the standardised versions of
    periodical titles, keyed by their various non-standard versions.
    @return: (tuple) containing a string (the rebuilt line segment), an
    integer (the next 'startpos' in the reading-line), and another string
    (the newly updated previous-match).
    """
    old_startpos = startpos
    old_previous_match = previous_match
    skip_numeration = False
    series = None

    def skip_ponctuation(line, pos):
        # Skip past any punctuation at the end of the replacement that was
        # just made:
        try:
            while line[pos] in (".", ":", "-", ")"):
                pos += 1
        except IndexError:
            # The match was at the very end of the line
            pass
        return pos

    # Fill 'rebuilt_line' (the segment of the line that is being rebuilt to
    # include the tagged and standardised periodical TITLE) with the contents
    # of the reading-line, up to the point of the matched TITLE:
    rebuilt_line = reading_line[startpos:true_replacement_index]

    # Test to see whether a title or an "IBID" was matched:
    if journal_info.upper().find("IBID") != -1:
        # This is an IBID
        # Try to replace the IBID with a title:
        if previous_match:
            # Replace this IBID with the previous title match, if possible:
            rebuilt_line += add_tagged_journal_in_place_of_IBID(previous_match)
            series = previous_match['series']
            # Update start position for next segment of original line:
            startpos = true_replacement_index + len(journal_info) + extras
            startpos = skip_ponctuation(reading_line, startpos)
        else:
            rebuilt_line = ""
            skip_numeration = True
    else:
        if ';' in standardised_titles[journal_info]:
            title, series = \
                standardised_titles[journal_info].rsplit(';', 1)
            series = series.strip()
            previous_match = {'title': title, 'series': series}
        else:
            title = standardised_titles[journal_info]
            previous_match = {'title': title, 'series': None}

        # This is a normal title, not an IBID
        rebuilt_line += "<cds.JOURNAL>%s</cds.JOURNAL>" % title
        startpos = true_replacement_index + len(journal_info) + extras
        startpos = skip_ponctuation(reading_line, startpos)

    if not skip_numeration:
        # Check for numeration
        numeration_line = reading_line[startpos:]
        # First look for standard numeration
        numerotation_info = find_numeration(numeration_line)
        if not numerotation_info:
            numeration_line = rebuilt_line + " " + numeration_line
            # Now look for more funky numeration
            # With possibly some elements before the journal title
            numerotation_info = find_numeration_more(numeration_line)

        if not numerotation_info:
            startpos = old_startpos
            previous_match = old_previous_match
            rebuilt_line = ""
        else:
            if series and not numerotation_info['series']:
                numerotation_info['series'] = series
            startpos += numerotation_info['len']
            rebuilt_line += create_numeration_tag(numerotation_info)
            previous_match['series'] = numerotation_info['series']

    # return the rebuilt line-segment, the position (of the reading line)
    # from which the next part of the rebuilt line should be started, and
    # the newly updated previous match.
    return rebuilt_line, startpos, previous_match
python
def add_tagged_journal(reading_line, journal_info, previous_match, startpos,
                       true_replacement_index, extras,
                       standardised_titles):
    old_startpos = startpos
    old_previous_match = previous_match
    skip_numeration = False
    series = None

    def skip_ponctuation(line, pos):
        try:
            while line[pos] in (".", ":", "-", ")"):
                pos += 1
        except IndexError:
            pass
        return pos

    rebuilt_line = reading_line[startpos:true_replacement_index]

    if journal_info.upper().find("IBID") != -1:
        if previous_match:
            rebuilt_line += add_tagged_journal_in_place_of_IBID(previous_match)
            series = previous_match['series']
            startpos = true_replacement_index + len(journal_info) + extras
            startpos = skip_ponctuation(reading_line, startpos)
        else:
            rebuilt_line = ""
            skip_numeration = True
    else:
        if ';' in standardised_titles[journal_info]:
            title, series = \
                standardised_titles[journal_info].rsplit(';', 1)
            series = series.strip()
            previous_match = {'title': title, 'series': series}
        else:
            title = standardised_titles[journal_info]
            previous_match = {'title': title, 'series': None}

        rebuilt_line += "<cds.JOURNAL>%s</cds.JOURNAL>" % title
        startpos = true_replacement_index + len(journal_info) + extras
        startpos = skip_ponctuation(reading_line, startpos)

    if not skip_numeration:
        numeration_line = reading_line[startpos:]
        numerotation_info = find_numeration(numeration_line)
        if not numerotation_info:
            numeration_line = rebuilt_line + " " + numeration_line
            numerotation_info = find_numeration_more(numeration_line)

        if not numerotation_info:
            startpos = old_startpos
            previous_match = old_previous_match
            rebuilt_line = ""
        else:
            if series and not numerotation_info['series']:
                numerotation_info['series'] = series
            startpos += numerotation_info['len']
            rebuilt_line += create_numeration_tag(numerotation_info)
            previous_match['series'] = numerotation_info['series']

    return rebuilt_line, startpos, previous_match
[ "def", "add_tagged_journal", "(", "reading_line", ",", "journal_info", ",", "previous_match", ",", "startpos", ",", "true_replacement_index", ",", "extras", ",", "standardised_titles", ")", ":", "old_startpos", "=", "startpos", "old_previous_match", "=", "previous_match", "skip_numeration", "=", "False", "series", "=", "None", "def", "skip_ponctuation", "(", "line", ",", "pos", ")", ":", "# Skip past any punctuation at the end of the replacement that was", "# just made:", "try", ":", "while", "line", "[", "pos", "]", "in", "(", "\".\"", ",", "\":\"", ",", "\"-\"", ",", "\")\"", ")", ":", "pos", "+=", "1", "except", "IndexError", ":", "# The match was at the very end of the line", "pass", "return", "pos", "# Fill 'rebuilt_line' (the segment of the line that is being rebuilt to", "# include the tagged and standardised periodical TITLE) with the contents", "# of the reading-line, up to the point of the matched TITLE:", "rebuilt_line", "=", "reading_line", "[", "startpos", ":", "true_replacement_index", "]", "# Test to see whether a title or an \"IBID\" was matched:", "if", "journal_info", ".", "upper", "(", ")", ".", "find", "(", "\"IBID\"", ")", "!=", "-", "1", ":", "# This is an IBID", "# Try to replace the IBID with a title:", "if", "previous_match", ":", "# Replace this IBID with the previous title match, if possible:", "rebuilt_line", "+=", "add_tagged_journal_in_place_of_IBID", "(", "previous_match", ")", "series", "=", "previous_match", "[", "'series'", "]", "# Update start position for next segment of original line:", "startpos", "=", "true_replacement_index", "+", "len", "(", "journal_info", ")", "+", "extras", "startpos", "=", "skip_ponctuation", "(", "reading_line", ",", "startpos", ")", "else", ":", "rebuilt_line", "=", "\"\"", "skip_numeration", "=", "True", "else", ":", "if", "';'", "in", "standardised_titles", "[", "journal_info", "]", ":", "title", ",", "series", "=", "standardised_titles", "[", "journal_info", "]", ".", "rsplit", "(", "';'", ",", "1", ")", "series", "=", "series", ".", "strip", "(", ")", "previous_match", "=", "{", "'title'", ":", "title", ",", "'series'", ":", "series", "}", "else", ":", "title", "=", "standardised_titles", "[", "journal_info", "]", "previous_match", "=", "{", "'title'", ":", "title", ",", "'series'", ":", "None", "}", "# This is a normal title, not an IBID", "rebuilt_line", "+=", "\"<cds.JOURNAL>%s</cds.JOURNAL>\"", "%", "title", "startpos", "=", "true_replacement_index", "+", "len", "(", "journal_info", ")", "+", "extras", "startpos", "=", "skip_ponctuation", "(", "reading_line", ",", "startpos", ")", "if", "not", "skip_numeration", ":", "# Check for numeration", "numeration_line", "=", "reading_line", "[", "startpos", ":", "]", "# First look for standard numeration", "numerotation_info", "=", "find_numeration", "(", "numeration_line", ")", "if", "not", "numerotation_info", ":", "numeration_line", "=", "rebuilt_line", "+", "\" \"", "+", "numeration_line", "# Now look for more funky numeration", "# With possibly some elements before the journal title", "numerotation_info", "=", "find_numeration_more", "(", "numeration_line", ")", "if", "not", "numerotation_info", ":", "startpos", "=", "old_startpos", "previous_match", "=", "old_previous_match", "rebuilt_line", "=", "\"\"", "else", ":", "if", "series", "and", "not", "numerotation_info", "[", "'series'", "]", ":", "numerotation_info", "[", "'series'", "]", "=", "series", "startpos", "+=", "numerotation_info", "[", "'len'", "]", "rebuilt_line", "+=", "create_numeration_tag", "(", 
"numerotation_info", ")", "previous_match", "[", "'series'", "]", "=", "numerotation_info", "[", "'series'", "]", "# return the rebuilt line-segment, the position (of the reading line) from", "# which the next part of the rebuilt line should be started, and the newly", "# updated previous match.", "return", "rebuilt_line", ",", "startpos", ",", "previous_match" ]
In rebuilding the line, add an identified periodical TITLE (standardised and tagged) into the line. @param reading_line: (string) The reference line before capitalization was performed, and before REPORT-NUMBERs and TITLEs were stripped out. @param journal_info: (string) the matched (non-standard) TITLE text. @param previous_match: (dict) the previous periodical TITLE citation to have been matched in the current reference line. It is used when replacing an IBID instance in the line. @param startpos: (integer) the pointer to the next position in the reading-line from which to start rebuilding. @param true_replacement_index: (integer) the replacement index of the matched TITLE in the reading-line, with stripped punctuation and whitespace accounted for. @param extras: (integer) extras to be added into the replacement index. @param standardised_titles: (dictionary) the standardised versions of periodical titles, keyed by their various non-standard versions. @return: (tuple) containing a string (the rebuilt line segment), an integer (the next 'startpos' in the reading-line), and a dictionary (the newly updated previous-match).
[ "In", "rebuilding", "the", "line", "add", "an", "identified", "periodical", "TITLE", "(", "standardised", "and", "tagged", ")", "into", "the", "line", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L582-L689
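A usage sketch for add_tagged_journal above. All inputs are hypothetical stand-ins: the real standardised_titles mapping comes from the journals knowledge base, and a value of the form u'Title;S' is split on its last ';' into title and series. Whether a numeration tag is produced depends on the module's numeration regexps:

    from refextract.references.tag import add_tagged_journal
    standardised_titles = {u'NUCL PHYS B': u'Nucl. Phys.;B'}  # hypothetical entry
    reading_line = u'Nucl. Phys. B 123 (1999) 45'
    # extras=2 accounts for the two dots stripped from the title in the working line:
    segment, nextpos, prev = add_tagged_journal(reading_line, u'NUCL PHYS B',
                                                None, 0, 0, 2, standardised_titles)
    # If the numeration regexps recognise u' 123 (1999) 45', 'segment' holds the
    # <cds.JOURNAL>Nucl. Phys.</cds.JOURNAL> tag plus a numeration tag, and 'prev'
    # records {'title': u'Nucl. Phys.', 'series': u'B'} for any later IBID.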
inspirehep/refextract
refextract/references/tag.py
add_tagged_publisher
def add_tagged_publisher(reading_line, matched_publisher, startpos, true_replacement_index, extras, kb_publishers): """In rebuilding the line, add an identified publisher name (standardised and tagged) into the line. @param reading_line: (string) The reference line before capitalization was performed, and before REPORT-NUMBERs and TITLEs were stripped out. @param matched_publisher: (string) the matched publisher text. @param startpos: (integer) the pointer to the next position in the reading-line from which to start rebuilding. @param true_replacement_index: (integer) the replacement index of the matched publisher in the reading-line, with stripped punctuation and whitespace accounted for. @param extras: (integer) extras to be added into the replacement index. @param kb_publishers: (dictionary) the publishers knowledge base, mapping each recognisable publisher string to a dictionary whose 'repl' key holds its standardised replacement. @return: (tuple) containing a string (the rebuilt line segment) and an integer (the next 'startpos' in the reading-line). """ # Fill 'rebuilt_line' (the segment of the line that is being rebuilt to # include the tagged and standardised periodical TITLE) with the contents # of the reading-line, up to the point of the matched TITLE: rebuilt_line = reading_line[startpos:true_replacement_index] # This is a normal title, not an IBID rebuilt_line += "<cds.PUBLISHER>%(title)s</cds.PUBLISHER>" \ % {'title': kb_publishers[matched_publisher]['repl']} # Compute new start pos startpos = true_replacement_index + len(matched_publisher) + extras # return the rebuilt line-segment, the position (of the reading line) from # which the next part of the rebuilt line should be started, and the newly # updated previous match. return rebuilt_line, startpos
python
def add_tagged_publisher(reading_line, matched_publisher, startpos, true_replacement_index, extras, kb_publishers): rebuilt_line = reading_line[startpos:true_replacement_index] rebuilt_line += "<cds.PUBLISHER>%(title)s</cds.PUBLISHER>" \ % {'title': kb_publishers[matched_publisher]['repl']} startpos = true_replacement_index + len(matched_publisher) + extras return rebuilt_line, startpos
[ "def", "add_tagged_publisher", "(", "reading_line", ",", "matched_publisher", ",", "startpos", ",", "true_replacement_index", ",", "extras", ",", "kb_publishers", ")", ":", "# Fill 'rebuilt_line' (the segment of the line that is being rebuilt to", "# include the tagged and standardised periodical TITLE) with the contents", "# of the reading-line, up to the point of the matched TITLE:", "rebuilt_line", "=", "reading_line", "[", "startpos", ":", "true_replacement_index", "]", "# This is a normal title, not an IBID", "rebuilt_line", "+=", "\"<cds.PUBLISHER>%(title)s</cds.PUBLISHER>\"", "%", "{", "'title'", ":", "kb_publishers", "[", "matched_publisher", "]", "[", "'repl'", "]", "}", "# Compute new start pos", "startpos", "=", "true_replacement_index", "+", "len", "(", "matched_publisher", ")", "+", "extras", "# return the rebuilt line-segment, the position (of the reading line) from", "# which the next part of the rebuilt line should be started, and the newly", "# updated previous match.", "return", "rebuilt_line", ",", "startpos" ]
In rebuilding the line, add an identified publisher name (standardised and tagged) into the line. @param reading_line: (string) The reference line before capitalization was performed, and before REPORT-NUMBERs and TITLEs were stripped out. @param matched_publisher: (string) the matched publisher text. @param startpos: (integer) the pointer to the next position in the reading-line from which to start rebuilding. @param true_replacement_index: (integer) the replacement index of the matched publisher in the reading-line, with stripped punctuation and whitespace accounted for. @param extras: (integer) extras to be added into the replacement index. @param kb_publishers: (dictionary) the publishers knowledge base, mapping each recognisable publisher string to a dictionary whose 'repl' key holds its standardised replacement. @return: (tuple) containing a string (the rebuilt line segment) and an integer (the next 'startpos' in the reading-line).
[ "In", "rebuilding", "the", "line", "add", "an", "identified", "periodical", "TITLE", "(", "standardised", "and", "tagged", ")", "into", "the", "line", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L692-L733
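A minimal sketch of the kb_publishers structure the function above reads (only the 'repl' key is consulted; the entry itself is hypothetical):

    from refextract.references.tag import add_tagged_publisher
    kb_publishers = {u'OXFORD UNIV PRESS': {'repl': u'Oxford University Press'}}
    segment, nextpos = add_tagged_publisher(u'OXFORD UNIV PRESS 1999',
                                            u'OXFORD UNIV PRESS',
                                            0, 0, 0, kb_publishers)
    # segment == u'<cds.PUBLISHER>Oxford University Press</cds.PUBLISHER>'
    # nextpos == 17 (the index just past the matched publisher text)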
inspirehep/refextract
refextract/references/tag.py
get_replacement_types
def get_replacement_types(titles, reportnumbers, publishers): """Given the indices of the titles, reportnumbers and publishers that have been recognised within a reference line, create a dictionary keyed by the replacement position in the line, where the value for each key is a string describing the type of item replaced at that position in the line. The description strings are: 'journal' - indicating that the replacement is a periodical title; 'reportnumber' - indicating that the replacement is a preprint report number; 'publisher' - indicating that the replacement is a publisher name. @param titles: (list) of locations in the string at which periodical titles were found. @param reportnumbers: (list) of locations in the string at which reportnumbers were found. @param publishers: (list) of locations in the string at which publisher names were found. @return: (dictionary) of replacement types at various locations within the string. """ rep_types = {} for item_idx in titles: rep_types[item_idx] = "journal" for item_idx in reportnumbers: rep_types[item_idx] = "reportnumber" for item_idx in publishers: rep_types[item_idx] = "publisher" return rep_types
python
def get_replacement_types(titles, reportnumbers, publishers): rep_types = {} for item_idx in titles: rep_types[item_idx] = "journal" for item_idx in reportnumbers: rep_types[item_idx] = "reportnumber" for item_idx in publishers: rep_types[item_idx] = "publisher" return rep_types
[ "def", "get_replacement_types", "(", "titles", ",", "reportnumbers", ",", "publishers", ")", ":", "rep_types", "=", "{", "}", "for", "item_idx", "in", "titles", ":", "rep_types", "[", "item_idx", "]", "=", "\"journal\"", "for", "item_idx", "in", "reportnumbers", ":", "rep_types", "[", "item_idx", "]", "=", "\"reportnumber\"", "for", "item_idx", "in", "publishers", ":", "rep_types", "[", "item_idx", "]", "=", "\"publisher\"", "return", "rep_types" ]
Given the indices of the titles, reportnumbers and publishers that have been recognised within a reference line, create a dictionary keyed by the replacement position in the line, where the value for each key is a string describing the type of item replaced at that position in the line. The description strings are: 'journal' - indicating that the replacement is a periodical title; 'reportnumber' - indicating that the replacement is a preprint report number; 'publisher' - indicating that the replacement is a publisher name. @param titles: (list) of locations in the string at which periodical titles were found. @param reportnumbers: (list) of locations in the string at which reportnumbers were found. @param publishers: (list) of locations in the string at which publisher names were found. @return: (dictionary) of replacement types at various locations within the string.
[ "Given", "the", "indices", "of", "the", "titles", "and", "reportnumbers", "that", "have", "been", "recognised", "within", "a", "reference", "line", "create", "a", "dictionary", "keyed", "by", "the", "replacement", "position", "in", "the", "line", "where", "the", "value", "for", "each", "key", "is", "a", "string", "describing", "the", "type", "of", "item", "replaced", "at", "that", "position", "in", "the", "line", ".", "The", "description", "strings", "are", ":", "title", "-", "indicating", "that", "the", "replacement", "is", "a", "periodical", "title", "reportnumber", "-", "indicating", "that", "the", "replacement", "is", "a", "preprint", "report", "number", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L736-L761
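The mapping produced by get_replacement_types is easy to verify directly (the index values below are arbitrary examples):

    from refextract.references.tag import get_replacement_types
    get_replacement_types(titles=[4, 51], reportnumbers=[13], publishers=[])
    # -> {4: 'journal', 13: 'reportnumber', 51: 'journal'}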
inspirehep/refextract
refextract/references/tag.py
account_for_stripped_whitespace
def account_for_stripped_whitespace(spaces_keys, removed_spaces, replacement_types, len_reportnums, journals_matches, replacement_index): """To build a processed (MARC XML) reference line in which the recognised citations such as standardised periodical TITLEs and REPORT-NUMBERs have been marked up, it is necessary to read from the reference line BEFORE all punctuation was stripped and it was made into upper-case. The indices of the cited items in this 'original line', however, will be different to those in the 'working-line', in which punctuation and multiple-spaces were stripped out. For example, the following reading-line: [26] E. Witten and S.-T. Yau, hep-th/9910245. ...becomes (after punctuation and multiple white-space stripping): [26] E WITTEN AND S T YAU HEP TH/9910245 It can be seen that the report-number citation (hep-th/9910245) is at a different index in the two strings. When refextract searches for this citation, it uses the 2nd string (i.e. that which is capitalised and has no punctuation). When it builds the MARC XML representation of the reference line, however, it needs to read from the first string. It must therefore consider the whitespace, punctuation, etc that has been removed, in order to get the correct index for the cited item. This function accounts for the stripped characters before a given TITLE or REPORT-NUMBER index. @param spaces_keys: (list) - the indices at which spaces were removed from the reference line. @param removed_spaces: (dictionary) - keyed by the indices at which spaces were removed from the line, the values are the number of spaces actually removed from that position. So, for example, "3 spaces were removed from position 25 in the line." @param replacement_types: (dictionary) - at each 'replacement_index' in the line, the type of replacement to make (journal or reportnumber). @param len_reportnums: (dictionary) - the lengths of the REPORT- NUMBERs matched at the various indices in the line. @param journals_matches: (dictionary) - the text of the various TITLEs matched at the various indices in the line. @param replacement_index: (integer) - the index in the working line of the identified TITLE or REPORT-NUMBER citation. @return: (tuple) containing 2 elements: + the true replacement index of a replacement in the reading line; + any extras to add into the replacement index; """ extras = 0 true_replacement_index = replacement_index spare_replacement_index = replacement_index for space in spaces_keys: if space < true_replacement_index: # There were spaces stripped before the current replacement # Add the number of spaces removed from this location to the # current replacement index: true_replacement_index += removed_spaces[space] spare_replacement_index += removed_spaces[space] elif space >= spare_replacement_index and \ replacement_types[replacement_index] == u"journal" and \ space < (spare_replacement_index + len(journals_matches[replacement_index])): # A periodical title is being replaced. Account for multi-spaces # that may have been stripped from the title before its # recognition: spare_replacement_index += removed_spaces[space] extras += removed_spaces[space] elif space >= spare_replacement_index and \ replacement_types[replacement_index] == u"reportnumber" and \ space < (spare_replacement_index + len_reportnums[replacement_index]): # An institutional preprint report-number is being replaced.
# Account for multi-spaces that may have been stripped from it # before its recognition: spare_replacement_index += removed_spaces[space] extras += removed_spaces[space] # return the new values for replacement indices with stripped # whitespace accounted for: return true_replacement_index, extras
python
def account_for_stripped_whitespace(spaces_keys, removed_spaces, replacement_types, len_reportnums, journals_matches, replacement_index): extras = 0 true_replacement_index = replacement_index spare_replacement_index = replacement_index for space in spaces_keys: if space < true_replacement_index: true_replacement_index += removed_spaces[space] spare_replacement_index += removed_spaces[space] elif space >= spare_replacement_index and \ replacement_types[replacement_index] == u"journal" and \ space < (spare_replacement_index + len(journals_matches[replacement_index])): spare_replacement_index += removed_spaces[space] extras += removed_spaces[space] elif space >= spare_replacement_index and \ replacement_types[replacement_index] == u"reportnumber" and \ space < (spare_replacement_index + len_reportnums[replacement_index]): spare_replacement_index += removed_spaces[space] extras += removed_spaces[space] return true_replacement_index, extras
[ "def", "account_for_stripped_whitespace", "(", "spaces_keys", ",", "removed_spaces", ",", "replacement_types", ",", "len_reportnums", ",", "journals_matches", ",", "replacement_index", ")", ":", "extras", "=", "0", "true_replacement_index", "=", "replacement_index", "spare_replacement_index", "=", "replacement_index", "for", "space", "in", "spaces_keys", ":", "if", "space", "<", "true_replacement_index", ":", "# There were spaces stripped before the current replacement", "# Add the number of spaces removed from this location to the", "# current replacement index:", "true_replacement_index", "+=", "removed_spaces", "[", "space", "]", "spare_replacement_index", "+=", "removed_spaces", "[", "space", "]", "elif", "space", ">=", "spare_replacement_index", "and", "replacement_types", "[", "replacement_index", "]", "==", "u\"journal\"", "and", "space", "<", "(", "spare_replacement_index", "+", "len", "(", "journals_matches", "[", "replacement_index", "]", ")", ")", ":", "# A periodical title is being replaced. Account for multi-spaces", "# that may have been stripped from the title before its", "# recognition:", "spare_replacement_index", "+=", "removed_spaces", "[", "space", "]", "extras", "+=", "removed_spaces", "[", "space", "]", "elif", "space", ">=", "spare_replacement_index", "and", "replacement_types", "[", "replacement_index", "]", "==", "u\"reportnumber\"", "and", "space", "<", "(", "spare_replacement_index", "+", "len_reportnums", "[", "replacement_index", "]", ")", ":", "# An institutional preprint report-number is being replaced.", "# Account for multi-spaces that may have been stripped from it", "# before its recognition:", "spare_replacement_index", "+=", "removed_spaces", "[", "space", "]", "extras", "+=", "removed_spaces", "[", "space", "]", "# return the new values for replacement indices with stripped", "# whitespace accounted for:", "return", "true_replacement_index", ",", "extras" ]
To build a processed (MARC XML) reference line in which the recognised citations such as standardised periodical TITLEs and REPORT-NUMBERs have been marked up, it is necessary to read from the reference line BEFORE all punctuation was stripped and it was made into upper-case. The indices of the cited items in this 'original line', however, will be different to those in the 'working-line', in which punctuation and multiple-spaces were stripped out. For example, the following reading-line: [26] E. Witten and S.-T. Yau, hep-th/9910245. ...becomes (after punctuation and multiple white-space stripping): [26] E WITTEN AND S T YAU HEP TH/9910245 It can be seen that the report-number citation (hep-th/9910245) is at a different index in the two strings. When refextract searches for this citation, it uses the 2nd string (i.e. that which is capitalised and has no punctuation). When it builds the MARC XML representation of the reference line, however, it needs to read from the first string. It must therefore consider the whitespace, punctuation, etc that has been removed, in order to get the correct index for the cited item. This function accounts for the stripped characters before a given TITLE or REPORT-NUMBER index. @param spaces_keys: (list) - the indices at which spaces were removed from the reference line. @param removed_spaces: (dictionary) - keyed by the indices at which spaces were removed from the line, the values are the number of spaces actually removed from that position. So, for example, "3 spaces were removed from position 25 in the line." @param replacement_types: (dictionary) - at each 'replacement_index' in the line, the type of replacement to make (journal or reportnumber). @param len_reportnums: (dictionary) - the lengths of the REPORT- NUMBERs matched at the various indices in the line. @param journals_matches: (dictionary) - the text of the various TITLEs matched at the various indices in the line. @param replacement_index: (integer) - the index in the working line of the identified TITLE or REPORT-NUMBER citation. @return: (tuple) containing 2 elements: + the true replacement index of a replacement in the reading line; + any extras to add into the replacement index;
[ "To", "build", "a", "processed", "(", "MARC", "XML", ")", "reference", "line", "in", "which", "the", "recognised", "citations", "such", "as", "standardised", "periodical", "TITLEs", "and", "REPORT", "-", "NUMBERs", "have", "been", "marked", "up", "it", "is", "necessary", "to", "read", "from", "the", "reference", "line", "BEFORE", "all", "punctuation", "was", "stripped", "and", "it", "was", "made", "into", "upper", "-", "case", ".", "The", "indices", "of", "the", "cited", "items", "in", "this", "original", "line", "however", "will", "be", "different", "to", "those", "in", "the", "working", "-", "line", "in", "which", "punctuation", "and", "multiple", "-", "spaces", "were", "stripped", "out", ".", "For", "example", "the", "following", "reading", "-", "line", ":" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L764-L844
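A worked example of the whitespace accounting above (all values hypothetical): two spaces stripped at index 10 shift a report-number match found at index 25 of the working line to index 27 of the reading line:

    from refextract.references.tag import account_for_stripped_whitespace
    true_idx, extras = account_for_stripped_whitespace(
        spaces_keys=[10], removed_spaces={10: 2},
        replacement_types={25: u'reportnumber'}, len_reportnums={25: 15},
        journals_matches={}, replacement_index=25)
    # true_idx == 27, extras == 0 (no spaces were stripped inside the match itself)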
inspirehep/refextract
refextract/references/tag.py
identify_and_tag_collaborations
def identify_and_tag_collaborations(line, collaborations_kb): """Given a line where Authors have been tagged, and all other tags and content has been replaced with underscores, go through and try to identify extra items of data which should be placed into 'h' subfields. Later on, these tagged pieces of information will be merged into the content of the most recently found author. This is separated from the author tagging procedure since separate tags can be used, which won't influence the reference splitting heuristics (used when looking at multiple <AUTH> tags in a line). """ for dummy_collab, re_collab in collaborations_kb.iteritems(): matches = re_collab.finditer(strip_tags(line)) for match in reversed(list(matches)): line = line[:match.start()] \ + CFG_REFEXTRACT_MARKER_OPENING_COLLABORATION \ + match.group(1).strip(".,:;- [](){}") \ + CFG_REFEXTRACT_MARKER_CLOSING_COLLABORATION \ + line[match.end():] return line
python
def identify_and_tag_collaborations(line, collaborations_kb): for dummy_collab, re_collab in collaborations_kb.iteritems(): matches = re_collab.finditer(strip_tags(line)) for match in reversed(list(matches)): line = line[:match.start()] \ + CFG_REFEXTRACT_MARKER_OPENING_COLLABORATION \ + match.group(1).strip(".,:;- [](){}") \ + CFG_REFEXTRACT_MARKER_CLOSING_COLLABORATION \ + line[match.end():] return line
[ "def", "identify_and_tag_collaborations", "(", "line", ",", "collaborations_kb", ")", ":", "for", "dummy_collab", ",", "re_collab", "in", "collaborations_kb", ".", "iteritems", "(", ")", ":", "matches", "=", "re_collab", ".", "finditer", "(", "strip_tags", "(", "line", ")", ")", "for", "match", "in", "reversed", "(", "list", "(", "matches", ")", ")", ":", "line", "=", "line", "[", ":", "match", ".", "start", "(", ")", "]", "+", "CFG_REFEXTRACT_MARKER_OPENING_COLLABORATION", "+", "match", ".", "group", "(", "1", ")", ".", "strip", "(", "\".,:;- [](){}\"", ")", "+", "CFG_REFEXTRACT_MARKER_CLOSING_COLLABORATION", "+", "line", "[", "match", ".", "end", "(", ")", ":", "]", "return", "line" ]
Given a line where Authors have been tagged, and all other tags and content has been replaced with underscores, go through and try to identify extra items of data which should be placed into 'h' subfields. Later on, these tagged pieces of information will be merged into the content of the most recently found author. This is separated from the author tagging procedure since separate tags can be used, which won't influence the reference splitting heuristics (used when looking at multiple <AUTH> tags in a line).
[ "Given", "a", "line", "where", "Authors", "have", "been", "tagged", "and", "all", "other", "tags", "and", "content", "has", "been", "replaced", "with", "underscores", "go", "through", "and", "try", "to", "identify", "extra", "items", "of", "data", "which", "should", "be", "placed", "into", "h", "subfields", ".", "Later", "on", "these", "tagged", "pieces", "of", "information", "will", "be", "merged", "into", "the", "content", "of", "the", "most", "recently", "found", "author", ".", "This", "is", "separated", "from", "the", "author", "tagging", "procedure", "since", "separate", "tags", "can", "be", "used", "which", "won", "t", "influence", "the", "reference", "splitting", "heuristics", "(", "used", "when", "looking", "at", "mulitple", "<AUTH", ">", "tags", "in", "a", "line", ")", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L860-L881
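A sketch of the collaborations_kb structure the function above iterates over. The entry is hypothetical; each compiled pattern must capture the collaboration name in group 1, and the code is Python 2 (hence iteritems). Offsets from the tag-stripped line are reused on the original line, which works because strip_tags is described as replacing tagged content with underscores of equal length:

    import re
    from refextract.references.tag import identify_and_tag_collaborations
    collaborations_kb = {'ATLAS': re.compile(r'\b(ATLAS Collaboration)')}
    tagged = identify_and_tag_collaborations(u'[7] ATLAS Collaboration, results...',
                                             collaborations_kb)
    # Any match comes back wrapped in the collaboration marker constants,
    # stripped of surrounding punctuation.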
inspirehep/refextract
refextract/references/tag.py
identify_and_tag_authors
def identify_and_tag_authors(line, authors_kb): """Given a reference, look for a group of author names, place tags around the author group, return the newly tagged line. """ re_auth, re_auth_near_miss = get_author_regexps() # Replace authors which do not convert well from utf-8 for pattern, repl in authors_kb: line = line.replace(pattern, repl) output_line = line # We matched authors here line = strip_tags(output_line) matched_authors = list(re_auth.finditer(line)) # We try to have better results by unidecoding unidecoded_line = strip_tags(unidecode(output_line)) matched_authors_unidecode = list(re_auth.finditer(unidecoded_line)) if len(matched_authors_unidecode) > len(matched_authors): output_line = unidecode(output_line) matched_authors = matched_authors_unidecode # If there is at least one matched author group if matched_authors: matched_positions = [] preceeding_text_string = line preceeding_text_start = 0 for auth_no, match in enumerate(matched_authors): # Only if there are no underscores or closing arrows found in the matched author group # This must be checked for here, as it cannot be applied to the re without clashing with # other Unicode characters if line[match.start():match.end()].find("_") == -1: # Has the group with name 'et' (for 'et al') been found in the pattern? # Has the group with name 'es' (for ed. before the author) been found in the pattern? # Has the group with name 'ee' (for ed. after the author) been # found in the pattern? matched_positions.append({ 'start': match.start(), 'end': match.end(), 'etal': match.group('et') or match.group('et2'), 'ed_start': match.group('es'), 'ed_end': match.group('ee'), 'multi_auth': match.group('multi_auth'), 'multi_surs': match.group('multi_surs'), 'text_before': preceeding_text_string[preceeding_text_start:match.start()], 'auth_no': auth_no, 'author_names': match.group('author_names') }) # Save the end of the match, from where to snip the misc text # found before an author match preceeding_text_start = match.end() # Work backwards to avoid index problems when adding AUTH tags matched_positions.reverse() for m in matched_positions: dump_in_misc = False start = m['start'] end = m['end'] # Check the text before the current match to see if it has a bad # 'et al' lower_text_before = m['text_before'].strip().lower() for e in etal_matches: if lower_text_before.endswith(e): # If so, this author match is likely to be a bad match on a # missed title dump_in_misc = True break # An AND found here likely indicates a missed author before this text # Thus, triggers weaker author searching, within the previous misc text # (Check the text before the current match to see if it has a bad 'and') # A bad 'and' will only be denoted as such if there exists only one author after it # and the author group is legit (not to be dumped in misc) if not dump_in_misc and not (m['multi_auth'] or m['multi_surs']) \ and (lower_text_before.endswith(' and')): # Search using a weaker author pattern to try and find the # missed author(s) (cut away the end 'and') weaker_match = re_auth_near_miss.match(m['text_before']) if weaker_match and not (weaker_match.group('es') or weaker_match.group('ee')): # Change the start of the author group to include this new # author group start = start - \ (len(m['text_before']) - weaker_match.start()) # Still no match, do not add tags for this author match.. 
dump # it into misc else: dump_in_misc = True add_to_misc = "" # If a semi-colon was found at the end of this author group, keep it in misc # so that it can be looked at for splitting heuristics if len(output_line) > m['end']: if output_line[m['end']].strip(" ,.") == ';': add_to_misc = ';' # Standardize eds. notation tmp_output_line = re.sub(re_ed_notation, '(ed.)', output_line[start:end]) # Standardize et al. notation tmp_output_line = re.sub(re_etal, 'et al.', tmp_output_line) # Strip tmp_output_line = tmp_output_line.lstrip('.').strip(",:;- [](") if not tmp_output_line.endswith('(ed.)'): tmp_output_line = tmp_output_line.strip(')') # ONLY wrap author data with tags IF there is no evidence that it is an # ed. author. (i.e. The author is not referred to as an editor) # Does this author group string have 'et al.'? if m['etal'] and not (m['ed_start'] or m['ed_end'] or dump_in_misc): output_line = output_line[:start] \ + "<cds.AUTHetal>" \ + tmp_output_line \ + CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL \ + add_to_misc \ + output_line[end:] elif not (m['ed_start'] or m['ed_end'] or dump_in_misc): # Insert the std (standard) tag output_line = output_line[:start] \ + "<cds.AUTHstnd>" \ + tmp_output_line \ + CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND \ + add_to_misc \ + output_line[end:] # Apply the 'include in $h' method to author groups marked as # editors elif m['ed_start'] or m['ed_end']: ed_notation = " (eds.)" # Standardize et al. notation tmp_output_line = re.sub(re_etal, 'et al.', m['author_names']) # remove any characters which denote this author group # to be editors, just take the # author names, and append '(ed.)' output_line = output_line[:start] \ + "<cds.AUTHincl>" \ + tmp_output_line.strip(",:;- [](") \ + ed_notation \ + CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL \ + add_to_misc \ + output_line[end:] return output_line
python
def identify_and_tag_authors(line, authors_kb): re_auth, re_auth_near_miss = get_author_regexps() for pattern, repl in authors_kb: line = line.replace(pattern, repl) output_line = line line = strip_tags(output_line) matched_authors = list(re_auth.finditer(line)) unidecoded_line = strip_tags(unidecode(output_line)) matched_authors_unidecode = list(re_auth.finditer(unidecoded_line)) if len(matched_authors_unidecode) > len(matched_authors): output_line = unidecode(output_line) matched_authors = matched_authors_unidecode if matched_authors: matched_positions = [] preceeding_text_string = line preceeding_text_start = 0 for auth_no, match in enumerate(matched_authors): if line[match.start():match.end()].find("_") == -1: matched_positions.append({ 'start': match.start(), 'end': match.end(), 'etal': match.group('et') or match.group('et2'), 'ed_start': match.group('es'), 'ed_end': match.group('ee'), 'multi_auth': match.group('multi_auth'), 'multi_surs': match.group('multi_surs'), 'text_before': preceeding_text_string[preceeding_text_start:match.start()], 'auth_no': auth_no, 'author_names': match.group('author_names') }) preceeding_text_start = match.end() matched_positions.reverse() for m in matched_positions: dump_in_misc = False start = m['start'] end = m['end'] lower_text_before = m['text_before'].strip().lower() for e in etal_matches: if lower_text_before.endswith(e): dump_in_misc = True break if not dump_in_misc and not (m['multi_auth'] or m['multi_surs']) \ and (lower_text_before.endswith(' and')): weaker_match = re_auth_near_miss.match(m['text_before']) if weaker_match and not (weaker_match.group('es') or weaker_match.group('ee')): start = start - \ (len(m['text_before']) - weaker_match.start()) else: dump_in_misc = True add_to_misc = "" if len(output_line) > m['end']: if output_line[m['end']].strip(" ,.") == ';': add_to_misc = ';' tmp_output_line = re.sub(re_ed_notation, '(ed.)', output_line[start:end]) tmp_output_line = re.sub(re_etal, 'et al.', tmp_output_line) tmp_output_line = tmp_output_line.lstrip('.').strip(",:;- [](") if not tmp_output_line.endswith('(ed.)'): tmp_output_line = tmp_output_line.strip(')') if m['etal'] and not (m['ed_start'] or m['ed_end'] or dump_in_misc): output_line = output_line[:start] \ + "<cds.AUTHetal>" \ + tmp_output_line \ + CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL \ + add_to_misc \ + output_line[end:] elif not (m['ed_start'] or m['ed_end'] or dump_in_misc): output_line = output_line[:start] \ + "<cds.AUTHstnd>" \ + tmp_output_line \ + CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND \ + add_to_misc \ + output_line[end:] elif m['ed_start'] or m['ed_end']: ed_notation = " (eds.)" tmp_output_line = re.sub(re_etal, 'et al.', m['author_names']) output_line = output_line[:start] \ + "<cds.AUTHincl>" \ + tmp_output_line.strip(",:;- [](") \ + ed_notation \ + CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL \ + add_to_misc \ + output_line[end:] return output_line
[ "def", "identify_and_tag_authors", "(", "line", ",", "authors_kb", ")", ":", "re_auth", ",", "re_auth_near_miss", "=", "get_author_regexps", "(", ")", "# Replace authors which do not convert well from utf-8", "for", "pattern", ",", "repl", "in", "authors_kb", ":", "line", "=", "line", ".", "replace", "(", "pattern", ",", "repl", ")", "output_line", "=", "line", "# We matched authors here", "line", "=", "strip_tags", "(", "output_line", ")", "matched_authors", "=", "list", "(", "re_auth", ".", "finditer", "(", "line", ")", ")", "# We try to have better results by unidecoding", "unidecoded_line", "=", "strip_tags", "(", "unidecode", "(", "output_line", ")", ")", "matched_authors_unidecode", "=", "list", "(", "re_auth", ".", "finditer", "(", "unidecoded_line", ")", ")", "if", "len", "(", "matched_authors_unidecode", ")", ">", "len", "(", "matched_authors", ")", ":", "output_line", "=", "unidecode", "(", "output_line", ")", "matched_authors", "=", "matched_authors_unidecode", "# If there is at least one matched author group", "if", "matched_authors", ":", "matched_positions", "=", "[", "]", "preceeding_text_string", "=", "line", "preceeding_text_start", "=", "0", "for", "auth_no", ",", "match", "in", "enumerate", "(", "matched_authors", ")", ":", "# Only if there are no underscores or closing arrows found in the matched author group", "# This must be checked for here, as it cannot be applied to the re without clashing with", "# other Unicode characters", "if", "line", "[", "match", ".", "start", "(", ")", ":", "match", ".", "end", "(", ")", "]", ".", "find", "(", "\"_\"", ")", "==", "-", "1", ":", "# Has the group with name 'et' (for 'et al') been found in the pattern?", "# Has the group with name 'es' (for ed. before the author) been found in the pattern?", "# Has the group with name 'ee' (for ed. 
after the author) been", "# found in the pattern?", "matched_positions", ".", "append", "(", "{", "'start'", ":", "match", ".", "start", "(", ")", ",", "'end'", ":", "match", ".", "end", "(", ")", ",", "'etal'", ":", "match", ".", "group", "(", "'et'", ")", "or", "match", ".", "group", "(", "'et2'", ")", ",", "'ed_start'", ":", "match", ".", "group", "(", "'es'", ")", ",", "'ed_end'", ":", "match", ".", "group", "(", "'ee'", ")", ",", "'multi_auth'", ":", "match", ".", "group", "(", "'multi_auth'", ")", ",", "'multi_surs'", ":", "match", ".", "group", "(", "'multi_surs'", ")", ",", "'text_before'", ":", "preceeding_text_string", "[", "preceeding_text_start", ":", "match", ".", "start", "(", ")", "]", ",", "'auth_no'", ":", "auth_no", ",", "'author_names'", ":", "match", ".", "group", "(", "'author_names'", ")", "}", ")", "# Save the end of the match, from where to snip the misc text", "# found before an author match", "preceeding_text_start", "=", "match", ".", "end", "(", ")", "# Work backwards to avoid index problems when adding AUTH tags", "matched_positions", ".", "reverse", "(", ")", "for", "m", "in", "matched_positions", ":", "dump_in_misc", "=", "False", "start", "=", "m", "[", "'start'", "]", "end", "=", "m", "[", "'end'", "]", "# Check the text before the current match to see if it has a bad", "# 'et al'", "lower_text_before", "=", "m", "[", "'text_before'", "]", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "e", "in", "etal_matches", ":", "if", "lower_text_before", ".", "endswith", "(", "e", ")", ":", "# If so, this author match is likely to be a bad match on a", "# missed title", "dump_in_misc", "=", "True", "break", "# An AND found here likely indicates a missed author before this text", "# Thus, triggers weaker author searching, within the previous misc text", "# (Check the text before the current match to see if it has a bad 'and')", "# A bad 'and' will only be denoted as such if there exists only one author after it", "# and the author group is legit (not to be dumped in misc)", "if", "not", "dump_in_misc", "and", "not", "(", "m", "[", "'multi_auth'", "]", "or", "m", "[", "'multi_surs'", "]", ")", "and", "(", "lower_text_before", ".", "endswith", "(", "' and'", ")", ")", ":", "# Search using a weaker author pattern to try and find the", "# missed author(s) (cut away the end 'and')", "weaker_match", "=", "re_auth_near_miss", ".", "match", "(", "m", "[", "'text_before'", "]", ")", "if", "weaker_match", "and", "not", "(", "weaker_match", ".", "group", "(", "'es'", ")", "or", "weaker_match", ".", "group", "(", "'ee'", ")", ")", ":", "# Change the start of the author group to include this new", "# author group", "start", "=", "start", "-", "(", "len", "(", "m", "[", "'text_before'", "]", ")", "-", "weaker_match", ".", "start", "(", ")", ")", "# Still no match, do not add tags for this author match.. dump", "# it into misc", "else", ":", "dump_in_misc", "=", "True", "add_to_misc", "=", "\"\"", "# If a semi-colon was found at the end of this author group, keep it in misc", "# so that it can be looked at for splitting heurisitics", "if", "len", "(", "output_line", ")", ">", "m", "[", "'end'", "]", ":", "if", "output_line", "[", "m", "[", "'end'", "]", "]", ".", "strip", "(", "\" ,.\"", ")", "==", "';'", ":", "add_to_misc", "=", "';'", "# Standardize eds. notation", "tmp_output_line", "=", "re", ".", "sub", "(", "re_ed_notation", ",", "'(ed.)'", ",", "output_line", "[", "start", ":", "end", "]", ",", "re", ".", "IGNORECASE", ")", "# Standardize et al. 
notation", "tmp_output_line", "=", "re", ".", "sub", "(", "re_etal", ",", "'et al.'", ",", "tmp_output_line", ",", "re", ".", "IGNORECASE", ")", "# Strip", "tmp_output_line", "=", "tmp_output_line", ".", "lstrip", "(", "'.'", ")", ".", "strip", "(", "\",:;- [](\"", ")", "if", "not", "tmp_output_line", ".", "endswith", "(", "'(ed.)'", ")", ":", "tmp_output_line", "=", "tmp_output_line", ".", "strip", "(", "')'", ")", "# ONLY wrap author data with tags IF there is no evidence that it is an", "# ed. author. (i.e. The author is not referred to as an editor)", "# Does this author group string have 'et al.'?", "if", "m", "[", "'etal'", "]", "and", "not", "(", "m", "[", "'ed_start'", "]", "or", "m", "[", "'ed_end'", "]", "or", "dump_in_misc", ")", ":", "output_line", "=", "output_line", "[", ":", "start", "]", "+", "\"<cds.AUTHetal>\"", "+", "tmp_output_line", "+", "CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL", "+", "add_to_misc", "+", "output_line", "[", "end", ":", "]", "elif", "not", "(", "m", "[", "'ed_start'", "]", "or", "m", "[", "'ed_end'", "]", "or", "dump_in_misc", ")", ":", "# Insert the std (standard) tag", "output_line", "=", "output_line", "[", ":", "start", "]", "+", "\"<cds.AUTHstnd>\"", "+", "tmp_output_line", "+", "CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND", "+", "add_to_misc", "+", "output_line", "[", "end", ":", "]", "# Apply the 'include in $h' method to author groups marked as", "# editors", "elif", "m", "[", "'ed_start'", "]", "or", "m", "[", "'ed_end'", "]", ":", "ed_notation", "=", "\" (eds.)\"", "# Standardize et al. notation", "tmp_output_line", "=", "re", ".", "sub", "(", "re_etal", ",", "'et al.'", ",", "m", "[", "'author_names'", "]", ",", "re", ".", "IGNORECASE", ")", "# remove any characters which denote this author group", "# to be editors, just take the", "# author names, and append '(ed.)'", "output_line", "=", "output_line", "[", ":", "start", "]", "+", "\"<cds.AUTHincl>\"", "+", "tmp_output_line", ".", "strip", "(", "\",:;- [](\"", ")", "+", "ed_notation", "+", "CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL", "+", "add_to_misc", "+", "output_line", "[", "end", ":", "]", "return", "output_line" ]
Given a reference, look for a group of author names, place tags around the author group, return the newly tagged line.
[ "Given", "a", "reference", "look", "for", "a", "group", "of", "author", "names", "place", "tags", "around", "the", "author", "group", "return", "the", "newly", "tagged", "line", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L884-L1028
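A usage sketch (the input reference is illustrative; whether a given string is tagged depends on the regexps returned by get_author_regexps). authors_kb is a sequence of (pattern, replacement) string pairs applied verbatim before matching; an empty list disables that pre-replacement step. Note that the three re.sub calls in the function are shown above with their stray fourth positional argument removed: re.IGNORECASE passed positionally is interpreted as the count argument, which silently capped substitutions, and the re_* patterns are precompiled:

    from refextract.references.tag import identify_and_tag_authors
    tagged = identify_and_tag_authors(u'J. Maldacena and E. Witten, ...', [])
    # Recognised author groups come back wrapped in <cds.AUTHstnd>,
    # <cds.AUTHetal> or <cds.AUTHincl> tags.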
inspirehep/refextract
refextract/references/tag.py
sum_2_dictionaries
def sum_2_dictionaries(dicta, dictb): """Given two dictionaries of totals, where each total refers to a key in the dictionary, add the totals. E.g.: dicta = { 'a' : 3, 'b' : 1 } dictb = { 'a' : 1, 'c' : 5 } dicta + dictb = { 'a' : 4, 'b' : 1, 'c' : 5 } @param dicta: (dictionary) @param dictb: (dictionary) @return: (dictionary) - the sum of the 2 dictionaries """ dict_out = dicta.copy() for key in dictb.keys(): if key in dict_out: # Add the sum for key in dictb to that of dict_out: dict_out[key] += dictb[key] else: # the key is not in the first dictionary - add it directly: dict_out[key] = dictb[key] return dict_out
python
def sum_2_dictionaries(dicta, dictb): dict_out = dicta.copy() for key in dictb.keys(): if key in dict_out: dict_out[key] += dictb[key] else: dict_out[key] = dictb[key] return dict_out
[ "def", "sum_2_dictionaries", "(", "dicta", ",", "dictb", ")", ":", "dict_out", "=", "dicta", ".", "copy", "(", ")", "for", "key", "in", "dictb", ".", "keys", "(", ")", ":", "if", "'key'", "in", "dict_out", ":", "# Add the sum for key in dictb to that of dict_out:", "dict_out", "[", "key", "]", "+=", "dictb", "[", "key", "]", "else", ":", "# the key is not in the first dictionary - add it directly:", "dict_out", "[", "key", "]", "=", "dictb", "[", "key", "]", "return", "dict_out" ]
Given two dictionaries of totals, where each total refers to a key in the dictionary, add the totals. E.g.: dicta = { 'a' : 3, 'b' : 1 } dictb = { 'a' : 1, 'c' : 5 } dicta + dictb = { 'a' : 4, 'b' : 1, 'c' : 5 } @param dicta: (dictionary) @param dictb: (dictionary) @return: (dictionary) - the sum of the 2 dictionaries
[ "Given", "two", "dictionaries", "of", "totals", "where", "each", "total", "refers", "to", "a", "key", "in", "the", "dictionary", "add", "the", "totals", ".", "E", ".", "g", ".", ":", "dicta", "=", "{", "a", ":", "3", "b", ":", "1", "}", "dictb", "=", "{", "a", ":", "1", "c", ":", "5", "}", "dicta", "+", "dictb", "=", "{", "a", ":", "4", "b", ":", "1", "c", ":", "5", "}" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1031-L1049
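With the membership test using the key variable (as fixed above; the original tested the literal string 'key', which overwrote totals instead of adding them), the docstring's own example holds:

    from refextract.references.tag import sum_2_dictionaries
    sum_2_dictionaries({'a': 3, 'b': 1}, {'a': 1, 'c': 5})
    # -> {'a': 4, 'b': 1, 'c': 5}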
inspirehep/refextract
refextract/references/tag.py
identify_ibids
def identify_ibids(line): """Find IBIDs within the line, record their position and length, and replace them with underscores. @param line: (string) the working reference line @return: (tuple) containing a dictionary and a string: Dictionary: matched IBID text: (Key: position of IBID in line; Value: matched IBID text) String: working line with matched IBIDs removed """ ibid_match_txt = {} # Record details of each matched ibid: for m_ibid in re_ibid.finditer(line): ibid_match_txt[m_ibid.start()] = m_ibid.group(0) # Replace matched text in line with underscores: line = line[0:m_ibid.start()] + \ "_" * len(m_ibid.group(0)) + \ line[m_ibid.end():] return ibid_match_txt, line
python
def identify_ibids(line): ibid_match_txt = {} for m_ibid in re_ibid.finditer(line): ibid_match_txt[m_ibid.start()] = m_ibid.group(0) line = line[0:m_ibid.start()] + \ "_" * len(m_ibid.group(0)) + \ line[m_ibid.end():] return ibid_match_txt, line
[ "def", "identify_ibids", "(", "line", ")", ":", "ibid_match_txt", "=", "{", "}", "# Record details of each matched ibid:", "for", "m_ibid", "in", "re_ibid", ".", "finditer", "(", "line", ")", ":", "ibid_match_txt", "[", "m_ibid", ".", "start", "(", ")", "]", "=", "m_ibid", ".", "group", "(", "0", ")", "# Replace matched text in line with underscores:", "line", "=", "line", "[", "0", ":", "m_ibid", ".", "start", "(", ")", "]", "+", "\"_\"", "*", "len", "(", "m_ibid", ".", "group", "(", "0", ")", ")", "+", "line", "[", "m_ibid", ".", "end", "(", ")", ":", "]", "return", "ibid_match_txt", ",", "line" ]
Find IBIDs within the line, record their position and length, and replace them with underscores. @param line: (string) the working reference line @return: (tuple) containing a dictionary and a string: Dictionary: matched IBID text: (Key: position of IBID in line; Value: matched IBID text) String: working line with matched IBIDs removed
[ "Find", "IBIDs", "within", "the", "line", "record", "their", "position", "and", "length", "and", "replace", "them", "with", "underscores", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1052-L1070
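A usage sketch (the input is hypothetical; what counts as an IBID is decided by the module-level re_ibid pattern):

    from refextract.references.tag import identify_ibids
    matches, working_line = identify_ibids(u'IBID. 123')
    # If re_ibid matches the leading u'IBID.', this returns
    # matches == {0: u'IBID.'} and working_line == u'_____ 123'.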
inspirehep/refextract
refextract/references/tag.py
find_numeration
def find_numeration(line): """Given a reference line, attempt to locate instances of citation 'numeration' in the line. @param line: (string) the reference line. @return: (dictionary) the recognised numeration information ('year', 'series', 'volume', 'page', 'page_end') plus the length of the match ('len'), or None if no numeration pattern matched the line. """ patterns = ( # vol,page,year re_numeration_vol_page_yr, re_numeration_vol_nucphys_page_yr, re_numeration_nucphys_vol_page_yr, # With sub volume re_numeration_vol_subvol_nucphys_yr_page, re_numeration_vol_nucphys_yr_subvol_page, # vol,year,page re_numeration_vol_yr_page, re_numeration_nucphys_vol_yr_page, re_numeration_vol_nucphys_series_yr_page, # vol,page,year re_numeration_vol_series_nucphys_page_yr, re_numeration_vol_nucphys_series_page_yr, # year,vol,page re_numeration_yr_vol_page, ) for pattern in patterns: match = pattern.match(line) if match: info = match.groupdict() series = info.get('series', None) if not series: series = extract_series_from_volume(info['vol']) if not info['vol_num']: info['vol_num'] = info['vol_num_alt'] if not info['vol_num']: info['vol_num'] = info['vol_num_alt2'] return {'year': info.get('year', None), 'series': series, 'volume': info['vol_num'], 'page': info['page'] or info['jinst_page'], 'page_end': info['page_end'], 'len': match.end()} return None
python
def find_numeration(line): patterns = ( re_numeration_vol_page_yr, re_numeration_vol_nucphys_page_yr, re_numeration_nucphys_vol_page_yr, re_numeration_vol_subvol_nucphys_yr_page, re_numeration_vol_nucphys_yr_subvol_page, re_numeration_vol_yr_page, re_numeration_nucphys_vol_yr_page, re_numeration_vol_nucphys_series_yr_page, re_numeration_vol_series_nucphys_page_yr, re_numeration_vol_nucphys_series_page_yr, re_numeration_yr_vol_page, ) for pattern in patterns: match = pattern.match(line) if match: info = match.groupdict() series = info.get('series', None) if not series: series = extract_series_from_volume(info['vol']) if not info['vol_num']: info['vol_num'] = info['vol_num_alt'] if not info['vol_num']: info['vol_num'] = info['vol_num_alt2'] return {'year': info.get('year', None), 'series': series, 'volume': info['vol_num'], 'page': info['page'] or info['jinst_page'], 'page_end': info['page_end'], 'len': match.end()} return None
[ "def", "find_numeration", "(", "line", ")", ":", "patterns", "=", "(", "# vol,page,year", "re_numeration_vol_page_yr", ",", "re_numeration_vol_nucphys_page_yr", ",", "re_numeration_nucphys_vol_page_yr", ",", "# With sub volume", "re_numeration_vol_subvol_nucphys_yr_page", ",", "re_numeration_vol_nucphys_yr_subvol_page", ",", "# vol,year,page", "re_numeration_vol_yr_page", ",", "re_numeration_nucphys_vol_yr_page", ",", "re_numeration_vol_nucphys_series_yr_page", ",", "# vol,page,year", "re_numeration_vol_series_nucphys_page_yr", ",", "re_numeration_vol_nucphys_series_page_yr", ",", "# year,vol,page", "re_numeration_yr_vol_page", ",", ")", "for", "pattern", "in", "patterns", ":", "match", "=", "pattern", ".", "match", "(", "line", ")", "if", "match", ":", "info", "=", "match", ".", "groupdict", "(", ")", "series", "=", "info", ".", "get", "(", "'series'", ",", "None", ")", "if", "not", "series", ":", "series", "=", "extract_series_from_volume", "(", "info", "[", "'vol'", "]", ")", "if", "not", "info", "[", "'vol_num'", "]", ":", "info", "[", "'vol_num'", "]", "=", "info", "[", "'vol_num_alt'", "]", "if", "not", "info", "[", "'vol_num'", "]", ":", "info", "[", "'vol_num'", "]", "=", "info", "[", "'vol_num_alt2'", "]", "return", "{", "'year'", ":", "info", ".", "get", "(", "'year'", ",", "None", ")", ",", "'series'", ":", "series", ",", "'volume'", ":", "info", "[", "'vol_num'", "]", ",", "'page'", ":", "info", "[", "'page'", "]", "or", "info", "[", "'jinst_page'", "]", ",", "'page_end'", ":", "info", "[", "'page_end'", "]", ",", "'len'", ":", "match", ".", "end", "(", ")", "}", "return", "None" ]
Given a reference line, attempt to locate instances of citation 'numeration' in the line. @param line: (string) the reference line. @return: (dictionary) the recognised numeration information ('year', 'series', 'volume', 'page', 'page_end') plus the length of the match ('len'), or None if no numeration pattern matched the line.
[ "Given", "a", "reference", "line", "attempt", "to", "locate", "instances", "of", "citation", "numeration", "in", "the", "line", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1083-L1127
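A usage sketch (the numeration string is hypothetical; whether it matches depends on the module's numeration regexps):

    from refextract.references.tag import find_numeration
    info = find_numeration(u': 60 (1999) 1234')
    # On a match, info is a dict with 'year', 'series', 'volume', 'page',
    # 'page_end' and 'len' (the matched length); otherwise info is None.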
inspirehep/refextract
refextract/references/tag.py
identify_journals
def identify_journals(line, kb_journals): """Attempt to identify all periodical titles in a reference line. Titles will be identified, their information (location in line, length in line, and non-standardised version) will be recorded, and they will be replaced in the working line by underscores. @param line: (string) - the working reference line. @param kb_journals: (tuple) - the journals knowledge base; its first element is a dictionary of the regexp patterns used to search for a non-standard TITLE in the working reference line, keyed by the TITLE string itself, and its third element is the list of non-standard periodical TITLEs to be searched for in the line, already ordered so as to force the order of searching. @return: (tuple) containing 3 elements: + (dictionary) - the text actually matched for each title, keyed by the index within the line at which it was matched. + (string) - the working line, with the titles removed from it and replaced by underscores. + (dictionary) - the totals for each bad-title found in the line. """ periodical_title_search_kb = kb_journals[0] periodical_title_search_keys = kb_journals[2] title_matches = {} # the text matched at the given line # location (i.e. the title itself) titles_count = {} # sum totals of each 'bad title found in # line. # Begin searching: for title in periodical_title_search_keys: # search for all instances of the current periodical title # in the line: # for each matched periodical title: for title_match in periodical_title_search_kb[title].finditer(line): if title not in titles_count: # Add this title into the titles_count dictionary: titles_count[title] = 1 else: # Add 1 to the count for the given title: titles_count[title] += 1 # record the details of this title match: # record the match length: title_matches[title_match.start()] = title len_to_replace = len(title) # replace the matched title text in the line it n * '_', # where n is the length of the matched title: line = u"".join((line[:title_match.start()], u"_" * len_to_replace, line[title_match.start() + len_to_replace:])) # return recorded information about matched periodical titles, # along with the newly changed working line: return title_matches, line, titles_count
python
def identify_journals(line, kb_journals): periodical_title_search_kb = kb_journals[0] periodical_title_search_keys = kb_journals[2] title_matches = {} titles_count = {} for title in periodical_title_search_keys: for title_match in periodical_title_search_kb[title].finditer(line): if title not in titles_count: titles_count[title] = 1 else: titles_count[title] += 1 title_matches[title_match.start()] = title len_to_replace = len(title) line = u"".join((line[:title_match.start()], u"_" * len_to_replace, line[title_match.start() + len_to_replace:])) return title_matches, line, titles_count
[ "def", "identify_journals", "(", "line", ",", "kb_journals", ")", ":", "periodical_title_search_kb", "=", "kb_journals", "[", "0", "]", "periodical_title_search_keys", "=", "kb_journals", "[", "2", "]", "title_matches", "=", "{", "}", "# the text matched at the given line", "# location (i.e. the title itself)", "titles_count", "=", "{", "}", "# sum totals of each 'bad title found in", "# line.", "# Begin searching:", "for", "title", "in", "periodical_title_search_keys", ":", "# search for all instances of the current periodical title", "# in the line:", "# for each matched periodical title:", "for", "title_match", "in", "periodical_title_search_kb", "[", "title", "]", ".", "finditer", "(", "line", ")", ":", "if", "title", "not", "in", "titles_count", ":", "# Add this title into the titles_count dictionary:", "titles_count", "[", "title", "]", "=", "1", "else", ":", "# Add 1 to the count for the given title:", "titles_count", "[", "title", "]", "+=", "1", "# record the details of this title match:", "# record the match length:", "title_matches", "[", "title_match", ".", "start", "(", ")", "]", "=", "title", "len_to_replace", "=", "len", "(", "title", ")", "# replace the matched title text in the line it n * '_',", "# where n is the length of the matched title:", "line", "=", "u\"\"", ".", "join", "(", "(", "line", "[", ":", "title_match", ".", "start", "(", ")", "]", ",", "u\"_\"", "*", "len_to_replace", ",", "line", "[", "title_match", ".", "start", "(", ")", "+", "len_to_replace", ":", "]", ")", ")", "# return recorded information about matched periodical titles,", "# along with the newly changed working line:", "return", "title_matches", ",", "line", ",", "titles_count" ]
Attempt to identify all periodical titles in a reference line. Titles will be identified, their information (location in line, length in line, and non-standardised version) will be recorded, and they will be replaced in the working line by underscores. @param line: (string) - the working reference line. @param kb_journals: (tuple) - the journal knowledge base: element 0 is a dictionary of the regexp patterns used to search for a non-standard TITLE in the working reference line, keyed by the TITLE string itself; element 2 is the list of non-standard periodical TITLEs to be searched for in the line, already ordered so as to force the order of searching. @return: (tuple) containing 3 elements: + (dictionary) - the title text matched at each given index within the line. + (string) - the working line, with the titles removed from it and replaced by underscores. + (dictionary) - the totals for each bad-title found in the line.
[ "Attempt", "to", "identify", "all", "periodical", "titles", "in", "a", "reference", "line", ".", "Titles", "will", "be", "identified", "their", "information", "(", "location", "in", "line", "length", "in", "line", "and", "non", "-", "standardised", "version", ")", "will", "be", "recorded", "and", "they", "will", "be", "replaced", "in", "the", "working", "line", "by", "underscores", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1130-L1192
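A minimal usage sketch for identify_journals (Python 2, like the codebase itself). The real kb_journals tuple is built elsewhere in refextract; this hand-rolled stand-in and the sample line are illustrative assumptions that only fill the two slots the function reads: element 0 (title -> compiled pattern) and element 2 (ordered title list).

import re
from refextract.references.tag import identify_journals  # module path as given in this record

kb_stub = (
    {u'Phys. Rev.': re.compile(ur'Phys\. Rev\.', re.UNICODE)},  # [0] patterns keyed by title
    None,                                                       # [1] unused by this function
    [u'Phys. Rev.'],                                            # [2] forced search order
)
matches, working_line, counts = identify_journals(u'A. Author, Phys. Rev. D50 (1994) 123', kb_stub)
# matches      -> {11: u'Phys. Rev.'}   (matched title keyed by its start index)
# working_line -> u'A. Author, __________ D50 (1994) 123'
# counts       -> {u'Phys. Rev.': 1}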
inspirehep/refextract
refextract/references/tag.py
identify_report_numbers
def identify_report_numbers(line, kb_reports): """Attempt to identify all preprint report numbers in a reference line. Report numbers will be identified, their information (location in line, length in line, and standardised replacement version) will be recorded, and they will be replaced in the working-line by underscores. @param line: (string) - the working reference line. @param kb_reports: (tuple) - contains two dictionaries: the first holds the regexp patterns used to identify preprint report numbers; the second holds the standardised 'category' of a given preprint report number. @return: (tuple) - 3 elements: * a dictionary containing the lengths in the line of the matched preprint report numbers, keyed by the index at which each match was found in the line. * a dictionary containing the replacement strings (standardised versions) of preprint report numbers that were matched in the line. * a string, that is the new version of the working reference line, in which any matched preprint report numbers have been replaced by underscores. Returned tuple is therefore in the following order: (matched-reportnum-lengths, matched-reportnum-replacements, working-line) """ def _by_len(a, b): """Comparison function used to sort a list by the length of the strings in each element of the list. """ if len(a[1]) < len(b[1]): return 1 elif len(a[1]) == len(b[1]): return 0 else: return -1 repnum_matches_matchlen = {} # info about lengths of report numbers # matched at given locations in line repnum_matches_repl_str = {} # standardised report numbers matched # at given locations in line repnum_search_kb, repnum_standardised_categs = kb_reports repnum_categs = repnum_standardised_categs.keys() repnum_categs.sort(_by_len) # Handle CERN/LHCC/98-013 line = line.replace('/', ' ') # try to match preprint report numbers in the line: for categ in repnum_categs: # search for all instances of the current report # numbering style in the line: repnum_matches_iter = repnum_search_kb[categ].finditer(line) # for each matched report number of this style: for repnum_match in repnum_matches_iter: # Get the matched text for the numeration part of the # preprint report number: numeration_match = repnum_match.group('numn') # clean/standardise this numeration text: numeration_match = numeration_match.replace(" ", "-") numeration_match = re_multiple_hyphens.sub("-", numeration_match) numeration_match = numeration_match.replace("/-", "/") numeration_match = numeration_match.replace("-/", "/") numeration_match = numeration_match.replace("-/-", "/") # replace the found preprint report number in the # string with underscores # (this will replace chars in the lower-cased line): line = line[0:repnum_match.start(1)] \ + "_" * len(repnum_match.group(1)) + line[repnum_match.end(1):] # record the information about the matched preprint report number: # total length in the line of the matched preprint report number: repnum_matches_matchlen[repnum_match.start(1)] = \ len(repnum_match.group(1)) # standardised replacement for the matched preprint report number: repnum_matches_repl_str[repnum_match.start(1)] = \ repnum_standardised_categs[categ] \ + numeration_match # return recorded information about matched report numbers, along with # the newly changed working line: return repnum_matches_matchlen, repnum_matches_repl_str, line
python
def identify_report_numbers(line, kb_reports): def _by_len(a, b): if len(a[1]) < len(b[1]): return 1 elif len(a[1]) == len(b[1]): return 0 else: return -1 repnum_matches_matchlen = {} repnum_matches_repl_str = {} repnum_search_kb, repnum_standardised_categs = kb_reports repnum_categs = repnum_standardised_categs.keys() repnum_categs.sort(_by_len) line = line.replace('/', ' ') for categ in repnum_categs: repnum_matches_iter = repnum_search_kb[categ].finditer(line) for repnum_match in repnum_matches_iter: numeration_match = repnum_match.group('numn') numeration_match = numeration_match.replace(" ", "-") numeration_match = re_multiple_hyphens.sub("-", numeration_match) numeration_match = numeration_match.replace("/-", "/") numeration_match = numeration_match.replace("-/", "/") numeration_match = numeration_match.replace("-/-", "/") line = line[0:repnum_match.start(1)] \ + "_" * len(repnum_match.group(1)) + line[repnum_match.end(1):] repnum_matches_matchlen[repnum_match.start(1)] = \ len(repnum_match.group(1)) repnum_matches_repl_str[repnum_match.start(1)] = \ repnum_standardised_categs[categ] \ + numeration_match return repnum_matches_matchlen, repnum_matches_repl_str, line
[ "def", "identify_report_numbers", "(", "line", ",", "kb_reports", ")", ":", "def", "_by_len", "(", "a", ",", "b", ")", ":", "\"\"\"Comparison function used to sort a list by the length of the\n strings in each element of the list.\n \"\"\"", "if", "len", "(", "a", "[", "1", "]", ")", "<", "len", "(", "b", "[", "1", "]", ")", ":", "return", "1", "elif", "len", "(", "a", "[", "1", "]", ")", "==", "len", "(", "b", "[", "1", "]", ")", ":", "return", "0", "else", ":", "return", "-", "1", "repnum_matches_matchlen", "=", "{", "}", "# info about lengths of report numbers", "# matched at given locations in line", "repnum_matches_repl_str", "=", "{", "}", "# standardised report numbers matched", "# at given locations in line", "repnum_search_kb", ",", "repnum_standardised_categs", "=", "kb_reports", "repnum_categs", "=", "repnum_standardised_categs", ".", "keys", "(", ")", "repnum_categs", ".", "sort", "(", "_by_len", ")", "# Handle CERN/LHCC/98-013", "line", "=", "line", ".", "replace", "(", "'/'", ",", "' '", ")", "# try to match preprint report numbers in the line:", "for", "categ", "in", "repnum_categs", ":", "# search for all instances of the current report", "# numbering style in the line:", "repnum_matches_iter", "=", "repnum_search_kb", "[", "categ", "]", ".", "finditer", "(", "line", ")", "# for each matched report number of this style:", "for", "repnum_match", "in", "repnum_matches_iter", ":", "# Get the matched text for the numeration part of the", "# preprint report number:", "numeration_match", "=", "repnum_match", ".", "group", "(", "'numn'", ")", "# clean/standardise this numeration text:", "numeration_match", "=", "numeration_match", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", "numeration_match", "=", "re_multiple_hyphens", ".", "sub", "(", "\"-\"", ",", "numeration_match", ")", "numeration_match", "=", "numeration_match", ".", "replace", "(", "\"/-\"", ",", "\"/\"", ")", "numeration_match", "=", "numeration_match", ".", "replace", "(", "\"-/\"", ",", "\"/\"", ")", "numeration_match", "=", "numeration_match", ".", "replace", "(", "\"-/-\"", ",", "\"/\"", ")", "# replace the found preprint report number in the", "# string with underscores", "# (this will replace chars in the lower-cased line):", "line", "=", "line", "[", "0", ":", "repnum_match", ".", "start", "(", "1", ")", "]", "+", "\"_\"", "*", "len", "(", "repnum_match", ".", "group", "(", "1", ")", ")", "+", "line", "[", "repnum_match", ".", "end", "(", "1", ")", ":", "]", "# record the information about the matched preprint report number:", "# total length in the line of the matched preprint report number:", "repnum_matches_matchlen", "[", "repnum_match", ".", "start", "(", "1", ")", "]", "=", "len", "(", "repnum_match", ".", "group", "(", "1", ")", ")", "# standardised replacement for the matched preprint report number:", "repnum_matches_repl_str", "[", "repnum_match", ".", "start", "(", "1", ")", "]", "=", "repnum_standardised_categs", "[", "categ", "]", "+", "numeration_match", "# return recorded information about matched report numbers, along with", "# the newly changed working line:", "return", "repnum_matches_matchlen", ",", "repnum_matches_repl_str", ",", "line" ]
Attempt to identify all preprint report numbers in a reference line. Report numbers will be identified, their information (location in line, length in line, and standardised replacement version) will be recorded, and they will be replaced in the working-line by underscores. @param line: (string) - the working reference line. @param kb_reports: (tuple) - contains two dictionaries: the first holds the regexp patterns used to identify preprint report numbers; the second holds the standardised 'category' of a given preprint report number. @return: (tuple) - 3 elements: * a dictionary containing the lengths in the line of the matched preprint report numbers, keyed by the index at which each match was found in the line. * a dictionary containing the replacement strings (standardised versions) of preprint report numbers that were matched in the line. * a string, that is the new version of the working reference line, in which any matched preprint report numbers have been replaced by underscores. Returned tuple is therefore in the following order: (matched-reportnum-lengths, matched-reportnum-replacements, working-line)
[ "Attempt", "to", "identify", "all", "preprint", "report", "numbers", "in", "a", "reference", "line", ".", "Report", "numbers", "will", "be", "identified", "their", "information", "(", "location", "in", "line", "length", "in", "line", "and", "standardised", "replacement", "version", ")", "will", "be", "recorded", "and", "they", "will", "be", "replaced", "in", "the", "working", "-", "line", "by", "underscores", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1195-L1280
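A hedged sketch of identify_report_numbers with a single hypothetical category, which sidesteps the key-ordering step. The pattern contract inferred from the code: group 1 must span the whole report number and the named group 'numn' its numeration. The category name, pattern, and sample line are invented for illustration.

import re
from refextract.references.tag import identify_report_numbers

search_kb = {u'CERN-LHCC': re.compile(ur'((?:CERN[ -]LHCC[ -])(?P<numn>\d{2}[ -]\d+))', re.UNICODE)}
categs = {u'CERN-LHCC': u'CERN-LHCC-'}
lens, repls, working = identify_report_numbers(u'see CERN/LHCC/98-013', (search_kb, categs))
# '/' is first normalised to ' ', so the pattern sees 'CERN LHCC 98-013';
# lens    -> {4: 16}                    (match length keyed by start index)
# repls   -> {4: u'CERN-LHCC-98-013'}   (standardised category + cleaned numeration)
# working -> u'see ________________'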
inspirehep/refextract
refextract/references/tag.py
identify_and_tag_URLs
def identify_and_tag_URLs(line): """Given a reference line, identify URLs in the line, record the information about them, and replace them with a "<cds.URL />" tag. URLs are identified in 2 forms: + Raw: http://invenio-software.org/ + HTML marked-up: <a href="http://invenio-software.org/">CERN Document Server Software Consortium</a> These URLs are considered to have 2 components: The URL itself (url string); and the URL description. The description is effectively the text used for the created Hyperlink when the URL is marked-up in HTML. When an HTML marked-up URL has been recognised, the text between the anchor tags is therefore taken as the URL description. In the case of a raw URL recognition, however, the URL itself will also be used as the URL description. For example, in the following reference line: [1] See <a href="http://invenio-software.org/">CERN Document Server Software Consortium</a>. ...the URL string will be "http://invenio-software.org/" and the URL description will be "CERN Document Server Software Consortium". The line returned from this function will be: [1] See <cds.URL /> In the following line, however: [1] See http //invenio-software.org/ for more details. ...the URL string will be "http://invenio-software.org/" and the URL description will also be "http://invenio-software.org/". The line returned will be: [1] See <cds.URL /> for more details. @param line: (string) the reference line in which to search for URLs. @return: (tuple) - containing 2 items: + the line after URLs have been recognised and removed; + a list of 2-item tuples where each tuple represents a recognised URL and its description: [(url, url-description), (url, url-description), ... ] @Exceptions raised: + an AssertionError if there is a problem with the number of URLs recognised (this should not happen.) """ # Take a copy of the line: line_pre_url_check = line # Dictionaries to record details of matched URLs: found_url_full_matchlen = {} found_url_urlstring = {} found_url_urldescr = {} # List to contain details of all matched URLs: identified_urls = [] # Attempt to identify and tag all HTML-MARKED-UP URLs in the line: m_tagged_url_iter = re_html_tagged_url.finditer(line) for m_tagged_url in m_tagged_url_iter: startposn = m_tagged_url.start() # start position of matched URL endposn = m_tagged_url.end() # end position of matched URL matchlen = len(m_tagged_url.group(0)) # total length of URL match found_url_full_matchlen[startposn] = matchlen found_url_urlstring[startposn] = m_tagged_url.group('url') found_url_urldescr[startposn] = m_tagged_url.group('desc') # temporarily replace the URL match with underscores so that # it won't be re-found line = line[0:startposn] + u"_" * matchlen + line[endposn:] # Attempt to identify and tag all RAW (i.e. not # HTML-marked-up) URLs in the line: m_raw_url_iter = re_raw_url.finditer(line) for m_raw_url in m_raw_url_iter: startposn = m_raw_url.start() # start position of matched URL endposn = m_raw_url.end() # end position of matched URL matchlen = len(m_raw_url.group(0)) # total length of URL match matched_url = m_raw_url.group('url') if len(matched_url) > 0 and matched_url[-1] in (".", ","): # Strip the full-stop or comma from the end of the url: matched_url = matched_url[:-1] found_url_full_matchlen[startposn] = matchlen found_url_urlstring[startposn] = matched_url found_url_urldescr[startposn] = matched_url # temporarily replace the URL match with underscores # so that it won't be re-found line = line[0:startposn] + u"_" * matchlen + line[endposn:] # Now that all URLs have been identified, insert them # back into the line, tagged: found_url_positions = found_url_urlstring.keys() found_url_positions.sort() found_url_positions.reverse() for url_position in found_url_positions: line = line[0:url_position] + "<cds.URL />" \ + line[url_position + found_url_full_matchlen[url_position]:] # The line has been rebuilt. Now record the information about the # matched URLs: found_url_positions = found_url_urlstring.keys() found_url_positions.sort() for url_position in found_url_positions: identified_urls.append((found_url_urlstring[url_position], found_url_urldescr[url_position])) # Somehow the number of URLs found doesn't match the number of # URLs recorded in "identified_urls". Raise an AssertionError. msg = """Error: The number of URLs found in the reference line """ \ """does not match the number of URLs recorded in the """ \ """list of identified URLs!\nLine pre-URL checking: %s\n""" \ """Line post-URL checking: %s\n""" \ % (line_pre_url_check, line) assert len(identified_urls) == len(found_url_positions), msg # return the line containing the tagged URLs: return line, identified_urls
python
def identify_and_tag_URLs(line): line_pre_url_check = line found_url_full_matchlen = {} found_url_urlstring = {} found_url_urldescr = {} identified_urls = [] m_tagged_url_iter = re_html_tagged_url.finditer(line) for m_tagged_url in m_tagged_url_iter: startposn = m_tagged_url.start() endposn = m_tagged_url.end() matchlen = len(m_tagged_url.group(0)) found_url_full_matchlen[startposn] = matchlen found_url_urlstring[startposn] = m_tagged_url.group('url') found_url_urldescr[startposn] = m_tagged_url.group('desc') line = line[0:startposn] + u"_" * matchlen + line[endposn:] m_raw_url_iter = re_raw_url.finditer(line) for m_raw_url in m_raw_url_iter: startposn = m_raw_url.start() endposn = m_raw_url.end() matchlen = len(m_raw_url.group(0)) matched_url = m_raw_url.group('url') if len(matched_url) > 0 and matched_url[-1] in (".", ","): matched_url = matched_url[:-1] found_url_full_matchlen[startposn] = matchlen found_url_urlstring[startposn] = matched_url found_url_urldescr[startposn] = matched_url line = line[0:startposn] + u"_" * matchlen + line[endposn:] found_url_positions = found_url_urlstring.keys() found_url_positions.sort() found_url_positions.reverse() for url_position in found_url_positions: line = line[0:url_position] + "<cds.URL />" \ + line[url_position + found_url_full_matchlen[url_position]:] found_url_positions = found_url_urlstring.keys() found_url_positions.sort() for url_position in found_url_positions: identified_urls.append((found_url_urlstring[url_position], found_url_urldescr[url_position])) msg = """Error: The number of URLs found in the reference line """ \ """does not match the number of URLs recorded in the """ \ """list of identified URLs!\nLine pre-URL checking: %s\n""" \ """Line post-URL checking: %s\n""" \ % (line_pre_url_check, line) assert len(identified_urls) == len(found_url_positions), msg return line, identified_urls
[ "def", "identify_and_tag_URLs", "(", "line", ")", ":", "# Take a copy of the line:", "line_pre_url_check", "=", "line", "# Dictionaries to record details of matched URLs:", "found_url_full_matchlen", "=", "{", "}", "found_url_urlstring", "=", "{", "}", "found_url_urldescr", "=", "{", "}", "# List to contain details of all matched URLs:", "identified_urls", "=", "[", "]", "# Attempt to identify and tag all HTML-MARKED-UP URLs in the line:", "m_tagged_url_iter", "=", "re_html_tagged_url", ".", "finditer", "(", "line", ")", "for", "m_tagged_url", "in", "m_tagged_url_iter", ":", "startposn", "=", "m_tagged_url", ".", "start", "(", ")", "# start position of matched URL", "endposn", "=", "m_tagged_url", ".", "end", "(", ")", "# end position of matched URL", "matchlen", "=", "len", "(", "m_tagged_url", ".", "group", "(", "0", ")", ")", "# total length of URL match", "found_url_full_matchlen", "[", "startposn", "]", "=", "matchlen", "found_url_urlstring", "[", "startposn", "]", "=", "m_tagged_url", ".", "group", "(", "'url'", ")", "found_url_urldescr", "[", "startposn", "]", "=", "m_tagged_url", ".", "group", "(", "'desc'", ")", "# temporarily replace the URL match with underscores so that", "# it won't be re-found", "line", "=", "line", "[", "0", ":", "startposn", "]", "+", "u\"_\"", "*", "matchlen", "+", "line", "[", "endposn", ":", "]", "# Attempt to identify and tag all RAW (i.e. not", "# HTML-marked-up) URLs in the line:", "m_raw_url_iter", "=", "re_raw_url", ".", "finditer", "(", "line", ")", "for", "m_raw_url", "in", "m_raw_url_iter", ":", "startposn", "=", "m_raw_url", ".", "start", "(", ")", "# start position of matched URL", "endposn", "=", "m_raw_url", ".", "end", "(", ")", "# end position of matched URL", "matchlen", "=", "len", "(", "m_raw_url", ".", "group", "(", "0", ")", ")", "# total length of URL match", "matched_url", "=", "m_raw_url", ".", "group", "(", "'url'", ")", "if", "len", "(", "matched_url", ")", ">", "0", "and", "matched_url", "[", "-", "1", "]", "in", "(", "\".\"", ",", "\",\"", ")", ":", "# Strip the full-stop or comma from the end of the url:", "matched_url", "=", "matched_url", "[", ":", "-", "1", "]", "found_url_full_matchlen", "[", "startposn", "]", "=", "matchlen", "found_url_urlstring", "[", "startposn", "]", "=", "matched_url", "found_url_urldescr", "[", "startposn", "]", "=", "matched_url", "# temporarily replace the URL match with underscores", "# so that it won't be re-found", "line", "=", "line", "[", "0", ":", "startposn", "]", "+", "u\"_\"", "*", "matchlen", "+", "line", "[", "endposn", ":", "]", "# Now that all URLs have been identified, insert them", "# back into the line, tagged:", "found_url_positions", "=", "found_url_urlstring", ".", "keys", "(", ")", "found_url_positions", ".", "sort", "(", ")", "found_url_positions", ".", "reverse", "(", ")", "for", "url_position", "in", "found_url_positions", ":", "line", "=", "line", "[", "0", ":", "url_position", "]", "+", "\"<cds.URL />\"", "+", "line", "[", "url_position", "+", "found_url_full_matchlen", "[", "url_position", "]", ":", "]", "# The line has been rebuilt. 
Now record the information about the", "# matched URLs:", "found_url_positions", "=", "found_url_urlstring", ".", "keys", "(", ")", "found_url_positions", ".", "sort", "(", ")", "for", "url_position", "in", "found_url_positions", ":", "identified_urls", ".", "append", "(", "(", "found_url_urlstring", "[", "url_position", "]", ",", "found_url_urldescr", "[", "url_position", "]", ")", ")", "# Somehow the number of URLs found doesn't match the number of", "# URLs recorded in \"identified_urls\". Raise an IndexError.", "msg", "=", "\"\"\"Error: The number of URLs found in the reference line \"\"\"", "\"\"\"does not match the number of URLs recorded in the \"\"\"", "\"\"\"list of identified URLs!\\nLine pre-URL checking: %s\\n\"\"\"", "\"\"\"Line post-URL checking: %s\\n\"\"\"", "%", "(", "line_pre_url_check", ",", "line", ")", "assert", "len", "(", "identified_urls", ")", "==", "len", "(", "found_url_positions", ")", ",", "msg", "# return the line containing the tagged URLs:", "return", "line", ",", "identified_urls" ]
Given a reference line, identify URLs in the line, record the information about them, and replace them with a "<cds.URL />" tag. URLs are identified in 2 forms: + Raw: http://invenio-software.org/ + HTML marked-up: <a href="http://invenio-software.org/">CERN Document Server Software Consortium</a> These URLs are considered to have 2 components: The URL itself (url string); and the URL description. The description is effectively the text used for the created Hyperlink when the URL is marked-up in HTML. When an HTML marked-up URL has been recognised, the text between the anchor tags is therefore taken as the URL description. In the case of a raw URL recognition, however, the URL itself will also be used as the URL description. For example, in the following reference line: [1] See <a href="http://invenio-software.org/">CERN Document Server Software Consortium</a>. ...the URL string will be "http://invenio-software.org/" and the URL description will be "CERN Document Server Software Consortium". The line returned from this function will be: [1] See <cds.URL /> In the following line, however: [1] See http //invenio-software.org/ for more details. ...the URL string will be "http://invenio-software.org/" and the URL description will also be "http://invenio-software.org/". The line returned will be: [1] See <cds.URL /> for more details. @param line: (string) the reference line in which to search for URLs. @return: (tuple) - containing 2 items: + the line after URLs have been recognised and removed; + a list of 2-item tuples where each tuple represents a recognised URL and its description: [(url, url-description), (url, url-description), ... ] @Exceptions raised: + an AssertionError if there is a problem with the number of URLs recognised (this should not happen.)
[ "Given", "a", "reference", "line", "identify", "URLs", "in", "the", "line", "record", "the", "information", "about", "them", "and", "replace", "them", "with", "a", "<cds", ".", "URL", "/", ">", "tag", ".", "URLs", "are", "identified", "in", "2", "forms", ":", "+", "Raw", ":", "http", ":", "//", "invenio", "-", "software", ".", "org", "/", "+", "HTML", "marked", "-", "up", ":", "<a", "href", "=", "http", ":", "//", "invenio", "-", "software", ".", "org", "/", ">", "CERN", "Document", "Server", "Software", "Consortium<", "/", "a", ">", "These", "URLs", "are", "considered", "to", "have", "2", "components", ":", "The", "URL", "itself", "(", "url", "string", ")", ";", "and", "the", "URL", "description", ".", "The", "description", "is", "effectively", "the", "text", "used", "for", "the", "created", "Hyperlink", "when", "the", "URL", "is", "marked", "-", "up", "in", "HTML", ".", "When", "an", "HTML", "marked", "-", "up", "URL", "has", "been", "recognised", "the", "text", "between", "the", "anchor", "tags", "is", "therefore", "taken", "as", "the", "URL", "description", ".", "In", "the", "case", "of", "a", "raw", "URL", "recognition", "however", "the", "URL", "itself", "will", "also", "be", "used", "as", "the", "URL", "description", ".", "For", "example", "in", "the", "following", "reference", "line", ":", "[", "1", "]", "See", "<a", "href", "=", "http", ":", "//", "invenio", "-", "software", ".", "org", "/", ">", "CERN", "Document", "Server", "Software", "Consortium<", "/", "a", ">", ".", "...", "the", "URL", "string", "will", "be", "http", ":", "//", "invenio", "-", "software", ".", "org", "/", "and", "the", "URL", "description", "will", "be", "CERN", "Document", "Server", "Software", "Consortium", ".", "The", "line", "returned", "from", "this", "function", "will", "be", ":", "[", "1", "]", "See", "<cds", ".", "URL", "/", ">", "In", "the", "following", "line", "however", ":", "[", "1", "]", "See", "http", "//", "invenio", "-", "software", ".", "org", "/", "for", "more", "details", ".", "...", "the", "URL", "string", "will", "be", "http", ":", "//", "invenio", "-", "software", ".", "org", "/", "and", "the", "URL", "description", "will", "also", "be", "http", ":", "//", "invenio", "-", "software", ".", "org", "/", ".", "The", "line", "returned", "will", "be", ":", "[", "1", "]", "See", "<cds", ".", "URL", "/", ">", "for", "more", "details", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1295-L1405
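This usage sketch mirrors the function's own docstring example, so the expected values below come straight from it; only the import path is taken from this record.

from refextract.references.tag import identify_and_tag_URLs

line = u'[1] See <a href="http://invenio-software.org/">CERN Document Server Software Consortium</a>.'
tagged, urls = identify_and_tag_URLs(line)
# tagged -> roughly u'[1] See <cds.URL />.'
# urls   -> [(u'http://invenio-software.org/',
#             u'CERN Document Server Software Consortium')]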
inspirehep/refextract
refextract/references/tag.py
identify_and_tag_DOI
def identify_and_tag_DOI(line): """takes a single citation line and attempts to locate any DOI references. DOI references are recognised in both http (url) format and also the standard DOI notation (DOI: ...) @param line: (string) the reference line in which to search for DOIs. @return: the tagged line and a list of DOI strings (if any) """ # Used to hold the DOI strings in the citation line doi_strings = [] # Run the DOI pattern on the line, returning the re.match objects matched_doi = re_doi.finditer(line) # For each match found in the line for match in reversed(list(matched_doi)): # Store the start and end position start = match.start() end = match.end() # Get the actual DOI string (remove the url part of the doi string) doi_phrase = match.group('doi') if '%2f' in doi_phrase.lower(): doi_phrase = unquote(doi_phrase) # Replace the entire matched doi with a tag line = line[0:start] + "<cds.DOI />" + line[end:] # Add the single DOI string to the list of DOI strings doi_strings.append(doi_phrase) doi_strings.reverse() return line, doi_strings
python
def identify_and_tag_DOI(line): doi_strings = [] matched_doi = re_doi.finditer(line) for match in reversed(list(matched_doi)): start = match.start() end = match.end() doi_phrase = match.group('doi') if '%2f' in doi_phrase.lower(): doi_phrase = unquote(doi_phrase) line = line[0:start] + "<cds.DOI />" + line[end:] doi_strings.append(doi_phrase) doi_strings.reverse() return line, doi_strings
[ "def", "identify_and_tag_DOI", "(", "line", ")", ":", "# Used to hold the DOI strings in the citation line", "doi_strings", "=", "[", "]", "# Run the DOI pattern on the line, returning the re.match objects", "matched_doi", "=", "re_doi", ".", "finditer", "(", "line", ")", "# For each match found in the line", "for", "match", "in", "reversed", "(", "list", "(", "matched_doi", ")", ")", ":", "# Store the start and end position", "start", "=", "match", ".", "start", "(", ")", "end", "=", "match", ".", "end", "(", ")", "# Get the actual DOI string (remove the url part of the doi string)", "doi_phrase", "=", "match", ".", "group", "(", "'doi'", ")", "if", "'%2f'", "in", "doi_phrase", ".", "lower", "(", ")", ":", "doi_phrase", "=", "unquote", "(", "doi_phrase", ")", "# Replace the entire matched doi with a tag", "line", "=", "line", "[", "0", ":", "start", "]", "+", "\"<cds.DOI />\"", "+", "line", "[", "end", ":", "]", "# Add the single DOI string to the list of DOI strings", "doi_strings", ".", "append", "(", "doi_phrase", ")", "doi_strings", ".", "reverse", "(", ")", "return", "line", ",", "doi_strings" ]
takes a single citation line and attempts to locate any DOI references. DOI references are recognised in both http (url) format and also the standard DOI notation (DOI: ...) @param line: (string) the reference line in which to search for DOIs. @return: the tagged line and a list of DOI strings (if any)
[ "takes", "a", "single", "citation", "line", "and", "attempts", "to", "locate", "any", "DOI", "references", ".", "DOI", "references", "are", "recognised", "in", "both", "http", "(", "url", ")", "format", "and", "also", "the", "standard", "DOI", "notation", "(", "DOI", ":", "...", ")" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1408-L1436
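A hedged sketch for identify_and_tag_DOI. Whether a given string matches depends on the module-level re_doi pattern, which is not shown in this section; the sample DOI is made up, and the exact span replaced by the tag follows that pattern.

from refextract.references.tag import identify_and_tag_DOI

tagged, dois = identify_and_tag_DOI(u'[1] X. Author, Some Journal, doi:10.1234/example.5678')
# Expected, if re_doi recognises the 'doi:' notation:
# tagged -> u'[1] X. Author, Some Journal, <cds.DOI />'
# dois   -> [u'10.1234/example.5678']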
inspirehep/refextract
refextract/references/engine.py
remove_reference_line_marker
def remove_reference_line_marker(line): """Trim a reference line's 'marker' from the beginning of the line. @param line: (string) - the reference line. @return: (tuple) containing two strings: + The reference line's marker (or, if there was not one, a 'space' character). + The reference line with its marker removed from the beginning. """ # Get patterns to identify reference-line marker patterns: marker_patterns = get_reference_line_numeration_marker_patterns() line = line.lstrip() marker_match = regex_match_list(line, marker_patterns) if marker_match is not None: # found a marker: marker_val = marker_match.group(u'mark') # trim the marker from the start of the line: line = line[marker_match.end():].lstrip() else: marker_val = u" " return (marker_val, line)
python
def remove_reference_line_marker(line): marker_patterns = get_reference_line_numeration_marker_patterns() line = line.lstrip() marker_match = regex_match_list(line, marker_patterns) if marker_match is not None: marker_val = marker_match.group(u'mark') line = line[marker_match.end():].lstrip() else: marker_val = u" " return (marker_val, line)
[ "def", "remove_reference_line_marker", "(", "line", ")", ":", "# Get patterns to identify reference-line marker patterns:", "marker_patterns", "=", "get_reference_line_numeration_marker_patterns", "(", ")", "line", "=", "line", ".", "lstrip", "(", ")", "marker_match", "=", "regex_match_list", "(", "line", ",", "marker_patterns", ")", "if", "marker_match", "is", "not", "None", ":", "# found a marker:", "marker_val", "=", "marker_match", ".", "group", "(", "u'mark'", ")", "# trim the marker from the start of the line:", "line", "=", "line", "[", "marker_match", ".", "end", "(", ")", ":", "]", ".", "lstrip", "(", ")", "else", ":", "marker_val", "=", "u\" \"", "return", "(", "marker_val", ",", "line", ")" ]
Trim a reference line's 'marker' from the beginning of the line. @param line: (string) - the reference line. @return: (tuple) containing two strings: + The reference line's marker (or, if there was not one, a 'space' character). + The reference line with its marker removed from the beginning.
[ "Trim", "a", "reference", "line", "s", "marker", "from", "the", "beginning", "of", "the", "line", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L92-L114
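A small sketch for remove_reference_line_marker. The exact marker text returned depends on the 'mark' group of the patterns produced by get_reference_line_numeration_marker_patterns(), so the first value below is indicative only.

from refextract.references.engine import remove_reference_line_marker

marker, rest = remove_reference_line_marker(u'[1] J. Maldacena, Adv. Theor. Math. Phys. 2 (1998) 231')
# marker -> whatever the pattern's 'mark' group captures (e.g. u'[1]' or u'1')
# rest   -> u'J. Maldacena, Adv. Theor. Math. Phys. 2 (1998) 231'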
inspirehep/refextract
refextract/references/engine.py
roman2arabic
def roman2arabic(num): """Convert numbers from roman to arabic This function expects a string like XXII and outputs an integer """ t = 0 p = 0 for r in num: n = 10 ** (205558 % ord(r) % 7) % 9995 t += n - 2 * p % n p = n return t
python
def roman2arabic(num): t = 0 p = 0 for r in num: n = 10 ** (205558 % ord(r) % 7) % 9995 t += n - 2 * p % n p = n return t
[ "def", "roman2arabic", "(", "num", ")", ":", "t", "=", "0", "p", "=", "0", "for", "r", "in", "num", ":", "n", "=", "10", "**", "(", "205558", "%", "ord", "(", "r", ")", "%", "7", ")", "%", "9995", "t", "+=", "n", "-", "2", "*", "p", "%", "n", "p", "=", "n", "return", "t" ]
Convert numbers from roman to arabic This function expects a string like XXII and outputs an integer
[ "Convert", "numbers", "from", "roman", "to", "arabic" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L117-L129
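The golfed digit map deserves a gloss: for each letter r, 205558 % ord(r) % 7 picks the right power-of-ten exponent, and the final % 9995 turns 10**4 into 5 and 10**5 into 50, yielding I=1, V=5, X=10, L=50, C=100, D=500, M=1000. The running sum t += n - 2 * p % n adds each value once and, when the previous symbol p was smaller (subtractive notation, as in IX), takes it back twice. A few checks, assuming the engine module is importable:

from refextract.references.engine import roman2arabic

assert roman2arabic('XXII') == 22
assert roman2arabic('IX') == 9         # X adds 10, minus 2 * the I already counted
assert roman2arabic('MCMXCIX') == 1999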
inspirehep/refextract
refextract/references/engine.py
format_volume
def format_volume(citation_elements): """format volume number (roman numbers to arabic) When the volume number is expressed in roman numbers (CXXII), they are converted to their equivalent in arabic numbers (122) """ re_roman = re.compile(re_roman_numbers + u'$', re.UNICODE) for el in citation_elements: if el['type'] == 'JOURNAL' and re_roman.match(el['volume']): el['volume'] = str(roman2arabic(el['volume'].upper())) return citation_elements
python
def format_volume(citation_elements): re_roman = re.compile(re_roman_numbers + u'$', re.UNICODE) for el in citation_elements: if el['type'] == 'JOURNAL' and re_roman.match(el['volume']): el['volume'] = str(roman2arabic(el['volume'].upper())) return citation_elements
[ "def", "format_volume", "(", "citation_elements", ")", ":", "re_roman", "=", "re", ".", "compile", "(", "re_roman_numbers", "+", "u'$'", ",", "re", ".", "UNICODE", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", "and", "re_roman", ".", "match", "(", "el", "[", "'volume'", "]", ")", ":", "el", "[", "'volume'", "]", "=", "str", "(", "roman2arabic", "(", "el", "[", "'volume'", "]", ".", "upper", "(", ")", ")", ")", "return", "citation_elements" ]
format volume number (roman numbers to arabic) When the volume number is expressed in roman numbers (CXXII), they are converted to their equivalent in arabic numbers (122)
[ "format", "volume", "number", "(", "roman", "numbers", "to", "arabic", ")" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L134-L144
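A quick check of format_volume; the element dict shape follows the JOURNAL elements seen throughout this module, and any extra keys are illustrative.

from refextract.references.engine import format_volume

el = {'type': 'JOURNAL', 'title': 'Phys.Rev.', 'volume': 'CXXII',
      'year': '1961', 'page': '345', 'misc_txt': ''}
format_volume([el])
# el['volume'] -> '122'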
inspirehep/refextract
refextract/references/engine.py
handle_special_journals
def handle_special_journals(citation_elements, kbs): """format special journals (like JHEP) volume number JHEP needs the volume number prefixed with the year e.g. JHEP 0301 instead of JHEP 01 """ for el in citation_elements: if el['type'] == 'JOURNAL' and el['title'] in kbs['special_journals']: if re.match(r'\d{1,2}$', el['volume']): # Sometimes the page is omitted and the year is written in its place # We can never be sure but it's very likely that page > 1900 is # actually a year, so we skip this reference if el['year'] == '' and re.match(r'(19|20)\d{2}$', el['page']): el['type'] = 'MISC' el['misc_txt'] = "%s,%s,%s" \ % (el['title'], el['volume'], el['page']) el['volume'] = el['year'][-2:] + '%02d' % int(el['volume']) if el['page'].isdigit(): # JHEP and JCAP have always pages 3 digits long el['page'] = '%03d' % int(el['page']) return citation_elements
python
def handle_special_journals(citation_elements, kbs): for el in citation_elements: if el['type'] == 'JOURNAL' and el['title'] in kbs['special_journals']: if re.match(r'\d{1,2}$', el['volume']): if el['year'] == '' and re.match(r'(19|20)\d{2}$', el['page']): el['type'] = 'MISC' el['misc_txt'] = "%s,%s,%s" \ % (el['title'], el['volume'], el['page']) el['volume'] = el['year'][-2:] + '%02d' % int(el['volume']) if el['page'].isdigit(): el['page'] = '%03d' % int(el['page']) return citation_elements
[ "def", "handle_special_journals", "(", "citation_elements", ",", "kbs", ")", ":", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", "and", "el", "[", "'title'", "]", "in", "kbs", "[", "'special_journals'", "]", ":", "if", "re", ".", "match", "(", "r'\\d{1,2}$'", ",", "el", "[", "'volume'", "]", ")", ":", "# Sometimes the page is omitted and the year is written in its place", "# We can never be sure but it's very likely that page > 1900 is", "# actually a year, so we skip this reference", "if", "el", "[", "'year'", "]", "==", "''", "and", "re", ".", "match", "(", "r'(19|20)\\d{2}$'", ",", "el", "[", "'page'", "]", ")", ":", "el", "[", "'type'", "]", "=", "'MISC'", "el", "[", "'misc_txt'", "]", "=", "\"%s,%s,%s\"", "%", "(", "el", "[", "'title'", "]", ",", "el", "[", "'volume'", "]", ",", "el", "[", "'page'", "]", ")", "el", "[", "'volume'", "]", "=", "el", "[", "'year'", "]", "[", "-", "2", ":", "]", "+", "'%02d'", "%", "int", "(", "el", "[", "'volume'", "]", ")", "if", "el", "[", "'page'", "]", ".", "isdigit", "(", ")", ":", "# JHEP and JCAP have always pages 3 digits long", "el", "[", "'page'", "]", "=", "'%03d'", "%", "int", "(", "el", "[", "'page'", "]", ")", "return", "citation_elements" ]
format special journals (like JHEP) volume number JHEP needs the volume number prefixed with the year e.g. JHEP 0301 instead of JHEP 01
[ "format", "special", "journals", "(", "like", "JHEP", ")", "volume", "number" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L147-L168
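A sketch of handle_special_journals; the kb here is a minimal stand-in (any container supporting 'in' works for kbs['special_journals']), and the element shape is assumed as above.

from refextract.references.engine import handle_special_journals

kbs = {'special_journals': set(['JHEP'])}
el = {'type': 'JOURNAL', 'title': 'JHEP', 'volume': '1',
      'year': '2003', 'page': '45', 'misc_txt': ''}
handle_special_journals([el], kbs)
# el['volume'] -> '0301'  (two-digit year suffix + zero-padded volume)
# el['page']   -> '045'   (JHEP/JCAP pages padded to three digits)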
inspirehep/refextract
refextract/references/engine.py
format_report_number
def format_report_number(citation_elements): """Format report numbers that are missing a dash e.g. CERN-LHCC2003-01 to CERN-LHCC-2003-01 """ re_report = re.compile(ur'^(?P<name>[A-Z-]+)(?P<nums>[\d-]+)$', re.UNICODE) for el in citation_elements: if el['type'] == 'REPORTNUMBER': m = re_report.match(el['report_num']) if m: name = m.group('name') if not name.endswith('-'): el['report_num'] = m.group('name') + '-' + m.group('nums') return citation_elements
python
def format_report_number(citation_elements): re_report = re.compile(ur'^(?P<name>[A-Z-]+)(?P<nums>[\d-]+)$', re.UNICODE) for el in citation_elements: if el['type'] == 'REPORTNUMBER': m = re_report.match(el['report_num']) if m: name = m.group('name') if not name.endswith('-'): el['report_num'] = m.group('name') + '-' + m.group('nums') return citation_elements
[ "def", "format_report_number", "(", "citation_elements", ")", ":", "re_report", "=", "re", ".", "compile", "(", "ur'^(?P<name>[A-Z-]+)(?P<nums>[\\d-]+)$'", ",", "re", ".", "UNICODE", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'REPORTNUMBER'", ":", "m", "=", "re_report", ".", "match", "(", "el", "[", "'report_num'", "]", ")", "if", "m", ":", "name", "=", "m", ".", "group", "(", "'name'", ")", "if", "not", "name", ".", "endswith", "(", "'-'", ")", ":", "el", "[", "'report_num'", "]", "=", "m", ".", "group", "(", "'name'", ")", "+", "'-'", "+", "m", ".", "group", "(", "'nums'", ")", "return", "citation_elements" ]
Format report numbers that are missing a dash e.g. CERN-LHCC2003-01 to CERN-LHCC-2003-01
[ "Format", "report", "numbers", "that", "are", "missing", "a", "dash" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L171-L184
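The regex contract here: a letters-and-dashes name followed directly by digits gets a dash inserted; a name that already ends in '-' is left alone, so the fix is idempotent.

from refextract.references.engine import format_report_number

el = {'type': 'REPORTNUMBER', 'report_num': 'CERN-LHCC2003-01', 'misc_txt': ''}
format_report_number([el])
# el['report_num'] -> 'CERN-LHCC-2003-01'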
inspirehep/refextract
refextract/references/engine.py
format_hep
def format_hep(citation_elements): """Format hep-th report numbers with a dash e.g. replaces hep-th-9711200 with hep-th/9711200 """ prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-', 'hep-ex-', 'hep-lat-', 'math-ph-') for el in citation_elements: if el['type'] == 'REPORTNUMBER': for p in prefixes: if el['report_num'].startswith(p): el['report_num'] = el['report_num'][:len(p) - 1] + '/' + \ el['report_num'][len(p):] return citation_elements
python
def format_hep(citation_elements): prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-', 'hep-ex-', 'hep-lat-', 'math-ph-') for el in citation_elements: if el['type'] == 'REPORTNUMBER': for p in prefixes: if el['report_num'].startswith(p): el['report_num'] = el['report_num'][:len(p) - 1] + '/' + \ el['report_num'][len(p):] return citation_elements
[ "def", "format_hep", "(", "citation_elements", ")", ":", "prefixes", "=", "(", "'astro-ph-'", ",", "'hep-th-'", ",", "'hep-ph-'", ",", "'hep-ex-'", ",", "'hep-lat-'", ",", "'math-ph-'", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'REPORTNUMBER'", ":", "for", "p", "in", "prefixes", ":", "if", "el", "[", "'report_num'", "]", ".", "startswith", "(", "p", ")", ":", "el", "[", "'report_num'", "]", "=", "el", "[", "'report_num'", "]", "[", ":", "len", "(", "p", ")", "-", "1", "]", "+", "'/'", "+", "el", "[", "'report_num'", "]", "[", "len", "(", "p", ")", ":", "]", "return", "citation_elements" ]
Format hep-th report numbers with a dash e.g. replaces hep-th-9711200 with hep-th/9711200
[ "Format", "hep", "-", "th", "report", "numbers", "with", "a", "dash" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L187-L200
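A one-shot check of format_hep's dash-to-slash rewrite for the listed arXiv-style prefixes:

from refextract.references.engine import format_hep

el = {'type': 'REPORTNUMBER', 'report_num': 'hep-th-9711200', 'misc_txt': ''}
format_hep([el])
# el['report_num'] -> 'hep-th/9711200'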
inspirehep/refextract
refextract/references/engine.py
format_author_ed
def format_author_ed(citation_elements): """Standardise to (ed.) and (eds.) e.g. Remove extra space in (ed. ) """ for el in citation_elements: if el['type'] == 'AUTH': el['auth_txt'] = el['auth_txt'].replace('(ed. )', '(ed.)') el['auth_txt'] = el['auth_txt'].replace('(eds. )', '(eds.)') return citation_elements
python
def format_author_ed(citation_elements): for el in citation_elements: if el['type'] == 'AUTH': el['auth_txt'] = el['auth_txt'].replace('(ed. )', '(ed.)') el['auth_txt'] = el['auth_txt'].replace('(eds. )', '(eds.)') return citation_elements
[ "def", "format_author_ed", "(", "citation_elements", ")", ":", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'AUTH'", ":", "el", "[", "'auth_txt'", "]", "=", "el", "[", "'auth_txt'", "]", ".", "replace", "(", "'(ed. )'", ",", "'(ed.)'", ")", "el", "[", "'auth_txt'", "]", "=", "el", "[", "'auth_txt'", "]", ".", "replace", "(", "'(eds. )'", ",", "'(eds.)'", ")", "return", "citation_elements" ]
Standardise to (ed.) and (eds.) e.g. Remove extra space in (ed. )
[ "Standardise", "to", "(", "ed", ".", ")", "and", "(", "eds", ".", ")" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L203-L212
inspirehep/refextract
refextract/references/engine.py
look_for_books
def look_for_books(citation_elements, kbs): """Look for books in our kb Create book tags by using the authors and the title to find books in our knowledge base """ title = None for el in citation_elements: if el['type'] == 'QUOTED': title = el break if title: normalized_title = title['title'].upper() if normalized_title in kbs['books']: line = kbs['books'][normalized_title] el = {'type': 'BOOK', 'misc_txt': '', 'authors': line[0], 'title': line[1], 'year': line[2].strip(';')} citation_elements.append(el) citation_elements.remove(title) return citation_elements
python
def look_for_books(citation_elements, kbs): title = None for el in citation_elements: if el['type'] == 'QUOTED': title = el break if title: normalized_title = title['title'].upper() if normalized_title in kbs['books']: line = kbs['books'][normalized_title] el = {'type': 'BOOK', 'misc_txt': '', 'authors': line[0], 'title': line[1], 'year': line[2].strip(';')} citation_elements.append(el) citation_elements.remove(title) return citation_elements
[ "def", "look_for_books", "(", "citation_elements", ",", "kbs", ")", ":", "title", "=", "None", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'QUOTED'", ":", "title", "=", "el", "break", "if", "title", ":", "normalized_title", "=", "title", "[", "'title'", "]", ".", "upper", "(", ")", "if", "normalized_title", "in", "kbs", "[", "'books'", "]", ":", "line", "=", "kbs", "[", "'books'", "]", "[", "normalized_title", "]", "el", "=", "{", "'type'", ":", "'BOOK'", ",", "'misc_txt'", ":", "''", ",", "'authors'", ":", "line", "[", "0", "]", ",", "'title'", ":", "line", "[", "1", "]", ",", "'year'", ":", "line", "[", "2", "]", ".", "strip", "(", "';'", ")", "}", "citation_elements", ".", "append", "(", "el", ")", "citation_elements", ".", "remove", "(", "title", ")", "return", "citation_elements" ]
Look for books in our kb Create book tags by using the authors and the title to find books in our knowledge base
[ "Look", "for", "books", "in", "our", "kb" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L215-L239
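A hedged sketch of look_for_books. The kbs['books'] row format (authors, canonical title, year) is inferred from the indexing in the function; the sample entry is invented for illustration.

from refextract.references.engine import look_for_books

kbs = {'books': {'THE LARGE SCALE STRUCTURE OF SPACE-TIME':
                 ('Hawking, S.W.; Ellis, G.F.R.',
                  'The Large Scale Structure of Space-Time', '1973;')}}
els = [{'type': 'QUOTED', 'title': 'The large scale structure of space-time',
        'misc_txt': ''}]
els = look_for_books(els, kbs)
# The QUOTED element is removed and a BOOK element is appended:
# {'type': 'BOOK', 'misc_txt': '', 'authors': ..., 'title': ..., 'year': '1973'}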
inspirehep/refextract
refextract/references/engine.py
split_volume_from_journal
def split_volume_from_journal(citation_elements): """Split volume from journal title We need this because sometimes the series letter is attached to the journal title instead of the volume; in those cases we move it from the title to the volume """ for el in citation_elements: if el['type'] == 'JOURNAL' and ';' in el['title']: el['title'], series = el['title'].rsplit(';', 1) el['volume'] = series + el['volume'] return citation_elements
python
def split_volume_from_journal(citation_elements): for el in citation_elements: if el['type'] == 'JOURNAL' and ';' in el['title']: el['title'], series = el['title'].rsplit(';', 1) el['volume'] = series + el['volume'] return citation_elements
[ "def", "split_volume_from_journal", "(", "citation_elements", ")", ":", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", "and", "';'", "in", "el", "[", "'title'", "]", ":", "el", "[", "'title'", "]", ",", "series", "=", "el", "[", "'title'", "]", ".", "rsplit", "(", "';'", ",", "1", ")", "el", "[", "'volume'", "]", "=", "series", "+", "el", "[", "'volume'", "]", "return", "citation_elements" ]
Split volume from journal title We need this because sometimes the series letter is attached to the journal title instead of the volume; in those cases we move it from the title to the volume
[ "Split", "volume", "from", "journal", "title" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L242-L253
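A sketch of split_volume_from_journal; whether the tagger really leaves a ';B' suffix on the title is an assumption, but it is the shape this function is written to repair.

from refextract.references.engine import split_volume_from_journal

el = {'type': 'JOURNAL', 'title': 'Phys.Lett.;B', 'volume': '100', 'misc_txt': ''}
split_volume_from_journal([el])
# el['title']  -> 'Phys.Lett.'
# el['volume'] -> 'B100'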
inspirehep/refextract
refextract/references/engine.py
remove_b_for_nucl_phys
def remove_b_for_nucl_phys(citation_elements): """Removes b from the volume of some journals Removes the B from the volume for Nucl.Phys.Proc.Suppl. because in INSPIRE that journal is handled differently. """ for el in citation_elements: if el['type'] == 'JOURNAL' and el['title'] == 'Nucl.Phys.Proc.Suppl.' \ and 'volume' in el \ and (el['volume'].startswith('b') or el['volume'].startswith('B')): el['volume'] = el['volume'][1:] return citation_elements
python
def remove_b_for_nucl_phys(citation_elements): for el in citation_elements: if el['type'] == 'JOURNAL' and el['title'] == 'Nucl.Phys.Proc.Suppl.' \ and 'volume' in el \ and (el['volume'].startswith('b') or el['volume'].startswith('B')): el['volume'] = el['volume'][1:] return citation_elements
[ "def", "remove_b_for_nucl_phys", "(", "citation_elements", ")", ":", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", "and", "el", "[", "'title'", "]", "==", "'Nucl.Phys.Proc.Suppl.'", "and", "'volume'", "in", "el", "and", "(", "el", "[", "'volume'", "]", ".", "startswith", "(", "'b'", ")", "or", "el", "[", "'volume'", "]", ".", "startswith", "(", "'B'", ")", ")", ":", "el", "[", "'volume'", "]", "=", "el", "[", "'volume'", "]", "[", "1", ":", "]", "return", "citation_elements" ]
Removes b from the volume of some journals Removes the B from the volume for Nucl.Phys.Proc.Suppl. because in INSPIRE that journal is handled differently.
[ "Removes", "b", "from", "the", "volume", "of", "some", "journals" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L256-L267
inspirehep/refextract
refextract/references/engine.py
mangle_volume
def mangle_volume(citation_elements): """Make sure the volume letter is before the volume number e.g. transforms 100B to B100 """ volume_re = re.compile(ur"(\d+)([A-Z])", re.U | re.I) for el in citation_elements: if el['type'] == 'JOURNAL': matches = volume_re.match(el['volume']) if matches: el['volume'] = matches.group(2) + matches.group(1) return citation_elements
python
def mangle_volume(citation_elements): volume_re = re.compile(ur"(\d+)([A-Z])", re.U | re.I) for el in citation_elements: if el['type'] == 'JOURNAL': matches = volume_re.match(el['volume']) if matches: el['volume'] = matches.group(2) + matches.group(1) return citation_elements
[ "def", "mangle_volume", "(", "citation_elements", ")", ":", "volume_re", "=", "re", ".", "compile", "(", "ur\"(\\d+)([A-Z])\"", ",", "re", ".", "U", "|", "re", ".", "I", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", ":", "matches", "=", "volume_re", ".", "match", "(", "el", "[", "'volume'", "]", ")", "if", "matches", ":", "el", "[", "'volume'", "]", "=", "matches", ".", "group", "(", "2", ")", "+", "matches", ".", "group", "(", "1", ")", "return", "citation_elements" ]
Make sure the volume letter is before the volume number e.g. transforms 100B to B100
[ "Make", "sure", "the", "volume", "letter", "is", "before", "the", "volume", "number" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L270-L282
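And the complementary fix, mangle_volume, moving a trailing series letter to the front:

from refextract.references.engine import mangle_volume

el = {'type': 'JOURNAL', 'title': 'Nucl.Phys.', 'volume': '100B', 'misc_txt': ''}
mangle_volume([el])
# el['volume'] -> 'B100'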
inspirehep/refextract
refextract/references/engine.py
split_citations
def split_citations(citation_elements): """Split a citation line in multiple citations We handle the case where the author has put 2 citations in the same line but split with ; or some other method. """ splitted_citations = [] new_elements = [] current_recid = None current_doi = None def check_ibid(current_elements, trigger_el): for el in new_elements: if el['type'] == 'AUTH': return # Check for ibid if trigger_el.get('is_ibid', False): if splitted_citations: els = chain(reversed(current_elements), reversed(splitted_citations[-1])) else: els = reversed(current_elements) for el in els: if el['type'] == 'AUTH': new_elements.append(el.copy()) break def start_new_citation(): """Start new citation""" splitted_citations.append(new_elements[:]) del new_elements[:] for el in citation_elements: try: el_recid = el['recid'] except KeyError: el_recid = None if current_recid and el_recid and current_recid == el_recid: # Do not start a new citation pass elif current_recid and el_recid and current_recid != el_recid \ or current_doi and el['type'] == 'DOI' and \ current_doi != el['doi_string']: start_new_citation() # Some authors may be found in the previous citation balance_authors(splitted_citations, new_elements) elif ';' in el['misc_txt']: misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1) if misc_txt: new_elements.append({'type': 'MISC', 'misc_txt': misc_txt}) start_new_citation() # In case el['recid'] is None, we want to reset it # because we are starting a new reference current_recid = el_recid while ';' in el['misc_txt']: misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1) if misc_txt: new_elements.append({'type': 'MISC', 'misc_txt': misc_txt}) start_new_citation() current_recid = None if el_recid: current_recid = el_recid if el['type'] == 'DOI': current_doi = el['doi_string'] check_ibid(new_elements, el) new_elements.append(el) splitted_citations.append(new_elements) return [el for el in splitted_citations if not empty_citation(el)]
python
def split_citations(citation_elements): splitted_citations = [] new_elements = [] current_recid = None current_doi = None def check_ibid(current_elements, trigger_el): for el in new_elements: if el['type'] == 'AUTH': return if trigger_el.get('is_ibid', False): if splitted_citations: els = chain(reversed(current_elements), reversed(splitted_citations[-1])) else: els = reversed(current_elements) for el in els: if el['type'] == 'AUTH': new_elements.append(el.copy()) break def start_new_citation(): splitted_citations.append(new_elements[:]) del new_elements[:] for el in citation_elements: try: el_recid = el['recid'] except KeyError: el_recid = None if current_recid and el_recid and current_recid == el_recid: pass elif current_recid and el_recid and current_recid != el_recid \ or current_doi and el['type'] == 'DOI' and \ current_doi != el['doi_string']: start_new_citation() balance_authors(splitted_citations, new_elements) elif ';' in el['misc_txt']: misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1) if misc_txt: new_elements.append({'type': 'MISC', 'misc_txt': misc_txt}) start_new_citation() current_recid = el_recid while ';' in el['misc_txt']: misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1) if misc_txt: new_elements.append({'type': 'MISC', 'misc_txt': misc_txt}) start_new_citation() current_recid = None if el_recid: current_recid = el_recid if el['type'] == 'DOI': current_doi = el['doi_string'] check_ibid(new_elements, el) new_elements.append(el) splitted_citations.append(new_elements) return [el for el in splitted_citations if not empty_citation(el)]
[ "def", "split_citations", "(", "citation_elements", ")", ":", "splitted_citations", "=", "[", "]", "new_elements", "=", "[", "]", "current_recid", "=", "None", "current_doi", "=", "None", "def", "check_ibid", "(", "current_elements", ",", "trigger_el", ")", ":", "for", "el", "in", "new_elements", ":", "if", "el", "[", "'type'", "]", "==", "'AUTH'", ":", "return", "# Check for ibid", "if", "trigger_el", ".", "get", "(", "'is_ibid'", ",", "False", ")", ":", "if", "splitted_citations", ":", "els", "=", "chain", "(", "reversed", "(", "current_elements", ")", ",", "reversed", "(", "splitted_citations", "[", "-", "1", "]", ")", ")", "else", ":", "els", "=", "reversed", "(", "current_elements", ")", "for", "el", "in", "els", ":", "if", "el", "[", "'type'", "]", "==", "'AUTH'", ":", "new_elements", ".", "append", "(", "el", ".", "copy", "(", ")", ")", "break", "def", "start_new_citation", "(", ")", ":", "\"\"\"Start new citation\"\"\"", "splitted_citations", ".", "append", "(", "new_elements", "[", ":", "]", ")", "del", "new_elements", "[", ":", "]", "for", "el", "in", "citation_elements", ":", "try", ":", "el_recid", "=", "el", "[", "'recid'", "]", "except", "KeyError", ":", "el_recid", "=", "None", "if", "current_recid", "and", "el_recid", "and", "current_recid", "==", "el_recid", ":", "# Do not start a new citation", "pass", "elif", "current_recid", "and", "el_recid", "and", "current_recid", "!=", "el_recid", "or", "current_doi", "and", "el", "[", "'type'", "]", "==", "'DOI'", "and", "current_doi", "!=", "el", "[", "'doi_string'", "]", ":", "start_new_citation", "(", ")", "# Some authors may be found in the previous citation", "balance_authors", "(", "splitted_citations", ",", "new_elements", ")", "elif", "';'", "in", "el", "[", "'misc_txt'", "]", ":", "misc_txt", ",", "el", "[", "'misc_txt'", "]", "=", "el", "[", "'misc_txt'", "]", ".", "split", "(", "';'", ",", "1", ")", "if", "misc_txt", ":", "new_elements", ".", "append", "(", "{", "'type'", ":", "'MISC'", ",", "'misc_txt'", ":", "misc_txt", "}", ")", "start_new_citation", "(", ")", "# In case el['recid'] is None, we want to reset it", "# because we are starting a new reference", "current_recid", "=", "el_recid", "while", "';'", "in", "el", "[", "'misc_txt'", "]", ":", "misc_txt", ",", "el", "[", "'misc_txt'", "]", "=", "el", "[", "'misc_txt'", "]", ".", "split", "(", "';'", ",", "1", ")", "if", "misc_txt", ":", "new_elements", ".", "append", "(", "{", "'type'", ":", "'MISC'", ",", "'misc_txt'", ":", "misc_txt", "}", ")", "start_new_citation", "(", ")", "current_recid", "=", "None", "if", "el_recid", ":", "current_recid", "=", "el_recid", "if", "el", "[", "'type'", "]", "==", "'DOI'", ":", "current_doi", "=", "el", "[", "'doi_string'", "]", "check_ibid", "(", "new_elements", ",", "el", ")", "new_elements", ".", "append", "(", "el", ")", "splitted_citations", ".", "append", "(", "new_elements", ")", "return", "[", "el", "for", "el", "in", "splitted_citations", "if", "not", "empty_citation", "(", "el", ")", "]" ]
Split a citation line in multiple citations We handle the case where the author has put 2 citations in the same line but split with ; or some other method.
[ "Split", "a", "citation", "line", "in", "multiple", "citations" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L307-L383
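A hedged sketch of the ';' split in split_citations. The helpers it calls (balance_authors, empty_citation) live in the same module, so importing the function should suffice; the element dicts are assumptions in the shape used above, and the exact output depends on empty_citation's filtering.

from refextract.references.engine import split_citations

els = [
    {'type': 'JOURNAL', 'title': 'Phys.Lett.', 'volume': 'B100',
     'year': '1981', 'page': '1', 'misc_txt': ''},
    {'type': 'JOURNAL', 'title': 'Nucl.Phys.', 'volume': 'B200',
     'year': '1982', 'page': '2', 'misc_txt': '; see also'},
]
groups = split_citations(els)
# The ';' in the second element's misc_txt closes the first citation,
# so we expect two groups, one JOURNAL element each.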
inspirehep/refextract
refextract/references/engine.py
look_for_hdl
def look_for_hdl(citation_elements): """Looks for handle identifiers in the misc txt of the citation elements When finding an hdl, creates a new HDL element. @param citation_elements: (list) elements to process """ for el in list(citation_elements): matched_hdl = re_hdl.finditer(el['misc_txt']) for match in reversed(list(matched_hdl)): hdl_el = {'type': 'HDL', 'hdl_id': match.group('hdl_id'), 'misc_txt': el['misc_txt'][match.end():]} el['misc_txt'] = el['misc_txt'][0:match.start()] citation_elements.insert(citation_elements.index(el) + 1, hdl_el)
python
def look_for_hdl(citation_elements): for el in list(citation_elements): matched_hdl = re_hdl.finditer(el['misc_txt']) for match in reversed(list(matched_hdl)): hdl_el = {'type': 'HDL', 'hdl_id': match.group('hdl_id'), 'misc_txt': el['misc_txt'][match.end():]} el['misc_txt'] = el['misc_txt'][0:match.start()] citation_elements.insert(citation_elements.index(el) + 1, hdl_el)
[ "def", "look_for_hdl", "(", "citation_elements", ")", ":", "for", "el", "in", "list", "(", "citation_elements", ")", ":", "matched_hdl", "=", "re_hdl", ".", "finditer", "(", "el", "[", "'misc_txt'", "]", ")", "for", "match", "in", "reversed", "(", "list", "(", "matched_hdl", ")", ")", ":", "hdl_el", "=", "{", "'type'", ":", "'HDL'", ",", "'hdl_id'", ":", "match", ".", "group", "(", "'hdl_id'", ")", ",", "'misc_txt'", ":", "el", "[", "'misc_txt'", "]", "[", "match", ".", "end", "(", ")", ":", "]", "}", "el", "[", "'misc_txt'", "]", "=", "el", "[", "'misc_txt'", "]", "[", "0", ":", "match", ".", "start", "(", ")", "]", "citation_elements", ".", "insert", "(", "citation_elements", ".", "index", "(", "el", ")", "+", "1", ",", "hdl_el", ")" ]
Looks for handle identifiers in the misc txt of the citation elements

When finding an hdl, creates a new HDL element.
@param citation_elements: (list) elements to process
[ "Looks", "for", "handle", "identifiers", "in", "the", "misc", "txt", "of", "the", "citation", "elements" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L596-L609
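A before/after sketch of the in-place rewrite look_for_hdl performs. The 'hdl:...' string is an assumption about what the module-level re_hdl pattern accepts; the pattern itself is defined elsewhere in the package:
from refextract.references.engine import look_for_hdl

elements = [{'type': 'MISC',
             'misc_txt': 'available at hdl:10.1000/12345, see appendix'}]
look_for_hdl(elements)
# If re_hdl matched, the MISC text was truncated at the match and a new
# element was inserted right after it, e.g.:
#   {'type': 'HDL', 'hdl_id': '10.1000/12345', 'misc_txt': ', see appendix'}
print(elements)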
inspirehep/refextract
refextract/references/engine.py
look_for_hdl_urls
def look_for_hdl_urls(citation_elements): """Looks for handle identifiers that have already been identified as urls When finding an hdl, creates a new HDL element. @param citation_elements: (list) elements to process """ for el in citation_elements: if el['type'] == 'URL': match = re_hdl.match(el['url_string']) if match: el['type'] = 'HDL' el['hdl_id'] = match.group('hdl_id') del el['url_desc'] del el['url_string']
python
def look_for_hdl_urls(citation_elements): for el in citation_elements: if el['type'] == 'URL': match = re_hdl.match(el['url_string']) if match: el['type'] = 'HDL' el['hdl_id'] = match.group('hdl_id') del el['url_desc'] del el['url_string']
[ "def", "look_for_hdl_urls", "(", "citation_elements", ")", ":", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'URL'", ":", "match", "=", "re_hdl", ".", "match", "(", "el", "[", "'url_string'", "]", ")", "if", "match", ":", "el", "[", "'type'", "]", "=", "'HDL'", "el", "[", "'hdl_id'", "]", "=", "match", ".", "group", "(", "'hdl_id'", ")", "del", "el", "[", "'url_desc'", "]", "del", "el", "[", "'url_string'", "]" ]
Looks for handle identifiers that have already been identified as urls

When finding an hdl, creates a new HDL element.
@param citation_elements: (list) elements to process
[ "Looks", "for", "handle", "identifiers", "that", "have", "already", "been", "identified", "as", "urls" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L612-L625
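The URL variant rewrites a matching element in place instead of inserting a new one; again, the url_string value below is only a guess at something re_hdl.match would accept:
from refextract.references.engine import look_for_hdl_urls

elements = [{'type': 'URL', 'misc_txt': '',
             'url_string': 'hdl:10.1000/12345', 'url_desc': 'handle link'}]
look_for_hdl_urls(elements)
# On a match the element is mutated to
#   {'type': 'HDL', 'misc_txt': '', 'hdl_id': '10.1000/12345'}
# with the 'url_string' and 'url_desc' keys removed.
print(elements)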
inspirehep/refextract
refextract/references/engine.py
parse_reference_line
def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None): """Parse one reference line @input a string representing a single reference bullet @output parsed references (a list of elements objects) """ # Strip the 'marker' (e.g. [1]) from this reference line: line_marker, ref_line = remove_reference_line_marker(ref_line) # Find DOI sections in citation ref_line, identified_dois = identify_and_tag_DOI(ref_line) # Identify and replace URLs in the line: ref_line, identified_urls = identify_and_tag_URLs(ref_line) # Tag <cds.JOURNAL>, etc. tagged_line, bad_titles_count = tag_reference_line(ref_line, kbs, bad_titles_count) # Debug print tagging (authors, titles, volumes, etc.) LOGGER.debug("tags %r", tagged_line) # Using the recorded information, create a MARC XML representation # of the rebuilt line: # At the same time, get stats of citations found in the reference line # (titles, urls, etc): citation_elements, line_marker, counts = \ parse_tagged_reference_line(line_marker, tagged_line, identified_dois, identified_urls) # Transformations on elements split_volume_from_journal(citation_elements) format_volume(citation_elements) handle_special_journals(citation_elements, kbs) format_report_number(citation_elements) format_author_ed(citation_elements) look_for_books(citation_elements, kbs) format_hep(citation_elements) remove_b_for_nucl_phys(citation_elements) mangle_volume(citation_elements) arxiv_urls_to_report_numbers(citation_elements) look_for_hdl(citation_elements) look_for_hdl_urls(citation_elements) # Link references if desired if linker_callback: associate_recids(citation_elements, linker_callback) # Split the reference in multiple ones if needed splitted_citations = split_citations(citation_elements) # Look for implied ibids look_for_implied_ibids(splitted_citations) # Find year add_year_elements(splitted_citations) # Look for books in misc field look_for_undetected_books(splitted_citations, kbs) if linker_callback: # Link references with the newly added ibids/books information for citations in splitted_citations: associate_recids(citations, linker_callback) # FIXME: Needed? # Remove references with only misc text # splitted_citations = remove_invalid_references(splitted_citations) # Merge references with only misc text # splitted_citations = merge_invalid_references(splitted_citations) remove_duplicated_authors(splitted_citations) remove_duplicated_dois(splitted_citations) remove_duplicated_collaborations(splitted_citations) add_recid_elements(splitted_citations) # For debugging purposes print_citations(splitted_citations, line_marker) return splitted_citations, line_marker, counts, bad_titles_count
python
def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None): line_marker, ref_line = remove_reference_line_marker(ref_line) ref_line, identified_dois = identify_and_tag_DOI(ref_line) ref_line, identified_urls = identify_and_tag_URLs(ref_line) tagged_line, bad_titles_count = tag_reference_line(ref_line, kbs, bad_titles_count) LOGGER.debug("tags %r", tagged_line) citation_elements, line_marker, counts = \ parse_tagged_reference_line(line_marker, tagged_line, identified_dois, identified_urls) split_volume_from_journal(citation_elements) format_volume(citation_elements) handle_special_journals(citation_elements, kbs) format_report_number(citation_elements) format_author_ed(citation_elements) look_for_books(citation_elements, kbs) format_hep(citation_elements) remove_b_for_nucl_phys(citation_elements) mangle_volume(citation_elements) arxiv_urls_to_report_numbers(citation_elements) look_for_hdl(citation_elements) look_for_hdl_urls(citation_elements) if linker_callback: associate_recids(citation_elements, linker_callback) splitted_citations = split_citations(citation_elements) look_for_implied_ibids(splitted_citations) add_year_elements(splitted_citations) look_for_undetected_books(splitted_citations, kbs) if linker_callback: for citations in splitted_citations: associate_recids(citations, linker_callback) remove_duplicated_authors(splitted_citations) remove_duplicated_dois(splitted_citations) remove_duplicated_collaborations(splitted_citations) add_recid_elements(splitted_citations) print_citations(splitted_citations, line_marker) return splitted_citations, line_marker, counts, bad_titles_count
[ "def", "parse_reference_line", "(", "ref_line", ",", "kbs", ",", "bad_titles_count", "=", "{", "}", ",", "linker_callback", "=", "None", ")", ":", "# Strip the 'marker' (e.g. [1]) from this reference line:", "line_marker", ",", "ref_line", "=", "remove_reference_line_marker", "(", "ref_line", ")", "# Find DOI sections in citation", "ref_line", ",", "identified_dois", "=", "identify_and_tag_DOI", "(", "ref_line", ")", "# Identify and replace URLs in the line:", "ref_line", ",", "identified_urls", "=", "identify_and_tag_URLs", "(", "ref_line", ")", "# Tag <cds.JOURNAL>, etc.", "tagged_line", ",", "bad_titles_count", "=", "tag_reference_line", "(", "ref_line", ",", "kbs", ",", "bad_titles_count", ")", "# Debug print tagging (authors, titles, volumes, etc.)", "LOGGER", ".", "debug", "(", "\"tags %r\"", ",", "tagged_line", ")", "# Using the recorded information, create a MARC XML representation", "# of the rebuilt line:", "# At the same time, get stats of citations found in the reference line", "# (titles, urls, etc):", "citation_elements", ",", "line_marker", ",", "counts", "=", "parse_tagged_reference_line", "(", "line_marker", ",", "tagged_line", ",", "identified_dois", ",", "identified_urls", ")", "# Transformations on elements", "split_volume_from_journal", "(", "citation_elements", ")", "format_volume", "(", "citation_elements", ")", "handle_special_journals", "(", "citation_elements", ",", "kbs", ")", "format_report_number", "(", "citation_elements", ")", "format_author_ed", "(", "citation_elements", ")", "look_for_books", "(", "citation_elements", ",", "kbs", ")", "format_hep", "(", "citation_elements", ")", "remove_b_for_nucl_phys", "(", "citation_elements", ")", "mangle_volume", "(", "citation_elements", ")", "arxiv_urls_to_report_numbers", "(", "citation_elements", ")", "look_for_hdl", "(", "citation_elements", ")", "look_for_hdl_urls", "(", "citation_elements", ")", "# Link references if desired", "if", "linker_callback", ":", "associate_recids", "(", "citation_elements", ",", "linker_callback", ")", "# Split the reference in multiple ones if needed", "splitted_citations", "=", "split_citations", "(", "citation_elements", ")", "# Look for implied ibids", "look_for_implied_ibids", "(", "splitted_citations", ")", "# Find year", "add_year_elements", "(", "splitted_citations", ")", "# Look for books in misc field", "look_for_undetected_books", "(", "splitted_citations", ",", "kbs", ")", "if", "linker_callback", ":", "# Link references with the newly added ibids/books information", "for", "citations", "in", "splitted_citations", ":", "associate_recids", "(", "citations", ",", "linker_callback", ")", "# FIXME: Needed?", "# Remove references with only misc text", "# splitted_citations = remove_invalid_references(splitted_citations)", "# Merge references with only misc text", "# splitted_citations = merge_invalid_references(splitted_citations)", "remove_duplicated_authors", "(", "splitted_citations", ")", "remove_duplicated_dois", "(", "splitted_citations", ")", "remove_duplicated_collaborations", "(", "splitted_citations", ")", "add_recid_elements", "(", "splitted_citations", ")", "# For debugging purposes", "print_citations", "(", "splitted_citations", ",", "line_marker", ")", "return", "splitted_citations", ",", "line_marker", ",", "counts", ",", "bad_titles_count" ]
Parse one reference line

@input a string representing a single reference bullet
@output parsed references (a list of element objects)
[ "Parse", "one", "reference", "line" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L639-L716
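Driving the per-line parser directly requires the knowledge bases; how they are loaded is outside this function, so the get_kbs import below is an assumption about the surrounding package (refextract also ships higher-level extract_* helpers that hide all of this):
from refextract.references.engine import parse_reference_line
from refextract.references.kbs import get_kbs  # assumed location of the KB loader

kbs = get_kbs()
line = u'[1] J. Smith, Phys. Rev. D1 (1970) 1; arXiv:hep-ph/0112233.'
citations, marker, counts, bad_titles = parse_reference_line(line, kbs)
print(marker, counts)
for citation in citations:
    for element in citation:
        print(element['type'], element.get('misc_txt', ''))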
inspirehep/refextract
refextract/references/engine.py
search_for_book_in_misc
def search_for_book_in_misc(citation, kbs): """Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc. """ citation_year = year_from_citation(citation) for citation_element in citation: LOGGER.debug(u"Searching for book title in: %s", citation_element['misc_txt']) for title in kbs['books']: startIndex = find_substring_ignore_special_chars(citation_element['misc_txt'], title) if startIndex != -1: line = kbs['books'][title.upper()] book_year = line[2].strip(';') book_authors = line[0] book_found = False if citation_year == book_year: # For now consider the citation as valid, we are using # an exact search, we don't need to check the authors # However, the code below will be useful if we decide # to introduce fuzzy matching. book_found = True for author in get_possible_author_names(citation): if find_substring_ignore_special_chars(book_authors, author) != -1: book_found = True for author in re.findall('[a-zA-Z]{4,}', book_authors): if find_substring_ignore_special_chars(citation_element['misc_txt'], author) != -1: book_found = True if book_found: LOGGER.debug(u"Book found: %s", title) book_element = {'type': 'BOOK', 'misc_txt': '', 'authors': book_authors, 'title': line[1], 'year': book_year} citation.append(book_element) citation_element['misc_txt'] = cut_substring_with_special_chars(citation_element['misc_txt'], title, startIndex) # Remove year from misc txt citation_element['misc_txt'] = remove_year(citation_element['misc_txt'], book_year) return True LOGGER.debug("Book not found!") return False
python
def search_for_book_in_misc(citation, kbs): citation_year = year_from_citation(citation) for citation_element in citation: LOGGER.debug(u"Searching for book title in: %s", citation_element['misc_txt']) for title in kbs['books']: startIndex = find_substring_ignore_special_chars(citation_element['misc_txt'], title) if startIndex != -1: line = kbs['books'][title.upper()] book_year = line[2].strip(';') book_authors = line[0] book_found = False if citation_year == book_year: book_found = True for author in get_possible_author_names(citation): if find_substring_ignore_special_chars(book_authors, author) != -1: book_found = True for author in re.findall('[a-zA-Z]{4,}', book_authors): if find_substring_ignore_special_chars(citation_element['misc_txt'], author) != -1: book_found = True if book_found: LOGGER.debug(u"Book found: %s", title) book_element = {'type': 'BOOK', 'misc_txt': '', 'authors': book_authors, 'title': line[1], 'year': book_year} citation.append(book_element) citation_element['misc_txt'] = cut_substring_with_special_chars(citation_element['misc_txt'], title, startIndex) citation_element['misc_txt'] = remove_year(citation_element['misc_txt'], book_year) return True LOGGER.debug("Book not found!") return False
[ "def", "search_for_book_in_misc", "(", "citation", ",", "kbs", ")", ":", "citation_year", "=", "year_from_citation", "(", "citation", ")", "for", "citation_element", "in", "citation", ":", "LOGGER", ".", "debug", "(", "u\"Searching for book title in: %s\"", ",", "citation_element", "[", "'misc_txt'", "]", ")", "for", "title", "in", "kbs", "[", "'books'", "]", ":", "startIndex", "=", "find_substring_ignore_special_chars", "(", "citation_element", "[", "'misc_txt'", "]", ",", "title", ")", "if", "startIndex", "!=", "-", "1", ":", "line", "=", "kbs", "[", "'books'", "]", "[", "title", ".", "upper", "(", ")", "]", "book_year", "=", "line", "[", "2", "]", ".", "strip", "(", "';'", ")", "book_authors", "=", "line", "[", "0", "]", "book_found", "=", "False", "if", "citation_year", "==", "book_year", ":", "# For now consider the citation as valid, we are using", "# an exact search, we don't need to check the authors", "# However, the code below will be useful if we decide", "# to introduce fuzzy matching.", "book_found", "=", "True", "for", "author", "in", "get_possible_author_names", "(", "citation", ")", ":", "if", "find_substring_ignore_special_chars", "(", "book_authors", ",", "author", ")", "!=", "-", "1", ":", "book_found", "=", "True", "for", "author", "in", "re", ".", "findall", "(", "'[a-zA-Z]{4,}'", ",", "book_authors", ")", ":", "if", "find_substring_ignore_special_chars", "(", "citation_element", "[", "'misc_txt'", "]", ",", "author", ")", "!=", "-", "1", ":", "book_found", "=", "True", "if", "book_found", ":", "LOGGER", ".", "debug", "(", "u\"Book found: %s\"", ",", "title", ")", "book_element", "=", "{", "'type'", ":", "'BOOK'", ",", "'misc_txt'", ":", "''", ",", "'authors'", ":", "book_authors", ",", "'title'", ":", "line", "[", "1", "]", ",", "'year'", ":", "book_year", "}", "citation", ".", "append", "(", "book_element", ")", "citation_element", "[", "'misc_txt'", "]", "=", "cut_substring_with_special_chars", "(", "citation_element", "[", "'misc_txt'", "]", ",", "title", ",", "startIndex", ")", "# Remove year from misc txt", "citation_element", "[", "'misc_txt'", "]", "=", "remove_year", "(", "citation_element", "[", "'misc_txt'", "]", ",", "book_year", ")", "return", "True", "LOGGER", ".", "debug", "(", "\"Book not found!\"", ")", "return", "False" ]
Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc.
[ "Searches", "for", "books", "in", "the", "misc_txt", "field", "if", "the", "citation", "is", "not", "recognized", "as", "anything", "like", "a", "journal", "book", "etc", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L736-L779
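A sketch of the books KB shape this function expects, inferred from its lookups: kbs['books'] is keyed by the upper-cased title, and each value is indexable as authors/title/year. Matching goes through find_substring_ignore_special_chars, get_possible_author_names and year_from_citation, which are defined elsewhere, so the exact matching behaviour here is assumed:
from refextract.references.engine import search_for_book_in_misc

kbs = {'books': {
    'THE BIG BOOK OF EXAMPLES':
        ('A. Author and B. Writer', 'The Big Book of Examples', '1999;'),
}}
citation = [
    {'type': 'AUTH', 'auth_txt': 'A. Author', 'auth_type': 'stnd',
     'misc_txt': ''},
    {'type': 'MISC',
     'misc_txt': 'A. Author and B. Writer, The Big Book of Examples, 1999'},
]
if search_for_book_in_misc(citation, kbs):
    print(citation[-1])  # the appended {'type': 'BOOK', ...} element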
inspirehep/refextract
refextract/references/engine.py
parse_references_elements
def parse_references_elements(ref_sect, kbs, linker_callback=None): """Passed a complete reference section, process each line and attempt to ## identify and standardise individual citations within the line. @param ref_sect: (list) of strings - each string in the list is a reference line. @param preprint_repnum_search_kb: (dictionary) - keyed by a tuple containing the line-number of the pattern in the KB and the non-standard category string. E.g.: (3, 'ASTRO PH'). Value is regexp pattern used to search for that report-number. @param preprint_repnum_standardised_categs: (dictionary) - keyed by non- standard version of institutional report number, value is the standardised version of that report number. @param periodical_title_search_kb: (dictionary) - keyed by non-standard title to search for, value is the compiled regexp pattern used to search for that title. @param standardised_periodical_titles: (dictionary) - keyed by non- standard title to search for, value is the standardised version of that title. @param periodical_title_search_keys: (list) - ordered list of non- standard titles to search for. @return: (tuple) of 6 components: ( list -> of strings, each string is a MARC XML-ized reference line. integer -> number of fields of miscellaneous text found for the record. integer -> number of title citations found for the record. integer -> number of institutional report-number citations found for the record. integer -> number of URL citations found for the record. integer -> number of DOI's found integer -> number of author groups found dictionary -> The totals for each 'bad title' found in the reference section. ) """ # a list to contain the processed reference lines: citations = [] # counters for extraction stats: counts = { 'misc': 0, 'title': 0, 'reportnum': 0, 'url': 0, 'doi': 0, 'auth_group': 0, } # A dictionary to contain the total count of each 'bad title' found # in the entire reference section: bad_titles_count = {} # Cleanup the reference lines # process references line-by-line: for ref_line in ref_sect: clean_line = wash_and_repair_reference_line(ref_line) citation_elements, line_marker, this_counts, bad_titles_count = \ parse_reference_line( clean_line, kbs, bad_titles_count, linker_callback) # Accumulate stats counts = sum_2_dictionaries(counts, this_counts) citations.append({'elements': citation_elements, 'line_marker': line_marker, 'raw_ref': ref_line}) # Return the list of processed reference lines: return citations, counts, bad_titles_count
python
def parse_references_elements(ref_sect, kbs, linker_callback=None): citations = [] counts = { 'misc': 0, 'title': 0, 'reportnum': 0, 'url': 0, 'doi': 0, 'auth_group': 0, } bad_titles_count = {} for ref_line in ref_sect: clean_line = wash_and_repair_reference_line(ref_line) citation_elements, line_marker, this_counts, bad_titles_count = \ parse_reference_line( clean_line, kbs, bad_titles_count, linker_callback) counts = sum_2_dictionaries(counts, this_counts) citations.append({'elements': citation_elements, 'line_marker': line_marker, 'raw_ref': ref_line}) return citations, counts, bad_titles_count
[ "def", "parse_references_elements", "(", "ref_sect", ",", "kbs", ",", "linker_callback", "=", "None", ")", ":", "# a list to contain the processed reference lines:", "citations", "=", "[", "]", "# counters for extraction stats:", "counts", "=", "{", "'misc'", ":", "0", ",", "'title'", ":", "0", ",", "'reportnum'", ":", "0", ",", "'url'", ":", "0", ",", "'doi'", ":", "0", ",", "'auth_group'", ":", "0", ",", "}", "# A dictionary to contain the total count of each 'bad title' found", "# in the entire reference section:", "bad_titles_count", "=", "{", "}", "# Cleanup the reference lines", "# process references line-by-line:", "for", "ref_line", "in", "ref_sect", ":", "clean_line", "=", "wash_and_repair_reference_line", "(", "ref_line", ")", "citation_elements", ",", "line_marker", ",", "this_counts", ",", "bad_titles_count", "=", "parse_reference_line", "(", "clean_line", ",", "kbs", ",", "bad_titles_count", ",", "linker_callback", ")", "# Accumulate stats", "counts", "=", "sum_2_dictionaries", "(", "counts", ",", "this_counts", ")", "citations", ".", "append", "(", "{", "'elements'", ":", "citation_elements", ",", "'line_marker'", ":", "line_marker", ",", "'raw_ref'", ":", "ref_line", "}", ")", "# Return the list of processed reference lines:", "return", "citations", ",", "counts", ",", "bad_titles_count" ]
Passed a complete reference section, process each line and attempt to
identify and standardise individual citations within the line.

@param ref_sect: (list) of strings - each string in the list is a
    reference line.
@param kbs: (dictionary) - the knowledge bases used to recognise report
    numbers, journal titles, books, etc.
@param linker_callback: (callable) - optional callback used to associate
    recids with the parsed citations.
@return: (tuple) of 3 components:
    ( list       -> of parsed citations; each citation is a dictionary
                    with 'elements', 'line_marker' and 'raw_ref' keys,
      dictionary -> extraction counts for the record ('misc', 'title',
                    'reportnum', 'url', 'doi', 'auth_group'),
      dictionary -> the totals for each 'bad title' found in the
                    reference section. )
[ "Passed", "a", "complete", "reference", "section", "process", "each", "line", "and", "attempt", "to", "identify", "and", "standardise", "individual", "citations", "within", "the", "line", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L839-L907
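The section-level wrapper simply washes each raw line, folds parse_reference_line over it, and accumulates the per-line stats; a schematic call, with the same assumed KB loader as in the earlier sketch:
from refextract.references.engine import parse_references_elements
from refextract.references.kbs import get_kbs  # assumed, as above

kbs = get_kbs()
ref_sect = [
    u'[1] J. Smith, Phys. Rev. D1 (1970) 1.',
    u'[2] See http://example.org/paper for details.',
]
citations, counts, bad_titles = parse_references_elements(ref_sect, kbs)
for entry in citations:
    print(entry['line_marker'], entry['raw_ref'])
print(counts)  # {'misc': ..., 'title': ..., 'reportnum': ..., 'url': ..., 'doi': ..., 'auth_group': ...}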
inspirehep/refextract
refextract/references/engine.py
parse_tagged_reference_line
def parse_tagged_reference_line(line_marker, line, identified_dois, identified_urls): """ Given a single tagged reference line, convert it to its MARC-XML representation. Try to find all tags and extract their contents and their types into corresponding dictionary elements. Append each dictionary tag representation onto a list, which is given to 'build_formatted_xml_citation()' where the correct xml output will be generated. This method is dumb, with very few heuristics. It simply looks for tags, and makes dictionaries from the data it finds in a tagged reference line. @param line_marker: (string) The line marker for this single reference line (e.g. [19]) @param line: (string) The tagged reference line. @param identified_dois: (list) a list of dois which were found in this line. The ordering of dois corresponds to the ordering of tags in the line, reading from left to right. @param identified_urls: (list) a list of urls which were found in this line. The ordering of urls corresponds to the ordering of tags in the line, reading from left to right. @param which format to use for references, roughly "<title> <volume> <page>" or "<title>,<volume>,<page>" @return xml_line: (string) the MARC-XML representation of the tagged reference line @return count_*: (integer) the number of * (pieces of info) found in the reference line. """ count_misc = count_title = count_reportnum = count_url = count_doi = count_auth_group = 0 processed_line = line cur_misc_txt = u"" tag_match = re_tagged_citation.search(processed_line) # contains a list of dictionary entries of previously cited items citation_elements = [] # the last tag element found when working from left-to-right across the # line identified_citation_element = None while tag_match is not None: # While there are tags inside this reference line... tag_match_start = tag_match.start() tag_match_end = tag_match.end() tag_type = tag_match.group(1) cur_misc_txt += processed_line[0:tag_match_start] # Catches both standard titles, and ibid's if tag_type.find("JOURNAL") != -1: # This tag is an identified journal TITLE. It should be followed # by VOLUME, YEAR and PAGE tags. # See if the found title has been tagged as an ibid: # <cds.JOURNALibid> if tag_match.group('ibid'): is_ibid = True closing_tag_length = len( CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID) idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID, tag_match_end) else: is_ibid = False closing_tag_length = len(CFG_REFEXTRACT_MARKER_CLOSING_TITLE) # extract the title from the line: idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_TITLE, tag_match_end) if idx_closing_tag == -1: # no closing TITLE tag found - get rid of the solitary tag processed_line = processed_line[tag_match_end:] identified_citation_element = None else: # Closing tag was found: # The title text to be used in the marked-up citation: title_text = processed_line[tag_match_end:idx_closing_tag] # Now trim this matched title and its tags from the start of # the line: processed_line = processed_line[ idx_closing_tag + closing_tag_length:] numeration_match = re_recognised_numeration_for_title_plus_series.search( processed_line) if numeration_match: # recognised numeration immediately after the title - # extract it: reference_volume = numeration_match.group('vol') reference_year = numeration_match.group('yr') or '' reference_page = numeration_match.group('pg') # This is used on two accounts: # 1. To get the series char from the title, if no series was found with the numeration # 2. To always remove any series character from the title match text # series_from_title = re_series_from_title.search(title_text) # if numeration_match.group('series'): reference_volume = numeration_match.group( 'series') + reference_volume # Skip past the matched numeration in the working line: processed_line = processed_line[numeration_match.end():] # 'id_ibid' saves whether THIS TITLE is an ibid or not. (True or False) # 'extra_ibids' are there to hold ibid's without the word 'ibid', which # come directly after this title # i.e., they are recognised using title numeration instead # of ibid notation identified_citation_element = {'type': "JOURNAL", 'misc_txt': cur_misc_txt, 'title': title_text, 'volume': reference_volume, 'year': reference_year, 'page': reference_page, 'is_ibid': is_ibid, 'extra_ibids': [] } count_title += 1 cur_misc_txt = u"" # Try to find IBID's after this title, on top of previously found titles that were # denoted with the word 'IBID'. (i.e. look for IBID's without the word 'IBID' by # looking at extra numeration after this title) numeration_match = re_numeration_no_ibid_txt.match( processed_line) while numeration_match is not None: reference_volume = numeration_match.group('vol') reference_year = numeration_match.group('yr') reference_page = numeration_match.group('pg') if numeration_match.group('series'): reference_volume = numeration_match.group( 'series') + reference_volume # Skip past the matched numeration in the working line: processed_line = processed_line[ numeration_match.end():] # Takes the just found title text identified_citation_element['extra_ibids'].append( {'type': "JOURNAL", 'misc_txt': "", 'title': title_text, 'volume': reference_volume, 'year': reference_year, 'page': reference_page, }) # Increment the stats counters: count_title += 1 title_text = "" reference_volume = "" reference_year = "" reference_page = "" numeration_match = re_numeration_no_ibid_txt.match( processed_line) else: # No numeration was recognised after the title. Add the # title into a MISC item instead: cur_misc_txt += "%s" % title_text identified_citation_element = None elif tag_type == "REPORTNUMBER": # This tag is an identified institutional report number: # extract the institutional report-number from the line: idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM, tag_match_end) # Sanity check - did we find a closing report-number tag? if idx_closing_tag == -1: # no closing </cds.REPORTNUMBER> tag found - strip the opening tag and move past this # recognised reportnumber as it is unreliable: processed_line = processed_line[tag_match_end:] identified_citation_element = None else: # closing tag was found report_num = processed_line[tag_match_end:idx_closing_tag] # now trim this matched institutional report-number # and its tags from the start of the line: ending_tag_pos = idx_closing_tag \ + len(CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM) processed_line = processed_line[ending_tag_pos:] identified_citation_element = {'type': "REPORTNUMBER", 'misc_txt': cur_misc_txt, 'report_num': report_num} count_reportnum += 1 cur_misc_txt = u"" elif tag_type == "URL": # This tag is an identified URL: # From the "identified_urls" list, get this URL and its # description string: url_string = identified_urls[0][0] url_desc = identified_urls[0][1] # Now move past this "<cds.URL />"tag in the line: processed_line = processed_line[tag_match_end:] # Delete the information for this URL from the start of the list # of identified URLs: identified_urls[0:1] = [] # Save the current misc text identified_citation_element = { 'type': "URL", 'misc_txt': "%s" % cur_misc_txt, 'url_string': "%s" % url_string, 'url_desc': "%s" % url_desc } count_url += 1 cur_misc_txt = u"" elif tag_type == "DOI": # This tag is an identified DOI: # From the "identified_dois" list, get this DOI and its # description string: doi_string = identified_dois[0] # Now move past this "<cds.CDS />"tag in the line: processed_line = processed_line[tag_match_end:] # Remove DOI from the list of DOI strings identified_dois[0:1] = [] # SAVE the current misc text identified_citation_element = { 'type': "DOI", 'misc_txt': "%s" % cur_misc_txt, 'doi_string': "%s" % doi_string } # Increment the stats counters: count_doi += 1 cur_misc_txt = u"" elif tag_type.find("AUTH") != -1: # This tag is an identified Author: auth_type = "" # extract the title from the line: if tag_type.find("stnd") != -1: auth_type = "stnd" idx_closing_tag_nearest = processed_line.find( CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND, tag_match_end) elif tag_type.find("etal") != -1: auth_type = "etal" idx_closing_tag_nearest = processed_line.find( CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL, tag_match_end) elif tag_type.find("incl") != -1: auth_type = "incl" idx_closing_tag_nearest = processed_line.find( CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL, tag_match_end) if idx_closing_tag_nearest == -1: # no closing </cds.AUTH****> tag found - strip the opening tag # and move past it processed_line = processed_line[tag_match_end:] identified_citation_element = None else: auth_txt = processed_line[ tag_match_end:idx_closing_tag_nearest] # Now move past the ending tag in the line: processed_line = processed_line[ idx_closing_tag_nearest + len("</cds.AUTHxxxx>"):] # SAVE the current misc text identified_citation_element = { 'type': "AUTH", 'misc_txt': "%s" % cur_misc_txt, 'auth_txt': "%s" % auth_txt, 'auth_type': "%s" % auth_type } # Increment the stats counters: count_auth_group += 1 cur_misc_txt = u"" # These following tags may be found separately; # They are usually found when a "JOURNAL" tag is hit # (ONLY immediately afterwards, however) # Sitting by themselves means they do not have # an associated TITLE tag, and should be MISC elif tag_type == "SER": # This tag is a SERIES tag; Since it was not preceeded by a TITLE # tag, it is useless - strip the tag and put it into miscellaneous: (cur_misc_txt, processed_line) = \ convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_SERIES) identified_citation_element = None elif tag_type == "VOL": # This tag is a VOLUME tag; Since it was not preceeded by a TITLE # tag, it is useless - strip the tag and put it into miscellaneous: (cur_misc_txt, processed_line) = \ convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_VOLUME) identified_citation_element = None elif tag_type == "YR": # This tag is a YEAR tag; Since it's not preceeded by TITLE and # VOLUME tags, it is useless - strip the tag and put the contents # into miscellaneous: (cur_misc_txt, processed_line) = \ convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_YEAR) identified_citation_element = None elif tag_type == "PG": # This tag is a PAGE tag; Since it's not preceeded by TITLE, # VOLUME and YEAR tags, it is useless - strip the tag and put the # contents into miscellaneous: (cur_misc_txt, processed_line) = \ convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_PAGE) identified_citation_element = None elif tag_type == "QUOTED": identified_citation_element, processed_line, cur_misc_txt = \ map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, 'title') elif tag_type == "ISBN": identified_citation_element, processed_line, cur_misc_txt = \ map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, tag_type) elif tag_type == "PUBLISHER": identified_citation_element, processed_line, cur_misc_txt = \ map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, 'publisher') elif tag_type == "COLLABORATION": identified_citation_element, processed_line, cur_misc_txt = \ map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, 'collaboration') if identified_citation_element: # Append the found tagged data and current misc text citation_elements.append(identified_citation_element) identified_citation_element = None # Look for the next tag in the processed line: tag_match = re_tagged_citation.search(processed_line) # place any remaining miscellaneous text into the # appropriate MARC XML fields: cur_misc_txt += processed_line # This MISC element will hold the entire citation in the event # that no tags were found. if len(cur_misc_txt.strip(" .;,")) > 0: # Increment the stats counters: count_misc += 1 identified_citation_element = { 'type': "MISC", 'misc_txt': cur_misc_txt, } citation_elements.append(identified_citation_element) return (citation_elements, line_marker, { 'misc': count_misc, 'title': count_title, 'reportnum': count_reportnum, 'url': count_url, 'doi': count_doi, 'auth_group': count_auth_group })
python
def parse_tagged_reference_line(line_marker, line, identified_dois, identified_urls): count_misc = count_title = count_reportnum = count_url = count_doi = count_auth_group = 0 processed_line = line cur_misc_txt = u"" tag_match = re_tagged_citation.search(processed_line) citation_elements = [] identified_citation_element = None while tag_match is not None: tag_match_start = tag_match.start() tag_match_end = tag_match.end() tag_type = tag_match.group(1) cur_misc_txt += processed_line[0:tag_match_start] if tag_type.find("JOURNAL") != -1: if tag_match.group('ibid'): is_ibid = True closing_tag_length = len( CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID) idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID, tag_match_end) else: is_ibid = False closing_tag_length = len(CFG_REFEXTRACT_MARKER_CLOSING_TITLE) idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_TITLE, tag_match_end) if idx_closing_tag == -1: processed_line = processed_line[tag_match_end:] identified_citation_element = None else: title_text = processed_line[tag_match_end:idx_closing_tag] processed_line = processed_line[ idx_closing_tag + closing_tag_length:] numeration_match = re_recognised_numeration_for_title_plus_series.search( processed_line) if numeration_match: reference_volume = numeration_match.group('vol') reference_year = numeration_match.group('yr') or '' reference_page = numeration_match.group('pg') if numeration_match.group('series'): reference_volume = numeration_match.group( 'series') + reference_volume processed_line = processed_line[numeration_match.end():] identified_citation_element = {'type': "JOURNAL", 'misc_txt': cur_misc_txt, 'title': title_text, 'volume': reference_volume, 'year': reference_year, 'page': reference_page, 'is_ibid': is_ibid, 'extra_ibids': [] } count_title += 1 cur_misc_txt = u"" numeration_match = re_numeration_no_ibid_txt.match( processed_line) while numeration_match is not None: reference_volume = numeration_match.group('vol') reference_year = numeration_match.group('yr') reference_page = numeration_match.group('pg') if numeration_match.group('series'): reference_volume = numeration_match.group( 'series') + reference_volume processed_line = processed_line[ numeration_match.end():] identified_citation_element['extra_ibids'].append( {'type': "JOURNAL", 'misc_txt': "", 'title': title_text, 'volume': reference_volume, 'year': reference_year, 'page': reference_page, }) count_title += 1 title_text = "" reference_volume = "" reference_year = "" reference_page = "" numeration_match = re_numeration_no_ibid_txt.match( processed_line) else: cur_misc_txt += "%s" % title_text identified_citation_element = None elif tag_type == "REPORTNUMBER": idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM, tag_match_end) if idx_closing_tag == -1: processed_line = processed_line[tag_match_end:] identified_citation_element = None else: report_num = processed_line[tag_match_end:idx_closing_tag] ending_tag_pos = idx_closing_tag \ + len(CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM) processed_line = processed_line[ending_tag_pos:] identified_citation_element = {'type': "REPORTNUMBER", 'misc_txt': cur_misc_txt, 'report_num': report_num} count_reportnum += 1 cur_misc_txt = u"" elif tag_type == "URL": url_string = identified_urls[0][0] url_desc = identified_urls[0][1] processed_line = processed_line[tag_match_end:] identified_urls[0:1] = [] identified_citation_element = { 'type': "URL", 'misc_txt': "%s" % cur_misc_txt, 'url_string': "%s" % url_string, 'url_desc': "%s" % url_desc } count_url += 1 cur_misc_txt = u"" elif tag_type == "DOI": doi_string = identified_dois[0] processed_line = processed_line[tag_match_end:] identified_dois[0:1] = [] identified_citation_element = { 'type': "DOI", 'misc_txt': "%s" % cur_misc_txt, 'doi_string': "%s" % doi_string } count_doi += 1 cur_misc_txt = u"" elif tag_type.find("AUTH") != -1: auth_type = "" if tag_type.find("stnd") != -1: auth_type = "stnd" idx_closing_tag_nearest = processed_line.find( CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND, tag_match_end) elif tag_type.find("etal") != -1: auth_type = "etal" idx_closing_tag_nearest = processed_line.find( CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL, tag_match_end) elif tag_type.find("incl") != -1: auth_type = "incl" idx_closing_tag_nearest = processed_line.find( CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL, tag_match_end) if idx_closing_tag_nearest == -1: processed_line = processed_line[tag_match_end:] identified_citation_element = None else: auth_txt = processed_line[ tag_match_end:idx_closing_tag_nearest] processed_line = processed_line[ idx_closing_tag_nearest + len("</cds.AUTHxxxx>"):] identified_citation_element = { 'type': "AUTH", 'misc_txt': "%s" % cur_misc_txt, 'auth_txt': "%s" % auth_txt, 'auth_type': "%s" % auth_type } count_auth_group += 1 cur_misc_txt = u"" elif tag_type == "SER": (cur_misc_txt, processed_line) = \ convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_SERIES) identified_citation_element = None elif tag_type == "VOL": (cur_misc_txt, processed_line) = \ convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_VOLUME) identified_citation_element = None elif tag_type == "YR": (cur_misc_txt, processed_line) = \ convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_YEAR) identified_citation_element = None elif tag_type == "PG": (cur_misc_txt, processed_line) = \ convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_PAGE) identified_citation_element = None elif tag_type == "QUOTED": identified_citation_element, processed_line, cur_misc_txt = \ map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, 'title') elif tag_type == "ISBN": identified_citation_element, processed_line, cur_misc_txt = \ map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, tag_type) elif tag_type == "PUBLISHER": identified_citation_element, processed_line, cur_misc_txt = \ map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, 'publisher') elif tag_type == "COLLABORATION": identified_citation_element, processed_line, cur_misc_txt = \ map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, 'collaboration') if identified_citation_element: citation_elements.append(identified_citation_element) identified_citation_element = None tag_match = re_tagged_citation.search(processed_line) cur_misc_txt += processed_line if len(cur_misc_txt.strip(" .;,")) > 0: count_misc += 1 identified_citation_element = { 'type': "MISC", 'misc_txt': cur_misc_txt, } citation_elements.append(identified_citation_element) return (citation_elements, line_marker, { 'misc': count_misc, 'title': count_title, 'reportnum': count_reportnum, 'url': count_url, 'doi': count_doi, 'auth_group': count_auth_group })
[ "def", "parse_tagged_reference_line", "(", "line_marker", ",", "line", ",", "identified_dois", ",", "identified_urls", ")", ":", "count_misc", "=", "count_title", "=", "count_reportnum", "=", "count_url", "=", "count_doi", "=", "count_auth_group", "=", "0", "processed_line", "=", "line", "cur_misc_txt", "=", "u\"\"", "tag_match", "=", "re_tagged_citation", ".", "search", "(", "processed_line", ")", "# contains a list of dictionary entries of previously cited items", "citation_elements", "=", "[", "]", "# the last tag element found when working from left-to-right across the", "# line", "identified_citation_element", "=", "None", "while", "tag_match", "is", "not", "None", ":", "# While there are tags inside this reference line...", "tag_match_start", "=", "tag_match", ".", "start", "(", ")", "tag_match_end", "=", "tag_match", ".", "end", "(", ")", "tag_type", "=", "tag_match", ".", "group", "(", "1", ")", "cur_misc_txt", "+=", "processed_line", "[", "0", ":", "tag_match_start", "]", "# Catches both standard titles, and ibid's", "if", "tag_type", ".", "find", "(", "\"JOURNAL\"", ")", "!=", "-", "1", ":", "# This tag is an identified journal TITLE. It should be followed", "# by VOLUME, YEAR and PAGE tags.", "# See if the found title has been tagged as an ibid:", "# <cds.JOURNALibid>", "if", "tag_match", ".", "group", "(", "'ibid'", ")", ":", "is_ibid", "=", "True", "closing_tag_length", "=", "len", "(", "CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID", ")", "idx_closing_tag", "=", "processed_line", ".", "find", "(", "CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID", ",", "tag_match_end", ")", "else", ":", "is_ibid", "=", "False", "closing_tag_length", "=", "len", "(", "CFG_REFEXTRACT_MARKER_CLOSING_TITLE", ")", "# extract the title from the line:", "idx_closing_tag", "=", "processed_line", ".", "find", "(", "CFG_REFEXTRACT_MARKER_CLOSING_TITLE", ",", "tag_match_end", ")", "if", "idx_closing_tag", "==", "-", "1", ":", "# no closing TITLE tag found - get rid of the solitary tag", "processed_line", "=", "processed_line", "[", "tag_match_end", ":", "]", "identified_citation_element", "=", "None", "else", ":", "# Closing tag was found:", "# The title text to be used in the marked-up citation:", "title_text", "=", "processed_line", "[", "tag_match_end", ":", "idx_closing_tag", "]", "# Now trim this matched title and its tags from the start of", "# the line:", "processed_line", "=", "processed_line", "[", "idx_closing_tag", "+", "closing_tag_length", ":", "]", "numeration_match", "=", "re_recognised_numeration_for_title_plus_series", ".", "search", "(", "processed_line", ")", "if", "numeration_match", ":", "# recognised numeration immediately after the title -", "# extract it:", "reference_volume", "=", "numeration_match", ".", "group", "(", "'vol'", ")", "reference_year", "=", "numeration_match", ".", "group", "(", "'yr'", ")", "or", "''", "reference_page", "=", "numeration_match", ".", "group", "(", "'pg'", ")", "# This is used on two accounts:", "# 1. To get the series char from the title, if no series was found with the numeration", "# 2. 
To always remove any series character from the title match text", "# series_from_title = re_series_from_title.search(title_text)", "#", "if", "numeration_match", ".", "group", "(", "'series'", ")", ":", "reference_volume", "=", "numeration_match", ".", "group", "(", "'series'", ")", "+", "reference_volume", "# Skip past the matched numeration in the working line:", "processed_line", "=", "processed_line", "[", "numeration_match", ".", "end", "(", ")", ":", "]", "# 'id_ibid' saves whether THIS TITLE is an ibid or not. (True or False)", "# 'extra_ibids' are there to hold ibid's without the word 'ibid', which", "# come directly after this title", "# i.e., they are recognised using title numeration instead", "# of ibid notation", "identified_citation_element", "=", "{", "'type'", ":", "\"JOURNAL\"", ",", "'misc_txt'", ":", "cur_misc_txt", ",", "'title'", ":", "title_text", ",", "'volume'", ":", "reference_volume", ",", "'year'", ":", "reference_year", ",", "'page'", ":", "reference_page", ",", "'is_ibid'", ":", "is_ibid", ",", "'extra_ibids'", ":", "[", "]", "}", "count_title", "+=", "1", "cur_misc_txt", "=", "u\"\"", "# Try to find IBID's after this title, on top of previously found titles that were", "# denoted with the word 'IBID'. (i.e. look for IBID's without the word 'IBID' by", "# looking at extra numeration after this title)", "numeration_match", "=", "re_numeration_no_ibid_txt", ".", "match", "(", "processed_line", ")", "while", "numeration_match", "is", "not", "None", ":", "reference_volume", "=", "numeration_match", ".", "group", "(", "'vol'", ")", "reference_year", "=", "numeration_match", ".", "group", "(", "'yr'", ")", "reference_page", "=", "numeration_match", ".", "group", "(", "'pg'", ")", "if", "numeration_match", ".", "group", "(", "'series'", ")", ":", "reference_volume", "=", "numeration_match", ".", "group", "(", "'series'", ")", "+", "reference_volume", "# Skip past the matched numeration in the working line:", "processed_line", "=", "processed_line", "[", "numeration_match", ".", "end", "(", ")", ":", "]", "# Takes the just found title text", "identified_citation_element", "[", "'extra_ibids'", "]", ".", "append", "(", "{", "'type'", ":", "\"JOURNAL\"", ",", "'misc_txt'", ":", "\"\"", ",", "'title'", ":", "title_text", ",", "'volume'", ":", "reference_volume", ",", "'year'", ":", "reference_year", ",", "'page'", ":", "reference_page", ",", "}", ")", "# Increment the stats counters:", "count_title", "+=", "1", "title_text", "=", "\"\"", "reference_volume", "=", "\"\"", "reference_year", "=", "\"\"", "reference_page", "=", "\"\"", "numeration_match", "=", "re_numeration_no_ibid_txt", ".", "match", "(", "processed_line", ")", "else", ":", "# No numeration was recognised after the title. 
Add the", "# title into a MISC item instead:", "cur_misc_txt", "+=", "\"%s\"", "%", "title_text", "identified_citation_element", "=", "None", "elif", "tag_type", "==", "\"REPORTNUMBER\"", ":", "# This tag is an identified institutional report number:", "# extract the institutional report-number from the line:", "idx_closing_tag", "=", "processed_line", ".", "find", "(", "CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM", ",", "tag_match_end", ")", "# Sanity check - did we find a closing report-number tag?", "if", "idx_closing_tag", "==", "-", "1", ":", "# no closing </cds.REPORTNUMBER> tag found - strip the opening tag and move past this", "# recognised reportnumber as it is unreliable:", "processed_line", "=", "processed_line", "[", "tag_match_end", ":", "]", "identified_citation_element", "=", "None", "else", ":", "# closing tag was found", "report_num", "=", "processed_line", "[", "tag_match_end", ":", "idx_closing_tag", "]", "# now trim this matched institutional report-number", "# and its tags from the start of the line:", "ending_tag_pos", "=", "idx_closing_tag", "+", "len", "(", "CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM", ")", "processed_line", "=", "processed_line", "[", "ending_tag_pos", ":", "]", "identified_citation_element", "=", "{", "'type'", ":", "\"REPORTNUMBER\"", ",", "'misc_txt'", ":", "cur_misc_txt", ",", "'report_num'", ":", "report_num", "}", "count_reportnum", "+=", "1", "cur_misc_txt", "=", "u\"\"", "elif", "tag_type", "==", "\"URL\"", ":", "# This tag is an identified URL:", "# From the \"identified_urls\" list, get this URL and its", "# description string:", "url_string", "=", "identified_urls", "[", "0", "]", "[", "0", "]", "url_desc", "=", "identified_urls", "[", "0", "]", "[", "1", "]", "# Now move past this \"<cds.URL />\"tag in the line:", "processed_line", "=", "processed_line", "[", "tag_match_end", ":", "]", "# Delete the information for this URL from the start of the list", "# of identified URLs:", "identified_urls", "[", "0", ":", "1", "]", "=", "[", "]", "# Save the current misc text", "identified_citation_element", "=", "{", "'type'", ":", "\"URL\"", ",", "'misc_txt'", ":", "\"%s\"", "%", "cur_misc_txt", ",", "'url_string'", ":", "\"%s\"", "%", "url_string", ",", "'url_desc'", ":", "\"%s\"", "%", "url_desc", "}", "count_url", "+=", "1", "cur_misc_txt", "=", "u\"\"", "elif", "tag_type", "==", "\"DOI\"", ":", "# This tag is an identified DOI:", "# From the \"identified_dois\" list, get this DOI and its", "# description string:", "doi_string", "=", "identified_dois", "[", "0", "]", "# Now move past this \"<cds.CDS />\"tag in the line:", "processed_line", "=", "processed_line", "[", "tag_match_end", ":", "]", "# Remove DOI from the list of DOI strings", "identified_dois", "[", "0", ":", "1", "]", "=", "[", "]", "# SAVE the current misc text", "identified_citation_element", "=", "{", "'type'", ":", "\"DOI\"", ",", "'misc_txt'", ":", "\"%s\"", "%", "cur_misc_txt", ",", "'doi_string'", ":", "\"%s\"", "%", "doi_string", "}", "# Increment the stats counters:", "count_doi", "+=", "1", "cur_misc_txt", "=", "u\"\"", "elif", "tag_type", ".", "find", "(", "\"AUTH\"", ")", "!=", "-", "1", ":", "# This tag is an identified Author:", "auth_type", "=", "\"\"", "# extract the title from the line:", "if", "tag_type", ".", "find", "(", "\"stnd\"", ")", "!=", "-", "1", ":", "auth_type", "=", "\"stnd\"", "idx_closing_tag_nearest", "=", "processed_line", ".", "find", "(", "CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND", ",", "tag_match_end", ")", "elif", "tag_type", ".", "find", "(", 
"\"etal\"", ")", "!=", "-", "1", ":", "auth_type", "=", "\"etal\"", "idx_closing_tag_nearest", "=", "processed_line", ".", "find", "(", "CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL", ",", "tag_match_end", ")", "elif", "tag_type", ".", "find", "(", "\"incl\"", ")", "!=", "-", "1", ":", "auth_type", "=", "\"incl\"", "idx_closing_tag_nearest", "=", "processed_line", ".", "find", "(", "CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL", ",", "tag_match_end", ")", "if", "idx_closing_tag_nearest", "==", "-", "1", ":", "# no closing </cds.AUTH****> tag found - strip the opening tag", "# and move past it", "processed_line", "=", "processed_line", "[", "tag_match_end", ":", "]", "identified_citation_element", "=", "None", "else", ":", "auth_txt", "=", "processed_line", "[", "tag_match_end", ":", "idx_closing_tag_nearest", "]", "# Now move past the ending tag in the line:", "processed_line", "=", "processed_line", "[", "idx_closing_tag_nearest", "+", "len", "(", "\"</cds.AUTHxxxx>\"", ")", ":", "]", "# SAVE the current misc text", "identified_citation_element", "=", "{", "'type'", ":", "\"AUTH\"", ",", "'misc_txt'", ":", "\"%s\"", "%", "cur_misc_txt", ",", "'auth_txt'", ":", "\"%s\"", "%", "auth_txt", ",", "'auth_type'", ":", "\"%s\"", "%", "auth_type", "}", "# Increment the stats counters:", "count_auth_group", "+=", "1", "cur_misc_txt", "=", "u\"\"", "# These following tags may be found separately;", "# They are usually found when a \"JOURNAL\" tag is hit", "# (ONLY immediately afterwards, however)", "# Sitting by themselves means they do not have", "# an associated TITLE tag, and should be MISC", "elif", "tag_type", "==", "\"SER\"", ":", "# This tag is a SERIES tag; Since it was not preceeded by a TITLE", "# tag, it is useless - strip the tag and put it into miscellaneous:", "(", "cur_misc_txt", ",", "processed_line", ")", "=", "convert_unusable_tag_to_misc", "(", "processed_line", ",", "cur_misc_txt", ",", "tag_match_end", ",", "CFG_REFEXTRACT_MARKER_CLOSING_SERIES", ")", "identified_citation_element", "=", "None", "elif", "tag_type", "==", "\"VOL\"", ":", "# This tag is a VOLUME tag; Since it was not preceeded by a TITLE", "# tag, it is useless - strip the tag and put it into miscellaneous:", "(", "cur_misc_txt", ",", "processed_line", ")", "=", "convert_unusable_tag_to_misc", "(", "processed_line", ",", "cur_misc_txt", ",", "tag_match_end", ",", "CFG_REFEXTRACT_MARKER_CLOSING_VOLUME", ")", "identified_citation_element", "=", "None", "elif", "tag_type", "==", "\"YR\"", ":", "# This tag is a YEAR tag; Since it's not preceeded by TITLE and", "# VOLUME tags, it is useless - strip the tag and put the contents", "# into miscellaneous:", "(", "cur_misc_txt", ",", "processed_line", ")", "=", "convert_unusable_tag_to_misc", "(", "processed_line", ",", "cur_misc_txt", ",", "tag_match_end", ",", "CFG_REFEXTRACT_MARKER_CLOSING_YEAR", ")", "identified_citation_element", "=", "None", "elif", "tag_type", "==", "\"PG\"", ":", "# This tag is a PAGE tag; Since it's not preceeded by TITLE,", "# VOLUME and YEAR tags, it is useless - strip the tag and put the", "# contents into miscellaneous:", "(", "cur_misc_txt", ",", "processed_line", ")", "=", "convert_unusable_tag_to_misc", "(", "processed_line", ",", "cur_misc_txt", ",", "tag_match_end", ",", "CFG_REFEXTRACT_MARKER_CLOSING_PAGE", ")", "identified_citation_element", "=", "None", "elif", "tag_type", "==", "\"QUOTED\"", ":", "identified_citation_element", ",", "processed_line", ",", "cur_misc_txt", "=", "map_tag_to_subfield", "(", "tag_type", ",", "processed_line", "[", 
"tag_match_end", ":", "]", ",", "cur_misc_txt", ",", "'title'", ")", "elif", "tag_type", "==", "\"ISBN\"", ":", "identified_citation_element", ",", "processed_line", ",", "cur_misc_txt", "=", "map_tag_to_subfield", "(", "tag_type", ",", "processed_line", "[", "tag_match_end", ":", "]", ",", "cur_misc_txt", ",", "tag_type", ")", "elif", "tag_type", "==", "\"PUBLISHER\"", ":", "identified_citation_element", ",", "processed_line", ",", "cur_misc_txt", "=", "map_tag_to_subfield", "(", "tag_type", ",", "processed_line", "[", "tag_match_end", ":", "]", ",", "cur_misc_txt", ",", "'publisher'", ")", "elif", "tag_type", "==", "\"COLLABORATION\"", ":", "identified_citation_element", ",", "processed_line", ",", "cur_misc_txt", "=", "map_tag_to_subfield", "(", "tag_type", ",", "processed_line", "[", "tag_match_end", ":", "]", ",", "cur_misc_txt", ",", "'collaboration'", ")", "if", "identified_citation_element", ":", "# Append the found tagged data and current misc text", "citation_elements", ".", "append", "(", "identified_citation_element", ")", "identified_citation_element", "=", "None", "# Look for the next tag in the processed line:", "tag_match", "=", "re_tagged_citation", ".", "search", "(", "processed_line", ")", "# place any remaining miscellaneous text into the", "# appropriate MARC XML fields:", "cur_misc_txt", "+=", "processed_line", "# This MISC element will hold the entire citation in the event", "# that no tags were found.", "if", "len", "(", "cur_misc_txt", ".", "strip", "(", "\" .;,\"", ")", ")", ">", "0", ":", "# Increment the stats counters:", "count_misc", "+=", "1", "identified_citation_element", "=", "{", "'type'", ":", "\"MISC\"", ",", "'misc_txt'", ":", "cur_misc_txt", ",", "}", "citation_elements", ".", "append", "(", "identified_citation_element", ")", "return", "(", "citation_elements", ",", "line_marker", ",", "{", "'misc'", ":", "count_misc", ",", "'title'", ":", "count_title", ",", "'reportnum'", ":", "count_reportnum", ",", "'url'", ":", "count_url", ",", "'doi'", ":", "count_doi", ",", "'auth_group'", ":", "count_auth_group", "}", ")" ]
Given a single tagged reference line, convert it to its MARC-XML representation. Try to find all tags and extract their contents and their types into corresponding dictionary elements. Append each dictionary tag representation onto a list, which is given to 'build_formatted_xml_citation()' where the correct xml output will be generated. This method is dumb, with very few heuristics. It simply looks for tags, and makes dictionaries from the data it finds in a tagged reference line. @param line_marker: (string) The line marker for this single reference line (e.g. [19]) @param line: (string) The tagged reference line. @param identified_dois: (list) a list of dois which were found in this line. The ordering of dois corresponds to the ordering of tags in the line, reading from left to right. @param identified_urls: (list) a list of urls which were found in this line. The ordering of urls corresponds to the ordering of tags in the line, reading from left to right. @param reference_format: which format to use for references, roughly "<title> <volume> <page>" or "<title>,<volume>,<page>" @return xml_line: (string) the MARC-XML representation of the tagged reference line @return count_*: (integer) the number of * (pieces of info) found in the reference line.
[ "Given", "a", "single", "tagged", "reference", "line", "convert", "it", "to", "its", "MARC", "-", "XML", "representation", ".", "Try", "to", "find", "all", "tags", "and", "extract", "their", "contents", "and", "their", "types", "into", "corresponding", "dictionary", "elements", ".", "Append", "each", "dictionary", "tag", "representation", "onto", "a", "list", "which", "is", "given", "to", "build_formatted_xml_citation", "()", "where", "the", "correct", "xml", "output", "will", "be", "generated", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L910-L1289
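A minimal sketch of the tag-scanning idea used by the function above, with a hand-rolled pattern standing in for the library's re_tagged_citation (an assumption for the demo; the real regex and tag set are richer):

import re

# Demo-only pattern: the real re_tagged_citation covers more tag shapes.
re_tag = re.compile(r'<cds\.(?P<type>[A-Z]+)>(?P<content>.*?)</cds\.(?P=type)>')

line = u'see <cds.JOURNAL>Phys. Rev.</cds.JOURNAL> <cds.VOL>12</cds.VOL> (1999)'
elements = []
pos = 0
for m in re_tag.finditer(line):
    elements.append({'type': m.group('type'),
                     'misc_txt': line[pos:m.start()].strip(),
                     'content': m.group('content')})
    pos = m.end()
print(elements)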
inspirehep/refextract
refextract/references/engine.py
map_tag_to_subfield
def map_tag_to_subfield(tag_type, line, cur_misc_txt, dest): """Create a new reference element""" closing_tag = '</cds.%s>' % tag_type # extract the institutional report-number from the line: idx_closing_tag = line.find(closing_tag) # Sanity check - did we find a closing tag? if idx_closing_tag == -1: # no closing </cds.TAG> tag found - strip the opening tag and move past this # recognised reportnumber as it is unreliable: identified_citation_element = None line = line[len('<cds.%s>' % tag_type):] else: tag_content = line[:idx_closing_tag] identified_citation_element = {'type': tag_type, 'misc_txt': cur_misc_txt, dest: tag_content} ending_tag_pos = idx_closing_tag + len(closing_tag) line = line[ending_tag_pos:] cur_misc_txt = u"" return identified_citation_element, line, cur_misc_txt
python
def map_tag_to_subfield(tag_type, line, cur_misc_txt, dest): closing_tag = '</cds.%s>' % tag_type idx_closing_tag = line.find(closing_tag) if idx_closing_tag == -1: identified_citation_element = None line = line[len('<cds.%s>' % tag_type):] else: tag_content = line[:idx_closing_tag] identified_citation_element = {'type': tag_type, 'misc_txt': cur_misc_txt, dest: tag_content} ending_tag_pos = idx_closing_tag + len(closing_tag) line = line[ending_tag_pos:] cur_misc_txt = u"" return identified_citation_element, line, cur_misc_txt
[ "def", "map_tag_to_subfield", "(", "tag_type", ",", "line", ",", "cur_misc_txt", ",", "dest", ")", ":", "closing_tag", "=", "'</cds.%s>'", "%", "tag_type", "# extract the institutional report-number from the line:", "idx_closing_tag", "=", "line", ".", "find", "(", "closing_tag", ")", "# Sanity check - did we find a closing tag?", "if", "idx_closing_tag", "==", "-", "1", ":", "# no closing </cds.TAG> tag found - strip the opening tag and move past this", "# recognised reportnumber as it is unreliable:", "identified_citation_element", "=", "None", "line", "=", "line", "[", "len", "(", "'<cds.%s>'", "%", "tag_type", ")", ":", "]", "else", ":", "tag_content", "=", "line", "[", ":", "idx_closing_tag", "]", "identified_citation_element", "=", "{", "'type'", ":", "tag_type", ",", "'misc_txt'", ":", "cur_misc_txt", ",", "dest", ":", "tag_content", "}", "ending_tag_pos", "=", "idx_closing_tag", "+", "len", "(", "closing_tag", ")", "line", "=", "line", "[", "ending_tag_pos", ":", "]", "cur_misc_txt", "=", "u\"\"", "return", "identified_citation_element", ",", "line", ",", "cur_misc_txt" ]
Create a new reference element
[ "Create", "a", "new", "reference", "element" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1292-L1312
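A usage sketch for map_tag_to_subfield, assuming refextract is importable; note the caller has already consumed the opening <cds.ISBN> tag, so the line passed in starts with the tag content (the sample values are made up):

from refextract.references.engine import map_tag_to_subfield

element, rest, misc = map_tag_to_subfield(
    'ISBN', u'978-0-201-61622-4</cds.ISBN>, 2nd ed.', u'see also ', 'ISBN')
print(element)  # -> {'type': 'ISBN', 'misc_txt': u'see also ', 'ISBN': u'978-0-201-61622-4'}
print(rest)     # -> u', 2nd ed.' (parsing continues from here)
print(misc)     # -> u'' (the misc text was consumed into the element)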
inspirehep/refextract
refextract/references/engine.py
convert_unusable_tag_to_misc
def convert_unusable_tag_to_misc(line, misc_text, tag_match_end, closing_tag): """Function to remove an unwanted, tagged, citation item from a reference line. The tagged item itself is put into the miscellaneous text variable; the data up to the closing tag is then trimmed from the beginning of the working line. For example, the following working line: Example, AN. Testing software; <cds.YR>(2001)</cds.YR>, CERN, Geneva. ...would be trimmed down to: , CERN, Geneva. ...And the Miscellaneous text taken from the start of the line would be: Example, AN. Testing software; (2001) ...(assuming that the details of <cds.YR> and </cds.YR> were passed to the function). @param line: (string) - the reference line. @param misc_text: (string) - the variable containing the miscellaneous text recorded so far. @param tag_match_end: (integer) - the index of the end of the opening tag in the line. @param closing_tag: (string) - the closing tag to look for in the line (e.g. </cds.YR>). @return: (tuple) - containing misc_text (string) and line (string) """ # extract the tagged information: idx_closing_tag = line.find(closing_tag, tag_match_end) # Sanity check - did we find a closing tag? if idx_closing_tag == -1: # no closing tag found - strip the opening tag and move past this # recognised item as it is unusable: line = line[tag_match_end:] else: # closing tag was found misc_text += line[tag_match_end:idx_closing_tag] # now trim the matched item and its tags from the start of the line: line = line[idx_closing_tag + len(closing_tag):] return (misc_text, line)
python
def convert_unusable_tag_to_misc(line, misc_text, tag_match_end, closing_tag): idx_closing_tag = line.find(closing_tag, tag_match_end) if idx_closing_tag == -1: line = line[tag_match_end:] else: misc_text += line[tag_match_end:idx_closing_tag] line = line[idx_closing_tag + len(closing_tag):] return (misc_text, line)
[ "def", "convert_unusable_tag_to_misc", "(", "line", ",", "misc_text", ",", "tag_match_end", ",", "closing_tag", ")", ":", "# extract the tagged information:", "idx_closing_tag", "=", "line", ".", "find", "(", "closing_tag", ",", "tag_match_end", ")", "# Sanity check - did we find a closing tag?", "if", "idx_closing_tag", "==", "-", "1", ":", "# no closing tag found - strip the opening tag and move past this", "# recognised item as it is unusable:", "line", "=", "line", "[", "tag_match_end", ":", "]", "else", ":", "# closing tag was found", "misc_text", "+=", "line", "[", "tag_match_end", ":", "idx_closing_tag", "]", "# now trim the matched item and its tags from the start of the line:", "line", "=", "line", "[", "idx_closing_tag", "+", "len", "(", "closing_tag", ")", ":", "]", "return", "(", "misc_text", ",", "line", ")" ]
Function to remove an unwanted, tagged, citation item from a reference line. The tagged item itself is put into the miscellaneous text variable; the data up to the closing tag is then trimmed from the beginning of the working line. For example, the following working line: Example, AN. Testing software; <cds.YR>(2001)</cds.YR>, CERN, Geneva. ...would be trimmed down to: , CERN, Geneva. ...And the Miscellaneous text taken from the start of the line would be: Example, AN. Testing software; (2001) ...(assuming that the details of <cds.YR> and </cds.YR> were passed to the function). @param line: (string) - the reference line. @param misc_text: (string) - the variable containing the miscellaneous text recorded so far. @param tag_match_end: (integer) - the index of the end of the opening tag in the line. @param closing_tag: (string) - the closing tag to look for in the line (e.g. </cds.YR>). @return: (tuple) - containing misc_text (string) and line (string)
[ "Function", "to", "remove", "an", "unwanted", "tagged", "citation", "item", "from", "a", "reference", "line", ".", "The", "tagged", "item", "itself", "is", "put", "into", "the", "miscellaneous", "text", "variable", ";", "the", "data", "up", "to", "the", "closing", "tag", "is", "then", "trimmed", "from", "the", "beginning", "of", "the", "working", "line", ".", "For", "example", "the", "following", "working", "line", ":", "Example", "AN", ".", "Testing", "software", ";", "<cds", ".", "YR", ">", "(", "2001", ")", "<", "/", "cds", ".", "YR", ">", "CERN", "Geneva", ".", "...", "would", "be", "trimmed", "down", "to", ":", "CERN", "Geneva", ".", "...", "And", "the", "Miscellaneous", "text", "taken", "from", "the", "start", "of", "the", "line", "would", "be", ":", "Example", "AN", ".", "Testing", "software", ";", "(", "2001", ")", "...", "(", "assuming", "that", "the", "details", "of", "<cds", ".", "YR", ">", "and", "<", "/", "cds", ".", "YR", ">", "were", "passed", "to", "the", "function", ")", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1315-L1352
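The docstring's own example, run through the function (assuming refextract is importable):

from refextract.references.engine import convert_unusable_tag_to_misc

line = u'Example, AN. Testing software; <cds.YR>(2001)</cds.YR>, CERN, Geneva.'
open_tag_end = line.find(u'<cds.YR>') + len(u'<cds.YR>')
misc, rest = convert_unusable_tag_to_misc(
    line, u'Example, AN. Testing software; ', open_tag_end, u'</cds.YR>')
print(misc)  # -> u'Example, AN. Testing software; (2001)'
print(rest)  # -> u', CERN, Geneva.'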
inspirehep/refextract
refextract/references/engine.py
remove_leading_garbage_lines_from_reference_section
def remove_leading_garbage_lines_from_reference_section(ref_sectn): """Sometimes, the first lines of the extracted references are completely blank or email addresses. These must be removed as they are not references. @param ref_sectn: (list) of strings - the reference section lines @return: (list) of strings - the reference section without leading blank lines or email addresses. """ p_email = re.compile(ur'^\s*e\-?mail', re.UNICODE) while ref_sectn and (ref_sectn[0].isspace() or p_email.match(ref_sectn[0])): ref_sectn.pop(0) return ref_sectn
python
def remove_leading_garbage_lines_from_reference_section(ref_sectn): p_email = re.compile(ur'^\s*e\-?mail', re.UNICODE) while ref_sectn and (ref_sectn[0].isspace() or p_email.match(ref_sectn[0])): ref_sectn.pop(0) return ref_sectn
[ "def", "remove_leading_garbage_lines_from_reference_section", "(", "ref_sectn", ")", ":", "p_email", "=", "re", ".", "compile", "(", "ur'^\\s*e\\-?mail'", ",", "re", ".", "UNICODE", ")", "while", "ref_sectn", "and", "(", "ref_sectn", "[", "0", "]", ".", "isspace", "(", ")", "or", "p_email", ".", "match", "(", "ref_sectn", "[", "0", "]", ")", ")", ":", "ref_sectn", ".", "pop", "(", "0", ")", "return", "ref_sectn" ]
Sometimes, the first lines of the extracted references are completely blank or email addresses. These must be removed as they are not references. @param ref_sectn: (list) of strings - the reference section lines @return: (list) of strings - the reference section without leading blank lines or email addresses.
[ "Sometimes", "the", "first", "lines", "of", "the", "extracted", "references", "are", "completely", "blank", "or", "email", "addresses", ".", "These", "must", "be", "removed", "as", "they", "are", "not", "references", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1364-L1375
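A quick check of the stripping behaviour (assuming refextract is importable); note the e-mail pattern is compiled without IGNORECASE, so only lowercase forms are caught:

from refextract.references.engine import remove_leading_garbage_lines_from_reference_section

lines = [u'   ', u'e-mail: author@cern.ch', u'[1] D. Knuth, ...']
print(remove_leading_garbage_lines_from_reference_section(lines))
# -> [u'[1] D. Knuth, ...']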
inspirehep/refextract
refextract/references/engine.py
get_plaintext_document_body
def get_plaintext_document_body(fpath, keep_layout=False): """Given a file-path to a full-text, return a list of unicode strings whereby each string is a line of the fulltext. In the case of a plain-text document, this simply means reading the contents in from the file. In the case of a PDF however, this means converting the document to plaintext. It raises UnknownDocumentTypeError if the document is not a PDF or plain text. @param fpath: (string) - the path to the fulltext file @return: (list) of strings - each string being a line in the document. """ textbody = [] mime_type = magic.from_file(fpath, mime=True) if mime_type == "text/plain": with open(fpath, "r") as f: textbody = [line.decode("utf-8") for line in f.readlines()] elif mime_type == "application/pdf": textbody = convert_PDF_to_plaintext(fpath, keep_layout) else: raise UnknownDocumentTypeError(mime_type) return textbody
python
def get_plaintext_document_body(fpath, keep_layout=False): textbody = [] mime_type = magic.from_file(fpath, mime=True) if mime_type == "text/plain": with open(fpath, "r") as f: textbody = [line.decode("utf-8") for line in f.readlines()] elif mime_type == "application/pdf": textbody = convert_PDF_to_plaintext(fpath, keep_layout) else: raise UnknownDocumentTypeError(mime_type) return textbody
[ "def", "get_plaintext_document_body", "(", "fpath", ",", "keep_layout", "=", "False", ")", ":", "textbody", "=", "[", "]", "mime_type", "=", "magic", ".", "from_file", "(", "fpath", ",", "mime", "=", "True", ")", "if", "mime_type", "==", "\"text/plain\"", ":", "with", "open", "(", "fpath", ",", "\"r\"", ")", "as", "f", ":", "textbody", "=", "[", "line", ".", "decode", "(", "\"utf-8\"", ")", "for", "line", "in", "f", ".", "readlines", "(", ")", "]", "elif", "mime_type", "==", "\"application/pdf\"", ":", "textbody", "=", "convert_PDF_to_plaintext", "(", "fpath", ",", "keep_layout", ")", "else", ":", "raise", "UnknownDocumentTypeError", "(", "mime_type", ")", "return", "textbody" ]
Given a file-path to a full-text, return a list of unicode strings whereby each string is a line of the fulltext. In the case of a plain-text document, this simply means reading the contents in from the file. In the case of a PDF however, this means converting the document to plaintext. It raises UnknownDocumentTypeError if the document is not a PDF or plain text. @param fpath: (string) - the path to the fulltext file @return: (list) of strings - each string being a line in the document.
[ "Given", "a", "file", "-", "path", "to", "a", "full", "-", "text", "return", "a", "list", "of", "unicode", "strings", "whereby", "each", "string", "is", "a", "line", "of", "the", "fulltext", ".", "In", "the", "case", "of", "a", "plain", "-", "text", "document", "this", "simply", "means", "reading", "the", "contents", "in", "from", "the", "file", ".", "In", "the", "case", "of", "a", "PDF", "however", "this", "means", "converting", "the", "document", "to", "plaintext", ".", "It", "raises", "UnknownDocumentTypeError", "if", "the", "document", "is", "not", "a", "PDF", "or", "plain", "text", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1384-L1408
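The same mime-type dispatch in isolation, assuming the python-magic package is installed and using a hypothetical file path:

import magic

fpath = '/tmp/paper.pdf'  # hypothetical path for the demo
mime_type = magic.from_file(fpath, mime=True)
if mime_type == 'text/plain':
    print('read the file directly, decoding each line as UTF-8')
elif mime_type == 'application/pdf':
    print('convert with pdftotext first')
else:
    print('unsupported type: %s' % mime_type)  # the engine raises UnknownDocumentTypeError here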
inspirehep/refextract
refextract/references/engine.py
parse_references
def parse_references(reference_lines, recid=None, override_kbs_files=None, reference_format=u"{title} {volume} ({year}) {page}", linker_callback=None): """Parse a list of references Given a list of raw reference lines (list of strings), output a list of dictionaries containing the parsed references """ # RefExtract knowledge bases kbs = get_kbs(custom_kbs_files=override_kbs_files) # Identify journal titles, report numbers, URLs, DOIs, and authors... processed_references, counts, dummy_bad_titles_count = \ parse_references_elements(reference_lines, kbs, linker_callback) return (build_references(processed_references, reference_format), build_stats(counts))
python
def parse_references(reference_lines, recid=None, override_kbs_files=None, reference_format=u"{title} {volume} ({year}) {page}", linker_callback=None): kbs = get_kbs(custom_kbs_files=override_kbs_files) processed_references, counts, dummy_bad_titles_count = \ parse_references_elements(reference_lines, kbs, linker_callback) return (build_references(processed_references, reference_format), build_stats(counts))
[ "def", "parse_references", "(", "reference_lines", ",", "recid", "=", "None", ",", "override_kbs_files", "=", "None", ",", "reference_format", "=", "u\"{title} {volume} ({year}) {page}\"", ",", "linker_callback", "=", "None", ")", ":", "# RefExtract knowledge bases", "kbs", "=", "get_kbs", "(", "custom_kbs_files", "=", "override_kbs_files", ")", "# Identify journal titles, report numbers, URLs, DOIs, and authors...", "processed_references", ",", "counts", ",", "dummy_bad_titles_count", "=", "parse_references_elements", "(", "reference_lines", ",", "kbs", ",", "linker_callback", ")", "return", "(", "build_references", "(", "processed_references", ",", "reference_format", ")", ",", "build_stats", "(", "counts", ")", ")" ]
Parse a list of references Given a list of raw reference lines (list of strings), output a list of dictionaries containing the parsed references
[ "Parse", "a", "list", "of", "references" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1411-L1428
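An end-to-end sketch, assuming refextract and its bundled knowledge bases are installed (the exact matches depend on the KB contents, so treat the output as illustrative):

from refextract.references.engine import parse_references

references, stats = parse_references(
    [u'[1] J. Maldacena, Adv. Theor. Math. Phys. 2 (1998) 231.'])
print(stats['title'])  # number of journal-title matches found
print(references)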
inspirehep/refextract
refextract/references/engine.py
build_stats
def build_stats(counts): """Return stats information from counts structure.""" stats = { 'status': 0, 'reportnum': counts['reportnum'], 'title': counts['title'], 'author': counts['auth_group'], 'url': counts['url'], 'doi': counts['doi'], 'misc': counts['misc'], } stats_str = "%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s" % stats stats["old_stats_str"] = stats_str stats["date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S") stats["version"] = version return stats
python
def build_stats(counts): stats = { 'status': 0, 'reportnum': counts['reportnum'], 'title': counts['title'], 'author': counts['auth_group'], 'url': counts['url'], 'doi': counts['doi'], 'misc': counts['misc'], } stats_str = "%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s" % stats stats["old_stats_str"] = stats_str stats["date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S") stats["version"] = version return stats
[ "def", "build_stats", "(", "counts", ")", ":", "stats", "=", "{", "'status'", ":", "0", ",", "'reportnum'", ":", "counts", "[", "'reportnum'", "]", ",", "'title'", ":", "counts", "[", "'title'", "]", ",", "'author'", ":", "counts", "[", "'auth_group'", "]", ",", "'url'", ":", "counts", "[", "'url'", "]", ",", "'doi'", ":", "counts", "[", "'doi'", "]", ",", "'misc'", ":", "counts", "[", "'misc'", "]", ",", "}", "stats_str", "=", "\"%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s\"", "%", "stats", "stats", "[", "\"old_stats_str\"", "]", "=", "stats_str", "stats", "[", "\"date\"", "]", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "stats", "[", "\"version\"", "]", "=", "version", "return", "stats" ]
Return stats information from counts structure.
[ "Return", "stats", "information", "from", "counts", "structure", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1431-L1446
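build_stats only reshapes the counters dictionary, so it is easy to exercise directly (assuming refextract is importable):

from refextract.references.engine import build_stats

counts = {'reportnum': 1, 'title': 2, 'auth_group': 1,
          'url': 0, 'doi': 1, 'misc': 3}
print(build_stats(counts)['old_stats_str'])  # -> '0-1-2-1-0-1-3'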
inspirehep/refextract
refextract/documents/pdf.py
replace_undesirable_characters
def replace_undesirable_characters(line): """ Replace certain bad characters in a text line. @param line: (string) the text line in which bad characters are to be replaced. @return: (string) the text line after the bad characters have been replaced. """ # These are separate because we want a particular order for bad_string, replacement in UNDESIRABLE_STRING_REPLACEMENTS: line = line.replace(bad_string, replacement) for bad_char, replacement in iteritems(UNDESIRABLE_CHAR_REPLACEMENTS): line = line.replace(bad_char, replacement) return line
python
def replace_undesirable_characters(line): for bad_string, replacement in UNDESIRABLE_STRING_REPLACEMENTS: line = line.replace(bad_string, replacement) for bad_char, replacement in iteritems(UNDESIRABLE_CHAR_REPLACEMENTS): line = line.replace(bad_char, replacement) return line
[ "def", "replace_undesirable_characters", "(", "line", ")", ":", "# These are separate because we want a particular order", "for", "bad_string", ",", "replacement", "in", "UNDESIRABLE_STRING_REPLACEMENTS", ":", "line", "=", "line", ".", "replace", "(", "bad_string", ",", "replacement", ")", "for", "bad_char", ",", "replacement", "in", "iteritems", "(", "UNDESIRABLE_CHAR_REPLACEMENTS", ")", ":", "line", "=", "line", ".", "replace", "(", "bad_char", ",", "replacement", ")", "return", "line" ]
Replace certain bad characters in a text line. @param line: (string) the text line in which bad characters are to be replaced. @return: (string) the text line after the bad characters have been replaced.
[ "Replace", "certain", "bad", "characters", "in", "a", "text", "line", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/pdf.py#L434-L449
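The two-pass design matters: the ordered multi-character replacements must run before the unordered single-character map. A self-contained sketch with made-up tables (the real UNDESIRABLE_* constants live in refextract.documents.pdf):

string_replacements = [(u'\ufb01', u'fi'), (u'\ufb02', u'fl')]  # ordered pairs first
char_replacements = {u'\u00a0': u' '}                           # then single characters

def clean(line):
    for bad, good in string_replacements:
        line = line.replace(bad, good)
    for bad, good in char_replacements.items():
        line = line.replace(bad, good)
    return line

print(clean(u'\ufb01eld\u00a0theory'))  # -> u'field theory'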
inspirehep/refextract
refextract/documents/pdf.py
convert_PDF_to_plaintext
def convert_PDF_to_plaintext(fpath, keep_layout=False): """ Convert PDF to txt using pdftotext Take the path to a PDF file and run pdftotext for this file, capturing the output. @param fpath: (string) path to the PDF file @return: (list) of unicode strings (contents of the PDF file translated into plaintext; each string is a line in the document.) """ if not os.path.isfile(CFG_PATH_PDFTOTEXT): raise IOError('Missing pdftotext executable') if keep_layout: layout_option = "-layout" else: layout_option = "-raw" doclines = [] # Pattern to check for lines with a leading page-break character. # If this pattern is matched, we want to split the page-break into # its own line because we rely upon this for trying to strip headers # and footers, and for some other pattern matching. p_break_in_line = re.compile(ur'^\s*\f(.+)$', re.UNICODE) # build pdftotext command: cmd_pdftotext = [CFG_PATH_PDFTOTEXT, layout_option, "-q", "-enc", "UTF-8", fpath, "-"] LOGGER.debug(u"%s", ' '.join(cmd_pdftotext)) # open pipe to pdftotext: pipe_pdftotext = subprocess.Popen(cmd_pdftotext, stdout=subprocess.PIPE) # read back results: for docline in pipe_pdftotext.stdout: unicodeline = docline.decode("utf-8") # Check for a page-break in this line: m_break_in_line = p_break_in_line.match(unicodeline) if m_break_in_line is None: # There was no page-break in this line. Just add the line: doclines.append(unicodeline) else: # If there was a page-break character in the same line as some # text, split it out into its own line so that we can later # try to find headers and footers: doclines.append(u"\f") doclines.append(m_break_in_line.group(1)) LOGGER.debug(u"convert_PDF_to_plaintext found: %s lines of text", len(doclines)) return doclines
python
def convert_PDF_to_plaintext(fpath, keep_layout=False): if not os.path.isfile(CFG_PATH_PDFTOTEXT): raise IOError('Missing pdftotext executable') if keep_layout: layout_option = "-layout" else: layout_option = "-raw" doclines = [] p_break_in_line = re.compile(ur'^\s*\f(.+)$', re.UNICODE) cmd_pdftotext = [CFG_PATH_PDFTOTEXT, layout_option, "-q", "-enc", "UTF-8", fpath, "-"] LOGGER.debug(u"%s", ' '.join(cmd_pdftotext)) pipe_pdftotext = subprocess.Popen(cmd_pdftotext, stdout=subprocess.PIPE) for docline in pipe_pdftotext.stdout: unicodeline = docline.decode("utf-8") m_break_in_line = p_break_in_line.match(unicodeline) if m_break_in_line is None: doclines.append(unicodeline) else: doclines.append(u"\f") doclines.append(m_break_in_line.group(1)) LOGGER.debug(u"convert_PDF_to_plaintext found: %s lines of text", len(doclines)) return doclines
[ "def", "convert_PDF_to_plaintext", "(", "fpath", ",", "keep_layout", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "CFG_PATH_PDFTOTEXT", ")", ":", "raise", "IOError", "(", "'Missing pdftotext executable'", ")", "if", "keep_layout", ":", "layout_option", "=", "\"-layout\"", "else", ":", "layout_option", "=", "\"-raw\"", "doclines", "=", "[", "]", "# Pattern to check for lines with a leading page-break character.", "# If this pattern is matched, we want to split the page-break into", "# its own line because we rely upon this for trying to strip headers", "# and footers, and for some other pattern matching.", "p_break_in_line", "=", "re", ".", "compile", "(", "ur'^\\s*\\f(.+)$'", ",", "re", ".", "UNICODE", ")", "# build pdftotext command:", "cmd_pdftotext", "=", "[", "CFG_PATH_PDFTOTEXT", ",", "layout_option", ",", "\"-q\"", ",", "\"-enc\"", ",", "\"UTF-8\"", ",", "fpath", ",", "\"-\"", "]", "LOGGER", ".", "debug", "(", "u\"%s\"", ",", "' '", ".", "join", "(", "cmd_pdftotext", ")", ")", "# open pipe to pdftotext:", "pipe_pdftotext", "=", "subprocess", ".", "Popen", "(", "cmd_pdftotext", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "# read back results:", "for", "docline", "in", "pipe_pdftotext", ".", "stdout", ":", "unicodeline", "=", "docline", ".", "decode", "(", "\"utf-8\"", ")", "# Check for a page-break in this line:", "m_break_in_line", "=", "p_break_in_line", ".", "match", "(", "unicodeline", ")", "if", "m_break_in_line", "is", "None", ":", "# There was no page-break in this line. Just add the line:", "doclines", ".", "append", "(", "unicodeline", ")", "else", ":", "# If there was a page-break character in the same line as some", "# text, split it out into its own line so that we can later", "# try to find headers and footers:", "doclines", ".", "append", "(", "u\"\\f\"", ")", "doclines", ".", "append", "(", "m_break_in_line", ".", "group", "(", "1", ")", ")", "LOGGER", ".", "debug", "(", "u\"convert_PDF_to_plaintext found: %s lines of text\"", ",", "len", "(", "doclines", ")", ")", "return", "doclines" ]
Convert PDF to txt using pdftotext Take the path to a PDF file and run pdftotext for this file, capturing the output. @param fpath: (string) path to the PDF file @return: (list) of unicode strings (contents of the PDF file translated into plaintext; each string is a line in the document.)
[ "Convert", "PDF", "to", "txt", "using", "pdftotext" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/pdf.py#L452-L499
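The page-break isolation step on its own; the lone form-feed lines produced here are what the later header/footer stripping relies on:

import re

p_break_in_line = re.compile(r'^\s*\f(.+)$', re.UNICODE)

doclines = []
for line in [u'last line of page one', u'\ffirst line of page two']:
    m = p_break_in_line.match(line)
    if m is None:
        doclines.append(line)
    else:
        doclines.append(u'\f')        # the page break gets its own line
        doclines.append(m.group(1))   # followed by the text that shared it
print(doclines)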
inspirehep/refextract
refextract/authors/regexs.py
get_author_affiliation_numeration_str
def get_author_affiliation_numeration_str(punct=None): """The numeration which can be applied to author names. Numeration is sometimes found next to authors of papers. @return: (string), which can be compiled into a regex; identifies numeration next to an author name. """ # FIXME cater for start or end numeration (ie two puncs) # Number to look for, either general or specific re_number = r'(?:\d\d?)' re_chained_numbers = r"(?:(?:[,;]\s*%s\.?\s*))*" % re_number # Punctuation surrounding the number, either general or specific again if punct is None: re_punct = r"(?:[\{\(\[]?)" else: re_punct = re.escape(punct) # Generic number finder (MUST NOT INCLUDE NAMED GROUPS!!!) numeration_str = r""" (?:\s*(%(punct)s)\s* ## Left numeration punctuation (%(num)s\s* ## Core numeration item, either specific or generic %(num_chain)s ## Extra numeration, either generic or empty ) (?:(%(punct)s)) ## Right numeration punctuation )""" % {'num': re_number, 'num_chain': re_chained_numbers, 'punct': re_punct} return numeration_str
python
def get_author_affiliation_numeration_str(punct=None):
    re_number = r'(?:\d\d?)'
    re_chained_numbers = r"(?:(?:[,;]\s*%s\.?\s*))*" % re_number
    if punct is None:
        re_punct = r"(?:[\{\(\[]?)"
    else:
        re_punct = re.escape(punct)
    numeration_str = r"""
    (?:\s*(%(punct)s)\s*       ## Left numeration punctuation
        (%(num)s\s*            ## Core numeration item, either specific or generic
         %(num_chain)s         ## Extra numeration, either generic or empty
        )
        (?:(%(punct)s))        ## Right numeration punctuation
    )""" % {'num': re_number,
            'num_chain': re_chained_numbers,
            'punct': re_punct}
    return numeration_str
[ "def", "get_author_affiliation_numeration_str", "(", "punct", "=", "None", ")", ":", "# FIXME cater for start or end numeration (ie two puncs)", "# Number to look for, either general or specific", "re_number", "=", "r'(?:\\d\\d?)'", "re_chained_numbers", "=", "r\"(?:(?:[,;]\\s*%s\\.?\\s*))*\"", "%", "re_number", "# Punctuation surrounding the number, either general or specific again", "if", "punct", "is", "None", ":", "re_punct", "=", "r\"(?:[\\{\\(\\[]?)\"", "else", ":", "re_punct", "=", "re", ".", "escape", "(", "punct", ")", "# Generic number finder (MUST NOT INCLUDE NAMED GROUPS!!!)", "numeration_str", "=", "r\"\"\"\n (?:\\s*(%(punct)s)\\s* ## Left numeration punctuation\n (%(num)s\\s* ## Core numeration item, either specific or generic\n %(num_chain)s ## Extra numeration, either generic or empty\n )\n (?:(%(punct)s)) ## Right numeration punctuation\n )\"\"\"", "%", "{", "'num'", ":", "re_number", ",", "'num_chain'", ":", "re_chained_numbers", ",", "'punct'", ":", "re_punct", "}", "return", "numeration_str" ]
The numeration which can be applied to author names. Numeration is sometimes found next to authors of papers. @return: (string), which can be compiled into a regex; identifies numeration next to an author name.
[ "The", "numeration", "which", "can", "be", "applied", "to", "author", "names", ".", "Numeration", "is", "sometimes", "found", "next", "to", "authors", "of", "papers", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/authors/regexs.py#L36-L64
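The returned fragment uses verbose-mode syntax (## comments, free whitespace), so it must be compiled with re.VERBOSE; a quick standalone check, assuming refextract is importable:

import re
from refextract.authors.regexs import get_author_affiliation_numeration_str

re_num = re.compile(get_author_affiliation_numeration_str(), re.VERBOSE | re.UNICODE)
print(bool(re_num.search(u'J. Smith (1), K. Jones (2)')))  # -> True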
inspirehep/refextract
refextract/authors/regexs.py
get_initial_surname_author_pattern
def get_initial_surname_author_pattern(incl_numeration=False): """Match an author name of the form: 'initial(s) surname' Return a standard author, with a maximum of 6 initials, and a surname. The author pattern returned will match 'Initials Surname' formats only. The Initials MUST be uppercase, and MUST have at least a dot, hypen or apostrophe between them. @param incl_numeration: (boolean) Return an author pattern with optional numeration after authors. @return (string): The 'Initials Surname' author pattern.""" # Possible inclusion of superscript numeration at the end of author names # Will match the empty string if incl_numeration: append_num_re = get_author_affiliation_numeration_str() + '?' else: append_num_re = "" return ur""" (?: (?:%(uppercase_re)s\w{2,20}\s+)? ## Optionally a first name before the initials (?<!Volume\s) ## Initials (1-5) (cannot follow 'Volume\s') %(uppercase_re)s(?:\s*[.'’\s-]{1,3}\s*%(uppercase_re)s){0,4}[.\s-]{1,2}\s* ## separated by .,-,',etc. (?:%(uppercase_re)s\w{2,20}\s+)? ## Optionally a first name after the initials (?: (?!%(invalid_prefixes)s) ## Invalid prefixes to avoid \w{1,3}(?<!and)(?:(?:[’'`´-]\s?)|\s) ## The surname prefix: 1, 2 or 3 )? ## character prefixes before the surname (e.g. 'van','de') (?!%(invalid_surnames)s) ## Invalid surnames to avoid %(uppercase_re)s ## The surname, which must start with an upper case character (?:[rR]\.|\w{1,20}) ## handle Jr. (?:[\-’'`´][\w’']{1,20})? ## single hyphen allowed jan-el or Figueroa-O'Farrill [’']? ## Eventually an ending ' %(numeration)s ## A possible number to appear after an author name, used for author extraction (?: # Look for editor notation after the author group... \s*,?\s* # Eventually a coma/space %(ed)s )? )""" % { 'uppercase_re': get_uppercase_re(), 'invalid_prefixes': '|'.join(invalid_prefixes), 'invalid_surnames': '|'.join(invalid_surnames), 'ed': re_ed_notation, 'numeration': append_num_re, }
python
def get_initial_surname_author_pattern(incl_numeration=False):
    if incl_numeration:
        append_num_re = get_author_affiliation_numeration_str() + '?'
    else:
        append_num_re = ""
    return ur"""
    (?:
        (?:%(uppercase_re)s\w{2,20}\s+)?  ## Optionally a first name before the initials

        (?<!Volume\s)                     ## Initials (1-5) (cannot follow 'Volume\s')
        %(uppercase_re)s(?:\s*[.'’\s-]{1,3}\s*%(uppercase_re)s){0,4}[.\s-]{1,2}\s*  ## separated by .,-,',etc.

        (?:%(uppercase_re)s\w{2,20}\s+)?  ## Optionally a first name after the initials

        (?:
            (?!%(invalid_prefixes)s)              ## Invalid prefixes to avoid
            \w{1,3}(?<!and)(?:(?:[’'`´-]\s?)|\s)  ## The surname prefix: 1, 2 or 3
        )?                                        ## character prefixes before the surname (e.g. 'van','de')

        (?!%(invalid_surnames)s)                  ## Invalid surnames to avoid
        %(uppercase_re)s                          ## The surname, which must start with an upper case character
        (?:[rR]\.|\w{1,20})                       ## handle Jr.
        (?:[\-’'`´][\w’']{1,20})?                 ## single hyphen allowed jan-el or Figueroa-O'Farrill
        [’']?                                     ## Eventually an ending '

        %(numeration)s                            ## A possible number to appear after an author name, used for author extraction

        (?:                                       # Look for editor notation after the author group...
            \s*,?\s*                              # Eventually a coma/space
            %(ed)s
        )?
    )""" % {
        'uppercase_re': get_uppercase_re(),
        'invalid_prefixes': '|'.join(invalid_prefixes),
        'invalid_surnames': '|'.join(invalid_surnames),
        'ed': re_ed_notation,
        'numeration': append_num_re,
    }
[ "def", "get_initial_surname_author_pattern", "(", "incl_numeration", "=", "False", ")", ":", "# Possible inclusion of superscript numeration at the end of author names", "# Will match the empty string", "if", "incl_numeration", ":", "append_num_re", "=", "get_author_affiliation_numeration_str", "(", ")", "+", "'?'", "else", ":", "append_num_re", "=", "\"\"", "return", "ur\"\"\"\n (?:\n (?:%(uppercase_re)s\\w{2,20}\\s+)? ## Optionally a first name before the initials\n\n (?<!Volume\\s) ## Initials (1-5) (cannot follow 'Volume\\s')\n %(uppercase_re)s(?:\\s*[.'’\\s-]{1,3}\\s*%(uppercase_re)s){0,4}[.\\s-]{1,2}\\s* ## separated by .,-,',etc.\n\n (?:%(uppercase_re)s\\w{2,20}\\s+)? ## Optionally a first name after the initials\n\n (?:\n (?!%(invalid_prefixes)s) ## Invalid prefixes to avoid\n \\w{1,3}(?<!and)(?:(?:[’'`´-]\\s?)|\\s) ## The surname prefix: 1, 2 or 3\n )? ## character prefixes before the surname (e.g. 'van','de')\n\n (?!%(invalid_surnames)s) ## Invalid surnames to avoid\n %(uppercase_re)s ## The surname, which must start with an upper case character\n (?:[rR]\\.|\\w{1,20}) ## handle Jr.\n (?:[\\-’'`´][\\w’']{1,20})? ## single hyphen allowed jan-el or Figueroa-O'Farrill\n [’']? ## Eventually an ending '\n\n %(numeration)s ## A possible number to appear after an author name, used for author extraction\n\n (?: # Look for editor notation after the author group...\n \\s*,?\\s* # Eventually a coma/space\n %(ed)s\n )?\n )\"\"\"", "%", "{", "'uppercase_re'", ":", "get_uppercase_re", "(", ")", ",", "'invalid_prefixes'", ":", "'|'", ".", "join", "(", "invalid_prefixes", ")", ",", "'invalid_surnames'", ":", "'|'", ".", "join", "(", "invalid_surnames", ")", ",", "'ed'", ":", "re_ed_notation", ",", "'numeration'", ":", "append_num_re", ",", "}" ]
Match an author name of the form: 'initial(s) surname' Return a standard author, with a maximum of 6 initials, and a surname. The author pattern returned will match 'Initials Surname' formats only. The Initials MUST be uppercase, and MUST have at least a dot, hyphen or apostrophe between them. @param incl_numeration: (boolean) Return an author pattern with optional numeration after authors. @return (string): The 'Initials Surname' author pattern.
[ "Match", "an", "author", "name", "of", "the", "form", ":", "initial", "(", "s", ")", "surname" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/authors/regexs.py#L83-L130
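A sketch of the pattern in action, assuming refextract is importable (the pattern is verbose-mode, hence re.VERBOSE):

import re
from refextract.authors.regexs import get_initial_surname_author_pattern

re_auth = re.compile(get_initial_surname_author_pattern(), re.VERBOSE | re.UNICODE)
for name in [u'J. Bloggs', u'W.-H. Smith', u'no uppercase here']:
    print('%s -> %s' % (name, bool(re_auth.search(name))))
# the first two match; the all-lowercase string does not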
inspirehep/refextract
refextract/authors/regexs.py
get_surname_initial_author_pattern
def get_surname_initial_author_pattern(incl_numeration=False): """Match an author name of the form: 'surname initial(s)' This is sometimes the represention of the first author found inside an author group. This author pattern is only used to find a maximum of ONE author inside an author group. Authors of this form MUST have either a comma after the initials, or an 'and', which denotes the presence of other authors in the author group. @param incl_numeration: (boolean) Return an author pattern with optional numeration after authors. @return (string): The 'Surname Initials' author pattern.""" # Possible inclusion of superscript numeration at the end of author names # Will match the empty string if incl_numeration: append_num_re = get_author_affiliation_numeration_str() + '?' else: append_num_re = "" return ur""" (?: (?: (?!%(invalid_prefixes)s) ## Invalid prefixes to avoid \w{1,3}(?<!and)(?<!in)(?:(?:[’'`´-]\s?)|\s) )? ## The optional surname prefix: ## 1 or 2, 2-3 character prefixes before the surname (e.g. 'van','de') (?!%(invalid_surnames)s) ## Invalid surnames to avoid %(uppercase_re)s\w{2,20}(?:[\-’'`´]\w{2,20})? ## The surname, which must start with an upper case character (single hyphen allowed) \s*[,.\s]\s* ## The space between the surname and its initials (?<!Volume\s) ## Initials %(uppercase_re)s(?:\s*[.'’\s-]{1,2}\s*%(uppercase_re)s){0,4}\.{0,2} ## Either a comma or an 'and' MUST be present ... OR an end of line marker ## (maybe some space's between authors) ## Uses positive lookahead assertion %(numeration)s ## A possible number to appear after an author name, used for author extraction (?: # Look for editor notation after the author group... \s*,?\s* # Eventually a coma/space %(ed)s )? )""" % { 'uppercase_re': get_uppercase_re(), 'invalid_prefixes': '|'.join(invalid_prefixes), 'invalid_surnames': '|'.join(invalid_surnames), 'ed': re_ed_notation, 'numeration': append_num_re, }
python
def get_surname_initial_author_pattern(incl_numeration=False):
    if incl_numeration:
        append_num_re = get_author_affiliation_numeration_str() + '?'
    else:
        append_num_re = ""
    return ur"""
    (?:
        (?:
            (?!%(invalid_prefixes)s)                    ## Invalid prefixes to avoid
            \w{1,3}(?<!and)(?<!in)(?:(?:[’'`´-]\s?)|\s)
        )?                                              ## The optional surname prefix:
                                                        ## 1 or 2, 2-3 character prefixes before the surname (e.g. 'van','de')

        (?!%(invalid_surnames)s)                        ## Invalid surnames to avoid
        %(uppercase_re)s\w{2,20}(?:[\-’'`´]\w{2,20})?   ## The surname, which must start with an upper case character (single hyphen allowed)

        \s*[,.\s]\s*                                    ## The space between the surname and its initials

        (?<!Volume\s)                                   ## Initials
        %(uppercase_re)s(?:\s*[.'’\s-]{1,2}\s*%(uppercase_re)s){0,4}\.{0,2}

        ## Either a comma or an 'and' MUST be present ... OR an end of line marker
        ## (maybe some space's between authors)
        ## Uses positive lookahead assertion
        %(numeration)s                                  ## A possible number to appear after an author name, used for author extraction

        (?:                                             # Look for editor notation after the author group...
            \s*,?\s*                                    # Eventually a coma/space
            %(ed)s
        )?
    )""" % {
        'uppercase_re': get_uppercase_re(),
        'invalid_prefixes': '|'.join(invalid_prefixes),
        'invalid_surnames': '|'.join(invalid_surnames),
        'ed': re_ed_notation,
        'numeration': append_num_re,
    }
[ "def", "get_surname_initial_author_pattern", "(", "incl_numeration", "=", "False", ")", ":", "# Possible inclusion of superscript numeration at the end of author names", "# Will match the empty string", "if", "incl_numeration", ":", "append_num_re", "=", "get_author_affiliation_numeration_str", "(", ")", "+", "'?'", "else", ":", "append_num_re", "=", "\"\"", "return", "ur\"\"\"\n (?:\n (?:\n (?!%(invalid_prefixes)s) ## Invalid prefixes to avoid\n \\w{1,3}(?<!and)(?<!in)(?:(?:[’'`´-]\\s?)|\\s)\n )? ## The optional surname prefix:\n ## 1 or 2, 2-3 character prefixes before the surname (e.g. 'van','de')\n\n (?!%(invalid_surnames)s) ## Invalid surnames to avoid\n %(uppercase_re)s\\w{2,20}(?:[\\-’'`´]\\w{2,20})? ## The surname, which must start with an upper case character (single hyphen allowed)\n\n \\s*[,.\\s]\\s* ## The space between the surname and its initials\n\n (?<!Volume\\s) ## Initials\n %(uppercase_re)s(?:\\s*[.'’\\s-]{1,2}\\s*%(uppercase_re)s){0,4}\\.{0,2}\n\n ## Either a comma or an 'and' MUST be present ... OR an end of line marker\n ## (maybe some space's between authors)\n ## Uses positive lookahead assertion\n %(numeration)s ## A possible number to appear after an author name, used for author extraction\n\n (?: # Look for editor notation after the author group...\n \\s*,?\\s* # Eventually a coma/space\n %(ed)s\n )?\n )\"\"\"", "%", "{", "'uppercase_re'", ":", "get_uppercase_re", "(", ")", ",", "'invalid_prefixes'", ":", "'|'", ".", "join", "(", "invalid_prefixes", ")", ",", "'invalid_surnames'", ":", "'|'", ".", "join", "(", "invalid_surnames", ")", ",", "'ed'", ":", "re_ed_notation", ",", "'numeration'", ":", "append_num_re", ",", "}" ]
Match an author name of the form: 'surname initial(s)' This is sometimes the representation of the first author found inside an author group. This author pattern is only used to find a maximum of ONE author inside an author group. Authors of this form MUST have either a comma after the initials, or an 'and', which denotes the presence of other authors in the author group. @param incl_numeration: (boolean) Return an author pattern with optional numeration after authors. @return (string): The 'Surname Initials' author pattern.
[ "Match", "an", "author", "name", "of", "the", "form", ":", "surname", "initial", "(", "s", ")" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/authors/regexs.py#L133-L180
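And the mirror-image form, again compiled with re.VERBOSE (assuming refextract is importable):

import re
from refextract.authors.regexs import get_surname_initial_author_pattern

re_auth = re.compile(get_surname_initial_author_pattern(), re.VERBOSE | re.UNICODE)
print(bool(re_auth.search(u'Bloggs, J., and A. N. Other')))  # -> True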
inspirehep/refextract
refextract/authors/regexs.py
make_auth_regex_str
def make_auth_regex_str(etal, initial_surname_author=None, surname_initial_author=None): """ Returns a regular expression to be used to identify groups of author names in a citation. This method contains patterns for default authors, so no arguments are needed for the most reliable form of matching. The returned author pattern is capable of: 1. Identifying single authors, with at least one initial, of the form: 'Initial. [surname prefix...] Surname' 2. Identifying multiple authors, each with at least one initial, of the form: 'Initial. [surname prefix...] Surname, [and] [Initial. [surname prefix...] Surname ... ]' ***(Note that a full stop, hyphen or apostrophe after each initial is absolutely vital in identifying authors for both of these above methods. Initials must also be uppercase.)*** 3. Capture 'et al' statements at the end of author groups (allows for authors with et al to be processed differently from 'standard' authors) 4. Identifying a single author surname name positioned before the phrase 'et al', with no initials: 'Surname et al' 5. Identifying two author surname name positioned before the phrase 'et al', with no initials, but separated by 'and' or '&': 'Surname [and|&] Surname et al' 6. Identifying authors of the form: 'Surname Initials, Initials Surname [Initials Surname]...'. Some authors choose to represent the most important cited author (in a list of authors) by listing first their surname, and then their initials. Since this form has little distinguishing characteristics which could be used to create a reliable a pattern, at least one standard author must be present after it in order to improve the accuracy. 7. Capture editor notation, of which can take many forms e.g. 'eds. editors. edited by. etc.'. Authors captured in this way can be treated as 'editor groups', and hence processed differently if needed from standard authors @param etal: (string) The regular expression used to identify 'etal' notation @param author: (string) An optional argument, which replaces the default author regex used to identify author groups (initials, surnames... etc) @return: (string) The full author group identification regex, which will: - detect groups of authors in a range of formats, e.g.: C. Hayward, V van Edwards, M. J. Woodbridge, and L. Kelloggs et al., - detect whether the author group has been marked up as editors of the doc. (therefore they will NOT be marked up as authors) e.g.: ed. C Hayward | (ed) V van Edwards | ed by, M. J. Woodbridge and V van Edwards | L. Kelloggs (editors) | M. Jackson (eds.) | ... -detect a maximum of two surnames only if the surname(s) is followed by 'et al' (must be separated by 'and' if there are two), e.g.: Amaldi et al., | Hayward and Yellow et al., """ if not initial_surname_author: # Standard author, with a maximum of 6 initials, and a surname. # The Initials MUST be uppercase, and MUST have at least a dot, hypen # or apostrophe between them. initial_surname_author = get_initial_surname_author_pattern() if not surname_initial_author: # The author name of the form: 'surname initial(s)' # This is sometimes the represention of the first author found inside an author group. # This author pattern is only used to find a maximum of ONE author inside an author group. # Authors of this form MUST have either a comma after the initials, or an 'and', # which denotes the presence of other authors in the author group. 
surname_initial_author = get_surname_initial_author_pattern() # Pattern used to locate a GROUP of author names in a reference # The format of an author can take many forms: # J. Bloggs, W.-H. Smith, D. De Samuel, G.L. Bayetian, C. Hayward et al., # (the use of 'et. al' is a giveaway that the preceeding # text was indeed an author name) # This will also match authors which seem to be labeled as editors (with the phrase 'ed.') # In which case, the author will be thrown away later on. # The regex returned has around 100 named groups already (max), so any new groups must be # started using '?:' return ur""" (?:^|\s+|\() ## Must be the start of the line, or a space (or an opening bracket in very few cases) (?P<es> ## Look for editor notation before the author (?:(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditors?)((?:\.\s?)|(?:\.?\s))) ## 'eds?. ' | 'ed ' | 'ed.' |(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditions?)(?:(?:\.\s?)|(?:\.?\s))by(?:\s|([:,]\s))) ## 'eds?. by, ' | 'ed. by: ' | 'ed by ' | 'ed. by '| 'ed by: ' |(?:\(\s?([Ee][Dd]s?|[Ee]dited|[Ee]ditors?)(?:(?:\.\s?)|(?:\.?\s))?\))) ## '( eds?. )' | '(ed.)' | '(ed )' | '( ed )' | '(ed)' )? ## **** (1) , one or two surnames which MUST end with 'et al' (e.g. Amaldi et al.,) (?P<author_names> (?: (?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)? ## Initials [A-Z][^0-9_\.\s]{2,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+)) ## Surname (?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)? ## Initials (?P<multi_surs> (?:(?:[Aa][Nn][Dd]|\&)\s+) ## Maybe 'and' or '&' tied with another name [A-Z][^0-9_\.\s]{3,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+)) ## More surnames (?:[A-Z](?:[ -][A-Z])?\s+)? ## with initials )? (?: # Look for editor notation after the author group... \s*[,\s]?\s* # Eventually a coma/space %(ed)s )? (?P<et2> %(etal)s ## et al, MUST BE PRESENT however, for this author form ) (?: # Look for editor notation after the author group... \s*[,\s]?\s* # Eventually a coma/space %(ed)s )? ) | (?: ## **** (2) , The standard author form.. (e.g. J. Bloggs) ## This author form can either start with a normal 'initial surname' author, ## or it can begin with a single 'surname initial' author (?: ## The first author in the 'author group' %(i_s_author)s | (?P<sur_initial_auth>%(s_i_author)s) ) (?P<multi_auth> (?: ## Then 0 or more author names \s*[,\s]\s* (?: %(i_s_author)s | %(s_i_author)s ) )* (?: ## Maybe 'and' or '&' tied with another name (?: \s*[,\s]\s* ## handle "J. Dan, and H. Pon" (?:[Aa][Nn][DdsS]|\&) \s+ ) (?P<mult_auth_sub> %(i_s_author)s | %(s_i_author)s ) )? ) (?P<et> # 'et al' need not be present for either of \s*[,\s]\s* %(etal)s # 'initial surname' or 'surname initial' authors )? ) ) (?P<ee> \s*[,\s]\s* \(? (?:[Ee][Dd]s|[Ee]ditors)\.? \)? [\.\,]{0,2} )? # End of all author name patterns \)? # A possible closing bracket to finish the author group (?=[\s,.;:]) # Consolidate by checking we are not partially matching # something else """ % {'etal': etal, 'i_s_author': initial_surname_author, 's_i_author': surname_initial_author, 'ed': re_ed_notation}
python
def make_auth_regex_str(etal, initial_surname_author=None, surname_initial_author=None):
    if not initial_surname_author:
        initial_surname_author = get_initial_surname_author_pattern()
    if not surname_initial_author:
        surname_initial_author = get_surname_initial_author_pattern()
    return ur"""
    (?:^|\s+|\()   ## Must be the start of the line, or a space (or an opening bracket in very few cases)
    (?P<es>        ## Look for editor notation before the author
     (?:(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditors?)((?:\.\s?)|(?:\.?\s)))                    ## 'eds?. ' | 'ed ' | 'ed.'
     |(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditions?)(?:(?:\.\s?)|(?:\.?\s))by(?:\s|([:,]\s)))  ## 'eds?. by, ' | 'ed. by: ' | 'ed by ' | 'ed. by '| 'ed by: '
     |(?:\(\s?([Ee][Dd]s?|[Ee]dited|[Ee]ditors?)(?:(?:\.\s?)|(?:\.?\s))?\)))             ## '( eds?. )' | '(ed.)' | '(ed )' | '( ed )' | '(ed)'
    )?

    ## **** (1) , one or two surnames which MUST end with 'et al' (e.g. Amaldi et al.,)
    (?P<author_names>
     (?:
      (?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)?      ## Initials
      [A-Z][^0-9_\.\s]{2,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+))   ## Surname
      (?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)?      ## Initials
      (?P<multi_surs>
       (?:(?:[Aa][Nn][Dd]|\&)\s+)                            ## Maybe 'and' or '&' tied with another name
       [A-Z][^0-9_\.\s]{3,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+))  ## More surnames
       (?:[A-Z](?:[ -][A-Z])?\s+)?                           ## with initials
      )?
      (?:              # Look for editor notation after the author group...
       \s*[,\s]?\s*    # Eventually a coma/space
       %(ed)s
      )?
      (?P<et2>
       %(etal)s        ## et al, MUST BE PRESENT however, for this author form
      )
      (?:              # Look for editor notation after the author group...
       \s*[,\s]?\s*    # Eventually a coma/space
       %(ed)s
      )?
     ) |
     (?:
      ## **** (2) , The standard author form.. (e.g. J. Bloggs)
      ## This author form can either start with a normal 'initial surname' author,
      ## or it can begin with a single 'surname initial' author

      (?:              ## The first author in the 'author group'
       %(i_s_author)s |
       (?P<sur_initial_auth>%(s_i_author)s)
      )

      (?P<multi_auth>
       (?:             ## Then 0 or more author names
        \s*[,\s]\s*
        (?:
         %(i_s_author)s | %(s_i_author)s
        )
       )*

       (?:             ## Maybe 'and' or '&' tied with another name
        (?:
         \s*[,\s]\s*   ## handle "J. Dan, and H. Pon"
         (?:[Aa][Nn][DdsS]|\&)
         \s+
        )
        (?P<mult_auth_sub>
         %(i_s_author)s | %(s_i_author)s
        )
       )?
      )
      (?P<et>          # 'et al' need not be present for either of
       \s*[,\s]\s*
       %(etal)s        # 'initial surname' or 'surname initial' authors
      )?
     )
    )
    (?P<ee>
     \s*[,\s]\s*
     \(?
     (?:[Ee][Dd]s|[Ee]ditors)\.?
     \)?
     [\.\,]{0,2}
    )?
    # End of all author name patterns

    \)?                # A possible closing bracket to finish the author group
    (?=[\s,.;:])       # Consolidate by checking we are not partially matching
                       # something else

    """ % {'etal': etal,
           'i_s_author': initial_surname_author,
           's_i_author': surname_initial_author,
           'ed': re_ed_notation}
[ "def", "make_auth_regex_str", "(", "etal", ",", "initial_surname_author", "=", "None", ",", "surname_initial_author", "=", "None", ")", ":", "if", "not", "initial_surname_author", ":", "# Standard author, with a maximum of 6 initials, and a surname.", "# The Initials MUST be uppercase, and MUST have at least a dot, hypen", "# or apostrophe between them.", "initial_surname_author", "=", "get_initial_surname_author_pattern", "(", ")", "if", "not", "surname_initial_author", ":", "# The author name of the form: 'surname initial(s)'", "# This is sometimes the represention of the first author found inside an author group.", "# This author pattern is only used to find a maximum of ONE author inside an author group.", "# Authors of this form MUST have either a comma after the initials, or an 'and',", "# which denotes the presence of other authors in the author group.", "surname_initial_author", "=", "get_surname_initial_author_pattern", "(", ")", "# Pattern used to locate a GROUP of author names in a reference", "# The format of an author can take many forms:", "# J. Bloggs, W.-H. Smith, D. De Samuel, G.L. Bayetian, C. Hayward et al.,", "# (the use of 'et. al' is a giveaway that the preceeding", "# text was indeed an author name)", "# This will also match authors which seem to be labeled as editors (with the phrase 'ed.')", "# In which case, the author will be thrown away later on.", "# The regex returned has around 100 named groups already (max), so any new groups must be", "# started using '?:'", "return", "ur\"\"\"\n (?:^|\\s+|\\() ## Must be the start of the line, or a space (or an opening bracket in very few cases)\n (?P<es> ## Look for editor notation before the author\n (?:(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditors?)((?:\\.\\s?)|(?:\\.?\\s))) ## 'eds?. ' | 'ed ' | 'ed.'\n |(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditions?)(?:(?:\\.\\s?)|(?:\\.?\\s))by(?:\\s|([:,]\\s))) ## 'eds?. by, ' | 'ed. by: ' | 'ed by ' | 'ed. by '| 'ed by: '\n |(?:\\(\\s?([Ee][Dd]s?|[Ee]dited|[Ee]ditors?)(?:(?:\\.\\s?)|(?:\\.?\\s))?\\))) ## '( eds?. )' | '(ed.)' | '(ed )' | '( ed )' | '(ed)'\n )?\n\n ## **** (1) , one or two surnames which MUST end with 'et al' (e.g. Amaldi et al.,)\n (?P<author_names>\n (?:\n (?:[A-Z](?:\\s*[.'’-]{1,2}\\s*[A-Z]){0,4}[.\\s]\\s*)? ## Initials\n [A-Z][^0-9_\\.\\s]{2,20}(?:(?:[,\\.]\\s*)|(?:[,\\.]?\\s+)) ## Surname\n (?:[A-Z](?:\\s*[.'’-]{1,2}\\s*[A-Z]){0,4}[.\\s]\\s*)? ## Initials\n (?P<multi_surs>\n (?:(?:[Aa][Nn][Dd]|\\&)\\s+) ## Maybe 'and' or '&' tied with another name\n [A-Z][^0-9_\\.\\s]{3,20}(?:(?:[,\\.]\\s*)|(?:[,\\.]?\\s+)) ## More surnames\n (?:[A-Z](?:[ -][A-Z])?\\s+)? ## with initials\n )?\n (?: # Look for editor notation after the author group...\n \\s*[,\\s]?\\s* # Eventually a coma/space\n %(ed)s\n )?\n (?P<et2>\n %(etal)s ## et al, MUST BE PRESENT however, for this author form\n )\n (?: # Look for editor notation after the author group...\n \\s*[,\\s]?\\s* # Eventually a coma/space\n %(ed)s\n )?\n ) |\n\n (?:\n ## **** (2) , The standard author form.. (e.g. J. Bloggs)\n ## This author form can either start with a normal 'initial surname' author,\n ## or it can begin with a single 'surname initial' author\n\n (?: ## The first author in the 'author group'\n %(i_s_author)s |\n (?P<sur_initial_auth>%(s_i_author)s)\n )\n\n (?P<multi_auth>\n (?: ## Then 0 or more author names\n \\s*[,\\s]\\s*\n (?:\n %(i_s_author)s | %(s_i_author)s\n )\n )*\n\n (?: ## Maybe 'and' or '&' tied with another name\n (?:\n \\s*[,\\s]\\s* ## handle \"J. Dan, and H. 
Pon\"\n (?:[Aa][Nn][DdsS]|\\&)\n \\s+\n )\n (?P<mult_auth_sub>\n %(i_s_author)s | %(s_i_author)s\n )\n )?\n )\n (?P<et> # 'et al' need not be present for either of\n \\s*[,\\s]\\s*\n %(etal)s # 'initial surname' or 'surname initial' authors\n )?\n )\n )\n (?P<ee>\n \\s*[,\\s]\\s*\n \\(?\n (?:[Ee][Dd]s|[Ee]ditors)\\.?\n \\)?\n [\\.\\,]{0,2}\n )?\n # End of all author name patterns\n\n \\)? # A possible closing bracket to finish the author group\n (?=[\\s,.;:]) # Consolidate by checking we are not partially matching\n # something else\n\n \"\"\"", "%", "{", "'etal'", ":", "etal", ",", "'i_s_author'", ":", "initial_surname_author", ",", "'s_i_author'", ":", "surname_initial_author", ",", "'ed'", ":", "re_ed_notation", "}" ]
Returns a regular expression to be used to identify groups of author names in a citation. This method contains patterns for default authors, so no arguments are needed for the most reliable form of matching. The returned author pattern is capable of: 1. Identifying single authors, with at least one initial, of the form: 'Initial. [surname prefix...] Surname' 2. Identifying multiple authors, each with at least one initial, of the form: 'Initial. [surname prefix...] Surname, [and] [Initial. [surname prefix...] Surname ... ]' ***(Note that a full stop, hyphen or apostrophe after each initial is absolutely vital in identifying authors for both of these above methods. Initials must also be uppercase.)*** 3. Capture 'et al' statements at the end of author groups (allows for authors with et al to be processed differently from 'standard' authors) 4. Identifying a single author surname positioned before the phrase 'et al', with no initials: 'Surname et al' 5. Identifying two author surnames positioned before the phrase 'et al', with no initials, but separated by 'and' or '&': 'Surname [and|&] Surname et al' 6. Identifying authors of the form: 'Surname Initials, Initials Surname [Initials Surname]...'. Some authors choose to represent the most important cited author (in a list of authors) by listing first their surname, and then their initials. Since this form has few distinguishing characteristics which could be used to create a reliable pattern, at least one standard author must be present after it in order to improve the accuracy. 7. Capture editor notation, which can take many forms e.g. 'eds. editors. edited by. etc.'. Authors captured in this way can be treated as 'editor groups', and hence processed differently if needed from standard authors @param etal: (string) The regular expression used to identify 'etal' notation @param initial_surname_author, surname_initial_author: (string) Optional arguments which replace the default author regexes used to identify author groups (initials, surnames... etc) @return: (string) The full author group identification regex, which will: - detect groups of authors in a range of formats, e.g.: C. Hayward, V van Edwards, M. J. Woodbridge, and L. Kelloggs et al., - detect whether the author group has been marked up as editors of the doc. (therefore they will NOT be marked up as authors) e.g.: ed. C Hayward | (ed) V van Edwards | ed by, M. J. Woodbridge and V van Edwards | L. Kelloggs (editors) | M. Jackson (eds.) | ... - detect a maximum of two surnames only if the surname(s) is followed by 'et al' (must be separated by 'and' if there are two), e.g.: Amaldi et al., | Hayward and Yellow et al.,
[ "Returns", "a", "regular", "expression", "to", "be", "used", "to", "identify", "groups", "of", "author", "names", "in", "a", "citation", ".", "This", "method", "contains", "patterns", "for", "default", "authors", "so", "no", "arguments", "are", "needed", "for", "the", "most", "reliable", "form", "of", "matching", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/authors/regexs.py#L192-L350
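Putting the pieces together with a hand-rolled 'et al' sub-pattern standing in for the library's own (an assumption for the demo), compiled with re.VERBOSE so the embedded comments are ignored:

import re
from refextract.authors.regexs import make_auth_regex_str

etal = r'[Ee][Tt]\.?\s+[Aa][Ll][\.\,]?'  # demo stand-in for the real etal regex
re_auth_group = re.compile(make_auth_regex_str(etal), re.VERBOSE | re.UNICODE)
m = re_auth_group.search(u' C. Hayward, V. van Edwards and M. J. Woodbridge, 1998.')
print(m.group().strip() if m else None)  # the matched author group, if any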
inspirehep/refextract
refextract/authors/regexs.py
make_collaborations_regex_str
def make_collaborations_regex_str(): """ From the authors knowledge-base, construct a single regex holding the or'd possibilities of patterns which should be included in $h subfields. The word 'Collaboration' is also converted to 'Coll', and used in finding matches. Letter case is not considered during the search. @return: (string) The single pattern built from each line in the author knowledge base. """ def add_to_auth_list(s): """Strip the line, replace spaces with 'backslash s' and append 'the' to the start and 's' to the end. Add the prepared line to the list of extra kb authors.""" s = ur"(?:the\s)?" + s.strip().replace(u' ', ur'\s') + u"s?" auths.append(s) # Build the 'or'd regular expression of the author lines in the author # knowledge base auths = [] fpath = CFG_REFEXTRACT_KBS['collaborations'] try: fh = open(fpath, "r") except IOError: # problem opening KB for reading, or problem while reading from it: LOGGER.debug(u"Error: Could not build knowledge base containing author patterns - failed to read from KB %s s.\n", fpath) raise IOError("Error: Unable to open collaborations kb '%s'" % fpath) for line_num, rawline in enumerate(fh): try: rawline = rawline.decode("utf-8") except UnicodeError: LOGGER.debug(u"Unicode problems in %s for line %d", fpath, line_num) raise UnicodeError( "Error: Unable to parse collaboration kb (line: %s)" % str(line_num)) if rawline.strip() and rawline[0].strip() != '#': add_to_auth_list(rawline) # Shorten collaboration to 'coll' if rawline.lower().endswith('collaboration\n'): coll_version = rawline[:rawline.lower().find( u'collaboration\n')] + ur"coll[\.\,]" add_to_auth_list( coll_version.strip().replace(' ', r'\s') + u"s?") author_match_re = "" if len(auths) > 0: author_match_re = u'|'.join([u"(?:" + a + u")" for a in auths]) author_match_re = ur"(?:(?:[\(\"]?(?P<extra_auth>" + \ author_match_re + ur")[\)\"]?[\,\.]?\s?(?:and\s)?)+)" return author_match_re
python
def make_collaborations_regex_str():
    def add_to_auth_list(s):
        s = ur"(?:the\s)?" + s.strip().replace(u' ', ur'\s') + u"s?"
        auths.append(s)
    auths = []
    fpath = CFG_REFEXTRACT_KBS['collaborations']
    try:
        fh = open(fpath, "r")
    except IOError:
        LOGGER.debug(u"Error: Could not build knowledge base containing author patterns - failed to read from KB %s s.\n", fpath)
        raise IOError("Error: Unable to open collaborations kb '%s'" % fpath)
    for line_num, rawline in enumerate(fh):
        try:
            rawline = rawline.decode("utf-8")
        except UnicodeError:
            LOGGER.debug(u"Unicode problems in %s for line %d", fpath, line_num)
            raise UnicodeError(
                "Error: Unable to parse collaboration kb (line: %s)" % str(line_num))
        if rawline.strip() and rawline[0].strip() != '#':
            add_to_auth_list(rawline)
            if rawline.lower().endswith('collaboration\n'):
                coll_version = rawline[:rawline.lower().find(
                    u'collaboration\n')] + ur"coll[\.\,]"
                add_to_auth_list(
                    coll_version.strip().replace(' ', r'\s') + u"s?")
    author_match_re = ""
    if len(auths) > 0:
        author_match_re = u'|'.join([u"(?:" + a + u")" for a in auths])
        author_match_re = ur"(?:(?:[\(\"]?(?P<extra_auth>" + \
            author_match_re + ur")[\)\"]?[\,\.]?\s?(?:and\s)?)+)"
    return author_match_re
[ "def", "make_collaborations_regex_str", "(", ")", ":", "def", "add_to_auth_list", "(", "s", ")", ":", "\"\"\"Strip the line, replace spaces with 'backslash s' and append 'the'\n to the start and 's' to the end. Add the prepared line to the list of\n extra kb authors.\"\"\"", "s", "=", "ur\"(?:the\\s)?\"", "+", "s", ".", "strip", "(", ")", ".", "replace", "(", "u' '", ",", "ur'\\s'", ")", "+", "u\"s?\"", "auths", ".", "append", "(", "s", ")", "# Build the 'or'd regular expression of the author lines in the author", "# knowledge base", "auths", "=", "[", "]", "fpath", "=", "CFG_REFEXTRACT_KBS", "[", "'collaborations'", "]", "try", ":", "fh", "=", "open", "(", "fpath", ",", "\"r\"", ")", "except", "IOError", ":", "# problem opening KB for reading, or problem while reading from it:", "LOGGER", ".", "debug", "(", "u\"Error: Could not build knowledge base containing author patterns - failed to read from KB %s s.\\n\"", ",", "fpath", ")", "raise", "IOError", "(", "\"Error: Unable to open collaborations kb '%s'\"", "%", "fpath", ")", "for", "line_num", ",", "rawline", "in", "enumerate", "(", "fh", ")", ":", "try", ":", "rawline", "=", "rawline", ".", "decode", "(", "\"utf-8\"", ")", "except", "UnicodeError", ":", "LOGGER", ".", "debug", "(", "u\"Unicode problems in %s for line %d\"", ",", "fpath", ",", "line_num", ")", "raise", "UnicodeError", "(", "\"Error: Unable to parse collaboration kb (line: %s)\"", "%", "str", "(", "line_num", ")", ")", "if", "rawline", ".", "strip", "(", ")", "and", "rawline", "[", "0", "]", ".", "strip", "(", ")", "!=", "'#'", ":", "add_to_auth_list", "(", "rawline", ")", "# Shorten collaboration to 'coll'", "if", "rawline", ".", "lower", "(", ")", ".", "endswith", "(", "'collaboration\\n'", ")", ":", "coll_version", "=", "rawline", "[", ":", "rawline", ".", "lower", "(", ")", ".", "find", "(", "u'collaboration\\n'", ")", "]", "+", "ur\"coll[\\.\\,]\"", "add_to_auth_list", "(", "coll_version", ".", "strip", "(", ")", ".", "replace", "(", "' '", ",", "r'\\s'", ")", "+", "u\"s?\"", ")", "author_match_re", "=", "\"\"", "if", "len", "(", "auths", ")", ">", "0", ":", "author_match_re", "=", "u'|'", ".", "join", "(", "[", "u\"(?:\"", "+", "a", "+", "u\")\"", "for", "a", "in", "auths", "]", ")", "author_match_re", "=", "ur\"(?:(?:[\\(\\\"]?(?P<extra_auth>\"", "+", "author_match_re", "+", "ur\")[\\)\\\"]?[\\,\\.]?\\s?(?:and\\s)?)+)\"", "return", "author_match_re" ]
From the authors knowledge-base, construct a single regex holding the or'd possibilities of patterns which should be included in $h subfields. The word 'Collaboration' is also converted to 'Coll', and used in finding matches. Letter case is not considered during the search. @return: (string) The single pattern built from each line in the author knowledge base.
[ "From", "the", "authors", "knowledge", "-", "base", "construct", "a", "single", "regex", "holding", "the", "or", "d", "possibilities", "of", "patterns", "which", "should", "be", "included", "in", "$h", "subfields", ".", "The", "word", "Collaboration", "is", "also", "converted", "to", "Coll", "and", "used", "in", "finding", "matches", ".", "Letter", "case", "is", "not", "considered", "during", "the", "search", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/authors/regexs.py#L398-L445
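To make the pattern-building logic above concrete, here is a minimal, self-contained sketch. It replaces the 'collaborations' knowledge-base file with an in-memory list (an assumption for illustration; build_collab_regex and the sample names are hypothetical, not part of refextract), but applies the same transformations: optional leading "the", spaces mapped to \s, optional trailing "s", and the same or'd wrapper with the extra_auth named group.

import re

def build_collab_regex(names):
    # Same per-line treatment as the KB loop: optional "the", \s for spaces,
    # optional plural "s".
    auths = [r"(?:the\s)?" + n.strip().replace(" ", r"\s") + "s?" for n in names]
    alternation = "|".join("(?:" + a + ")" for a in auths)
    # Same outer wrapper, capturing matches in the named group 'extra_auth'.
    return (r"(?:(?:[\(\"]?(?P<extra_auth>" + alternation +
            r")[\)\"]?[\,\.]?\s?(?:and\s)?)+)")

pattern = re.compile(build_collab_regex(["ATLAS Collaboration",
                                         "CMS Collaboration"]),
                     re.IGNORECASE)
match = pattern.search("reported by the ATLAS Collaboration and the CMS Collaboration.")
print(match.group(0))  # the whole matched collaboration span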
inspirehep/refextract
refextract/references/find.py
find_reference_section
def find_reference_section(docbody): """Search in document body for its reference section. More precisely, find the first line of the reference section. Effectively, the function starts at the end of a document and works backwards, line-by-line, looking for the title of a reference section. It stops when (if) it finds something that it considers to be the first line of a reference section. @param docbody: (list) of strings - the full document body. @return: (dictionary) : { 'start_line' : (integer) - index in docbody of 1st reference line, 'title_string' : (string) - title of the reference section. 'marker' : (string) - the marker of the first reference line, 'marker_pattern' : (string) - regexp string used to find the marker, 'title_marker_same_line' : (integer) - flag to indicate whether the reference section title was on the same line as the first reference line's marker or not. 1 if it was; 0 if not. } Much of this information is used by later functions to rebuild a reference section. -- OR -- (None) - when the reference section could not be found. """ ref_details = None title_patterns = get_reference_section_title_patterns() # Try to find refs section title: for title_pattern in title_patterns: # Look for title pattern in docbody for reversed_index, line in enumerate(reversed(docbody)): title_match = title_pattern.match(line) if title_match: title = title_match.group('title') index = len(docbody) - 1 - reversed_index temp_ref_details, found_title = find_numeration(docbody[index:index + 6], title) if temp_ref_details: if ref_details and 'title' in ref_details and ref_details['title'] and not temp_ref_details['title']: continue if ref_details and 'marker' in ref_details and ref_details['marker'] and not temp_ref_details['marker']: continue ref_details = temp_ref_details ref_details['start_line'] = index ref_details['title_string'] = title if found_title: break if ref_details: break return ref_details
python
def find_reference_section(docbody): ref_details = None title_patterns = get_reference_section_title_patterns() for title_pattern in title_patterns: for reversed_index, line in enumerate(reversed(docbody)): title_match = title_pattern.match(line) if title_match: title = title_match.group('title') index = len(docbody) - 1 - reversed_index temp_ref_details, found_title = find_numeration(docbody[index:index + 6], title) if temp_ref_details: if ref_details and 'title' in ref_details and ref_details['title'] and not temp_ref_details['title']: continue if ref_details and 'marker' in ref_details and ref_details['marker'] and not temp_ref_details['marker']: continue ref_details = temp_ref_details ref_details['start_line'] = index ref_details['title_string'] = title if found_title: break if ref_details: break return ref_details
[ "def", "find_reference_section", "(", "docbody", ")", ":", "ref_details", "=", "None", "title_patterns", "=", "get_reference_section_title_patterns", "(", ")", "# Try to find refs section title:", "for", "title_pattern", "in", "title_patterns", ":", "# Look for title pattern in docbody", "for", "reversed_index", ",", "line", "in", "enumerate", "(", "reversed", "(", "docbody", ")", ")", ":", "title_match", "=", "title_pattern", ".", "match", "(", "line", ")", "if", "title_match", ":", "title", "=", "title_match", ".", "group", "(", "'title'", ")", "index", "=", "len", "(", "docbody", ")", "-", "1", "-", "reversed_index", "temp_ref_details", ",", "found_title", "=", "find_numeration", "(", "docbody", "[", "index", ":", "index", "+", "6", "]", ",", "title", ")", "if", "temp_ref_details", ":", "if", "ref_details", "and", "'title'", "in", "ref_details", "and", "ref_details", "[", "'title'", "]", "and", "not", "temp_ref_details", "[", "'title'", "]", ":", "continue", "if", "ref_details", "and", "'marker'", "in", "ref_details", "and", "ref_details", "[", "'marker'", "]", "and", "not", "temp_ref_details", "[", "'marker'", "]", ":", "continue", "ref_details", "=", "temp_ref_details", "ref_details", "[", "'start_line'", "]", "=", "index", "ref_details", "[", "'title_string'", "]", "=", "title", "if", "found_title", ":", "break", "if", "ref_details", ":", "break", "return", "ref_details" ]
Search in document body for its reference section. More precisely, find the first line of the reference section. Effectively, the function starts at the end of a document and works backwards, line-by-line, looking for the title of a reference section. It stops when (if) it finds something that it considers to be the first line of a reference section. @param docbody: (list) of strings - the full document body. @return: (dictionary) : { 'start_line' : (integer) - index in docbody of 1st reference line, 'title_string' : (string) - title of the reference section. 'marker' : (string) - the marker of the first reference line, 'marker_pattern' : (string) - regexp string used to find the marker, 'title_marker_same_line' : (integer) - flag to indicate whether the reference section title was on the same line as the first reference line's marker or not. 1 if it was; 0 if not. } Much of this information is used by later functions to rebuild a reference section. -- OR -- (None) - when the reference section could not be found.
[ "Search", "in", "document", "body", "for", "its", "reference", "section", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/find.py#L45-L97
inspirehep/refextract
refextract/references/find.py
find_numeration
def find_numeration(docbody, title):
    """Find numeration pattern

    1st try to find numeration in the title
    e.g.
    References [4] Riotto...

    2nd find the numeration alone in the line after the title
    e.g.
    References
    1
    Riotto

    3rd find the numeration in the following line
    e.g.
    References
    [1] Riotto
    """
    ref_details, found_title = find_numeration_in_title(docbody, title)
    if not ref_details:
        ref_details, found_title = find_numeration_in_body(docbody)

    return ref_details, found_title
python
def find_numeration(docbody, title): ref_details, found_title = find_numeration_in_title(docbody, title) if not ref_details: ref_details, found_title = find_numeration_in_body(docbody) return ref_details, found_title
[ "def", "find_numeration", "(", "docbody", ",", "title", ")", ":", "ref_details", ",", "found_title", "=", "find_numeration_in_title", "(", "docbody", ",", "title", ")", "if", "not", "ref_details", ":", "ref_details", ",", "found_title", "=", "find_numeration_in_body", "(", "docbody", ")", "return", "ref_details", ",", "found_title" ]
Find numeration pattern

1st try to find numeration in the title
e.g.
References [4] Riotto...

2nd find the numeration alone in the line after the title
e.g.
References
1
Riotto

3rd find the numeration in the following line
e.g.
References
[1] Riotto
[ "Find", "numeration", "pattern" ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/find.py#L181-L203
inspirehep/refextract
refextract/references/find.py
find_reference_section_no_title_generic
def find_reference_section_no_title_generic(docbody, marker_patterns): """This function would generally be used when it was not possible to locate the start of a document's reference section by means of its title. Instead, this function will look for reference lines that have numeric markers of the format [1], [2], {1}, {2}, etc. @param docbody: (list) of strings -each string is a line in the document. @return: (dictionary) : { 'start_line' : (integer) - index in docbody of 1st reference line, 'title_string' : (None) - title of the reference section (None since no title), 'marker' : (string) - the marker of the first reference line, 'marker_pattern' : (string) - the regexp string used to find the marker, 'title_marker_same_line' : (integer) 0 - to signal title not on same line as marker. } Much of this information is used by later functions to rebuild a reference section. -- OR -- (None) - when the reference section could not be found. """ if not docbody: return None ref_start_line = ref_line_marker = None # try to find first reference line in the reference section: found_ref_sect = False for reversed_index, line in enumerate(reversed(docbody)): mark_match = regex_match_list(line.strip(), marker_patterns) if mark_match and mark_match.group('marknum') == '1': # Get marker recognition pattern: mark_pattern = mark_match.re.pattern # Look for [2] in next 10 lines: next_test_lines = 10 index = len(docbody) - reversed_index zone_to_check = docbody[index:index + next_test_lines] if len(zone_to_check) < 5: # We found a 1 towards the end, we assume # we only have one reference found = True else: # Check for number 2 found = False for l in zone_to_check: mark_match2 = regex_match_list(l.strip(), marker_patterns) if mark_match2 and mark_match2.group('marknum') == '2': found = True break if found: # Found next reference line: found_ref_sect = True ref_start_line = len(docbody) - 1 - reversed_index ref_line_marker = mark_match.group('mark') ref_line_marker_pattern = mark_pattern break if found_ref_sect: ref_sectn_details = { 'start_line': ref_start_line, 'title_string': None, 'marker': ref_line_marker.strip(), 'marker_pattern': ref_line_marker_pattern, 'title_marker_same_line': False, } else: # didn't manage to find the reference section ref_sectn_details = None return ref_sectn_details
python
def find_reference_section_no_title_generic(docbody, marker_patterns): if not docbody: return None ref_start_line = ref_line_marker = None found_ref_sect = False for reversed_index, line in enumerate(reversed(docbody)): mark_match = regex_match_list(line.strip(), marker_patterns) if mark_match and mark_match.group('marknum') == '1': mark_pattern = mark_match.re.pattern next_test_lines = 10 index = len(docbody) - reversed_index zone_to_check = docbody[index:index + next_test_lines] if len(zone_to_check) < 5: found = True else: found = False for l in zone_to_check: mark_match2 = regex_match_list(l.strip(), marker_patterns) if mark_match2 and mark_match2.group('marknum') == '2': found = True break if found: found_ref_sect = True ref_start_line = len(docbody) - 1 - reversed_index ref_line_marker = mark_match.group('mark') ref_line_marker_pattern = mark_pattern break if found_ref_sect: ref_sectn_details = { 'start_line': ref_start_line, 'title_string': None, 'marker': ref_line_marker.strip(), 'marker_pattern': ref_line_marker_pattern, 'title_marker_same_line': False, } else: ref_sectn_details = None return ref_sectn_details
[ "def", "find_reference_section_no_title_generic", "(", "docbody", ",", "marker_patterns", ")", ":", "if", "not", "docbody", ":", "return", "None", "ref_start_line", "=", "ref_line_marker", "=", "None", "# try to find first reference line in the reference section:", "found_ref_sect", "=", "False", "for", "reversed_index", ",", "line", "in", "enumerate", "(", "reversed", "(", "docbody", ")", ")", ":", "mark_match", "=", "regex_match_list", "(", "line", ".", "strip", "(", ")", ",", "marker_patterns", ")", "if", "mark_match", "and", "mark_match", ".", "group", "(", "'marknum'", ")", "==", "'1'", ":", "# Get marker recognition pattern:", "mark_pattern", "=", "mark_match", ".", "re", ".", "pattern", "# Look for [2] in next 10 lines:", "next_test_lines", "=", "10", "index", "=", "len", "(", "docbody", ")", "-", "reversed_index", "zone_to_check", "=", "docbody", "[", "index", ":", "index", "+", "next_test_lines", "]", "if", "len", "(", "zone_to_check", ")", "<", "5", ":", "# We found a 1 towards the end, we assume", "# we only have one reference", "found", "=", "True", "else", ":", "# Check for number 2", "found", "=", "False", "for", "l", "in", "zone_to_check", ":", "mark_match2", "=", "regex_match_list", "(", "l", ".", "strip", "(", ")", ",", "marker_patterns", ")", "if", "mark_match2", "and", "mark_match2", ".", "group", "(", "'marknum'", ")", "==", "'2'", ":", "found", "=", "True", "break", "if", "found", ":", "# Found next reference line:", "found_ref_sect", "=", "True", "ref_start_line", "=", "len", "(", "docbody", ")", "-", "1", "-", "reversed_index", "ref_line_marker", "=", "mark_match", ".", "group", "(", "'mark'", ")", "ref_line_marker_pattern", "=", "mark_pattern", "break", "if", "found_ref_sect", ":", "ref_sectn_details", "=", "{", "'start_line'", ":", "ref_start_line", ",", "'title_string'", ":", "None", ",", "'marker'", ":", "ref_line_marker", ".", "strip", "(", ")", ",", "'marker_pattern'", ":", "ref_line_marker_pattern", ",", "'title_marker_same_line'", ":", "False", ",", "}", "else", ":", "# didn't manage to find the reference section", "ref_sectn_details", "=", "None", "return", "ref_sectn_details" ]
This function would generally be used when it was not possible to locate the start of a document's reference section by means of its title. Instead, this function will look for reference lines that have numeric markers of the format [1], [2], {1}, {2}, etc. @param docbody: (list) of strings -each string is a line in the document. @return: (dictionary) : { 'start_line' : (integer) - index in docbody of 1st reference line, 'title_string' : (None) - title of the reference section (None since no title), 'marker' : (string) - the marker of the first reference line, 'marker_pattern' : (string) - the regexp string used to find the marker, 'title_marker_same_line' : (integer) 0 - to signal title not on same line as marker. } Much of this information is used by later functions to rebuild a reference section. -- OR -- (None) - when the reference section could not be found.
[ "This", "function", "would", "generally", "be", "used", "when", "it", "was", "not", "possible", "to", "locate", "the", "start", "of", "a", "document", "s", "reference", "section", "by", "means", "of", "its", "title", ".", "Instead", "this", "function", "will", "look", "for", "reference", "lines", "that", "have", "numeric", "markers", "of", "the", "format", "[", "1", "]", "[", "2", "]", "{", "1", "}", "{", "2", "}", "etc", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/find.py#L281-L354
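The same heuristic in miniature: scan backwards for a line starting with marker [1], then confirm a [2] within the next 10 lines, or accept a lone [1] near the end of the document. Names here (MARK_RE, the toy helpers) are hypothetical, and only the square-bracket marker style is handled, unlike the full marker_patterns list.

import re

MARK_RE = re.compile(r"^\[(?P<marknum>\d+)\]")

def has_marker_two(lines):
    for line in lines:
        m = MARK_RE.match(line.strip())
        if m and m.group('marknum') == '2':
            return True
    return False

def toy_find_refs_by_markers(docbody):
    for reversed_index, line in enumerate(reversed(docbody)):
        m = MARK_RE.match(line.strip())
        if m and m.group('marknum') == '1':
            index = len(docbody) - reversed_index
            zone = docbody[index:index + 10]
            # Fewer than 5 trailing lines: assume a single-reference section.
            if len(zone) < 5 or has_marker_two(zone):
                return len(docbody) - 1 - reversed_index
    return None

docbody = ["...text...", "[1] First reference", "[2] Second reference"]
print(toy_find_refs_by_markers(docbody))  # 1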
inspirehep/refextract
refextract/references/text.py
extract_references_from_fulltext
def extract_references_from_fulltext(fulltext): """Locate and extract the reference section from a fulltext document. Return the extracted reference section as a list of strings, whereby each string in the list is considered to be a single reference line. E.g. a string could be something like: '[19] Wilson, A. Unpublished (1986).' @param fulltext: (list) of strings, whereby each string is a line of the document. @return: (list) of strings, where each string is an extracted reference line. """ # Try to remove pagebreaks, headers, footers fulltext = remove_page_boundary_lines(fulltext) status = 0 # How ref section found flag how_found_start = 0 # Find start of refs section ref_sect_start = get_reference_section_beginning(fulltext) if ref_sect_start is None: # No References refs = [] status = 4 LOGGER.debug(u"extract_references_from_fulltext: ref_sect_start is None") else: # If a reference section was found, however weak ref_sect_end = \ find_end_of_reference_section(fulltext, ref_sect_start["start_line"], ref_sect_start["marker"], ref_sect_start["marker_pattern"]) if ref_sect_end is None: # No End to refs? Not safe to extract refs = [] status = 5 LOGGER.debug(u"extract_references_from_fulltext: no end to refs!") else: # If the end of the reference section was found.. start extraction refs = get_reference_lines(fulltext, ref_sect_start["start_line"], ref_sect_end, ref_sect_start["title_string"], ref_sect_start["marker_pattern"], ref_sect_start["title_marker_same_line"]) return refs, status, how_found_start
python
def extract_references_from_fulltext(fulltext): fulltext = remove_page_boundary_lines(fulltext) status = 0 how_found_start = 0 ref_sect_start = get_reference_section_beginning(fulltext) if ref_sect_start is None: refs = [] status = 4 LOGGER.debug(u"extract_references_from_fulltext: ref_sect_start is None") else: ref_sect_end = \ find_end_of_reference_section(fulltext, ref_sect_start["start_line"], ref_sect_start["marker"], ref_sect_start["marker_pattern"]) if ref_sect_end is None: refs = [] status = 5 LOGGER.debug(u"extract_references_from_fulltext: no end to refs!") else: refs = get_reference_lines(fulltext, ref_sect_start["start_line"], ref_sect_end, ref_sect_start["title_string"], ref_sect_start["marker_pattern"], ref_sect_start["title_marker_same_line"]) return refs, status, how_found_start
[ "def", "extract_references_from_fulltext", "(", "fulltext", ")", ":", "# Try to remove pagebreaks, headers, footers", "fulltext", "=", "remove_page_boundary_lines", "(", "fulltext", ")", "status", "=", "0", "# How ref section found flag", "how_found_start", "=", "0", "# Find start of refs section", "ref_sect_start", "=", "get_reference_section_beginning", "(", "fulltext", ")", "if", "ref_sect_start", "is", "None", ":", "# No References", "refs", "=", "[", "]", "status", "=", "4", "LOGGER", ".", "debug", "(", "u\"extract_references_from_fulltext: ref_sect_start is None\"", ")", "else", ":", "# If a reference section was found, however weak", "ref_sect_end", "=", "find_end_of_reference_section", "(", "fulltext", ",", "ref_sect_start", "[", "\"start_line\"", "]", ",", "ref_sect_start", "[", "\"marker\"", "]", ",", "ref_sect_start", "[", "\"marker_pattern\"", "]", ")", "if", "ref_sect_end", "is", "None", ":", "# No End to refs? Not safe to extract", "refs", "=", "[", "]", "status", "=", "5", "LOGGER", ".", "debug", "(", "u\"extract_references_from_fulltext: no end to refs!\"", ")", "else", ":", "# If the end of the reference section was found.. start extraction", "refs", "=", "get_reference_lines", "(", "fulltext", ",", "ref_sect_start", "[", "\"start_line\"", "]", ",", "ref_sect_end", ",", "ref_sect_start", "[", "\"title_string\"", "]", ",", "ref_sect_start", "[", "\"marker_pattern\"", "]", ",", "ref_sect_start", "[", "\"title_marker_same_line\"", "]", ")", "return", "refs", ",", "status", ",", "how_found_start" ]
Locate and extract the reference section from a fulltext document. Return the extracted reference section as a list of strings, whereby each string in the list is considered to be a single reference line. E.g. a string could be something like: '[19] Wilson, A. Unpublished (1986).' @param fulltext: (list) of strings, whereby each string is a line of the document. @return: (list) of strings, where each string is an extracted reference line.
[ "Locate", "and", "extract", "the", "reference", "section", "from", "a", "fulltext", "document", ".", "Return", "the", "extracted", "reference", "section", "as", "a", "list", "of", "strings", "whereby", "each", "string", "in", "the", "list", "is", "considered", "to", "be", "a", "single", "reference", "line", ".", "E", ".", "g", ".", "a", "string", "could", "be", "something", "like", ":", "[", "19", "]", "Wilson", "A", ".", "Unpublished", "(", "1986", ")", "." ]
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L43-L88
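In practice this internal routine is reached through refextract's top-level helpers rather than called directly. The sketch below assumes the package's public extract_references_from_string entry point; the exact keys of the returned dicts may differ between versions, so consult the package docs.

from refextract import extract_references_from_string

text = """
References
[1] J. Maldacena, Adv. Theor. Math. Phys. 2 (1998) 231.
[2] E. Witten, Adv. Theor. Math. Phys. 2 (1998) 253.
"""

references = extract_references_from_string(text)
for ref in references:
    # Each parsed reference comes back as a dict of extracted fields.
    print(ref)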