import logging
import json
import re
import os
import time
import datetime
import feedparser
import dateutil.parser
from os.path import expanduser
from scibot.telebot import telegram_bot_sendtext
from scibot.streamer import listen_stream_and_rt
from schedule import Scheduler
# logging parameters
logger = logging.getLogger("bot logger")
# handler determines where the logs go: stdout/file
file_handler = logging.FileHandler(f"{datetime.date.today()}_scibot.log")
logger.setLevel(logging.DEBUG)
file_handler.setLevel(logging.DEBUG)
fmt_file = (
"%(levelname)s %(asctime)s [%(filename)s: %(funcName)s:%(lineno)d] %(message)s"
)
file_formatter = logging.Formatter(fmt_file)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
class Settings:
"""Twitter bot application settings.
Enter the RSS feed you want to tweet, or keywords you want to retweet.
"""
IGNORE_ERRORS = [327, 139]
# RSS feeds to read and post tweets from.
feed_urls = [
"https://pubmed.ncbi.nlm.nih.gov/rss/search/1X9MO_201KJGQLdG05NdxtaqKjTZuIPIGlgpiDZr31QjkgZUbj/?limit=300&utm_campaign=pubmed-2&fc=20210922175019",
"https://pubmed.ncbi.nlm.nih.gov/rss/search/1XSES1Yl3kEgnfOg6EStNFyWMogtYXic2VVXS8rpsyNHTjv1HK/?limit=200&utm_campaign=pubmed-2&fc=20210510224301",
"https://pubmed.ncbi.nlm.nih.gov/rss/search/1jAe3RzQKmf7SOUEM-Dt7QQtMWNG2UffuIIo_GGKHPfoKqhY9f/?limit=200&utm_campaign=pubmed-2&fc=20210510224348",
"https://pubmed.ncbi.nlm.nih.gov/rss/search/1bCr63ThlO22Eg5TxBaIQ5mzH02TqtmtM1QIkqa66iqK4SsMJm/?limit=200&utm_campaign=pubmed-2&fc=20210510224551",
"https://pubmed.ncbi.nlm.nih.gov/rss/search/1hEma6JdH30sOOO0DiTP1jZh-6ZgoypoEsw_B9tXZejk_E8QuX/?limit=200&utm_campaign=pubmed-2&fc=20210510230918",
]
# RSS feed (no date restriction) with the best results for harm reduction and psychedelics
feed_older_literature = feedparser.parse("https://pubmed.ncbi.nlm.nih.gov/rss/search/1h_Yu2rLTrK0AIYDN2V5HLWSksLTr4a6SUZjZzoAPcf-Qk0gCJ/?limit=200&utm_campaign=pubmed-2&fc=20210901021150")["entries"]
pre_combined_feed = [feedparser.parse(url)["entries"] for url in feed_urls]
# (combined_feed)
combined_feed = [item for feed in pre_combined_feed for item in feed]
combined_feed.sort(
key=lambda x: dateutil.parser.parse(x["published"]), reverse=True
)
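# combined_feed: every entry from feed_urls, merged and sorted newest-first by its "published" date.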
# Log file to save all tweeted RSS links (one URL per line).
posted_urls_output_file = expanduser("~/drugscibot/publications.json")
# Log file to save all retweeted tweets (one tweetid per line).
posted_retweets_output_file = expanduser("~/drugscibot/posted-retweets.log")
# Log file to save all retweeted tweets (one tweetid per line).
faved_tweets_output_file = expanduser("~/drugscibot/faved-tweets.log")
# Log file to save followers list.
users_json_file = expanduser("~/drugscibot/users.json")
# Include tweets with these words when retweeting.
retweet_include_words = [
"drugpolicy",
"drugspolicy",
"transformdrugspolicy",
"transformdrugpolicy",
"drugchecking",
"regulatestimulants",
"regulatedrugs",
"sensibledrugpolicy",
"drugpolicyreform",
"safeconsumption",
"harmreduction",
"druguse",
"regular",
"reduccion de dano",
"dosis minima",
"regulacion",
"droga",
"sicoactiva",
"psicoactiva",
"politica de droga",
# "cion de riesgo",
"legalizacion",
"safesuply",
"safersuply",
]
# Do not include tweets with these words when retweeting.
retweet_exclude_words = [
"sex",
"sexual",
"sexwork",
"sexualwork",
"fuck",
"vaping",
"vape",
"cigarretes",
"nicotine",
"smoke",
"smoking",
"constellationsfest",# to be deleted after the festival
"zigaretten",
]
add_hashtag = [
"psilocybin",
"psilocybine",
"psychedelic",
"hallucinogenic",
"overdose",
"microdosing",
"drug-policy",
"drugspolicy",
"mdma",
"drug checking",
"drugpolicy",
"drug policy",
"ayahuasca",
"psychopharmacology",
"neurogenesis",
"5-meo-dmt",
"serotonergic",
"ketamine",
"psychotherapy",
"harm reduction",
"methadone",
] # trip
# do not retweet if the search result matches only one of these watch-list keywords
watch_add_hashtag = [
"alzheimer",
"depression",
"anxiety",
"dmt",
"droga",
"lsd",
"therapy",
"psychiatry",
"mentalhealth",
"trip",
"regula",
"regular",
"mental health",
"clinical trial",
"consciousness",
"meta-analysis",
"dopamine",
"serotonin",
"psychological",
"metaanalysis",
"reform",
]
# ID of the Twitter list used for list_search and give_love (the experts/distribution list)
mylist_id = "1306244304000749569"
class SafeScheduler(Scheduler):
"""
An implementation of Scheduler that catches jobs that fail, logs their
exception tracebacks as errors, optionally reschedules the jobs for their
next run time, and keeps going.
Use this to run jobs that may or may not crash without worrying about
whether other jobs will run or if they'll crash the entire script.
"""
def __init__(self, reschedule_on_failure=True):
"""
Args:
reschedule_on_failure: if True, jobs will be rescheduled for their
next run as if they had completed successfully. If False, they'll run
on the next run_pending() tick.
"""
self.reschedule_on_failure = reschedule_on_failure
super().__init__()
def _run_job(self, job):
try:
super()._run_job(job)
except Exception as e:
logger.exception(e)
telegram_bot_sendtext(f"[Job Error] {e}")
job.last_run = datetime.datetime.now()
job._schedule_next_run()
def shorten_text(text: str, maxlength: int) -> str:
"""
Truncate text and append three dots (...) at the end if length exceeds
maxlength chars.
Args:
text: The text to shorten.
maxlength: The maximum character length of the text string.
Returns: Shortened text string.
"""
return (text[:maxlength] + "...") if len(text) > maxlength else text
def insert_hashtag(title: str) -> str:
"""
Add hashtags to the title for keywords found in Settings.add_hashtag
Args:
title: Text to parse for inserting hash symbols
Returns: Text with inserted hashtags
"""
for x in Settings.add_hashtag:
if re.search(fr"\b{x}", title.lower()):
pos = (re.search(fr"\b{x}", title.lower())).start()
if " " in x:
title = title[:pos] + "#" + title[pos:].replace(" ", "", 1)
else:
title = title[:pos] + "#" + title[pos:]
return title
def compose_message(item: feedparser.FeedParserDict) -> str:
"""
Compose a tweet from an RSS item (title, link, description)
and return final tweet message.
Args:
item: feedparser.FeedParserDict
An RSS item
Returns: Message suited for a Twitter status update.
"""
title = insert_hashtag(item["title"])
message = shorten_text(title, maxlength=250) + " 1/5 " + item["link"]
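# The " 1/5 " suffix marks this as the opening tweet of a five-part thread; post_thread() in
# scibot/what_a_c.py posts the remaining parts (abstract chunks plus a closing date/authors tweet).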
return message
def is_in_logfile(content: str, filename: str) -> bool:
"""
Check whether the content already exists as a key in the JSON log file.
Args:
content: Content to search file for.
filename: Full path to file to search.
Returns: `True` if content is found in file, otherwise `False`.
"""
if os.path.isfile(filename):
with open(filename, "r") as jsonFile:
article_log = json.load(jsonFile)
if content in article_log:
return True
return False
def write_to_logfile(content: dict, filename: str) -> None:
"""
Write content to a JSON file, overwriting any existing contents.
Args:
content: Content to write to the file
filename: Full path to the file that should be written.
Returns: None
"""
try:
with open(filename, "w") as fp:
json.dump(content, fp, indent=4)
except IOError as e:
logger.exception(e)
def scheduled_job(read_rss_and_tweet, retweet_own, search_and_retweet):
# listen_stream_and_rt('#INSIGHT2021')
schedule = SafeScheduler()
# job 1
schedule.every().day.at("22:20").do(read_rss_and_tweet)
schedule.every().day.at("06:20").do(read_rss_and_tweet)
schedule.every().day.at("14:20").do(read_rss_and_tweet)
# job 2
schedule.every().day.at("01:10").do(retweet_own)
schedule.every().day.at("09:10").do(retweet_own)
schedule.every().day.at("17:10").do(retweet_own)
# job 3
schedule.every().day.at("00:20").do(search_and_retweet, "list_search")
schedule.every().day.at("03:20").do(search_and_retweet, "list_search")
schedule.every().day.at("06:20").do(search_and_retweet, "list_search")
schedule.every().day.at("09:20").do(search_and_retweet, "list_search")
schedule.every().day.at("12:20").do(search_and_retweet, "list_search")
schedule.every().day.at("15:20").do(search_and_retweet, "list_search")
schedule.every().day.at("18:20").do(search_and_retweet, "list_search")
schedule.every().day.at("21:20").do(search_and_retweet, "list_search")
schedule.every().day.at("01:25").do(search_and_retweet, "list_search")
schedule.every().day.at("04:25").do(search_and_retweet, "list_search")
schedule.every().day.at("07:25").do(search_and_retweet, "list_search")
schedule.every().day.at("10:25").do(search_and_retweet, "list_search")
schedule.every().day.at("13:25").do(search_and_retweet, "list_search")
schedule.every().day.at("16:25").do(search_and_retweet, "list_search")
schedule.every().day.at("19:25").do(search_and_retweet, "list_search")
schedule.every().day.at("22:25").do(search_and_retweet, "list_search")
# job love
schedule.every(5).minutes.do(search_and_retweet, "give_love")
while 1:
schedule.run_pending()
time.sleep(1)
# End of scibot/tools.py (package scienceBot, /scienceBot-0.1.1.1.tar.gz/scienceBot-0.1.1.1/scibot/tools.py)
import json
import os
import sys
import re
import time
from os.path import expanduser
from random import randint
import calendar
import tweepy
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from scibot.streamer import listen_stream_and_rt
from scibot.telebot import telegram_bot_sendtext
from scibot.tools import (
logger,
Settings,
insert_hashtag,
shorten_text,
compose_message,
is_in_logfile,
write_to_logfile,
scheduled_job,
)
env_path = expanduser("~/.env")
load_dotenv(dotenv_path=env_path, override=True)
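# ~/.env is expected to define CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN and ACCESS_SECRET,
# which twitter_setup() below reads via os.getenv().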
def main():
"""
Main function of scibot
"""
logger.info("\n### sciBot started ###\n\n")
if len(sys.argv) > 1:
try:
check_json_exists(
Settings.users_json_file,
{"test": {"follower": False, "interactions": 0}},
)
check_json_exists(Settings.faved_tweets_output_file, {"test": {}})
check_json_exists(
Settings.posted_retweets_output_file,
{"test": {}},
)
check_json_exists(
Settings.posted_urls_output_file,
{"test": {}},
)
if sys.argv[1].lower() == "rss":
read_rss_and_tweet()
elif sys.argv[1].lower() == "str":
listen_stream_and_rt(['#ConstellationsFest', '#ConstellationFest'])
elif sys.argv[1].lower() == "rtg":
search_and_retweet("global_search")
elif sys.argv[1].lower() == "glv":
search_and_retweet("give_love")
elif sys.argv[1].lower() == "rtl":
search_and_retweet("list_search")
elif sys.argv[1].lower() == "rto":
retweet_old_own()
elif sys.argv[1].lower() == "sch":
listen_stream_and_rt(['#ConstellationsFest', '#ConstellationFest'])
scheduled_job(read_rss_and_tweet, retweet_old_own, search_and_retweet)
except IOError as errno:
logger.exception(f"[ERROR] IOError {errno}")
telegram_bot_sendtext(f"[ERROR] IOError {errno}")
except Exception as e:
logger.exception(e, exc_info=True)
telegram_bot_sendtext(f"[Exception] {e}")
else:
display_help()
logger.info("\n\n### sciBot finished ###")
# Setup API:
def twitter_setup():
"""
Setup Twitter connection for a developer account
Returns: tweepy.API object
"""
# Authenticate and access using keys:
auth = tweepy.OAuthHandler(os.getenv("CONSUMER_KEY"), os.getenv("CONSUMER_SECRET"))
auth.set_access_token(os.getenv("ACCESS_TOKEN"), os.getenv("ACCESS_SECRET"))
# Return API access:
api = tweepy.API(auth, wait_on_rate_limit=True)
return api
def check_json_exists(file_path: str, init_dict: dict) -> None:
"""
Create the log folder and JSON file if they do not exist
Args:
file_path: Log file path
init_dict: Dictionary used to initialise the JSON file
Returns: None
"""
if not os.path.exists(os.path.dirname(os.path.abspath(file_path))):
os.makedirs(os.path.dirname(os.path.abspath(file_path)))
if not os.path.isfile(file_path):
with open(file_path, "w") as json_file:
json.dump(init_dict, json_file, indent=4)
def get_followers_list() -> list:
"""
Read json file of followers from Settings.users_json_file
Returns: List of followers
"""
with open(Settings.users_json_file, "r") as json_file:
users_dic = json.load(json_file)
return [x for x in users_dic if users_dic[x]["follower"] is True]
def update_thread(text: str, tweet: tweepy.Status, api: tweepy.API) -> tweepy.Status:
"""
Add a tweet to an initiated thread
Args:
text: text to add to the thread as a reply
tweet: tweepy status to reply to
api: tweepy.API object
Returns: the posted reply (tweepy.Status)
"""
return api.update_status(
status=text, in_reply_to_status_id=tweet.id, auto_populate_reply_metadata=True
)
def post_thread(dict_one_pub: dict, maxlength: int, count: int = 1) -> int:
"""
Initiate and post a thread of tweets
Args:
dict_one_pub: dictionary object with processed publication item
maxlength: length of the message to post (max tweet is 280 char)
count: count of replies on the thread
Returns: tweet id of the first tweet of thread
"""
api = twitter_setup()
original_tweet = api.update_status(status=compose_message(dict_one_pub))
telegram_bot_sendtext(f"Posting thread:, twitter.com/drugscibot/status/{original_tweet.id}")
text = dict_one_pub["abstract"]
max_len = round(len(text)/3)
if max_len < 240:
maxlength = max_len
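# Chunk the abstract into roughly thirds (unless a third is 240 characters or more, in which
# case the passed maxlength is kept). At most three chunks are posted (count < 4), so together
# with the opening tweet and the closing date/authors tweet the thread stays at five parts.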
for index in range(0, len(text), maxlength):
if count < 4:
count += 1
time.sleep(2)
thread_message = (
insert_hashtag(text[index : index + maxlength]) + f"... {count}/5"
)
if count == 2:
reply1_tweet = update_thread(thread_message, original_tweet, api)
if count == 3:
reply2_tweet = update_thread(thread_message, reply1_tweet, api)
if count == 4:
reply3_tweet = update_thread(thread_message, reply2_tweet, api)
time.sleep(2)
count += 1
last_msg = shorten_text(dict_one_pub["pub_date"] + " " + dict_one_pub["author-s"], 250) + f" {count}/{count}"
update_thread(last_msg, reply3_tweet, api)
return original_tweet.id
def return_doi_str(article):
"""return doi link if exists"""
title_search = re.search('(DOI:<a href=")(.*)(">)', str(article))
if title_search:
return title_search.group(2)
else:
return article.link
def make_literature_dict(feed: list) -> dict:
"""
Filter publications from an RSS feed that have an abstract and parse the HTML abstract into a plain string
Args:
feed: list of RSS feed items
Returns: dictionary of processed publications
"""
dict_publications = {}
for item in feed:
if hasattr(item, "content") and not 'No abstract' in item.description:
authors_list = [x["name"] for x in item.authors]
dict_publications[item.id] = {
"title": item.title,
"abstract": BeautifulSoup(item.content[0].value, "html.parser")
.get_text()
.split("ABSTRACT")[1],
"link": return_doi_str(item),
"description": item.description,
"pub_date": f"Date: {calendar.month_name[item.published_parsed.tm_mon]} {item.published_parsed.tm_year}",
"author-s": f"Authors: {', '.join(authors_list)}" if len(authors_list) >1 else f"Author: {', '.join(authors_list)}",
}
return dict_publications
def read_rss_and_tweet() -> None:
"""
Read RSS items and tweet one of them by calling the post_thread function
Returns: None, updates log file with the posted article id
"""
dict_publications = make_literature_dict(Settings.combined_feed)
with open(Settings.posted_urls_output_file, "r") as jsonFile:
article_log = json.load(jsonFile)
if all(item in article_log.keys() for item in dict_publications.keys()):
telegram_bot_sendtext("rss empty trying older articles")
dict_publications = make_literature_dict(Settings.feed_older_literature)
for article in sorted(dict_publications.keys(), reverse=True):
if not is_in_logfile(article, Settings.posted_urls_output_file):
try:
article_log[article] = {
"count": 1,
"tweet_id": post_thread(dict_publications[article], 240),
}
write_to_logfile(article_log, Settings.posted_urls_output_file)
break
except tweepy.TweepError as e:
logger.error(f"RSS error, possible duplicate {e}, {article}")
write_to_logfile(article_log, Settings.posted_urls_output_file)
continue
def json_add_new_friend(user_id: str) -> None:
"""
Add a user as a follower to the interactions JSON file
Args:
user_id: user id to add to the interactions file
Returns: None, updates interaction file
"""
with open(Settings.users_json_file, "r") as json_file:
users_dic = json.load(json_file)
if user_id not in users_dic:
users_dic[user_id] = {"follower": True, "interactions": 1}
else:
users_dic[user_id]["follower"] = True
with open(Settings.users_json_file, "w") as json_file:
json.dump(users_dic, json_file, indent=4)
def post_tweet(message: str) -> None:
"""
Post tweet message to account.
Args:
message: Message to post on Twitter
Returns: None
"""
try:
twitter_api = twitter_setup()
logger.info(f"post_tweet():{message}")
twitter_api.update_status(status=message)
except tweepy.TweepError as e:
logger.error(e)
def filter_repeated_tweets(result_search: list, flag: str) -> list:
"""
Drop search results that were already posted or faved (according to the relevant log file)
and de-duplicate retweets of the same status.
Args:
result_search: list of tweepy.Status search results
flag: "give_love" checks the faved log, anything else checks the retweet log
Returns: list of unique, not-yet-processed tweepy.Status objects
"""
if flag == "give_love":
out_file = Settings.faved_tweets_output_file
else:
out_file = Settings.posted_retweets_output_file
unique_results = {}
for status in result_search:
if hasattr(status, "retweeted_status"):
check_id = status.retweeted_status.id_str
else:
check_id = status.id_str
if not is_in_logfile(check_id, out_file):
unique_results[status.full_text] = status
return [unique_results[x] for x in unique_results]
def json_add_user(user_id: str) -> None:
"""
add user to the interactions json file
Args:
user_id: user id
Returns: None
"""
with open(Settings.users_json_file, "r") as json_file:
users_dic = json.load(json_file)
if user_id not in users_dic:
users_dic[user_id] = {"follower": False, "interactions": 1}
else:
users_dic[user_id]["interactions"] += 1
with open(Settings.users_json_file, "w") as json_file:
json.dump(users_dic, json_file, indent=4)
def get_query() -> str:
"""
Create Twitter search query with included words minus the
excluded words.
Returns: string with the Twitter search query
"""
include = " OR ".join(Settings.retweet_include_words)
exclude = " -".join(Settings.retweet_exclude_words)
exclude = "-" + exclude if exclude else ""
return include + " " + exclude
def check_interactions(tweet: tweepy.Status) -> bool:
"""
Check whether the bot has already interacted enough with the tweet's author
Args:
tweet: tweepy.Status to check
Returns: True if the author should be skipped, otherwise False
"""
if tweet.author.screen_name.lower() == "viewsondrugsbot":
return True  # don't fav/retweet yourself
auth_id = tweet.author.id_str
with open(Settings.users_json_file, "r") as json_file:
users_dic = json.load(json_file)
user_list = [
users_dic[x]["interactions"]
for x in users_dic
if users_dic[x]["follower"] == False
]
down_limit = round(sum(user_list) / len(user_list))
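# down_limit is the mean interaction count over non-follower users; an author at or above this
# mean counts as "already interacted with enough" and is skipped by the callers.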
if auth_id in users_dic:
if users_dic[auth_id]["interactions"] >= down_limit:
return True
else:
return False
else:
return False
def try_retweet(
twitter_api: tweepy.API, tweet_text: str, in_tweet_id: str, self_followers: list
) -> None:
"""
Try to retweet; if already retweeted, try the next one from the list
of recent tweets.
Args:
twitter_api: authenticated tweepy.API object
tweet_text: text of the candidate tweet (used for logging)
in_tweet_id: id of the candidate tweet
self_followers: list of the bot's followers
Returns: True if the retweet succeeded or should not be retried, otherwise False/None
"""
tweet_id = find_simple_users(twitter_api, in_tweet_id, self_followers)
if not is_in_logfile(in_tweet_id, Settings.posted_retweets_output_file):
try:
twitter_api.retweet(id=tweet_id)
logger.info(f"Trying to rt {tweet_id}")
write_to_logfile({in_tweet_id: {}}, Settings.posted_retweets_output_file)
_status = twitter_api.get_status(tweet_id)
json_add_user(_status.author.id_str)
if tweet_id == in_tweet_id:
id_mess = f"{tweet_id} original"
else:
id_mess = f"{tweet_id} from a nested profile"
message_log = (
"Retweeted and saved to file > https://twitter.com/i/status/{}".format(
id_mess
)
)
logger.info(message_log)
telegram_bot_sendtext(message_log)
return True
except tweepy.TweepError as e:
if e.api_code in Settings.IGNORE_ERRORS:
write_to_logfile(
{in_tweet_id: {}}, Settings.posted_retweets_output_file
)
logger.exception(e)
return False
else:
logger.error(e)
return True
else:
logger.info(
"Already retweeted {} (id {})".format(
shorten_text(tweet_text, maxlength=140), tweet_id
)
)
def get_longest_text(status: tweepy.Status) -> str:
"""
Get the full text of a status (or of the status it retweets)
Args:
status: tweepy.Status object
Returns: text of the quoted tweet
"""
if hasattr(status, "retweeted_status"):
return status.retweeted_status.full_text
else:
return status.full_text
def find_simple_users(
twitter_api: tweepy.API, tweet_id: str, followers_list: list
) -> str:
"""
retweet/fav from users retweeting something interesting
Args:
twitter_api:
tweet_id:
followers_list:
Returns: id of the retweeted/faved tweet
"""
# get original retweeter:
down_lev_tweet = twitter_api.get_status(tweet_id)
if hasattr(down_lev_tweet, "retweeted_status"):
retweeters = twitter_api.retweets(down_lev_tweet.retweeted_status.id_str)
else:
retweeters = twitter_api.retweets(tweet_id)
future_friends = []
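# Score each retweeter: accounts following more people than follow them are keyed by their
# followers/friends ratio, the rest by raw follower count; the minimum score below selects the
# "simplest" profile, preferring accounts that do not already follow this bot.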
for retweet in retweeters:
if check_interactions(retweet):
continue
try:
follows_friends_ratio = (
retweet.author.followers_count / retweet.author.friends_count
)
except ZeroDivisionError:
follows_friends_ratio = 0
future_friends_dic = {
"id_str": retweet.author.id_str,
"friends": retweet.author.friends_count,
"followers": retweet.author.followers_count,
"follows_friends_ratio": follows_friends_ratio,
}
if future_friends_dic["friends"] > future_friends_dic["followers"]:
future_friends.append(
(
future_friends_dic["follows_friends_ratio"],
retweet.id_str,
future_friends_dic,
)
)
else:
future_friends.append(
(future_friends_dic["followers"], retweet.id_str, future_friends_dic)
)
if future_friends:
try:  # give priority to non-followers of self
min_friend = min(
[x for x in future_friends if x[2]["id_str"] not in followers_list]
)
logger.info(
f"try retweeting/fav https://twitter.com/i/status/{min_friend[1]} from potential friend profile: {min_friend[2]['id_str']} friends= {min_friend[2]['friends']}, followers={min_friend[2]['followers']}"
)
return min_friend[1]
except ValueError:  # every candidate already follows the bot
min_friend = min(future_friends)
logger.info(
f"try retweeting/fav https://twitter.com/i/status/{min_friend[1]} from potential friend profile: {min_friend[2]['id_str']} friends= {min_friend[2]['friends']}, followers={min_friend[2]['followers']}"
)
return min_friend[1]
else:
logger.info(
f"try retweeting from original post: https://twitter.com/i/status/{tweet_id}"
)
return tweet_id
def filter_tweet(search_results: list, twitter_api):
"""
Ensure that retweet candidates are on-topic according to the keyword lists
Args:
search_results: list of tweepy.Status objects to filter
twitter_api: authenticated tweepy.API object, used to fetch quoted tweets
Returns: list of (engagement_counts, id_str, full_text) tuples sorted by engagement
"""
filtered_search_results = []
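# A status is kept only if it has more than three words, at least two combined
# retweets + favourites, contains no excluded word, and matches at least one include/hashtag
# keyword that is not merely a watch_add_hashtag keyword.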
for status in search_results:
faved_sum = (
status.retweet_count,
status.favorite_count,
status.retweet_count + status.favorite_count,
)
if status.is_quote_status:
try:
quoted_tweet = twitter_api.get_status(
status.quoted_status_id_str, tweet_mode="extended"
)
except tweepy.TweepError as e:
telegram_bot_sendtext(f"ERROR {e}, twitter.com/anyuser/status/{status.id_str}")
quoted_tweet = ""
continue
end_status = get_longest_text(status) + get_longest_text(quoted_tweet)
else:
end_status = get_longest_text(status)
if len(end_status.split()) > 3 and faved_sum[2] > 1:
joined_list = Settings.add_hashtag + Settings.retweet_include_words
# remove elements from the exclude words list
keyword_matches = [
x
for x in joined_list + Settings.watch_add_hashtag
if x in end_status.lower()
and not any(
[
x
for x in Settings.retweet_exclude_words
if x in end_status.lower()
]
)
]
if keyword_matches:
if any(
[x for x in keyword_matches if x not in Settings.watch_add_hashtag]
):
print(keyword_matches, status.full_text)
filtered_search_results.append(
(faved_sum, status.id_str, status.full_text)
)
else:
logger.info(f">> skipped, {keyword_matches}, {end_status}")
return sorted(filtered_search_results)
def try_give_love(twitter_api, in_tweet_id, self_followers):
"""
try to favorite a post from simple users
Args:
twitter_api:
in_tweet_id:
self_followers:
Returns:
"""
# todo add flag to use sleep or fav immediately
tweet_id = find_simple_users(twitter_api, in_tweet_id, self_followers)
if not is_in_logfile(in_tweet_id, Settings.faved_tweets_output_file):
try:
time.sleep(randint(0, 250))
twitter_api.create_favorite(id=tweet_id)
write_to_logfile({in_tweet_id: {}}, Settings.faved_tweets_output_file)
_status = twitter_api.get_status(tweet_id)
json_add_user(_status.author.id_str)
message_log = (
"faved tweet succesful: https://twitter.com/i/status/{}".format(
tweet_id
)
)
logger.info(message_log)
telegram_bot_sendtext(message_log)
return True
except tweepy.TweepError as e:
if e.api_code in Settings.IGNORE_ERRORS:
write_to_logfile({in_tweet_id: {}}, Settings.faved_tweets_output_file)
logger.debug(f"throw a en error {e}")
logger.exception(e)
telegram_bot_sendtext(f"{e}")
return False
else:
logger.error(e)
telegram_bot_sendtext(f"{e}")
return True
else:
logger.info("Already faved (id {})".format(tweet_id))
def fav_or_tweet(max_val, flag, twitter_api):
"""
Retweet or favourite a candidate tweet depending on the flag
Args:
max_val: list of (engagement_counts, id_str, full_text) candidates sorted by engagement
flag: "give_love" to favourite, anything else to retweet
twitter_api: authenticated tweepy.API object
Returns: None
"""
self_followers = get_followers_list()
count = 0
while count < len(max_val):
tweet_id = max_val[-1 - count][1]
tweet_text = max_val[-1 - count][2]
logger.info(f"{len(tweet_text.split())}, {tweet_text}")
if flag == "give_love":
use_function = try_give_love(twitter_api, tweet_id, self_followers)
log_message = "fav"
else:
use_function = try_retweet(
twitter_api, tweet_text, tweet_id, self_followers
)
log_message = "retweet"
if use_function:
logger.info(f"{log_message}ed: id={tweet_id} text={tweet_text}")
break
else:
count += 1
time.sleep(2)
if count >= len(max_val):
logger.debug("no more tweets to post")
continue
def search_and_retweet(flag: str = "global_search", count: int = 100):
"""
Search for a query in tweets, and retweet those tweets.
Args:
flag: Search mode. `global_search` searches Twitter globally for the query keywords,
`list_search` restricts the search to the list defined in Settings.mylist_id,
and `give_love` favourites tweets from the lists instead of retweeting.
count: Number of tweets to search for. You should probably keep this low
when you use search_and_retweet() on a schedule (e.g. cronjob)
Returns: None
"""
try:
twitter_api = twitter_setup()
if flag == "global_search":
# global search: retweet results for the given keywords
search_results = twitter_api.search(
q=get_query(), count=count, tweet_mode="extended"
) # standard search results
elif flag == "list_search":
# list search: retweet the most commented and retweeted items from the experts list
search_results = twitter_api.list_timeline(
list_id=Settings.mylist_id, count=count, tweet_mode="extended"
) # list to tweet from
elif flag == "give_love":
search_results = twitter_api.list_timeline(
list_id=Settings.mylist_id, count=count, tweet_mode="extended"
) + twitter_api.list_timeline(
list_id=1396081589768138755, count=count, tweet_mode="extended"
)
except tweepy.TweepError as e:
logger.exception(e.reason)
telegram_bot_sendtext(f"ERROR: {e}")
return
except Exception as e:
telegram_bot_sendtext(f"ERROR: {e}")
return
# get the most faved + retweeted result and retweet it
max_val = filter_tweet(filter_repeated_tweets(search_results, flag), twitter_api)
fav_or_tweet(max_val, flag, twitter_api)
def retweet(tweet: tweepy.Status):
"""
Retweet (or re-retweet) one of the bot's own tweets.
Args:
tweet: tweet object
Returns: None
"""
try:
twitter_api = twitter_setup()
if not hasattr(tweet, 'retweeted'):
print(tweet)
twitter_api.retweet(id=tweet.id)
logger.info(f"retweeted: twitter.com/i/status/{tweet.id}")
telegram_bot_sendtext(f"Self retweeted: twitter.com/drugscibot/status/{tweet.id}")
else:
twitter_api.unretweet(tweet.id)
twitter_api.retweet(tweet.id)
telegram_bot_sendtext(f"Self re-retweeted: twitter.com/drugscibot/status/{tweet.id}")
except tweepy.TweepError as e:
logger.exception(e)
telegram_bot_sendtext(f"ERROR:{e}")
def retweet_old_own():
"""
Retweet the own post with the lowest retweet count and update its count in the log.
Returns: None
"""
twitter_api = twitter_setup()
with open(Settings.posted_urls_output_file, "r") as jsonFile:
article_log = json.load(jsonFile)
article_log_reversed = {article_log[x]['tweet_id']:{**article_log[x], **{'id':x}} for x in article_log}
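# article_log_reversed indexes the log by tweet_id and keeps the original article key under
# 'id', so the least-retweeted own tweet can be found, retweeted, and its count bumped below.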
min_val = min(article_log[x]["count"] for x in article_log)
for art in sorted(article_log_reversed):
tweet = twitter_api.statuses_lookup([article_log_reversed[art]["tweet_id"]])
if tweet and article_log_reversed[art]["count"] <= min_val:
retweet(tweet[0])
article_log[article_log_reversed[art]['id']]["count"] += 1
break
with open(Settings.posted_urls_output_file, "w") as fp:
json.dump(article_log, fp, indent=4)
def display_help():
"""
Show available commands.
Returns: Prints available commands
"""
print("Syntax: python {} [command]".format(sys.argv[0]))
print()
print(" Commands:")
print(" rss Read URL and post new items to Twitter")
print(" rtg Search and retweet keywords from global feed")
print(" rtl Search and retweet keywords from list feed")
print(" glv Fav tweets from list or globally")
print(" rto Retweet last own tweet")
print(" sch Run scheduled jobs on infinite loop")
print(" help Show this help screen")
if __name__ == "__main__":
main()
# End of scibot/what_a_c.py (package scienceBot, /scienceBot-0.1.1.1.tar.gz/scienceBot-0.1.1.1/scibot/what_a_c.py)
import json
import logging
from datetime import datetime
import requests
from sb3 import auth, client
class SbSessionEx:
"""SbSessionEx provides extra methods for working with ScienceBase GraphQL
as well as authentication using Keycloak.
"""
_env = None
_graphql_url = None
_realm = None
_auth_server_url = None
_username = None
_token = None
_refresh_token = None
_client_id = None
_token_server_uri = None
_token_expire = None
_token_expire_refresh = None
_token_expire_time = None
_is_logged_in = False
def __init__(self, env=None):
self._env = env
self._logging = logging
self._auth_server_url = "https://www.sciencebase.gov/auth"
if env == "beta":
self._graphql_url = "https://api-beta.staging.sciencebase.gov/graphql"
self._realm = "ScienceBase-B"
elif env == "dev":
self._graphql_url = "http://localhost:4000/graphql"
self._realm = "ScienceBase-B"
else:
self._graphql_url = "https://api.sciencebase.gov/graphql"
self._realm = "ScienceBase"
def get_graphql_url(self):
'''get_graphql_url
'''
return self._graphql_url
def login(self, username, password):
"""Log into ScienceBase using Keycloak
:param username: The ScienceBase user to log in as
:param password: The ScienceBase password for the given user
:return: The SbSessionEx object with the user logged in
"""
self._username = username
try:
authenticator = _keycloak_login(username, password, self._realm, self._auth_server_url)
self._token = authenticator.get_access_token()
self._refresh_token = authenticator.get_refresh_token()
self._client_id = authenticator.keycloak_client_config.client_id
self._token_server_uri = (
authenticator.keycloak_client_config.get_token_server_uri()
)
self._token_expire = authenticator.get_token_expire()
self._token_expire_refresh = authenticator.get_refresh_token_expire()
self._token_expire_time = (
self._token_expire + (datetime.today()).timestamp()
)
self._is_logged_in = True
except Exception:
self._logging.error(f"Keycloak login failed for {username} -- cloud services not available")
self._is_logged_in = False
return self
def is_logged_in(self):
'''is_logged_in
'''
return self._is_logged_in
def get_current_user(self):
'''get_current_user
'''
return self._username
def get_logger(self):
'''get_logger
'''
return self._logging
def refresh_token(self):
"""Refresh tokens in ScienceBaseEx"""
data = {
"client_id": self._client_id,
"grant_type": "refresh_token",
"refresh_token": self._refresh_token,
}
token_resp = requests.post(self._token_server_uri, data=data)
self._logging.info(token_resp.headers)
self._logging.info(json.loads(token_resp.content.decode("utf-8")))
if token_resp.status_code == 200:
auth_token_json = token_resp.json()
self._logging.info(auth_token_json)
self._token = auth_token_json["access_token"]
self._refresh_token = auth_token_json["refresh_token"]
self._token_expire = auth_token_json["expires_in"]
self._token_expire_refresh = auth_token_json["refresh_expires_in"]
self._token_expire_time = (
self._token_expire + (datetime.today()).timestamp()
)
self._logging.info("Token Refreshed.")
else:
raise Exception("Token Refreshed Failed.")
def refresh_token_before_expire(self, refresh_amount):
"""Refresh token if token has not expired, but will expire with in some time,
if token will expire with in that time then refresh will be triggered
:refresh_amount: Amount subtracted (is seconds) from expired token value, that will trigger token refresh
:return: True, if refresh is done, False, refresh is not triggered
"""
current_time = (datetime.today()).timestamp() + refresh_amount
if self._token_expire_time - current_time < 0:
self.refresh_token()
return True
return False
def refresh_token_time_remaining(self, refresh_amount):
"""Use for printing remaining time
useful for debugging session timeout
"""
current_time = (datetime.today()).timestamp() + refresh_amount
return self._token_expire_time - current_time
def upload_cloud_file_upload_session(self, item_id, filename, mimetype):
'''upload_cloud_file_upload_session
'''
return client.upload_cloud_file_upload_session(item_id, filename, mimetype, self)
def bulk_cloud_download(self, selected_rows):
'''generate bulk cloud download tokenized links
'''
return client.bulk_cloud_download(selected_rows, self)
def upload_s3_files(self, input):
'''upload external S3 bucket files to ScienceBase Item
'''
return client.upload_s3_files(input, self)
def publish_to_public_bucket(self, input):
'''publish file from public S3 bucket
'''
return client.publish_to_public_bucket(input, self)
def unpublish_from_public_bucket(self, input):
'''unpublish file from public S3 bucket
'''
return client.unpublish_from_public_bucket(input, self)
def delete_cloud_file(self, input):
'''delete files from ScienceBase item and S3 content bucket and/or S3 publish bucket
'''
return client.delete_cloud_file(input, self)
def get_header(self):
'''get_header
'''
return {
"content-type": "application/json",
"accept": "application/json",
"authorization": "Bearer " + self._token,
}
def _keycloak_login(username, password, realm, server_url):
'''keycloak_login
'''
# Developer note: For some reason this method will not work inside the SbSessionEx class
authenticator = auth.DirectAccessAuthenticator({
"realm": realm,
"auth-server-url": server_url,
"ssl-required": "external",
"resource": "sciencebasepy",
"public-client": True,
"confidential-port": 0,
})
authenticator.authenticate(username, password)
return authenticator
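# --- Hypothetical usage sketch (not part of the original module) ---
# The credentials and the 300-second refresh margin below are placeholders;
# the method names are taken from the SbSessionEx class defined above.
if __name__ == "__main__":
    session = SbSessionEx(env="beta").login("sb_user@example.com", "not-a-real-password")
    if session.is_logged_in():
        # refresh the access token if it would expire within the next 5 minutes
        session.refresh_token_before_expire(300)
        headers = session.get_header()
        session.get_logger().info("authorization header keys: %s", list(headers))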
| sciencebasepy | /sciencebasepy-2.0.13-py3-none-any.whl/sb3/SbSessionEx.py | SbSessionEx.py |
import json
import requests
class KeycloakClientConfig:
"""Stores the Keycloak OIDC JSON with some properties pull out for convenience
"""
_token_server_uri = None
_auth_server_uri = None
def __init__(self, oidc_config):
"""
Pass in the loaded Keycloak OIDC JSON config as a dict.
:param oidc_config: dict of the loaded JSON config
"""
self.oidc_config = oidc_config
self.client_id = oidc_config["resource"]
self._auth_server_uri = (
f"{oidc_config['auth-server-url']}/realms/"
f"{oidc_config['realm']}/protocol/openid-connect/auth"
)
self._token_server_uri = (
f"{oidc_config['auth-server-url']}/realms/"
f"{oidc_config['realm']}/protocol/openid-connect/token"
)
def get_token_server_uri(self):
'''get_token_server_uri
'''
return self._token_server_uri
def get_auth_server_uri(self):
'''get_auth_server_uri
'''
return self._auth_server_uri
class DirectAccessAuthenticator:
'''DirectAccessAuthenticator
Authenticates with the Keycloak server using Direct Access
'''
def __init__(self, keycloak_config):
if isinstance(keycloak_config, KeycloakClientConfig):
self.keycloak_client_config = keycloak_config
elif isinstance(keycloak_config, dict):
self.keycloak_client_config = KeycloakClientConfig(keycloak_config)
else:
raise ValueError(
"keycloak_config must be an instance of KeycloakClientConfig or dict of the config data"
)
self.auth_token = None
def get_access_token(self):
'''get_access_token
'''
if self.auth_token is None:
raise ValueError(
"auth_token is missing, authenticate() must run successfully "
"before calling get_access_token"
)
return self.auth_token["access_token"]
def get_refresh_token(self):
'''get_refresh_token
'''
if self.auth_token is None:
raise ValueError(
"auth_token is missing, authenticate() must run successfully "
"before calling get_refresh_token"
)
return self.auth_token["refresh_token"]
def get_token_expire(self):
'''get_token_expire
'''
if self.auth_token is None:
raise ValueError(
"auth_token is missing, authenticate() must run successfully "
"before calling get_refresh_token"
)
return self.auth_token["expires_in"]
def get_refresh_token_expire(self):
'''get_refresh_token_expire
'''
if self.auth_token is None:
raise ValueError(
"auth_token is missing, authenticate() must run successfully "
"before calling get_refresh_token"
)
return self.auth_token["refresh_expires_in"]
def authenticate(self, username=None, password=None):
'''authenticate
'''
payload = {
"client_id": self.keycloak_client_config.client_id,
"grant_type": "password",
"username": username,
"password": password,
}
token_resp = requests.post(
self.keycloak_client_config.get_token_server_uri(), data=payload
)
token_resp_json = token_resp.json()
if token_resp.status_code != 200:
raise Exception("Authentication Failed")
self.auth_token = token_resp_json
def __str__(self):
return json.dumps(self.auth_token)
| sciencebasepy | /sciencebasepy-2.0.13-py3-none-any.whl/sb3/auth.py | auth.py |
## Creative Commons Attribution 4.0 International
Creative Commons Attribution 4.0 International (CC BY 4.0) URL:
<http://creativecommons.org/licenses/by/4.0/>
Creative Commons Corporation (“Creative Commons”) is not a law firm and does not
provide legal services or legal advice. Distribution of Creative Commons public
licenses does not create a lawyer-client or other relationship. Creative Commons
makes its licenses and related information available on an “as-is” basis.
Creative Commons gives no warranties regarding its licenses, any material
licensed under their terms and conditions, or any related information. Creative
Commons disclaims all liability for damages resulting from their use to the
fullest extent possible.
**Using Creative Commons Public Licenses:** Creative Commons public licenses
provide a standard set of terms and conditions that creators and other rights
holders may use to share original works of authorship and other material subject
to copyright and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
**Considerations for licensors:** Our public licenses are intended for use by
those authorized to give the public permission to use material in ways otherwise
restricted by copyright and certain other rights. Our licenses are irrevocable.
Licensors should read and understand the terms and conditions of the license
they choose before applying it. Licensors should also secure all rights
necessary before applying our licenses so that the public can reuse the material
as expected. Licensors should clearly mark any material not subject to the
license. This includes other CC-licensed material, or material used under an
exception or limitation to copyright. More considerations for licensors.
**Considerations for the public:** By using one of our public licenses, a
licensor grants the public permission to use the licensed material under
specified terms and conditions. If the licensor’s permission is not necessary
for any reason–for example, because of any applicable exception or limitation to
copyright–then that use is not regulated by the license. Our licenses grant only
permissions under copyright and certain other rights that a licensor has
authority to grant. Use of the licensed material may still be restricted for
other reasons, including because others have copyright or other rights in the
material. A licensor may make special requests, such as asking that all changes
be marked or described. Although not required by our licenses, you are
encouraged to respect those requests where reasonable.
## Creative Commons Attribution 4.0 International Public License
By exercising the Licensed Rights (defined below), You accept and agree to be
bound by the terms and conditions of this Creative Commons Attribution 4.0
International Public License ("Public License"). To the extent this Public
License may be interpreted as a contract, You are granted the Licensed Rights in
consideration of Your acceptance of these terms and conditions, and the Licensor
grants You such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and conditions.
**Section 1 – Definitions.**
1. **Adapted Material** means material subject to Copyright and Similar Rights
that is derived from or based upon the Licensed Material and in which the
Licensed Material is translated, altered, arranged, transformed, or
otherwise modified in a manner requiring permission under the Copyright and
Similar Rights held by the Licensor. For purposes of this Public License,
where the Licensed Material is a musical work, performance, or sound
recording, Adapted Material is always produced where the Licensed Material
is synched in timed relation with a moving image.
2. **Adapter's License** means the license You apply to Your Copyright and
Similar Rights in Your contributions to Adapted Material in accordance with
the terms and conditions of this Public License.
3. **Copyright and Similar Rights** means copyright and/or similar rights
closely related to copyright including, without limitation, performance,
broadcast, sound recording, and Sui Generis Database Rights, without regard
to how the rights are labeled or categorized. For purposes of this Public
License, the rights specified in
Section [2(b)(1)-(2)](https://creativecommons.org/licenses/by/4.0/legalcode#s2b) are
not Copyright and Similar Rights.
4. **Effective Technological Measures** means those measures that, in the
absence of proper authority, may not be circumvented under laws fulfilling
obligations under Article 11 of the WIPO Copyright Treaty adopted on
December 20, 1996, and/or similar international agreements.
5. **Exceptions and Limitations** means fair use, fair dealing, and/or any
other exception or limitation to Copyright and Similar Rights that applies
to Your use of the Licensed Material.
6. **Licensed Material** means the artistic or literary work, database, or
other material to which the Licensor applied this Public License.
7. **Licensed Rights** means the rights granted to You subject to the terms and
conditions of this Public License, which are limited to all Copyright and
Similar Rights that apply to Your use of the Licensed Material and that the
Licensor has authority to license.
8. **Licensor** means the individual(s) or entity(ies) granting rights under
this Public License.
9. **Share** means to provide material to the public by any means or process
that requires permission under the Licensed Rights, such as reproduction,
public display, public performance, distribution, dissemination,
communication, or importation, and to make material available to the public
including in ways that members of the public may access the material from a
place and at a time individually chosen by them.
10. **Sui Generis Database Rights** means rights other than copyright resulting
from Directive 96/9/EC of the European Parliament and of the Council of 11
March 1996 on the legal protection of databases, as amended and/or
succeeded, as well as other essentially equivalent rights anywhere in the
world.
11. **You** means the individual or entity exercising the Licensed Rights under
this Public License. **Your** has a corresponding meaning.
**Section 2 – Scope.**
1. **License grant**.
1. Subject to the terms and conditions of this Public License, the Licensor
hereby grants You a worldwide, royalty-free, non-sublicensable,
non-exclusive, irrevocable license to exercise the Licensed Rights in
the Licensed Material to:
1. reproduce and Share the Licensed Material, in whole or in part; and
2. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions
and Limitations apply to Your use, this Public License does not apply,
and You do not need to comply with its terms and conditions.
3. Term. The term of this Public License is specified in
Section [6(a)](https://creativecommons.org/licenses/by/4.0/legalcode#s6a).
4. Media and formats; technical modifications allowed. The Licensor
authorizes You to exercise the Licensed Rights in all media and formats
whether now known or hereafter created, and to make technical
modifications necessary to do so. The Licensor waives and/or agrees not
to assert any right or authority to forbid You from making technical
modifications necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective Technological
Measures. For purposes of this Public License, simply making
modifications authorized by this
Section [2(a)(4)](https://creativecommons.org/licenses/by/4.0/legalcode#s2a4) never
produces Adapted Material.
5. Downstream recipients.
1. Offer from the Licensor – Licensed Material. Every recipient of the
Licensed Material automatically receives an offer from the Licensor
to exercise the Licensed Rights under the terms and conditions of
this Public License.
2. No downstream restrictions. You may not offer or impose any
additional or different terms or conditions on, or apply any
Effective Technological Measures to, the Licensed Material if doing
so restricts exercise of the Licensed Rights by any recipient of the
Licensed Material.
6. No endorsement. Nothing in this Public License constitutes or may be
construed as permission to assert or imply that You are, or that Your
use of the Licensed Material is, connected with, or sponsored, endorsed,
or granted official status by, the Licensor or others designated to
receive attribution as provided in
Section [3(a)(1)(A)(i)](https://creativecommons.org/licenses/by/4.0/legalcode#s3a1Ai).
2. **Other rights**.
1. Moral rights, such as the right of integrity, are not licensed under
this Public License, nor are publicity, privacy, and/or other similar
personality rights; however, to the extent possible, the Licensor waives
and/or agrees not to assert any such rights held by the Licensor to the
limited extent necessary to allow You to exercise the Licensed Rights,
but not otherwise.
2. Patent and trademark rights are not licensed under this Public License.
3. To the extent possible, the Licensor waives any right to collect
royalties from You for the exercise of the Licensed Rights, whether
directly or through a collecting society under any voluntary or waivable
statutory or compulsory licensing scheme. In all other cases the
Licensor expressly reserves any right to collect such royalties.
**Section 3 – License Conditions.**
Your exercise of the Licensed Rights is expressly made subject to the following
conditions.
1. **Attribution**.
1. If You Share the Licensed Material (including in modified form), You
must:
1. retain the following if it is supplied by the Licensor with the
Licensed Material:
1. identification of the creator(s) of the Licensed Material and
any others designated to receive attribution, in any reasonable
manner requested by the Licensor (including by pseudonym if
designated);
2. a copyright notice;
3. a notice that refers to this Public License;
4. a notice that refers to the disclaimer of warranties;
5. a URI or hyperlink to the Licensed Material to the extent
reasonably practicable;
2. indicate if You modified the Licensed Material and retain an
indication of any previous modifications; and
3. indicate the Licensed Material is licensed under this Public
License, and include the text of, or the URI or hyperlink to, this
Public License.
2. You may satisfy the conditions in
Section [3(a)(1)](https://creativecommons.org/licenses/by/4.0/legalcode#s3a1) in
any reasonable manner based on the medium, means, and context in which
You Share the Licensed Material. For example, it may be reasonable to
satisfy the conditions by providing a URI or hyperlink to a resource
that includes the required information.
3. If requested by the Licensor, You must remove any of the information
required by
Section [3(a)(1)(A)](https://creativecommons.org/licenses/by/4.0/legalcode#s3a1A) to
the extent reasonably practicable.
4. If You Share Adapted Material You produce, the Adapter's License You
apply must not prevent recipients of the Adapted Material from complying
with this Public License.
**Section 4 – Sui Generis Database Rights.**
> Where the Licensed Rights include Sui Generis Database Rights that apply to
> Your use of the Licensed Material:
1. for the avoidance of doubt,
Section [2(a)(1)](https://creativecommons.org/licenses/by/4.0/legalcode#s2a1) grants
You the right to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
2. if You include all or a substantial portion of the database contents in a
database in which You have Sui Generis Database Rights, then the database in
which You have Sui Generis Database Rights (but not its individual contents)
is Adapted Material; and
3. You must comply with the conditions in
Section [3(a)](https://creativecommons.org/licenses/by/4.0/legalcode#s3a) if
You Share all or a substantial portion of the contents of the database.
For the avoidance of doubt, this
Section [4](https://creativecommons.org/licenses/by/4.0/legalcode#s4) supplements
and does not replace Your obligations under this Public License where the
Licensed Rights include other Copyright and Similar Rights.
**Section 5 – Disclaimer of Warranties and Limitation of Liability.**
1. **Unless otherwise separately undertaken by the Licensor, to the extent
possible, the Licensor offers the Licensed Material as-is and as-available,
and makes no representations or warranties of any kind concerning the
Licensed Material, whether express, implied, statutory, or other. This
includes, without limitation, warranties of title, merchantability, fitness
for a particular purpose, non-infringement, absence of latent or other
defects, accuracy, or the presence or absence of errors, whether or not
known or discoverable. Where disclaimers of warranties are not allowed in
full or in part, this disclaimer may not apply to You.**
2. **To the extent possible, in no event will the Licensor be liable to You on
any legal theory (including, without limitation, negligence) or otherwise
for any direct, special, indirect, incidental, consequential, punitive,
exemplary, or other losses, costs, expenses, or damages arising out of this
Public License or use of the Licensed Material, even if the Licensor has
been advised of the possibility of such losses, costs, expenses, or damages.
Where a limitation of liability is not allowed in full or in part, this
limitation may not apply to You.**
3. The disclaimer of warranties and limitation of liability provided above
shall be interpreted in a manner that, to the extent possible, most closely
approximates an absolute disclaimer and waiver of all liability.
**Section 6 – Term and Termination.**
1. This Public License applies for the term of the Copyright and Similar Rights
licensed here. However, if You fail to comply with this Public License, then
Your rights under this Public License terminate automatically.
2. Where Your right to use the Licensed Material has terminated under
Section [6(a)](https://creativecommons.org/licenses/by/4.0/legalcode#s6a),
it reinstates:
1. automatically as of the date the violation is cured, provided it is
cured within 30 days of Your discovery of the violation; or
2. upon express reinstatement by the Licensor.
> For the avoidance of doubt, this
> Section [6(b)](https://creativecommons.org/licenses/by/4.0/legalcode#s6b) does
> not affect any right the Licensor may have to seek remedies for Your
> violations of this Public License.
1. For the avoidance of doubt, the Licensor may also offer the Licensed
Material under separate terms or conditions or stop distributing the
Licensed Material at any time; however, doing so will not terminate this
Public License.
2. Sections [1](https://creativecommons.org/licenses/by/4.0/legalcode#s1), [5](https://creativecommons.org/licenses/by/4.0/legalcode#s5), [6](https://creativecommons.org/licenses/by/4.0/legalcode#s6), [7](https://creativecommons.org/licenses/by/4.0/legalcode#s7),
and [8](https://creativecommons.org/licenses/by/4.0/legalcode#s8) survive
termination of this Public License.
**Section 7 – Other Terms and Conditions.**
1. The Licensor shall not be bound by any additional or different terms or
conditions communicated by You unless expressly agreed.
2. Any arrangements, understandings, or agreements regarding the Licensed
Material not stated herein are separate from and independent of the terms
and conditions of this Public License.
**Section 8 – Interpretation.**
1. For the avoidance of doubt, this Public License does not, and shall not be
interpreted to, reduce, limit, restrict, or impose conditions on any use of
the Licensed Material that could lawfully be made without permission under
this Public License.
2. To the extent possible, if any provision of this Public License is deemed
unenforceable, it shall be automatically reformed to the minimum extent
necessary to make it enforceable. If the provision cannot be reformed, it
shall be severed from this Public License without affecting the
enforceability of the remaining terms and conditions.
3. No term or condition of this Public License will be waived and no failure to
comply consented to unless expressly agreed to by the Licensor.
4. Nothing in this Public License constitutes or may be interpreted as a
limitation upon, or waiver of, any privileges and immunities that apply to
the Licensor or You, including from the legal processes of any jurisdiction
or authority.
Creative Commons is not a party to its public licenses. Notwithstanding,
Creative Commons may elect to apply one of its public licenses to material it
publishes and in those instances will be considered the “Licensor.” The text of
the Creative Commons public licenses is dedicated to the public domain under the
CC0 Public Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as otherwise
permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the use of the
trademark “Creative Commons” or any other trademark or logo of Creative Commons
without its prior written consent including, without limitation, in connection
with any unauthorized modifications to any of its public licenses or any other
arrangements, understandings, or agreements concerning use of licensed material.
For the avoidance of doubt, this paragraph does not form part of the public
licenses.
Creative Commons may be contacted at creativecommons.org.
| sciencebasepy | /sciencebasepy-2.0.13-py3-none-any.whl/sciencebasepy-2.0.13.dist-info/LICENSE.md | LICENSE.md |
# ScienceBeam Alignment
[](LICENSE)
ScienceBeam Alignment provides generic low-level sequence alignment utility functions, similar to Python's [SequenceMatcher](https://docs.python.org/3/library/difflib.html).
This project is currently mainly used for training data generation related to the [ScienceBeam project](https://github.com/elifesciences/sciencebeam). The project itself has no ScienceBeam dependency, however, and can be considered a standalone sequence alignment library, targeted more at document-size sequences than at massive gene sequences.
## Pre-requisites
- Python 2 or 3
## API
### SequenceMatcher
A mostly drop-in replacement for Python's [SequenceMatcher](https://docs.python.org/3/library/difflib.html)
is provided by [fuzzywuzzy](https://github.com/seatgeek/fuzzywuzzy)'s [StringMatcher](https://github.com/seatgeek/fuzzywuzzy/blob/master/fuzzywuzzy/StringMatcher.py).
In that respect, `sciencebeam-alignment` merely provides a wrapper with a fallback.
### WordSequenceMatcher
A wrapper around the aforementioned `SequenceMatcher`, but matching on word level tokens only.
It currently only implements `get_matching_blocks`.
The main advantage is that it is much faster for long texts, because it won't have to match individual characters. It isn't recommended for short texts, where character level alignment is probably more desirable.
example match results:
```python
>>> from sciencebeam_alignment.word_sequence_matcher import (
... WordSequenceMatcher
... )
>>> WordSequenceMatcher(a='word1', b='word2').get_matching_blocks()
[]
>>> WordSequenceMatcher(a='a word1 b', b='x word1 y').get_matching_blocks()
[(2, 2, 5)]
```
### GlobalSequenceMatcher and LocalSequenceMatcher
The [GlobalSequenceMatcher and LocalSequenceMatcher](https://github.com/elifesciences/sciencebeam-alignment/blob/develop/sciencebeam_alignment/align.py) implement the [Needleman-Wunsch](https://en.wikipedia.org/wiki/Needleman%E2%80%93Wunsch_algorithm) [global alignment](https://en.wikipedia.org/wiki/Sequence_alignment#Global_and_local_alignments) algorithm as well as the [Smith-Waterman](https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm) local alignment algorithm. The implementation is somewhat inspired by [python-alignment](https://github.com/eseraygun/python-alignment).
It does implement `get_matching_blocks` to match Python's [SequenceMatcher](https://docs.python.org/3/library/difflib.html).
By passing in a scoring object, the results can be influenced (e.g. gaps can be penalized more than mismatches).
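For example, a scoring that penalizes gaps more heavily than mismatches can be constructed like this (the values below are illustrative only, not recommended defaults):
```python
from sciencebeam_alignment.align import LocalSequenceMatcher, SimpleScoring

# illustrative values: gaps cost more than mismatches
GAP_HEAVY_SCORING = SimpleScoring(match_score=3, mismatch_score=-1, gap_score=-5)
matcher = LocalSequenceMatcher(a='a word1 b', b='x word2 y', scoring=GAP_HEAVY_SCORING)
print(matcher.get_matching_blocks())
```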
It also provides an optimized implementation using [Cython](https://cython.org/). The level of optimization depends on the type of the passed-in sequences and scoring; it is fastest with integer sequences and simple scoring. Especially with longer sequences, the potential speed-ups can be significant.
```python
>>> from sciencebeam_alignment.align import LocalSequenceMatcher, SimpleScoring
>>> DEFAULT_SCORING = SimpleScoring(match_score=3, mismatch_score=-1, gap_score=-2)
>>> LocalSequenceMatcher(a='a word1 b', b='x word2 y', scoring=DEFAULT_SCORING).get_matching_blocks()
[(1, 1, 5), (7, 7, 1), (9, 9, 0)]
```
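As a rough sketch of the integer-sequence fast path mentioned above, character sequences can be encoded as `int32` arrays before matching (the `encode_str` helper here is an assumption for illustration, not part of the public API):
```python
import numpy as np

from sciencebeam_alignment.align import LocalSequenceMatcher, SimpleScoring


def encode_str(s):
    # encode each character as its integer code point
    return np.array([ord(c) for c in s], dtype=np.int32)


scoring = SimpleScoring(match_score=3, mismatch_score=-1, gap_score=-2)
a = encode_str('a word1 b')
b = encode_str('x word1 y')
print(LocalSequenceMatcher(a, b, scoring).get_matching_blocks())
```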
In addition, the `get_multiple_matching_blocks` can be used to retrieve multiple matching blocks with the same score:
```python
>>> from sciencebeam_alignment.align import GlobalSequenceMatcher, SimpleScoring
>>> DEFAULT_SCORING = SimpleScoring(match_score=3, mismatch_score=-1, gap_score=-2)
>>> matcher = GlobalSequenceMatcher(a='xyzabc', b='abcxyz', scoring=DEFAULT_SCORING)
>>> list(matcher.get_multiple_matching_blocks(limit=2))
[[(3, 0, 3)], [(0, 3, 3)]]
```
`get_multiple_matching_blocks` returns a generator. The number of variations can be limited using the `limit` argument or by simply stopping early.
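For example, reusing the `matcher` from the snippet above, the first few variations can be taken with `itertools.islice` instead of passing `limit` (a minimal sketch):
```python
from itertools import islice

# take at most two variations and stop consuming the generator early
first_two = list(islice(matcher.get_multiple_matching_blocks(), 2))
```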
The `GlobalSequenceMatcher` can also be used to calculate the [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance) (or _edit distance_). An example is provided in `sciencebeam_alignment.levenshtein`:
```python
>>> from sciencebeam_alignment.levenshtein import get_levenshtein_distance
>>> get_levenshtein_distance('kitten', 'sitting')
3
>>> from sciencebeam_alignment.levenshtein import get_levenshtein_ratio
>>> get_levenshtein_ratio('kitten', 'sitting')
0.5714285714285714
```
Calculating the Levenshtein distance this way is mainly provided as an example. You might want to consider using [python-Levenshtein](https://github.com/ztane/python-Levenshtein).
To check whether the fast implementation is enabled:
```python
>>> from sciencebeam_alignment.align import native_enabled
>>> native_enabled
True
```
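The `require_native` context manager, used by the package's own benchmark code, can force or disable the native implementation; a minimal sketch, assuming the same context-manager usage as in those benchmarks:
```python
from sciencebeam_alignment.align import LocalSequenceMatcher, SimpleScoring, require_native

scoring = SimpleScoring(match_score=3, mismatch_score=-1, gap_score=-2)
with require_native(False):
    # force the pure-Python implementation (e.g. to compare results or timings)
    blocks = LocalSequenceMatcher(a='a word1 b', b='x word1 y', scoring=scoring).get_matching_blocks()
```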
## Development
Development can be done either using Docker (default) or a virtual environment.
All commands are available via `make`.
### Development using Docker
Build and run tests:
```bash
make build test
```
Or intended for CI:
```bash
make ci-build-and-test
```
### Development using a virtual environment
`make` targets with the `dev-` prefix are intended for use with the virtual environment.
This requires that you already have Python installed.
#### Setup (virtual environment)
```bash
make dev-venv
```
To update the dependencies:
```bash
make dev-install
```
#### Cython (virtual environment)
Compile code using Cython:
```bash
make dev-cython-clean dev-cython-compile
```
#### Tests (virtual environment)
```bash
make dev-test
```
Or:
```bash
make dev-watch
```
sciencebeam-alignment | /sciencebeam_alignment-0.0.5.tar.gz/sciencebeam_alignment-0.0.5/README.md | README.md
quality_prob: 0.635222 | learning_prob: 0.988256
from __future__ import absolute_import, print_function
import logging
import timeit
import numpy as np
from sciencebeam_alignment.align import (
SimpleScoring,
CustomScoring,
LocalSequenceMatcher,
require_native
)
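# Benchmark comparing the optimized (Cython) implementation against the pure Python
# fallback, for custom vs simple scoring and for string vs integer (encoded) sequences.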
DEFAULT_MATCH_SCORE = 2
DEFAULT_MISMATCH_SCORE = -1
DEFAULT_GAP_SCORE = -3
DEFAULT_SCORING = SimpleScoring(
DEFAULT_MATCH_SCORE, DEFAULT_MISMATCH_SCORE, DEFAULT_GAP_SCORE
)
CUSTOM_SCORING = CustomScoring(
lambda a, b: DEFAULT_MATCH_SCORE if a == b else DEFAULT_MISMATCH_SCORE,
DEFAULT_GAP_SCORE
)
SHORT_STRING1 = 'abc'
SHORT_STRING2 = 'def'
LONG_STRING1 = 'abcefghijk' * 100
LONG_STRING2 = ''.join(list(reversed(LONG_STRING1)))
def encode_str(s):
return np.array([int(ord(x)) for x in s], dtype=np.int32)
LONG_ENCODED1 = encode_str(LONG_STRING1)
LONG_ENCODED2 = encode_str(LONG_STRING2)
def test_align_with_scoring_fn_py():
with require_native(False):
LocalSequenceMatcher(LONG_STRING1, LONG_STRING2, CUSTOM_SCORING).get_matching_blocks()
def test_align_with_scoring_fn():
with require_native(True):
LocalSequenceMatcher(LONG_STRING1, LONG_STRING2, CUSTOM_SCORING).get_matching_blocks()
def test_align_with_simple_scoring():
with require_native(True):
LocalSequenceMatcher(LONG_STRING1, LONG_STRING2, DEFAULT_SCORING).get_matching_blocks()
def test_align_with_simple_scoring_int():
with require_native(True):
LocalSequenceMatcher(LONG_ENCODED1, LONG_ENCODED2, DEFAULT_SCORING).get_matching_blocks()
def test_align_with_simple_scoring_str():
with require_native(True):
LocalSequenceMatcher(LONG_STRING1, LONG_STRING2, DEFAULT_SCORING).get_matching_blocks()
def report_timing(fn, number=1):
timeit_result_ms = timeit.timeit(
fn + "()",
setup="from __main__ import " + fn,
number=number
) * 1000
print("{} ({}x):\n{:f} ms / it ({:f} ms total)\n".format(
fn,
number,
timeit_result_ms / number,
timeit_result_ms
))
def main():
print("len LONG_STRING1: {}\n".format(len(LONG_STRING1)))
print("len LONG_ENCODED1: {}\n".format(len(LONG_ENCODED1)))
report_timing("test_align_with_scoring_fn_py")
report_timing("test_align_with_scoring_fn", 3)
report_timing("test_align_with_simple_scoring", 3)
report_timing("test_align_with_simple_scoring_int", 3)
report_timing("test_align_with_simple_scoring_str", 3)
if __name__ == "__main__":
logging.basicConfig(level='INFO')
main()
sciencebeam-alignment | /sciencebeam_alignment-0.0.5.tar.gz/sciencebeam_alignment-0.0.5/sciencebeam_alignment/align_performance.py | align_performance.py
quality_prob: 0.577376 | learning_prob: 0.210644
import logging
import warnings
from collections import deque
from itertools import islice
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import numpy as np
from six import (
with_metaclass,
string_types,
binary_type
)
try:
from sciencebeam_alignment.align_fast_utils import ( # pylint: disable=no-name-in-module
native_compute_inner_alignment_matrix_simple_scoring_int,
native_compute_inner_alignment_matrix_simple_scoring_any,
native_compute_inner_alignment_matrix_scoring_fn_any,
native_alignment_matrix_single_path_traceback
)
native_enabled = True
except Exception as e: # pylint: disable=broad-except
warnings.warn('fast implementation not available due to: %s' % e)
native_enabled = False
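# Effectively a "minus infinity" score (fits a signed 32-bit int); used as the minimum
# score for global alignment and as an out-of-range sentinel during traceback.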
MIN_INT = -2147483647
def get_logger():
return logging.getLogger(__name__)
@contextmanager
def require_native(required=True):
global native_enabled # pylint: disable=W0603
was_enabled = native_enabled
native_enabled = required
yield
native_enabled = was_enabled
def _is_array_of_type(a, dtype):
return np.issubdtype(a.dtype, dtype)
# pylint: disable=too-many-arguments
def compute_inner_alignment_matrix_simple_scoring_py(
scoring_matrix, a, b, match_score, mismatch_score, gap_score, min_score):
"""Pure python fallback implementation.
Calculates the inner alignment matrix for a and b using simple scoring parameters:
match_score, mismatch_score, gap_score, min_score
Arguments:
scoring_matrix {matrix} -- Output matrix (1 + len(a), 1 + len(b))
a {sequence} -- First sequence (string or list)
b {sequence} -- Second sequence (string or list)
match_score {int} -- Score for a match
mismatch_score {int} -- Score for a mismatch
    gap_score {int} -- Score for a gap (more negative values penalise gaps between matches more heavily)
min_score {int} -- Minimum score (e.g. zero if scores shouldn't be allowed to go negative)
"""
m = len(a) + 1
n = len(b) + 1
for i in range(1, m):
for j in range(1, n):
scoring_matrix[i, j] = max(
min_score,
# Match elements.
scoring_matrix[i - 1, j - 1] +
(match_score if a[i - 1] == b[j - 1] else mismatch_score),
# Gap on sequenceA.
scoring_matrix[i, j - 1] + gap_score,
# Gap on sequenceB.
scoring_matrix[i - 1, j] + gap_score
)
def compute_inner_alignment_matrix_scoring_fn_py(
scoring_matrix, a, b, scoring_fn, gap_score, min_score):
"""Pure python fallback implementation.
Same as compute_inner_alignment_matrix_simple_scoring_py but uses a function
to calculate match / mismatch (may be slower but more flexible).
Arguments:
scoring_matrix {matrix} -- Output matrix (1 + len(a), 1 + len(b))
a {sequence} -- First sequence (string or list)
b {sequence} -- Second sequence (string or list)
scoring_fn {function} -- Function to return the score between two items (e.g. characters)
    gap_score {int} -- Score for a gap (more negative values penalise gaps between matches more heavily)
min_score {int} -- Minimum score (e.g. zero if scores shouldn't be allowed to go negative)
"""
m = len(a) + 1
n = len(b) + 1
for i in range(1, m):
for j in range(1, n):
scoring_matrix[i, j] = max(
min_score,
# Match elements.
scoring_matrix[i - 1, j - 1] +
scoring_fn(a[i - 1], b[j - 1]),
# Gap on sequenceA.
scoring_matrix[i, j - 1] + gap_score,
# Gap on sequenceB.
scoring_matrix[i - 1, j] + gap_score
)
def compute_inner_alignment_matrix_simple_scoring(
scoring_matrix, a, b, match_score, mismatch_score, gap_score, min_score):
try:
if (
native_enabled and
_is_array_of_type(a, np.int32) and _is_array_of_type(b, np.int32)
):
native_compute_inner_alignment_matrix_simple_scoring_int(
scoring_matrix, a, b,
match_score, mismatch_score, gap_score, min_score
)
return
elif native_enabled:
native_compute_inner_alignment_matrix_simple_scoring_any(
scoring_matrix, a, b,
match_score, mismatch_score, gap_score, min_score
)
return
except AttributeError:
pass
compute_inner_alignment_matrix_simple_scoring_py(
scoring_matrix, a, b,
match_score, mismatch_score, gap_score, min_score
)
def compute_inner_alignment_matrix_custom_scoring(
scoring_matrix, a, b, scoring_fn, gap_score, min_score):
if native_enabled:
native_compute_inner_alignment_matrix_scoring_fn_any(
scoring_matrix, a, b,
scoring_fn, gap_score, min_score
)
else:
compute_inner_alignment_matrix_scoring_fn_py(
scoring_matrix, a, b,
scoring_fn, gap_score, min_score
)
def compute_inner_alignment_matrix(
scoring_matrix, a, b, scoring, min_score):
if isinstance(scoring, CustomScoring):
compute_inner_alignment_matrix_custom_scoring(
scoring_matrix, a, b,
scoring.scoring_fn, scoring.gap_score, min_score
)
else:
compute_inner_alignment_matrix_simple_scoring(
scoring_matrix, a, b,
scoring.match_score, scoring.mismatch_score, scoring.gap_score,
min_score
)
def _next_locs(score_matrix, i, j, is_local):
diag_score = score_matrix[i - 1][j - 1] if (i != 0 and j != 0) else MIN_INT
up_score = score_matrix[i - 1][j] if i != 0 else MIN_INT
left_score = score_matrix[i][j - 1] if j != 0 else MIN_INT
max_score = max(diag_score, up_score, left_score)
if max_score == MIN_INT:
return []
if (max_score == 0 or diag_score == 0) and (is_local or (i == 1 and j == 1)):
return []
if diag_score == max_score:
get_logger().debug('diag_score: %s (%s)', diag_score, max_score)
return [(i - 1, j - 1)]
locs = []
if up_score == max_score:
locs.append((i - 1, j))
if left_score == max_score:
locs.append((i, j - 1))
return locs
def alignment_matrix_traceback_py(score_matrix, start_locs, is_local):
# Using LinkedListNode to cheaply branch off to multiple paths
pending_roots = deque([
LinkedListNode(tuple(loc))
for loc in start_locs
])
while pending_roots:
n = pending_roots.pop()
i, j = n.data
next_locs = _next_locs(score_matrix, i, j, is_local)
get_logger().debug('next_locs: %s', next_locs)
if not next_locs:
yield n
else:
pending_roots.extend([
LinkedListNode(next_loc, n)
for next_loc in next_locs
])
def alignment_matrix_traceback(score_matrix, start_locs, is_local, limit):
if native_enabled and limit == 1:
yield native_alignment_matrix_single_path_traceback(
score_matrix, start_locs[0], 1 if is_local else 0
)
else:
paths = alignment_matrix_traceback_py(
score_matrix, reversed(start_locs), is_local
)
if limit:
paths = islice(paths, limit)
for path in paths:
yield path
class SimpleScoring(object):
def __init__(self, match_score, mismatch_score, gap_score):
self.match_score = match_score
self.mismatch_score = mismatch_score
self.gap_score = gap_score
class CustomScoring(object):
def __init__(self, scoring_fn, gap_score):
self.scoring_fn = scoring_fn
self.gap_score = gap_score
class LinkedListNode(object):
def __init__(self, data, next_node=None):
self.data = data
self.next_node = next_node
def __str__(self):
if self.next_node is not None:
return str(self.data) + ' -> ' + str(self.next_node)
return str(self.data)
def __iter__(self):
yield self.data
next_node = self.next_node
while next_node is not None:
yield next_node.data
next_node = next_node.next_node
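# Convert an alignment path of matrix positions into (a_start, b_start, size) matching
# blocks, only covering positions where the original items are actually equal.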
def _path_to_matching_blocks(path, a, b):
block_ai = 0
block_bi = 0
block_size = 0
for ai, bi in ((ai_ - 1, bi_ - 1) for ai_, bi_ in path):
if a[ai] == b[bi]:
if block_size and block_ai + block_size == ai and block_bi + block_size == bi:
block_size += 1
else:
if block_size:
yield (block_ai, block_bi, block_size)
block_ai = ai
block_bi = bi
block_size = 1
if block_size:
yield (block_ai, block_bi, block_size)
def _as_np_array(s):
if isinstance(s, binary_type):
return np.frombuffer(s, dtype=np.uint8).astype(np.int32)
if isinstance(s, string_types):
return np.array([ord(c) for c in s], dtype=np.int32)
return np.asarray(s)
wrap_sequence = _as_np_array
class AbstractSequenceMatcher(object, with_metaclass(ABCMeta)):
def __init__(self, a, b, scoring):
self.a = a
self.b = b
self.scoring = scoring
self._alignment_matrix = None
self._a = _as_np_array(a)
self._b = _as_np_array(b)
@abstractmethod
def _computer_alignment_matrix(self):
pass
def _get_alignment_matrix(self):
if self._alignment_matrix is None:
self._alignment_matrix = self._computer_alignment_matrix()
return self._alignment_matrix
@abstractmethod
def get_multiple_matching_blocks(self, limit=None):
pass
def get_matching_blocks(self):
for matching_blocks in self.get_multiple_matching_blocks(limit=1):
return list(matching_blocks) + [(len(self.a), len(self.b), 0)]
return [(len(self.a), len(self.b), 0)]
class LocalSequenceMatcher(AbstractSequenceMatcher):
"""
Local sequence matcher using Smith-Waterman algorithm
"""
def _computer_alignment_matrix(self):
m = len(self._a) + 1
n = len(self._b) + 1
scoring_matrix = np.empty((m, n), dtype=int)
scoring_matrix[:, 0] = 0
scoring_matrix[0, :] = 0
min_score = 0
compute_inner_alignment_matrix(
scoring_matrix,
self._a, self._b,
self.scoring,
min_score
)
return scoring_matrix
def get_multiple_matching_blocks(self, limit=None):
score_matrix = self._get_alignment_matrix()
max_score = score_matrix.max()
max_score_loc = np.argwhere(score_matrix == max_score)
get_logger().debug('max_score_loc: %s', max_score_loc)
is_local = True
paths = alignment_matrix_traceback(score_matrix, max_score_loc, is_local, limit=limit or 0)
return (
list(_path_to_matching_blocks(path, self.a, self.b))
for path in paths
)
class GlobalSequenceMatcher(AbstractSequenceMatcher):
"""
Global sequence matcher using Needleman-Wunsch algorithm
"""
def _computer_alignment_matrix(self):
m = len(self._a) + 1
n = len(self._b) + 1
scoring_matrix = np.empty((m, n), dtype=int)
for i in range(m):
scoring_matrix[i, 0] = self.scoring.gap_score * i
for j in range(n):
scoring_matrix[0, j] = self.scoring.gap_score * j
min_score = MIN_INT
compute_inner_alignment_matrix(
scoring_matrix,
self._a, self._b,
self.scoring,
min_score
)
return scoring_matrix
def get_multiple_matching_blocks(self, limit=None):
score_matrix = self._get_alignment_matrix()
m = len(self._a) + 1
n = len(self._b) + 1
start_locs = [(m - 1, n - 1)]
is_local = False
paths = alignment_matrix_traceback(score_matrix, start_locs, is_local, limit=limit or 0)
return (
list(_path_to_matching_blocks(path, self.a, self.b))
for path in paths
)
sciencebeam-alignment | /sciencebeam_alignment-0.0.5.tar.gz/sciencebeam_alignment-0.0.5/sciencebeam_alignment/align.py | align.py
quality_prob: 0.647464 | learning_prob: 0.344085
# ScienceBeam Parser
[](https://pypi.org/project/sciencebeam-parser/)
[](https://opensource.org/licenses/MIT)
ScienceBeam Parser allows you to parse scientific documents.
It started out as a partial Python variation of GROBID and allows you to re-use some of its models.
However, it may deviate more in the future.
## Prerequisites
Docker containers are provided and can be used on multiple operating systems; the Docker setup can also serve as an example for Linux / Ubuntu based systems.
Otherwise, the following paragraphs list some of the prerequisites when not using Docker:
Without Docker, this currently only supports Linux due to the binaries used (`pdfalto`, `wapiti`).
It may also be used on other platforms without Docker, provided matching binaries are configured.
For the Computer Vision models, PyTorch is required.
For OCR, Tesseract needs to be installed. On Ubuntu the following command can be used:
```bash
apt-get install libtesseract4 tesseract-ocr-eng libtesseract-dev libleptonica-dev
```
The Word* to PDF conversion requires [LibreOffice](https://www.libreoffice.org/).
## Development
### Create Virtual Environment and install Dependencies
```bash
make dev-venv
```
### Configuration
There is no implicit "grobid-home" directory. The only configuration file is the [default config.yml](sciencebeam_parser/resources/default_config/config.yml).
Paths may point to local or remote files. Remote files are downloaded and cached locally (URLs are assumed to be versioned).
You may override config values using environment variables.
Environment variables should start with `SCIENCEBEAM_PARSER__`. After that `__` is used as a section separator.
For example `SCIENCEBEAM_PARSER__LOGGING__HANDLERS__LOG_FILE__LEVEL` would override `logging.handlers.log_file.level`.
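As an illustration only (this helper is not part of ScienceBeam Parser), the mapping between a config path and its environment variable name can be sketched as:
```python
def config_path_to_env_name(config_path: str) -> str:
    # 'logging.handlers.log_file.level' -> 'SCIENCEBEAM_PARSER__LOGGING__HANDLERS__LOG_FILE__LEVEL'
    return 'SCIENCEBEAM_PARSER__' + '__'.join(
        part.upper() for part in config_path.split('.')
    )


assert config_path_to_env_name('logging.handlers.log_file.level') == (
    'SCIENCEBEAM_PARSER__LOGGING__HANDLERS__LOG_FILE__LEVEL'
)
```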
Generally, resources and models are loaded on demand, depending on the `preload_on_startup` configuration option (`SCIENCEBEAM_PARSER__PRELOAD_ON_STARTUP` environment variable).
Setting that option to `true` will load the models "eagerly" at startup.
### Run tests (linting, pytest, etc.)
```bash
make dev-test
```
### Start the server
```bash
make dev-start
```
Run the server in debug mode (including auto-reload and debug logging):
```bash
make dev-debug
```
Run the server with auto reload but no debug logging:
```bash
make dev-start-no-debug-logging-auto-reload
```
### Submit a sample document to the server
```bash
curl --fail --show-error \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/pdfalto"
```
### Submit a sample document to the header model
The following output formats are supported:
| output_format | description |
| ------------- | ----- |
| raw_data | generated data (without using the model) |
| data | generated data with predicted labels |
| xml | using simple xml elements for predicted labels |
| json | json of prediction |
```bash
curl --fail --show-error \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/models/header?first_page=1&last_page=1&output_format=xml"
```
### Submit a sample document to the name-header api
```bash
curl --fail --show-error \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/models/name-header?first_page=1&last_page=1&output_format=xml"
```
### GROBID compatible APIs
The following APIs aim to be compatible with selected endpoints of
[GROBID's REST API](https://grobid.readthedocs.io/en/latest/Grobid-service/), for common use-cases.
#### Submit a sample document to the header document api
The `/processHeaderDocument` endpoint is similar to `/processFulltextDocument`, but its output will only contain the front matter.
It still uses the same segmentation model, but it won't need to run a number of the other models.
```bash
curl --fail --show-error \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/processHeaderDocument?first_page=1&last_page=1"
```
The default response will be TEI XML (`application/tei+xml`).
The `Accept` HTTP request header may be used to request JATS, with the mime type `application/vnd.jats+xml`.
```bash
curl --fail --show-error \
--header 'Accept: application/vnd.jats+xml' \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/processHeaderDocument?first_page=1&last_page=1"
```
Regardless, the returned content type will be `application/xml`.
(BibTeX output is currently not supported)
#### Submit a sample document to the full text document api
```bash
curl --fail --show-error \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/processFulltextDocument?first_page=1&last_page=1"
```
The default response will be TEI XML (`application/tei+xml`).
The `Accept` HTTP request header may be used to request JATS, with the mime type `application/vnd.jats+xml`.
```bash
curl --fail --show-error \
--header 'Accept: application/vnd.jats+xml' \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/processFulltextDocument?first_page=1&last_page=1"
```
Regardless, the returned content type will be `application/xml`.
#### Submit a sample document to the references api
The `/processReferences` endpoint is similar to `/processFulltextDocument`, but its output will only contain the references.
It still uses the same segmentation model, but it won't need to run a number of the other models.
```bash
curl --fail --show-error \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/processReferences?first_page=1&last_page=100"
```
The default response will be TEI XML (`application/tei+xml`).
The `Accept` HTTP request header may be used to request JATS, with the mime type `application/vnd.jats+xml`.
```bash
curl --fail --show-error \
--header 'Accept: application/vnd.jats+xml' \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/processReferences?first_page=1&last_page=100"
```
Regardless, the returned content type will be `application/xml`.
#### Submit a sample document to the full text asset document api
The `/processFulltextAssetDocument` endpoint is like `/processFulltextDocument`, but instead of returning the TEI XML directly, the response is a ZIP containing the TEI XML document along with other assets such as figure images.
```bash
curl --fail --show-error \
--output "example-tei-xml-and-assets.zip" \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/processFulltextAssetDocument?first_page=1&last_page=1"
```
The default response will be ZIP containing TEI XML (`application/tei+xml+zip`).
The `Accept` HTTP request header may be used to request a ZIP containing JATS,
with the mime type `application/vnd.jats+xml+zip`.
```bash
curl --fail --show-error \
--header 'Accept: application/vnd.jats+xml+zip' \
--output "example-jats-xml-and-assets.zip" \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/processFulltextAssetDocument?first_page=1&last_page=1"
```
Regardless, the returned content type will be `application/zip`.
### Submit a sample document to the `/convert` api
The `/convert` API is aiming to be a single endpoint for the conversion of PDF documents to a semantic representation.
By default it will return JATS XML.
```bash
curl --fail --show-error \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/convert?first_page=1&last_page=1"
```
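Equivalently, the endpoint can be called from Python. This is a hedged sketch using the third-party `requests` package (not part of ScienceBeam Parser), mirroring the curl call above:
```python
import requests

# submit a PDF to the /convert endpoint of a locally running server
with open('test-data/minimal-example.pdf', 'rb') as pdf_file:
    response = requests.post(
        'http://localhost:8080/api/convert',
        params={'first_page': 1, 'last_page': 1},
        files={'file': ('minimal-example.pdf', pdf_file, 'application/pdf')},
    )
response.raise_for_status()
print(response.headers.get('Content-Type'))
print(response.text[:500])
```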
The following sections describe parameters that influence the response:
#### Using the `Accept` HTTP header parameter
The [Accept HTTP header](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields)
may be used to request a different response type, e.g. `application/tei+xml` for TEI XML.
```bash
curl --fail --show-error \
--header 'Accept: application/tei+xml' \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/convert?first_page=1&last_page=1"
```
Regardless, the returned content type will be `application/xml`.
The `/convert` endpoint can also be used for a Word* to PDF conversion
by specifying `application/pdf` as the desired response:
```bash
curl --fail --show-error --silent \
--header 'Accept: application/pdf' \
--form "file=@test-data/minimal-office-open.docx;filename=test-data/minimal-office-open.docx" \
--output "example.pdf" \
"http://localhost:8080/api/convert?first_page=1&last_page=1"
```
#### Using the `includes` request parameter
The `includes` request parameter may be used to specify the requested fields in order to reduce processing time,
e.g. `title,abstract` to request the `title` and the `abstract` only. In that case fewer models will be used.
The output may still contain more fields than requested.
```bash
curl --fail --show-error \
--form "file=@test-data/minimal-example.pdf;filename=test-data/minimal-example.pdf" \
--silent "http://localhost:8080/api/convert?includes=title,abstract"
```
The currently supported fields are:
* `title`
* `abstract`
* `authors`
* `affiliations`
* `references`
Passing in any other values (or no values) behaves as if no `includes` parameter was passed in.
### Word* support
All of the above APIs will also accept a Word* document instead of a PDF.
Formats that are supported:
* `.docx` (media type: `application/vnd.openxmlformats-officedocument.wordprocessingml.document`)
* `.dotx` (media type: `application/vnd.openxmlformats-officedocument.wordprocessingml.template`)
* `.doc` (media type: `application/msword`)
* `.rtf` (media type: `application/rtf`)
The support is currently implemented by converting the document to PDF using [LibreOffice](https://www.libreoffice.org/).
Where no content type is provided, the content type is inferred from the file extension.
For example:
```bash
curl --fail --show-error \
--form "file=@test-data/minimal-office-open.docx;filename=test-data/minimal-office-open.docx" \
--silent "http://localhost:8080/api/convert?first_page=1&last_page=1"
```
### Docker Usage
```bash
docker pull elifesciences/sciencebeam-parser
```
```bash
docker run --rm \
-p 8070:8070 \
elifesciences/sciencebeam-parser
```
Note: Docker images with the tag suffix `-cv` include the dependencies required for the CV (Computer Vision) models (disabled by default).
```bash
docker run --rm \
-p 8070:8070 \
--env SCIENCEBEAM_PARSER__PROCESSORS__FULLTEXT__USE_CV_MODEL=true \
--env SCIENCEBEAM_PARSER__PROCESSORS__FULLTEXT__USE_OCR_MODEL=true \
elifesciences/sciencebeam-parser:latest-cv
```
Non-release builds are available with the `_unstable` image suffix, e.g. `elifesciences/sciencebeam-parser_unstable`.
## See also
* [Architecture](ARCHITECTURE.md)
* [Python API](doc/python_library.md)
* [Training](doc/training.md)
sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/README.md | README.md
quality_prob: 0.311008 | learning_prob: 0.894559
# ScienceBeam Parser Python Library
ScienceBeam Parser allows you to parse scientific documents. It provides a REST API Service, as well as a Python API.
## Installation
```bash
pip install sciencebeam-parser
```
## CLI
### CLI: Start Server
```bash
python -m sciencebeam_parser.service.server --port=8080
```
The server will start to listen on port `8080`.
The [default config.yml](../sciencebeam_parser/resources/default_config/config.yml) defines what models to load.
## Python API
### Python API: Start Server
```python
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.resources.default_config import DEFAULT_CONFIG_FILE
from sciencebeam_parser.service.server import create_app
config = AppConfig.load_yaml(DEFAULT_CONFIG_FILE)
app = create_app(config)
app.run(port=8080, host='127.0.0.1', threaded=True)
```
The server will start to listen on port `8080`.
### Python API: Parse Multiple Files
```python
from sciencebeam_parser.resources.default_config import DEFAULT_CONFIG_FILE
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.utils.media_types import MediaTypes
from sciencebeam_parser.app.parser import ScienceBeamParser
config = AppConfig.load_yaml(DEFAULT_CONFIG_FILE)
# the parser contains all of the models
sciencebeam_parser = ScienceBeamParser.from_config(config)
# a session provides a scope and temporary directory for intermediate files
# it is recommended to create a separate session for every document
with sciencebeam_parser.get_new_session() as session:
session_source = session.get_source(
'test-data/minimal-example.pdf',
MediaTypes.PDF
)
converted_file = session_source.get_local_file_for_response_media_type(
MediaTypes.TEI_XML
)
# Note: the converted file will be in the temporary directory of the session
print('converted file:', converted_file)
```
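A hedged sketch of how the same parser instance might be reused for several documents (one session per document, as recommended above); the input list, output directory and `shutil.copyfile` step are illustrative assumptions, not part of the library API:
```python
import shutil
from pathlib import Path

from sciencebeam_parser.resources.default_config import DEFAULT_CONFIG_FILE
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.utils.media_types import MediaTypes
from sciencebeam_parser.app.parser import ScienceBeamParser

config = AppConfig.load_yaml(DEFAULT_CONFIG_FILE)
sciencebeam_parser = ScienceBeamParser.from_config(config)  # load the models once

input_files = ['test-data/minimal-example.pdf']  # add more PDFs here
output_dir = Path('output')
output_dir.mkdir(exist_ok=True)

for input_file in input_files:
    # one session (and temporary directory) per document
    with sciencebeam_parser.get_new_session() as session:
        session_source = session.get_source(input_file, MediaTypes.PDF)
        converted_file = session_source.get_local_file_for_response_media_type(
            MediaTypes.TEI_XML
        )
        # copy the result out of the session's temporary directory before it is cleaned up
        shutil.copyfile(converted_file, output_dir / (Path(input_file).stem + '.tei.xml'))
```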
## More Usage Examples
For more usage examples see
[sciencebeam-usage-examples](https://github.com/elifesciences/sciencebeam-usage-examples).
[](https://mybinder.org/v2/gh/elifesciences/sciencebeam-usage-examples/HEAD?urlpath=tree/sciencebeam-parser)
sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/doc/python_library.md | python_library.md
quality_prob: 0.494629 | learning_prob: 0.732472
import dataclasses
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import (
Callable,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast
)
from typing_extensions import Protocol
from sciencebeam_parser.document.layout_document import (
EMPTY_BLOCK,
LayoutBlock,
LayoutGraphic,
LayoutToken
)
class SemanticContentWrapper(ABC):
def get_text(self) -> str:
return ' '.join((
block.text
for block in self.iter_blocks()
))
def __len__(self) -> int:
return len(list(self.iter_blocks()))
def get_short_semantic_content_repr(self):
return '%s(%r)' % (type(self).__name__, self.get_text())
@abstractmethod
def iter_blocks(self) -> Iterable[LayoutBlock]:
pass
def iter_tokens(self) -> Iterable[LayoutToken]:
return (
token
for block in self.iter_blocks()
for token in block.iter_all_tokens()
)
@property
def merged_block(self) -> LayoutBlock:
return LayoutBlock.merge_blocks(self.iter_blocks())
@dataclass
class SemanticSimpleContentWrapper(SemanticContentWrapper):
content: LayoutBlock = EMPTY_BLOCK
layout_block: dataclasses.InitVar[LayoutBlock] = None
def __post_init__(self, layout_block: Optional[LayoutBlock] = None):
assert isinstance(self.content, LayoutBlock)
if layout_block is not None:
self.add_content(layout_block)
def iter_blocks(self) -> Iterable[LayoutBlock]:
return [self.content]
def add_content(self, block: LayoutBlock):
self.content = LayoutBlock(
lines=self.content.lines + block.lines
)
class SemanticTextContentWrapper(SemanticSimpleContentWrapper):
pass
class SemanticContentFactoryProtocol(Protocol):
def __call__(self, layout_block: LayoutBlock) -> SemanticContentWrapper:
pass
EMPTY_CONTENT = SemanticSimpleContentWrapper()
T_SemanticContentWrapper = TypeVar('T_SemanticContentWrapper', bound=SemanticContentWrapper)
@dataclass
class SemanticMixedContentWrapper(SemanticContentWrapper):
mixed_content: List[SemanticContentWrapper] = field(default_factory=list)
content_id: Optional[str] = None
layout_block: dataclasses.InitVar[LayoutBlock] = None
def __post_init__(self, layout_block: Optional[LayoutBlock] = None):
if layout_block is not None:
self.add_block_content(layout_block)
def __len__(self):
return len(self.mixed_content)
def __iter__(self) -> Iterator[SemanticContentWrapper]:
return iter(self.mixed_content)
def is_empty(self):
return not self.mixed_content
def iter_blocks(self) -> Iterable[LayoutBlock]:
return (
block
for content in self.mixed_content
for block in content.iter_blocks()
)
def add_block_content(self, block: LayoutBlock):
self.add_content(SemanticTextContentWrapper(block))
def add_content(self, content: SemanticContentWrapper):
assert not isinstance(content, LayoutBlock)
self.mixed_content.append(content)
def add_content_and_return_content(
self, content: T_SemanticContentWrapper
) -> T_SemanticContentWrapper:
self.add_content(content)
return content
def iter_by_type(
self, type_: Type[T_SemanticContentWrapper]
) -> Iterable[T_SemanticContentWrapper]:
return (
content for content in self.mixed_content
if isinstance(content, type_)
)
def iter_by_type_recursively(
self, type_: Type[T_SemanticContentWrapper]
) -> Iterable[T_SemanticContentWrapper]:
return iter_by_semantic_type_recursively(self.mixed_content, type_)
def iter_by_types_recursively(
self, types_: Tuple[Type[T_SemanticContentWrapper], ...]
) -> Iterable[SemanticContentWrapper]:
return iter_by_semantic_types_recursively(self.mixed_content, types_)
def iter_parent_by_semantic_type_recursively(
self, type_: Type[T_SemanticContentWrapper]
):
return iter_parent_by_semantic_type_recursively(
self.mixed_content, type_, self
)
def has_type(
self, type_: Type[T_SemanticContentWrapper]
) -> bool:
return next(iter(self.iter_by_type(type_)), None) is not None
def view_by_type(self, type_: Type[T_SemanticContentWrapper]) -> 'SemanticMixedContentWrapper':
return SemanticMixedContentWrapper(list(self.iter_by_type(type_)))
def flat_map_inplace(
self,
fn: Callable[[SemanticContentWrapper], Sequence[SemanticContentWrapper]]
):
self.mixed_content = [
replaced_content
for content in self.mixed_content
for replaced_content in fn(content)
]
def flat_map_inplace_by_type(
self,
type_: Type[T_SemanticContentWrapper],
fn: Callable[[SemanticContentWrapper], Sequence[SemanticContentWrapper]]
):
self.flat_map_inplace(
lambda content: (
fn(content) if isinstance(content, type_)
else [content]
)
)
def get_text_list(self) -> List[str]:
return [content.get_text() for content in self.mixed_content]
def get_text_by_type(self, type_: Type[T_SemanticContentWrapper]) -> str:
return self.view_by_type(type_).get_text()
def iter_parent_by_semantic_type_recursively(
semantic_content_iterable: Iterable[SemanticContentWrapper],
type_: Type[T_SemanticContentWrapper],
parent_content: SemanticContentWrapper
) -> Iterable[SemanticContentWrapper]:
for semantic_content in semantic_content_iterable:
if isinstance(semantic_content, type_):
yield parent_content
return
if isinstance(semantic_content, SemanticMixedContentWrapper):
yield from iter_parent_by_semantic_type_recursively(
semantic_content.mixed_content,
type_=type_,
parent_content=semantic_content
)
def iter_by_semantic_types_recursively(
semantic_content_iterable: Iterable[SemanticContentWrapper],
types_: Union[Type[T_SemanticContentWrapper], Tuple[Type[T_SemanticContentWrapper], ...]]
) -> Iterable[SemanticContentWrapper]:
for semantic_content in semantic_content_iterable:
if isinstance(semantic_content, types_):
yield semantic_content
continue
if isinstance(semantic_content, SemanticMixedContentWrapper):
yield from iter_by_semantic_types_recursively(
semantic_content.mixed_content,
types_=types_
)
def iter_by_semantic_type_recursively(
semantic_content_iterable: Iterable[SemanticContentWrapper],
type_: Type[T_SemanticContentWrapper]
) -> Iterable[T_SemanticContentWrapper]:
return cast(
Iterable[T_SemanticContentWrapper],
iter_by_semantic_types_recursively(
semantic_content_iterable,
type_
)
)
@dataclass
class SemanticNote(SemanticSimpleContentWrapper):
note_type: str = 'other'
@dataclass
class SemanticMixedNote(SemanticMixedContentWrapper):
note_type: str = 'other'
@dataclass
class SemanticOptionalValueSemanticMixedContentWrapper(SemanticMixedContentWrapper):
value: Optional[str] = None
class SemanticHeading(SemanticMixedContentWrapper):
pass
class SemanticParagraph(SemanticMixedContentWrapper):
pass
class SemanticSectionTypes:
BODY = 'BODY'
BACK = 'BACK'
ACKNOWLEDGEMENT = 'ACKNOWLEDGEMENT'
OTHER = 'OTHER'
class SemanticLabel(SemanticSimpleContentWrapper):
pass
class SemanticCaption(SemanticSimpleContentWrapper):
pass
class SemanticTitle(SemanticSimpleContentWrapper):
pass
class SemanticJournal(SemanticSimpleContentWrapper):
pass
class SemanticVolume(SemanticSimpleContentWrapper):
pass
class SemanticIssue(SemanticSimpleContentWrapper):
pass
@dataclass
class SemanticPageRange(SemanticSimpleContentWrapper):
from_page: Optional[str] = None
to_page: Optional[str] = None
class SemanticPublisher(SemanticSimpleContentWrapper):
pass
class SemanticLocation(SemanticSimpleContentWrapper):
pass
@dataclass
class SemanticDate(SemanticSimpleContentWrapper):
year: Optional[int] = None
class SemanticExternalIdentifierTypes:
ARXIV = 'ARXIV'
DOI = 'DOI'
PII = 'PII'
PMCID = 'PMCID'
PMID = 'PMID'
@dataclass
class SemanticExternalIdentifier(SemanticSimpleContentWrapper):
value: Optional[str] = None
external_identifier_type: Optional[str] = None
class SemanticExternalUrl(SemanticOptionalValueSemanticMixedContentWrapper):
pass
class SemanticAbstract(SemanticSimpleContentWrapper):
pass
class SemanticRawNameList(SemanticMixedContentWrapper):
pass
T_SemanticRawNameList = TypeVar('T_SemanticRawNameList', bound=SemanticRawNameList)
class SemanticRawAuthors(SemanticRawNameList):
pass
class SemanticRawEditors(SemanticRawNameList):
pass
class SemanticRawAffiliation(SemanticMixedContentWrapper):
pass
class SemanticRawAddress(SemanticMixedContentWrapper):
pass
class SemanticRawAffiliationAddress(SemanticMixedContentWrapper):
pass
class SemanticMarker(SemanticSimpleContentWrapper):
pass
class SemanticNamePart(SemanticOptionalValueSemanticMixedContentWrapper):
pass
class SemanticNameTitle(SemanticNamePart):
pass
class SemanticNameSuffix(SemanticNamePart):
pass
class SemanticGivenName(SemanticNamePart):
pass
class SemanticMiddleName(SemanticNamePart):
pass
class SemanticSurname(SemanticNamePart):
pass
class SemanticName(SemanticMixedContentWrapper):
@property
def label_text(self) -> str:
return self.view_by_type(SemanticLabel).get_text()
@property
def given_name_text(self) -> str:
return self.view_by_type(SemanticGivenName).get_text()
@property
def surname_text(self) -> str:
return self.view_by_type(SemanticSurname).get_text()
T_SemanticName = TypeVar('T_SemanticName', bound=SemanticName)
class SemanticAuthor(SemanticName):
pass
class SemanticEditor(SemanticName):
pass
class SemanticInstitution(SemanticMixedContentWrapper):
pass
class SemanticDepartment(SemanticMixedContentWrapper):
pass
class SemanticLaboratory(SemanticMixedContentWrapper):
pass
class SemanticAddressField(SemanticMixedContentWrapper):
pass
class SemanticAddressLine(SemanticAddressField):
pass
class SemanticPostCode(SemanticAddressField):
pass
class SemanticPostBox(SemanticAddressField):
pass
class SemanticRegion(SemanticAddressField):
pass
class SemanticSettlement(SemanticAddressField):
pass
class SemanticCountry(SemanticAddressField):
pass
class SemanticAffiliationAddress(SemanticMixedContentWrapper):
pass
class SemanticRawReferenceText(SemanticMixedContentWrapper):
pass
class SemanticRawReference(SemanticMixedContentWrapper):
pass
class SemanticReference(SemanticMixedContentWrapper):
pass
class SemanticInvalidReference(SemanticMixedContentWrapper):
pass
class SemanticReferenceList(SemanticMixedContentWrapper):
pass
class SemanticRawFigure(SemanticMixedContentWrapper):
pass
class SemanticFigure(SemanticMixedContentWrapper):
pass
class SemanticRawTable(SemanticMixedContentWrapper):
pass
class SemanticTable(SemanticMixedContentWrapper):
pass
class SemanticRawEquationContent(SemanticMixedContentWrapper):
pass
class SemanticRawEquation(SemanticMixedContentWrapper):
pass
@dataclass
class SemanticGraphic(SemanticSimpleContentWrapper):
layout_graphic: Optional[LayoutGraphic] = None
relative_path: Optional[str] = None
def get_short_semantic_content_repr(self):
if not self.layout_graphic:
return repr(self)
return '%s(layout_graphic.local_file_path=%r)' % (
type(self).__name__,
self.layout_graphic.local_file_path
)
@dataclass
class SemanticCitation(SemanticSimpleContentWrapper):
target_content_id: Optional[str] = None
class SemanticFigureCitation(SemanticCitation):
pass
class SemanticTableCitation(SemanticCitation):
pass
class SemanticReferenceCitation(SemanticCitation):
pass
class SemanticFront(SemanticMixedContentWrapper):
@property
def authors(self) -> List[SemanticAuthor]:
return list(self.iter_by_type(SemanticAuthor))
def get_raw_authors_text(self) -> str:
return '\n'.join(self.view_by_type(SemanticRawAuthors).get_text_list())
def get_authors_text(self) -> str:
return '\n'.join(self.view_by_type(SemanticAuthor).get_text_list())
@dataclass
class SemanticSection(SemanticMixedContentWrapper):
section_type: str = SemanticSectionTypes.OTHER
@property
def headings(self) -> List[SemanticHeading]:
return list(self.iter_by_type(SemanticHeading))
def get_heading_text(self) -> str:
return '\n'.join(self.view_by_type(SemanticHeading).get_text_list())
@property
def paragraphs(self) -> List[SemanticParagraph]:
return list(self.iter_by_type(SemanticParagraph))
def get_paragraph_text_list(self) -> List[str]:
return self.view_by_type(SemanticParagraph).get_text_list()
def add_heading_block(self, block: LayoutBlock) -> SemanticHeading:
return self.add_content_and_return_content(SemanticHeading(layout_block=block))
def add_new_paragraph(self) -> SemanticParagraph:
return self.add_content_and_return_content(SemanticParagraph())
def add_note(self, block: LayoutBlock, note_type: str) -> SemanticNote:
return self.add_content_and_return_content(
SemanticNote(block, note_type=note_type)
)
def get_notes(self, note_type: str) -> List[SemanticNote]:
return [
note
for note in self.iter_by_type(SemanticNote)
if note.note_type == note_type
]
def get_notes_text_list(self, note_type: str) -> List[str]:
return [note.get_text() for note in self.get_notes(note_type)]
@property
def sections(self) -> List['SemanticSection']:
return list(self.iter_by_type(SemanticSection))
def get_sections(
self,
section_type: Optional[str] = None
) -> List['SemanticSection']:
return [
section
for section in self.iter_by_type(SemanticSection)
if not section_type or section.section_type == section_type
]
def view_by_section_type(self, section_type: str) -> 'SemanticMixedContentWrapper':
return SemanticMixedContentWrapper(
cast(List[SemanticContentWrapper], self.get_sections(section_type))
)
def add_new_section(
self,
section_type: str = SemanticSectionTypes.OTHER
) -> 'SemanticSection':
return self.add_content_and_return_content(
SemanticSection(section_type=section_type)
)
class SemanticDocument(SemanticMixedContentWrapper):
def __init__(self):
self.front = SemanticFront()
self.body_section = SemanticSection(section_type=SemanticSectionTypes.BODY)
self.back_section = SemanticSection(section_type=SemanticSectionTypes.BACK)
super().__init__([self.front, self.body_section, self.back_section])
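# --- Illustrative usage sketch (not part of the package source) ---
# A minimal example of how the semantic document model above can be assembled
# and queried; the text values are made up purely for illustration.
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.document.semantic_document import (
    SemanticDocument, SemanticSectionTypes, SemanticTitle
)

document = SemanticDocument()
# front matter: a title wrapped in a layout block
document.front.add_content(
    SemanticTitle(layout_block=LayoutBlock.for_text('An Example Title'))
)
# body: one section with a heading and a single paragraph
section = document.body_section.add_new_section(SemanticSectionTypes.OTHER)
section.add_heading_block(LayoutBlock.for_text('Introduction'))
section.add_new_paragraph().add_block_content(
    LayoutBlock.for_text('Example paragraph text.')
)
print(section.get_heading_text())          # 'Introduction'
print(section.get_paragraph_text_list())   # ['Example paragraph text.']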
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/semantic_document.py
|
semantic_document.py
|
| 0.851907 | 0.244611 |
import logging
from typing import (
Dict,
List,
Optional,
)
from sciencebeam_parser.utils.stop_watch import StopWatchRecorder
from sciencebeam_parser.document.semantic_document import (
SemanticAbstract,
SemanticAffiliationAddress,
SemanticAuthor,
SemanticDocument,
SemanticFigure,
SemanticMarker,
SemanticNote,
SemanticReferenceList,
SemanticSection,
SemanticSectionTypes,
SemanticTable,
SemanticTitle
)
from sciencebeam_parser.document.tei.common import (
TeiElementBuilder
)
from sciencebeam_parser.document.tei.document import (
TeiDocument
)
from sciencebeam_parser.document.tei.factories import (
DEFAULT_TEI_ELEMENT_FACTORY_CONTEXT,
TeiElementFactoryContext,
get_tei_note_for_semantic_content
)
from sciencebeam_parser.document.tei.author import (
get_dummy_tei_author_for_semantic_affiliations_element,
get_orphan_affiliations,
get_tei_author_for_semantic_author_element
)
from sciencebeam_parser.document.tei.references import (
get_tei_raw_reference_list_element
)
LOGGER = logging.getLogger(__name__)
def get_tei_for_semantic_document( # pylint: disable=too-many-branches, too-many-statements
semantic_document: SemanticDocument,
context: Optional[TeiElementFactoryContext] = None
) -> TeiDocument:
if context is None:
context = DEFAULT_TEI_ELEMENT_FACTORY_CONTEXT
stop_watch_recorder = StopWatchRecorder()
LOGGER.info('generating tei document')
LOGGER.debug('semantic_document: %s', semantic_document)
tei_document = TeiDocument()
stop_watch_recorder.start('front')
LOGGER.info('generating tei document: front')
title_block = semantic_document.front.view_by_type(SemanticTitle).merged_block
if title_block:
tei_document.set_title_layout_block(title_block)
abstract_block = semantic_document.front.view_by_type(SemanticAbstract).merged_block
if abstract_block:
tei_document.set_abstract_layout_block(abstract_block)
affiliations_by_marker: Dict[str, List[SemanticAffiliationAddress]] = {}
for semantic_content in semantic_document.front:
if isinstance(semantic_content, SemanticAffiliationAddress):
marker_text = semantic_content.get_text_by_type(SemanticMarker)
affiliations_by_marker.setdefault(marker_text, []).append(semantic_content)
LOGGER.debug('affiliations_by_marker: %r', affiliations_by_marker)
semantic_authors: List[SemanticAuthor] = []
for semantic_content in semantic_document.front:
if isinstance(semantic_content, SemanticAuthor):
semantic_authors.append(semantic_content)
tei_author_element = get_tei_author_for_semantic_author_element(
semantic_content,
context=context,
affiliations_by_marker=affiliations_by_marker
)
tei_document.get_or_create_element_at(
['teiHeader', 'fileDesc', 'sourceDesc', 'biblStruct', 'analytic']
).append(tei_author_element)
continue
if isinstance(semantic_content, SemanticTitle):
continue
if isinstance(semantic_content, SemanticAbstract):
continue
if isinstance(semantic_content, SemanticAffiliationAddress):
continue
tei_document.get_or_create_element_at(
['teiHeader']
).append(get_tei_note_for_semantic_content(
semantic_content
))
orphan_affiliations = get_orphan_affiliations(
affiliations_by_marker=affiliations_by_marker,
authors=semantic_authors
)
if orphan_affiliations:
dummy_tei_author_element = get_dummy_tei_author_for_semantic_affiliations_element(
orphan_affiliations,
context=context
)
tei_document.get_or_create_element_at(
['teiHeader', 'fileDesc', 'sourceDesc', 'biblStruct', 'analytic']
).append(dummy_tei_author_element)
LOGGER.info('generating tei document: body')
stop_watch_recorder.start('body')
for semantic_content in semantic_document.body_section:
if isinstance(semantic_content, SemanticNote):
tei_document.get_body().add_note(
semantic_content.note_type,
semantic_content.merged_block
)
continue
TeiElementBuilder(tei_document.get_body_element()).extend(
context.get_tei_child_elements_for_semantic_content(semantic_content)
)
for semantic_content in semantic_document.body_section.iter_by_types_recursively(
(SemanticFigure, SemanticTable,)
):
TeiElementBuilder(tei_document.get_body_element()).extend(
context.get_tei_child_elements_for_semantic_content(semantic_content)
)
LOGGER.info('generating tei document: back section')
stop_watch_recorder.start('back')
for semantic_content in semantic_document.back_section:
if isinstance(semantic_content, SemanticSection):
if semantic_content.section_type == SemanticSectionTypes.ACKNOWLEDGEMENT:
_parent = TeiElementBuilder(tei_document.get_back_element())
else:
_parent = TeiElementBuilder(tei_document.get_back_annex_element())
TeiElementBuilder(_parent).extend(
context.get_tei_child_elements_for_semantic_content(semantic_content)
)
continue
if isinstance(semantic_content, SemanticNote):
tei_document.get_back_annex().add_note(
semantic_content.note_type,
semantic_content.merged_block
)
continue
if isinstance(semantic_content, SemanticReferenceList):
tei_document.get_references().element.append(
get_tei_raw_reference_list_element(semantic_content, context=context)
)
continue
TeiElementBuilder(tei_document.get_back_annex_element()).extend(
context.get_tei_child_elements_for_semantic_content(semantic_content)
)
for semantic_figure in semantic_document.back_section.iter_by_types_recursively(
(SemanticFigure, SemanticTable,)
):
TeiElementBuilder(tei_document.get_back_annex_element()).extend(
context.get_tei_child_elements_for_semantic_content(semantic_figure)
)
stop_watch_recorder.stop()
LOGGER.info('generating tei document done, took: %s', stop_watch_recorder)
return tei_document
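# --- Illustrative usage sketch (not part of the package source) ---
# Converts a (here empty) SemanticDocument into TEI. Serialising the result
# assumes TeiDocument exposes its lxml root as `.element`, mirroring
# TeiElementWrapper in tei/common.py; that attribute name is an assumption,
# not something confirmed by the code shown here.
from lxml import etree
from sciencebeam_parser.document.semantic_document import SemanticDocument
from sciencebeam_parser.document.tei_document import get_tei_for_semantic_document

semantic_document = SemanticDocument()
tei_document = get_tei_for_semantic_document(semantic_document)
print(etree.tostring(tei_document.element, pretty_print=True).decode())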
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei_document.py
|
tei_document.py
|
| 0.565179 | 0.106924 |
import dataclasses
import logging
import itertools
import operator
from dataclasses import dataclass, field
from functools import partial
from typing import Callable, List, Iterable, NamedTuple, Optional, Sequence, Tuple
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.utils.tokenizer import iter_tokenized_tokens, get_tokenized_tokens
LOGGER = logging.getLogger(__name__)
class LayoutFont(NamedTuple):
font_id: str
font_family: Optional[str] = None
font_size: Optional[float] = None
is_bold: Optional[bool] = None
is_italics: Optional[bool] = None
is_subscript: Optional[bool] = None
is_superscript: Optional[bool] = None
EMPTY_FONT = LayoutFont(font_id='_EMPTY')
class LayoutPageCoordinates(NamedTuple):
x: float
y: float
width: float
height: float
page_number: int = 0
@staticmethod
def from_bounding_box(
bounding_box: BoundingBox,
page_number: int = 0
) -> 'LayoutPageCoordinates':
return LayoutPageCoordinates(
x=bounding_box.x,
y=bounding_box.y,
width=bounding_box.width,
height=bounding_box.height,
page_number=page_number
)
@property
def bounding_box(self) -> BoundingBox:
return BoundingBox(x=self.x, y=self.y, width=self.width, height=self.height)
def __bool__(self) -> bool:
return not self.is_empty()
def is_empty(self) -> bool:
return self.width == 0 or self.height == 0
def move_by(self, dx: float = 0, dy: float = 0) -> 'LayoutPageCoordinates':
return LayoutPageCoordinates(
x=self.x + dx, y=self.y + dy, width=self.width, height=self.height,
page_number=self.page_number
)
def get_merged_with(
self,
other: 'LayoutPageCoordinates'
) -> 'LayoutPageCoordinates':
assert self.page_number == other.page_number, \
'cannot merge coordinates on different pages'
x = min(self.x, other.x)
y = min(self.y, other.y)
width = max(self.x + self.width, other.x + other.width) - x
height = max(self.y + self.height, other.y + other.height) - y
return LayoutPageCoordinates(
x=x, y=y, width=width, height=height, page_number=self.page_number
)
def get_merged_coordinates_list(
coordinates_list: Iterable[LayoutPageCoordinates]
) -> List[LayoutPageCoordinates]:
result: List[LayoutPageCoordinates] = []
pending_coordinates: Optional[LayoutPageCoordinates] = None
for coordinates in coordinates_list:
if not pending_coordinates:
pending_coordinates = coordinates
continue
if coordinates.page_number != pending_coordinates.page_number:
result.append(pending_coordinates)
pending_coordinates = coordinates
continue
pending_coordinates = pending_coordinates.get_merged_with(
coordinates
)
if pending_coordinates:
result.append(pending_coordinates)
return result
class LayoutPageMeta(NamedTuple):
page_number: int = 0
coordinates: Optional[LayoutPageCoordinates] = None
@staticmethod
def for_coordinates(coordinates: LayoutPageCoordinates) -> 'LayoutPageMeta':
return LayoutPageMeta(page_number=coordinates.page_number, coordinates=coordinates)
DEFAULT_LAYOUT_PAGE_META = LayoutPageMeta()
class LayoutLineMeta(NamedTuple):
line_id: int = -1
page_meta: LayoutPageMeta = DEFAULT_LAYOUT_PAGE_META
DEFAULT_LAYOUT_LINE_META = LayoutLineMeta()
class LayoutToken(NamedTuple):
text: str
font: LayoutFont = EMPTY_FONT
whitespace: str = ' '
coordinates: Optional[LayoutPageCoordinates] = None
line_meta: LayoutLineMeta = DEFAULT_LAYOUT_LINE_META
T_FlatMapLayoutTokensFn = Callable[[LayoutToken], List[LayoutToken]]
def default_get_tokenized_tokens_keep_whitespace(text: str) -> List[str]:
return get_tokenized_tokens(text, keep_whitespace=True)
def get_relative_coordinates(
coordinates: Optional[LayoutPageCoordinates],
text: str,
text_character_offset: int,
total_text_length: int
) -> Optional[LayoutPageCoordinates]:
if not coordinates:
return None
return LayoutPageCoordinates(
page_number=coordinates.page_number,
x=(
coordinates.x
+ coordinates.width * text_character_offset / total_text_length
),
y=coordinates.y,
width=(
coordinates.width
* len(text) / total_text_length
),
height=coordinates.height
)
def retokenize_layout_token(
layout_token: LayoutToken,
tokenize_fn: Optional[Callable[[str], List[str]]] = None
) -> List[LayoutToken]:
if not layout_token.text.strip():
return []
if tokenize_fn is None:
tokenize_fn = default_get_tokenized_tokens_keep_whitespace
token_texts = tokenize_fn(layout_token.text)
if token_texts == [layout_token.text]:
return [layout_token]
total_text_length = sum(len(token_text) for token_text in token_texts)
texts_with_whitespace: List[Tuple[str, str, int]] = []
pending_token_text = ''
pending_whitespace = ''
text_character_offset = 0
pending_text_character_offset = 0
for token_text in token_texts:
if not token_text.strip():
pending_whitespace += token_text
text_character_offset += len(token_text)
continue
if pending_token_text:
texts_with_whitespace.append((
pending_token_text,
pending_whitespace,
pending_text_character_offset
))
pending_token_text = token_text
pending_whitespace = ''
pending_text_character_offset = text_character_offset
text_character_offset += len(token_text)
pending_whitespace += layout_token.whitespace
if pending_token_text:
texts_with_whitespace.append((
pending_token_text,
pending_whitespace,
pending_text_character_offset
))
return [
LayoutToken(
text=token_text,
font=layout_token.font,
whitespace=whitespace,
coordinates=get_relative_coordinates(
layout_token.coordinates,
pending_token_text,
text_character_offset,
total_text_length
),
line_meta=layout_token.line_meta
)
for token_text, whitespace, text_character_offset in texts_with_whitespace
]
def iter_layout_tokens_for_text(
text: str,
tail_whitespace: str = ' ',
**kwargs
) -> Iterable[LayoutToken]:
pending_text = ''
pending_whitespace = ' '
for token_text in iter_tokenized_tokens(text, keep_whitespace=True):
if not token_text.strip():
pending_whitespace += token_text
continue
if pending_text:
yield LayoutToken(pending_text, whitespace=pending_whitespace, **kwargs)
pending_text = token_text
pending_whitespace = ''
if pending_text:
pending_whitespace += tail_whitespace
yield LayoutToken(pending_text, whitespace=pending_whitespace, **kwargs)
def get_layout_tokens_for_text(*args, **kwargs) -> List[LayoutToken]:
return list(iter_layout_tokens_for_text(*args, **kwargs))
@dataclass
class LayoutLine:
tokens: List[LayoutToken]
@property
def text(self) -> str:
return join_layout_tokens(self.tokens)
@staticmethod
def for_text(text: str, **kwargs) -> 'LayoutLine':
return LayoutLine(tokens=get_layout_tokens_for_text(text, **kwargs))
def flat_map_layout_tokens(self, fn: T_FlatMapLayoutTokensFn) -> 'LayoutLine':
return LayoutLine(tokens=[
tokenized_token
for token in self.tokens
for tokenized_token in fn(token)
])
@dataclass
class LayoutBlock:
lines: List[LayoutLine]
def __len__(self):
return len(self.lines)
@staticmethod
def for_tokens(tokens: List[LayoutToken]) -> 'LayoutBlock':
if not tokens:
return EMPTY_BLOCK
lines = [
LayoutLine(tokens=list(line_tokens))
for _, line_tokens in itertools.groupby(
tokens, key=operator.attrgetter('line_meta')
)
]
return LayoutBlock(lines=lines)
@staticmethod
def merge_blocks(blocks: Iterable['LayoutBlock']) -> 'LayoutBlock':
return LayoutBlock(lines=[
line
for block in blocks
for line in block.lines
])
@staticmethod
def for_text(text: str, **kwargs) -> 'LayoutBlock':
return LayoutBlock(lines=[LayoutLine.for_text(text, **kwargs)])
def iter_all_tokens(self) -> Iterable[LayoutToken]:
return (
token
for line in self.lines
for token in line.tokens
)
def get_merged_coordinates_list(self) -> List[LayoutPageCoordinates]:
return get_merged_coordinates_list([
token.coordinates
for token in self.iter_all_tokens()
if token.coordinates
])
def flat_map_layout_tokens(self, fn: T_FlatMapLayoutTokensFn) -> 'LayoutBlock':
return LayoutBlock(lines=[
line.flat_map_layout_tokens(fn)
for line in self.lines
])
def remove_empty_lines(self) -> 'LayoutBlock':
return LayoutBlock(lines=[
line
for line in self.lines
if line.tokens
])
@property
def text(self) -> str:
return join_layout_tokens(self.iter_all_tokens())
@property
def whitespace(self) -> str:
if not self.lines or not self.lines[-1].tokens:
return ''
return self.lines[-1].tokens[-1].whitespace
EMPTY_BLOCK = LayoutBlock(lines=[])
class LayoutGraphic(NamedTuple):
local_file_path: Optional[str] = None
coordinates: Optional[LayoutPageCoordinates] = None
graphic_type: Optional[str] = None
related_block: Optional[LayoutBlock] = None
page_meta: LayoutPageMeta = DEFAULT_LAYOUT_PAGE_META
@dataclass
class LayoutPage:
blocks: List[LayoutBlock]
graphics: Sequence[LayoutGraphic] = field(default_factory=list)
meta: LayoutPageMeta = DEFAULT_LAYOUT_PAGE_META
def replace(self, **changes) -> 'LayoutPage':
return dataclasses.replace(self, **changes)
def iter_all_tokens(self) -> Iterable[LayoutToken]:
return (
token
for block in self.blocks
for token in block.iter_all_tokens()
)
def flat_map_layout_tokens(self, fn: T_FlatMapLayoutTokensFn) -> 'LayoutPage':
return LayoutPage(
blocks=[
block.flat_map_layout_tokens(fn)
for block in self.blocks
],
graphics=self.graphics,
meta=self.meta
)
def remove_empty_blocks(self) -> 'LayoutPage':
blocks: List[LayoutBlock] = [
block.remove_empty_lines()
for block in self.blocks
]
return LayoutPage(
blocks=[
block
for block in blocks
if block.lines
],
graphics=self.graphics,
meta=self.meta
)
@dataclass
class LayoutDocument:
pages: List[LayoutPage]
def __len__(self):
return len(self.pages)
@staticmethod
def for_blocks(blocks: List[LayoutBlock]) -> 'LayoutDocument':
return LayoutDocument(pages=[LayoutPage(
blocks=blocks, graphics=[]
)])
def replace(self, **changes) -> 'LayoutDocument':
return dataclasses.replace(self, **changes)
def iter_all_blocks(self) -> Iterable[LayoutBlock]:
return (
block
for page in self.pages
for block in page.blocks
)
def iter_all_lines(self) -> Iterable[LayoutLine]:
return (
line
for block in self.iter_all_blocks()
for line in block.lines
)
def iter_all_tokens(self) -> Iterable[LayoutToken]:
return (
token
for block in self.iter_all_blocks()
for token in block.iter_all_tokens()
)
def iter_all_graphics(self) -> Iterable[LayoutGraphic]:
return (
graphic
for page in self.pages
for graphic in page.graphics
)
def flat_map_layout_tokens(
self, fn: T_FlatMapLayoutTokensFn, **kwargs
) -> 'LayoutDocument':
if kwargs:
fn = partial(fn, **kwargs)
return LayoutDocument(pages=[
page.flat_map_layout_tokens(fn)
for page in self.pages
])
def retokenize(self, **kwargs) -> 'LayoutDocument':
return self.flat_map_layout_tokens(retokenize_layout_token, **kwargs)
def remove_empty_blocks(self, preserve_empty_pages: bool = False) -> 'LayoutDocument':
pages: List[LayoutPage] = [
page.remove_empty_blocks()
for page in self.pages
]
return LayoutDocument(pages=[
page
for page in pages
if page.blocks or preserve_empty_pages
])
class LayoutTokenIndexRange(NamedTuple):
layout_token: LayoutToken
start: int
end: int
class LayoutTokensText:
def __init__(self, layout_block: LayoutBlock) -> None:
self.layout_block = layout_block
text_fragments = []
pending_whitespace = ''
text_offset = 0
token_index_ranges: List[LayoutTokenIndexRange] = []
for line in layout_block.lines:
for token in line.tokens:
if pending_whitespace:
text_fragments.append(pending_whitespace)
text_offset += len(pending_whitespace)
pending_whitespace = ''
token_text = token.text
token_index_ranges.append(LayoutTokenIndexRange(
layout_token=token,
start=text_offset,
end=text_offset + len(token_text)
))
text_fragments.append(token_text)
text_offset += len(token_text)
pending_whitespace += token.whitespace
self.token_index_ranges = token_index_ranges
self.text = ''.join(text_fragments)
def __str__(self):
return self.text
def iter_layout_tokens_between(
self, start: int, end: int
) -> Iterable[LayoutToken]:
for token_index_range in self.token_index_ranges:
if token_index_range.start >= end:
break
if token_index_range.end <= start:
continue
yield token_index_range.layout_token
def get_layout_tokens_between(
self, start: int, end: int
) -> List[LayoutToken]:
return list(self.iter_layout_tokens_between(start, end))
def join_layout_tokens(layout_tokens: Iterable[LayoutToken]) -> str:
layout_tokens = list(layout_tokens)
return ''.join([
(
token.text + token.whitespace
if index < len(layout_tokens) - 1
else token.text
)
for index, token in enumerate(layout_tokens)
])
def flat_map_layout_document_tokens(
layout_document: LayoutDocument,
fn: T_FlatMapLayoutTokensFn,
**kwargs
) -> LayoutDocument:
return layout_document.flat_map_layout_tokens(fn, **kwargs)
def retokenize_layout_document(
layout_document: LayoutDocument,
**kwargs
) -> LayoutDocument:
return layout_document.retokenize(**kwargs)
def remove_empty_blocks(
layout_document: LayoutDocument,
**kwargs
) -> LayoutDocument:
return layout_document.remove_empty_blocks(**kwargs)
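# --- Illustrative usage sketch (not part of the package source) ---
# Builds a LayoutDocument from plain text using the helpers above, then
# removes empty blocks and retokenises; exact token boundaries depend on
# sciencebeam_parser.utils.tokenizer, so no specific token list is assumed.
from sciencebeam_parser.document.layout_document import LayoutBlock, LayoutDocument

block = LayoutBlock.for_text('Hello, layout world!')
print(block.text)   # reconstructs the original text: 'Hello, layout world!'
layout_document = LayoutDocument.for_blocks([block, LayoutBlock(lines=[])])
layout_document = layout_document.remove_empty_blocks()   # drops the empty block
layout_document = layout_document.retokenize()
print([token.text for token in layout_document.iter_all_tokens()])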
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/layout_document.py
|
layout_document.py
|
| 0.870776 | 0.279988 |
import logging
from typing import Dict, Iterable, List, Optional, Union
from lxml import etree
from lxml.builder import ElementMaker
from sciencebeam_parser.utils.xml import get_text_content
from sciencebeam_parser.utils.xml_writer import parse_tag_expression
from sciencebeam_parser.document.layout_document import (
LayoutBlock,
LayoutPageCoordinates,
LayoutToken
)
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper,
SemanticMixedContentWrapper
)
LOGGER = logging.getLogger(__name__)
XML_NS = 'http://www.w3.org/XML/1998/namespace'
XML_NS_PREFIX = '{%s}' % XML_NS
XML_ID = XML_NS_PREFIX + 'id'
TEI_NS = 'http://www.tei-c.org/ns/1.0'
TEI_NS_PREFIX = '{%s}' % TEI_NS
TEI_NS_MAP = {
'tei': TEI_NS
}
TEI_E = ElementMaker(namespace=TEI_NS, nsmap={
None: TEI_NS
})
def get_or_create_element_at(parent: etree.ElementBase, path: List[str]) -> etree.ElementBase:
if not path:
return parent
child = parent.find(TEI_NS_PREFIX + path[0])
if child is None:
LOGGER.debug('creating element: %s', path[0])
tag_expression = parse_tag_expression(path[0])
child = tag_expression.create_node(
element_maker=TEI_E
)
parent.append(child)
return get_or_create_element_at(child, path[1:])
def tei_xpath(parent: etree.ElementBase, xpath: str) -> List[etree.ElementBase]:
return parent.xpath(xpath, namespaces=TEI_NS_MAP)
def get_tei_xpath_text_content_list(parent: etree.ElementBase, xpath: str) -> List[str]:
return [get_text_content(node) for node in tei_xpath(parent, xpath)]
def get_required_styles(layout_token: LayoutToken) -> List[str]:
required_styles = []
if layout_token.font.is_bold:
required_styles.append('bold')
if layout_token.font.is_italics:
required_styles.append('italic')
if layout_token.font.is_subscript:
required_styles.append('subscript')
if layout_token.font.is_superscript:
required_styles.append('superscript')
return required_styles
def get_element_for_styles(styles: List[str], text: str) -> etree.ElementBase:
if not styles:
return text
child: Optional[etree.ElementBase] = None
for style in reversed(styles):
LOGGER.debug('style: %r, child: %r, text: %r', style, child, text)
if child is not None:
child = TEI_E('hi', {'rend': style}, child)
else:
child = TEI_E('hi', {'rend': style}, text)
return child
def format_coordinates(coordinates: LayoutPageCoordinates) -> str:
return '%d,%.2f,%.2f,%.2f,%.2f' % (
coordinates.page_number,
coordinates.x,
coordinates.y,
coordinates.width,
coordinates.height
)
def format_coordinates_list(coordinates_list: List[LayoutPageCoordinates]) -> str:
return ';'.join((
format_coordinates(coordinates)
for coordinates in coordinates_list
))
def get_default_attributes_for_layout_block(
layout_block: LayoutBlock,
enable_coordinates: bool = True
) -> Dict[str, str]:
if enable_coordinates:
formatted_coords = format_coordinates_list(
layout_block.get_merged_coordinates_list()
)
if formatted_coords:
return {'coords': formatted_coords}
return {}
def iter_layout_block_tei_children(
layout_block: LayoutBlock,
enable_coordinates: bool = True
) -> Iterable[Union[str, etree.ElementBase]]:
pending_styles: List[str] = []
pending_text = ''
pending_whitespace = ''
if enable_coordinates:
yield get_default_attributes_for_layout_block(
layout_block=layout_block,
enable_coordinates=enable_coordinates
)
for line in layout_block.lines:
for token in line.tokens:
required_styles = get_required_styles(token)
LOGGER.debug('token: %r, required_styles=%r', token, required_styles)
if required_styles != pending_styles:
if pending_text:
yield get_element_for_styles(
pending_styles,
pending_text
)
pending_text = ''
if pending_whitespace:
yield pending_whitespace
pending_whitespace = ''
pending_styles = required_styles
if pending_whitespace:
pending_text += pending_whitespace
pending_whitespace = ''
pending_text += token.text
pending_whitespace = token.whitespace
if pending_text:
yield get_element_for_styles(
pending_styles,
pending_text
)
def extend_element(
element: etree.ElementBase,
children_or_attributes: Iterable[etree.ElementBase]
):
for item in children_or_attributes:
if isinstance(item, dict):
element.attrib.update(item)
continue
if isinstance(item, str):
try:
previous_element = element[-1]
except IndexError:
previous_element = None
if previous_element is not None:
previous_element.tail = (
(previous_element.tail or '')
+ item
)
else:
element.text = (
(element.text or '')
+ item
)
continue
element.append(item)
def create_tei_note_element(
note_type: str,
layout_block: LayoutBlock
) -> etree.ElementBase:
return TEI_E(
'note',
{'type': note_type},
*iter_layout_block_tei_children(layout_block)
)
def get_default_attributes_for_semantic_content(
semantic_content: SemanticContentWrapper,
**kwargs
) -> Dict[str, str]:
attrib = get_default_attributes_for_layout_block(
semantic_content.merged_block,
**kwargs
)
if isinstance(semantic_content, SemanticMixedContentWrapper):
if semantic_content.content_id:
attrib = {
**attrib,
XML_ID: semantic_content.content_id
}
return attrib
def _create_tei_note_element(
note_type: str,
layout_block: LayoutBlock
) -> etree.ElementBase:
return TEI_E(
'note',
{'type': note_type},
*iter_layout_block_tei_children(layout_block)
)
class TeiElementWrapper:
def __init__(self, element: etree.ElementBase):
self.element = element
def xpath_nodes(self, xpath: str) -> List[etree.ElementBase]:
return tei_xpath(self.element, xpath)
def xpath(self, xpath: str) -> List['TeiElementWrapper']:
return [TeiElementWrapper(node) for node in self.xpath_nodes(xpath)]
def get_xpath_text_content_list(self, xpath: str) -> List[str]:
return get_tei_xpath_text_content_list(self.element, xpath)
def get_notes_text_list(self, note_type: str) -> List[str]:
return get_tei_xpath_text_content_list(
self.element,
'//tei:note[@type="%s"]' % note_type,
)
def add_note(self, note_type: str, layout_block: LayoutBlock):
self.element.append(create_tei_note_element(note_type, layout_block))
class TeiElementBuilder:
def __init__(
self,
element: etree.ElementBase,
):
self.element = element
self.builder_by_path_fragment: Dict[str, 'TeiElementBuilder'] = {}
def get_or_create(
self,
path: Optional[List[str]]
) -> 'TeiElementBuilder':
if not path:
return self
key = path[0]
builder = self.builder_by_path_fragment.get(key)
if not builder:
builder = TeiElementBuilder(TEI_E(key))
self.element.append(builder.element)
self.builder_by_path_fragment[key] = builder
return builder.get_or_create(path[1:])
def add_dict(self, attrib: dict):
_attrib = self.element.attrib
for k, v in attrib.items():
_attrib[k] = v
def append(
self,
child: Union[dict, etree.ElementBase]
):
if isinstance(child, dict):
self.add_dict(child)
return
self.element.append(child)
def extend(self, children: List[Union[dict, etree.ElementBase]]):
for child in children:
self.append(child)
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/common.py | common.py |
| 0.709523 | 0.154185 |
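The common.py helpers above are easiest to see end to end in a small script. The following is a minimal sketch, assuming sciencebeam_parser 0.1.8 is installed; the title text and div content are invented for illustration:

# Minimal usage sketch for the common.py helpers above (assumes sciencebeam_parser
# 0.1.8 is installed); the title text below is invented for illustration.
from lxml import etree
from sciencebeam_parser.document.tei.common import (
    TEI_E,
    TeiElementBuilder,
    get_or_create_element_at,
    tei_xpath
)

root = TEI_E('TEI')
# get_or_create_element_at builds the nested teiHeader/fileDesc/titleStmt path on demand.
title_stmt = get_or_create_element_at(root, ['teiHeader', 'fileDesc', 'titleStmt'])
title_stmt.append(TEI_E('title', {'level': 'a', 'type': 'main'}, 'Example Title'))

# TeiElementBuilder caches builders by path fragment, so repeated get_or_create
# calls on the same builder reuse the existing <text>/<body> elements.
builder = TeiElementBuilder(root)
body = builder.get_or_create(['text', 'body'])
body.append(TEI_E('div', TEI_E('head', 'Introduction')))

print(etree.tostring(root, pretty_print=True).decode())
print([t.text for t in tei_xpath(root, '//tei:title')])  # ['Example Title']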
import logging
from typing import (
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Union
)
from lxml import etree
from sciencebeam_parser.document.semantic_document import (
SemanticAddressField,
SemanticAffiliationAddress,
SemanticAuthor,
SemanticMarker
)
from sciencebeam_parser.document.tei.common import (
TEI_E,
XML_ID
)
from sciencebeam_parser.document.tei.factories import (
TeiElementFactoryContext
)
LOGGER = logging.getLogger(__name__)
def _get_tei_raw_affiliation_element_for_semantic_affiliation_address(
semantic_affiliation_address: SemanticAffiliationAddress,
context: TeiElementFactoryContext
) -> etree.ElementBase:
children: List[Union[str, dict, etree.ElementBase]] = []
children.append({'type': 'raw_affiliation'})
pending_whitespace: str = ''
for semantic_content in semantic_affiliation_address:
merged_block = semantic_content.merged_block
if pending_whitespace:
children.append(pending_whitespace)
if isinstance(semantic_content, SemanticMarker):
children.append(TEI_E(
'label',
*context.iter_layout_block_tei_children(merged_block, enable_coordinates=False)
))
pending_whitespace = merged_block.whitespace
continue
children.extend(
context.iter_layout_block_tei_children(merged_block, enable_coordinates=False)
)
pending_whitespace = merged_block.whitespace
return TEI_E('note', *children)
def get_tei_affiliation_for_semantic_affiliation_address_element(
semantic_affiliation_address: SemanticAffiliationAddress,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_affiliation_address: %s', semantic_affiliation_address)
raw_affiliation = _get_tei_raw_affiliation_element_for_semantic_affiliation_address(
semantic_affiliation_address,
context=context
)
attributes = context.get_default_attributes_for_semantic_content(
semantic_affiliation_address
)
if semantic_affiliation_address.content_id:
attributes = {**attributes, 'key': semantic_affiliation_address.content_id}
if XML_ID in attributes:
del attributes[XML_ID]
children = [
attributes,
raw_affiliation
]
address_semantic_content_list = []
for semantic_content in semantic_affiliation_address:
if isinstance(semantic_content, SemanticAddressField):
address_semantic_content_list.append(semantic_content)
continue
children.extend(context.get_tei_child_elements_for_semantic_content(
semantic_content
))
LOGGER.debug('address_semantic_content_list: %r', address_semantic_content_list)
if address_semantic_content_list:
children.append(TEI_E('address', *[
child
for semantic_content in address_semantic_content_list
for child in context.get_tei_child_elements_for_semantic_content(
semantic_content
)
]))
return TEI_E('affiliation', *children)
def get_tei_author_for_semantic_author_element(
semantic_author: SemanticAuthor,
context: TeiElementFactoryContext,
affiliations_by_marker: Optional[Mapping[str, Sequence[SemanticAffiliationAddress]]] = None
) -> etree.ElementBase:
if affiliations_by_marker is None:
affiliations_by_marker = {}
LOGGER.debug('semantic_author: %s', semantic_author)
pers_name_children = []
for semantic_content in semantic_author:
pers_name_children.extend(context.get_tei_child_elements_for_semantic_content(
semantic_content
))
children = [
TEI_E(
'persName',
context.get_default_attributes_for_semantic_content(semantic_author),
*pers_name_children
)
]
affiliations = []
for marker_text in semantic_author.view_by_type(SemanticMarker).get_text_list():
semantic_affiliations = affiliations_by_marker.get(marker_text)
if not semantic_affiliations:
LOGGER.warning('affiliation not found for marker: %r', marker_text)
continue
for semantic_affiliation in semantic_affiliations:
affiliations.append(get_tei_affiliation_for_semantic_affiliation_address_element(
semantic_affiliation,
context=context
))
children.extend(affiliations)
return TEI_E('author', *children)
def get_dummy_tei_author_for_semantic_affiliations_element(
semantic_affiliations: Sequence[SemanticAffiliationAddress],
context: TeiElementFactoryContext
) -> etree.ElementBase:
children = [
TEI_E('note', {'type': 'dummy_author'}, 'Dummy author for orphan affiliations')
]
children.extend([
get_tei_affiliation_for_semantic_affiliation_address_element(
semantic_affiliation,
context=context
)
for semantic_affiliation in semantic_affiliations
])
return TEI_E('author', *children)
def get_authors_affiliation_markers(authors: List[SemanticAuthor]) -> Set[str]:
return {
marker
for author in authors
for marker in author.view_by_type(SemanticMarker).get_text_list()
}
def get_orphan_affiliations(
affiliations_by_marker: Dict[str, List[SemanticAffiliationAddress]],
authors: List[SemanticAuthor]
) -> List[SemanticAffiliationAddress]:
used_affiliation_markers = get_authors_affiliation_markers(authors)
return [
affiliation
for marker, affiliations in affiliations_by_marker.items()
if marker not in used_affiliation_markers
for affiliation in affiliations
]
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/author.py | author.py |
| 0.640523 | 0.189484 |
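The marker matching at the end of author.py is worth spelling out: authors reference affiliations by marker text, and any affiliation whose marker no author cites becomes an "orphan" that gets attached to a dummy author. Below is a standalone sketch of that matching logic only, using plain dicts and lists rather than the real SemanticAuthor / SemanticAffiliationAddress classes; the data is invented for illustration.

# Standalone mirror of get_authors_affiliation_markers / get_orphan_affiliations;
# plain dicts stand in for the Semantic* classes.
from typing import Dict, List, Set

authors: List[dict] = [
    {'name': 'Smith', 'markers': ['1', '2']},
    {'name': 'Jones', 'markers': ['2']},
]
affiliations_by_marker: Dict[str, List[str]] = {
    '1': ['University A'],
    '2': ['Institute B'],
    '3': ['Lab C'],  # no author cites marker '3'
}

used_markers: Set[str] = {
    marker
    for author in authors
    for marker in author['markers']
}
orphan_affiliations = [
    affiliation
    for marker, affiliations in affiliations_by_marker.items()
    if marker not in used_markers
    for affiliation in affiliations
]
print(orphan_affiliations)  # ['Lab C'] -> would be attached to a dummy <author>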
import logging
from typing import (
Iterable,
List,
)
from lxml import etree
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper,
SemanticFigure,
SemanticHeading,
SemanticLabel,
SemanticParagraph,
SemanticRawEquation,
SemanticSection,
SemanticSectionTypes,
SemanticTable
)
from sciencebeam_parser.document.tei.common import (
TEI_E,
TeiElementBuilder
)
from sciencebeam_parser.document.tei.factory import (
SingleElementTeiElementFactory,
T_ElementChildrenList,
TeiElementFactory,
TeiElementFactoryContext
)
LOGGER = logging.getLogger(__name__)
class HeadingTeiElementFactory(SingleElementTeiElementFactory):
def get_tei_element_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticHeading)
semantic_heading = semantic_content
children: T_ElementChildrenList = [
context.get_default_attributes_for_semantic_content(semantic_heading)
]
pending_whitespace = ''
for child_semantic_content in semantic_heading:
if isinstance(child_semantic_content, SemanticLabel):
children.append({'n': child_semantic_content.get_text()})
continue
layout_block = child_semantic_content.merged_block
if pending_whitespace:
children.append(pending_whitespace)
children.extend(context.iter_layout_block_tei_children(
layout_block=layout_block,
enable_coordinates=False
))
pending_whitespace = layout_block.whitespace
return TEI_E('head', *children)
def iter_flat_paragraph_formula(
semantic_paragraph: SemanticParagraph
) -> Iterable[SemanticContentWrapper]:
pending_semantic_content_list: List[SemanticContentWrapper] = []
for semantic_content in semantic_paragraph:
if isinstance(semantic_content, SemanticRawEquation):
if pending_semantic_content_list:
yield SemanticParagraph(pending_semantic_content_list)
pending_semantic_content_list = []
yield semantic_content
continue
pending_semantic_content_list.append(semantic_content)
if pending_semantic_content_list:
yield SemanticParagraph(pending_semantic_content_list)
class ParagraphTeiElementFactory(TeiElementFactory):
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticParagraph)
semantic_paragraph = semantic_content
result: List[etree.ElementBase] = []
for flat_parent_semantic_content in iter_flat_paragraph_formula(semantic_paragraph):
if not isinstance(flat_parent_semantic_content, SemanticParagraph):
result.extend(context.get_tei_child_elements_for_semantic_content(
flat_parent_semantic_content
))
continue
children: T_ElementChildrenList = [
context.get_default_attributes_for_semantic_content(flat_parent_semantic_content)
]
pending_whitespace = ''
for child_semantic_content in flat_parent_semantic_content:
pending_whitespace = context.append_tei_children_list_and_get_whitespace(
children,
child_semantic_content,
pending_whitespace=pending_whitespace
)
result.append(TEI_E('p', *children))
return result
class SectionTeiElementFactory(TeiElementFactory):
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticSection)
semantic_section = semantic_content
tei_section = TeiElementBuilder(TEI_E('div'))
for child_semantic_content in semantic_section:
if isinstance(child_semantic_content, (SemanticFigure, SemanticTable,)):
# rendered at parent level
continue
tei_section.extend(context.get_tei_child_elements_for_semantic_content(
child_semantic_content
))
if semantic_content.section_type == SemanticSectionTypes.ACKNOWLEDGEMENT:
tei_section.element.attrib['type'] = 'acknowledgement'
if not list(tei_section.element):
return []
return [tei_section.element]
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/section.py | section.py |
| 0.601711 | 0.165627 |
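iter_flat_paragraph_formula is the one non-obvious piece of section.py: it splits a paragraph's children at every raw equation, so equations end up as siblings of <p> elements rather than nested inside them. Here is a standalone sketch of that splitting with strings standing in for semantic content (the real code wraps each run back into a SemanticParagraph); the sample data is invented for illustration.

# Standalone mirror of the paragraph/equation splitting in iter_flat_paragraph_formula.
from typing import Iterable, List, Union

def split_paragraph_at_equations(children: List[str]) -> Iterable[Union[List[str], str]]:
    # 'EQ:'-prefixed strings stand in for SemanticRawEquation children.
    pending: List[str] = []
    for child in children:
        if child.startswith('EQ:'):
            if pending:
                yield pending  # would be wrapped in a new SemanticParagraph
                pending = []
            yield child        # the raw equation itself
            continue
        pending.append(child)
    if pending:
        yield pending

print(list(split_paragraph_at_equations(['text a', 'text b', 'EQ: x=1', 'text c'])))
# [['text a', 'text b'], 'EQ: x=1', ['text c']]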
from typing import List, Optional
from lxml import etree
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.document.tei.common import (
TEI_E,
TeiElementWrapper,
extend_element,
get_or_create_element_at,
get_tei_xpath_text_content_list,
iter_layout_block_tei_children,
tei_xpath
)
class TeiAuthor(TeiElementWrapper):
pass
class TeiAffiliation(TeiElementWrapper):
pass
class TeiSectionParagraph(TeiElementWrapper):
def __init__(self, element: etree.ElementBase):
super().__init__(element)
self._pending_whitespace: Optional[str] = None
def add_content(self, layout_block: LayoutBlock):
if self._pending_whitespace:
extend_element(self.element, [self._pending_whitespace])
self._pending_whitespace = layout_block.whitespace
extend_element(
self.element,
iter_layout_block_tei_children(layout_block)
)
class TeiSection(TeiElementWrapper):
def get_title_text(self) -> str:
return '\n'.join(get_tei_xpath_text_content_list(
self.element,
'//tei:head',
))
def get_paragraph_text_list(self) -> List[str]:
return get_tei_xpath_text_content_list(
self.element,
'//tei:p',
)
def add_paragraph(self, paragraph: TeiSectionParagraph):
self.element.append(paragraph.element)
def create_paragraph(self) -> TeiSectionParagraph:
return TeiSectionParagraph(TEI_E('p'))
class TeiDocument(TeiElementWrapper):
def __init__(self, root: Optional[etree.ElementBase] = None):
if root is None:
self.root = TEI_E('TEI')
else:
self.root = root
self._reference_element: Optional[etree.ElementBase] = None
super().__init__(self.root)
def get_or_create_element_at(self, path: List[str]) -> etree.ElementBase:
return get_or_create_element_at(self.root, path)
def set_child_element_at(self, path: List[str], child: etree.ElementBase):
parent = self.get_or_create_element_at(path)
parent.append(child)
def get_title(self) -> str:
return '\n'.join(get_tei_xpath_text_content_list(
self.root,
'//tei:fileDesc/tei:titleStmt/tei:title[@level="a"][@type="main"]',
))
def set_title(self, title: str):
self.set_child_element_at(
['teiHeader', 'fileDesc', 'titleStmt'],
TEI_E('title', title, level="a", type="main")
)
def set_title_layout_block(self, title_block: LayoutBlock):
self.set_child_element_at(
['teiHeader', 'fileDesc', 'titleStmt'],
TEI_E(
'title',
{'level': 'a', 'type': 'main'},
*iter_layout_block_tei_children(title_block)
)
)
def get_abstract(self) -> str:
return '\n'.join(get_tei_xpath_text_content_list(
self.root,
'//tei:abstract/tei:p',
))
def set_abstract(self, abstract: str):
self.set_child_element_at(
['teiHeader', 'profileDesc', 'abstract'],
TEI_E('p', abstract)
)
def set_abstract_layout_block(self, abstract_block: LayoutBlock):
self.set_child_element_at(
['teiHeader', 'profileDesc', 'abstract'],
TEI_E('p', *iter_layout_block_tei_children(abstract_block))
)
def get_body_element(self) -> etree.ElementBase:
return self.get_or_create_element_at(['text', 'body'])
def get_body(self) -> TeiElementWrapper:
return TeiElementWrapper(self.get_body_element())
def get_back_element(self) -> etree.ElementBase:
return self.get_or_create_element_at(['text', 'back'])
def get_back_annex_element(self) -> etree.ElementBase:
return self.get_or_create_element_at(['text', 'back', 'div[@type="annex"]'])
def get_back_annex(self) -> TeiElementWrapper:
return TeiElementWrapper(self.get_back_annex_element())
def get_references_element(self) -> etree.ElementBase:
if self._reference_element is not None:
return self._reference_element
self._reference_element = self.get_or_create_element_at(
['text', 'back', 'div[@type="references"]']
)
return self._reference_element
def get_references(self) -> TeiElementWrapper:
return TeiElementWrapper(self.get_references_element())
def get_body_sections(self) -> List[TeiSection]:
return [
TeiSection(element)
for element in tei_xpath(self.get_body_element(), './tei:div')
]
def add_body_section(self, section: TeiSection):
self.get_body_element().append(section.element)
def add_back_annex_section(self, section: TeiSection):
self.get_back_annex_element().append(section.element)
def add_acknowledgement_section(self, section: TeiSection):
section.element.attrib['type'] = 'acknowledgement'
self.get_back_element().append(section.element)
def create_section(self) -> TeiSection:
return TeiSection(TEI_E('div'))
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/document.py | document.py |
| 0.783285 | 0.133811 |
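TeiDocument above is the high-level entry point: it lazily creates the teiHeader/text skeleton and exposes getters and setters for title, abstract, body and back matter. A minimal sketch, assuming sciencebeam_parser 0.1.8 is installed; the title and abstract strings are invented for illustration:

# Minimal usage sketch for TeiDocument (assumes sciencebeam_parser 0.1.8 is installed).
from lxml import etree
from sciencebeam_parser.document.tei.document import TeiDocument

document = TeiDocument()
document.set_title('An Example Title')
document.set_abstract('A one sentence abstract.')

# Sections are plain <div> wrappers appended under <text>/<body>.
section = document.create_section()
document.add_body_section(section)

print(document.get_title())     # 'An Example Title'
print(document.get_abstract())  # 'A one sentence abstract.'
print(etree.tostring(document.root, pretty_print=True).decode())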
import logging
from lxml import etree
from sciencebeam_parser.document.semantic_document import (
SemanticAuthor,
SemanticContentWrapper,
SemanticDate,
SemanticLabel,
SemanticRawReference,
SemanticRawReferenceText,
SemanticReference,
SemanticReferenceList,
SemanticTitle
)
from sciencebeam_parser.document.tei.common import (
TEI_E,
TeiElementBuilder,
create_tei_note_element
)
from sciencebeam_parser.document.tei.factory import (
SingleElementTeiElementFactory,
TeiElementFactoryContext
)
from sciencebeam_parser.document.tei.author import get_tei_author_for_semantic_author_element
LOGGER = logging.getLogger(__name__)
def _get_tei_raw_reference_element(
semantic_raw_ref: SemanticRawReference,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_raw_ref: %s', semantic_raw_ref)
children = []
for semantic_content in semantic_raw_ref:
if isinstance(semantic_content, SemanticRawReferenceText):
children.append(create_tei_note_element(
'raw_reference', semantic_content.merged_block
))
continue
children.extend(context.get_tei_child_elements_for_semantic_content(semantic_content))
tei_ref = TEI_E(
'biblStruct',
context.get_default_attributes_for_semantic_content(semantic_raw_ref),
*children
)
return tei_ref
def get_tei_reference_element( # pylint: disable=too-many-branches
semantic_ref: SemanticReference,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_ref: %s', semantic_ref)
tei_ref = TeiElementBuilder(TEI_E(
'biblStruct',
context.get_default_attributes_for_semantic_content(semantic_ref)
))
is_first_date = True
for semantic_content in semantic_ref:
parent_path = context.get_parent_path_for_semantic_content(
semantic_content
)
tei_child_parent = tei_ref.get_or_create(parent_path)
if isinstance(semantic_content, SemanticLabel):
tei_child_parent.append(create_tei_note_element(
'label', semantic_content.merged_block
))
continue
if isinstance(semantic_content, SemanticRawReferenceText):
tei_child_parent.append(create_tei_note_element(
'raw_reference', semantic_content.merged_block
))
continue
if isinstance(semantic_content, SemanticTitle):
tei_child_parent.append(TEI_E(
'title',
{'level': 'a', 'type': 'main'},
*context.iter_layout_block_tei_children(
semantic_content.merged_block
)
))
continue
if isinstance(semantic_content, SemanticAuthor):
tei_child_parent.append(get_tei_author_for_semantic_author_element(
semantic_content,
context=context
))
continue
if isinstance(semantic_content, SemanticDate):
tei_child_parent = tei_ref.get_or_create(['monogr', 'imprint'])
attrib = {}
if is_first_date:
# assume first date is published date (more or less matches GROBID)
attrib['type'] = 'published'
if semantic_content.year:
attrib['when'] = str(semantic_content.year)
tei_child_parent.append(TEI_E(
'date', attrib,
*context.iter_layout_block_tei_children(layout_block=semantic_content.merged_block)
))
is_first_date = False
continue
tei_child_parent.extend(
context.get_tei_child_elements_for_semantic_content(semantic_content)
)
return tei_ref.element
def get_tei_raw_reference_list_element(
semantic_reference_list: SemanticReferenceList,
context: TeiElementFactoryContext
) -> etree.ElementBase:
tei_reference_list = TeiElementBuilder(TEI_E('listBibl'))
for semantic_content in semantic_reference_list:
if isinstance(semantic_content, SemanticRawReference):
tei_reference_list.append(
_get_tei_raw_reference_element(semantic_content, context=context)
)
continue
if isinstance(semantic_content, SemanticReference):
tei_reference_list.append(
get_tei_reference_element(semantic_content, context=context)
)
continue
tei_reference_list.extend(context.get_tei_child_elements_for_semantic_content(
semantic_content
))
return tei_reference_list.element
class SemanticReferenceListTeiElementFactory(SingleElementTeiElementFactory):
def get_tei_element_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticReferenceList)
semantic_reference_list = semantic_content
return get_tei_raw_reference_list_element(
semantic_reference_list=semantic_reference_list,
context=context
)
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/references.py | references.py |
| 0.382603 | 0.134804 |
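get_tei_reference_element leans on TeiElementBuilder.get_or_create to route each child to the right place inside <biblStruct> (article-level content under <analytic>, imprint details under <monogr>/<imprint>). Below is a minimal sketch of that routing with hand-written child elements rather than semantic content; the title and date values are invented for illustration.

# Minimal sketch of the <biblStruct> path routing used by get_tei_reference_element.
from lxml import etree
from sciencebeam_parser.document.tei.common import TEI_E, TeiElementBuilder

tei_ref = TeiElementBuilder(TEI_E('biblStruct'))
# Article-level title goes under <analytic>.
tei_ref.get_or_create(['analytic']).append(
    TEI_E('title', {'level': 'a', 'type': 'main'}, 'Some Article Title')
)
# The first date is treated as the publication date and goes under
# <monogr>/<imprint>, mirroring get_tei_reference_element above.
tei_ref.get_or_create(['monogr', 'imprint']).append(
    TEI_E('date', {'type': 'published', 'when': '2021'}, '2021')
)
print(etree.tostring(tei_ref.element, pretty_print=True).decode())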
import logging
from typing import (
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
Union
)
from lxml import etree
from sciencebeam_parser.utils.xml_writer import TagExpression
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.document.semantic_document import (
SemanticAddressLine,
SemanticAuthor,
SemanticContentWrapper,
SemanticCountry,
SemanticDepartment,
SemanticExternalIdentifier,
SemanticExternalUrl,
SemanticFigure,
SemanticFigureCitation,
SemanticGivenName,
SemanticGraphic,
SemanticHeading,
SemanticInstitution,
SemanticIssue,
SemanticJournal,
SemanticLabel,
SemanticLaboratory,
SemanticLocation,
SemanticMarker,
SemanticMiddleName,
SemanticMixedNote,
SemanticNameSuffix,
SemanticNameTitle,
SemanticNote,
SemanticOptionalValueSemanticMixedContentWrapper,
SemanticPageRange,
SemanticParagraph,
SemanticPostBox,
SemanticPostCode,
SemanticPublisher,
SemanticRawEditors,
SemanticRawEquation,
SemanticReferenceCitation,
SemanticRegion,
SemanticSection,
SemanticSettlement,
SemanticSurname,
SemanticTable,
SemanticTableCitation,
SemanticTextContentWrapper,
SemanticTitle,
SemanticVolume
)
from sciencebeam_parser.document.tei.common import (
TEI_E,
create_tei_note_element,
extend_element,
get_default_attributes_for_semantic_content,
iter_layout_block_tei_children,
parse_tag_expression
)
from sciencebeam_parser.document.tei.factory import (
T_ElementChildrenList,
TeiElementFactory,
TeiElementFactoryContext
)
from sciencebeam_parser.document.tei.misc import (
SemanticMixedNoteTeiElementFactory
)
from sciencebeam_parser.document.tei.citation import (
CitationTeiElementFactory
)
from sciencebeam_parser.document.tei.external_identifiers import (
ExternalIdentifierTeiElementFactory
)
from sciencebeam_parser.document.tei.equation import (
RawEquationTeiElementFactory
)
from sciencebeam_parser.document.tei.figure_table import (
FigureTeiElementFactory,
TableTeiElementFactory
)
from sciencebeam_parser.document.tei.graphic import (
GraphicTeiElementFactory
)
from sciencebeam_parser.document.tei.page_range import (
TeiBiblScopeForPageRangeElementFactory
)
from sciencebeam_parser.document.tei.section import (
HeadingTeiElementFactory,
ParagraphTeiElementFactory,
SectionTeiElementFactory
)
LOGGER = logging.getLogger(__name__)
SIMPLE_TAG_EXPRESSION_BY_SEMANTIC_CONTENT_CLASS = {
SemanticNameTitle: 'roleName',
SemanticGivenName: 'forename[@type="first"]',
SemanticMiddleName: 'forename[@type="middle"]',
SemanticSurname: 'surname',
SemanticNameSuffix: 'genName',
SemanticRawEditors: 'editor',
SemanticLabel: 'label',
SemanticMarker: 'note[@type="marker"]',
SemanticInstitution: 'orgName[@type="institution"]',
SemanticDepartment: 'orgName[@type="department"]',
SemanticLaboratory: 'orgName[@type="laboratory"]',
SemanticAddressLine: 'addrLine',
SemanticPostCode: 'postCode',
SemanticPostBox: 'postBox',
SemanticSettlement: 'settlement',
SemanticRegion: 'region',
SemanticCountry: 'country',
SemanticJournal: 'title[@level="j"]',
SemanticVolume: 'biblScope[@unit="volume"]',
SemanticIssue: 'biblScope[@unit="issue"]',
SemanticPublisher: 'publisher',
SemanticLocation: 'addrLine',
SemanticExternalUrl: 'ref[@type="url"]'
}
PARSED_TAG_EXPRESSION_BY_SEMANTIC_CONTENT_CLASS: Dict[type, TagExpression] = {
key: parse_tag_expression(value)
for key, value in SIMPLE_TAG_EXPRESSION_BY_SEMANTIC_CONTENT_CLASS.items()
}
PARENT_PATH_BY_SEMANTIC_CONTENT_CLASS = {
SemanticTitle: ['analytic'],
SemanticAuthor: ['analytic'],
SemanticRawEditors: ['monogr'],
SemanticExternalIdentifier: ['analytic'],
SemanticJournal: ['monogr'],
SemanticVolume: ['monogr', 'imprint'],
SemanticIssue: ['monogr', 'imprint'],
SemanticPageRange: ['monogr', 'imprint'],
SemanticPublisher: ['monogr', 'imprint'],
SemanticLocation: ['monogr', 'meeting', 'address']
}
ELEMENT_FACTORY_BY_SEMANTIC_CONTENT_CLASS: Mapping[
Type[Any],
TeiElementFactory
] = {
SemanticMixedNote: SemanticMixedNoteTeiElementFactory(),
SemanticPageRange: TeiBiblScopeForPageRangeElementFactory(),
SemanticExternalIdentifier: ExternalIdentifierTeiElementFactory(),
SemanticFigure: FigureTeiElementFactory(),
SemanticTable: TableTeiElementFactory(),
SemanticFigureCitation: CitationTeiElementFactory(),
SemanticTableCitation: CitationTeiElementFactory(),
SemanticReferenceCitation: CitationTeiElementFactory(),
SemanticHeading: HeadingTeiElementFactory(),
SemanticParagraph: ParagraphTeiElementFactory(),
SemanticSection: SectionTeiElementFactory(),
SemanticRawEquation: RawEquationTeiElementFactory(),
SemanticGraphic: GraphicTeiElementFactory()
}
def get_tei_note_for_semantic_content(
semantic_content: SemanticContentWrapper
) -> etree.ElementBase:
note_type = 'other'
if isinstance(semantic_content, SemanticNote):
note_type = semantic_content.note_type
else:
note_type = 'other:' + type(semantic_content).__name__
return create_tei_note_element(note_type, semantic_content.merged_block)
class DefaultTeiElementFactoryContext(TeiElementFactoryContext):
def get_default_attributes_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
**kwargs
) -> Dict[str, str]:
return get_default_attributes_for_semantic_content(semantic_content, **kwargs)
def iter_layout_block_tei_children(
self,
layout_block: LayoutBlock,
enable_coordinates: bool = True
) -> Iterable[Union[str, etree.ElementBase]]:
return iter_layout_block_tei_children(
layout_block,
enable_coordinates=enable_coordinates
)
def get_tei_child_elements_for_semantic_content(
self,
semantic_content: SemanticContentWrapper
) -> List[etree.ElementBase]:
return get_tei_child_elements_for_semantic_content(
semantic_content,
context=self
)
def append_tei_children_list_and_get_whitespace(
self,
children: T_ElementChildrenList,
semantic_content: SemanticContentWrapper,
pending_whitespace: str
) -> str:
return append_tei_children_list_and_get_whitespace(
children,
semantic_content=semantic_content,
pending_whitespace=pending_whitespace,
context=self
)
def get_parent_path_for_semantic_content(
self,
semantic_content: SemanticContentWrapper
) -> Optional[List[str]]:
return PARENT_PATH_BY_SEMANTIC_CONTENT_CLASS.get(
type(semantic_content)
)
DEFAULT_TEI_ELEMENT_FACTORY_CONTEXT = DefaultTeiElementFactoryContext()
def get_tei_child_elements_for_semantic_content(
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
semantic_type = type(semantic_content)
element_factory = ELEMENT_FACTORY_BY_SEMANTIC_CONTENT_CLASS.get(
semantic_type
)
if element_factory is not None:
return element_factory.get_tei_children_for_semantic_content(
semantic_content,
context=context
)
parsed_tag_expression = PARSED_TAG_EXPRESSION_BY_SEMANTIC_CONTENT_CLASS.get(
semantic_type
)
if parsed_tag_expression:
if (
isinstance(semantic_content, SemanticOptionalValueSemanticMixedContentWrapper)
and semantic_content.value is not None
):
return [parsed_tag_expression.create_node(
get_default_attributes_for_semantic_content(semantic_content),
semantic_content.value,
element_maker=TEI_E
)]
return [parsed_tag_expression.create_node(
*iter_layout_block_tei_children(semantic_content.merged_block),
element_maker=TEI_E
)]
return [get_tei_note_for_semantic_content(semantic_content)]
def get_tei_children_and_whitespace_for_semantic_content(
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> Tuple[List[Union[dict, str, etree.ElementBase]], str]:
layout_block = semantic_content.merged_block
if isinstance(semantic_content, SemanticTextContentWrapper):
return (
list(iter_layout_block_tei_children(layout_block)),
layout_block.whitespace
)
return (
get_tei_child_elements_for_semantic_content(
semantic_content,
context=context
),
layout_block.whitespace
)
def append_tei_children_and_get_whitespace(
parent: etree.ElementBase,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> str:
children, whitespace = get_tei_children_and_whitespace_for_semantic_content(
semantic_content,
context=context
)
extend_element(parent, children)
return whitespace
def append_tei_children_list_and_get_whitespace(
children: T_ElementChildrenList,
semantic_content: SemanticContentWrapper,
pending_whitespace: str,
context: TeiElementFactoryContext
) -> str:
tail_children, tail_whitespace = get_tei_children_and_whitespace_for_semantic_content(
semantic_content,
context=context
)
if not tail_children:
return pending_whitespace
if pending_whitespace:
children.append(pending_whitespace)
children.extend(tail_children)
return tail_whitespace
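get_tei_child_elements_for_semantic_content resolves each semantic type in a fixed order: an explicit element factory first, then a simple tag-expression mapping, then a catch-all <note>. Here is a standalone sketch of that dispatch order using plain classes and strings instead of the real Semantic* types and TEI elements; all names and values are invented for illustration.

# Standalone mirror of the factory -> tag-expression -> note dispatch order.
from typing import Callable, Dict, Type

class Title:
    text = 'Example Title'

class Surname:
    text = 'Smith'

class Unknown:
    text = 'unmapped content'

# 1. explicit element factories handle complex content
FACTORY_BY_CLASS: Dict[Type, Callable[[object], str]] = {
    Title: lambda content: '<title level="a" type="main">%s</title>' % content.text,
}
# 2. simple tag expressions handle flat content
TAG_BY_CLASS: Dict[Type, str] = {
    Surname: 'surname',
}

def get_children(content) -> str:
    factory = FACTORY_BY_CLASS.get(type(content))
    if factory is not None:
        return factory(content)
    tag = TAG_BY_CLASS.get(type(content))
    if tag is not None:
        return '<%s>%s</%s>' % (tag, content.text, tag)
    # 3. fallback: anything unmapped becomes a typed note
    return '<note type="other:%s">%s</note>' % (type(content).__name__, content.text)

for content in (Title(), Surname(), Unknown()):
    print(get_children(content))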
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/factories.py | factories.py |
import logging
from typing import (
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
Union
)
from lxml import etree
from sciencebeam_parser.utils.xml_writer import TagExpression
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.document.semantic_document import (
SemanticAddressLine,
SemanticAuthor,
SemanticContentWrapper,
SemanticCountry,
SemanticDepartment,
SemanticExternalIdentifier,
SemanticExternalUrl,
SemanticFigure,
SemanticFigureCitation,
SemanticGivenName,
SemanticGraphic,
SemanticHeading,
SemanticInstitution,
SemanticIssue,
SemanticJournal,
SemanticLabel,
SemanticLaboratory,
SemanticLocation,
SemanticMarker,
SemanticMiddleName,
SemanticMixedNote,
SemanticNameSuffix,
SemanticNameTitle,
SemanticNote,
SemanticOptionalValueSemanticMixedContentWrapper,
SemanticPageRange,
SemanticParagraph,
SemanticPostBox,
SemanticPostCode,
SemanticPublisher,
SemanticRawEditors,
SemanticRawEquation,
SemanticReferenceCitation,
SemanticRegion,
SemanticSection,
SemanticSettlement,
SemanticSurname,
SemanticTable,
SemanticTableCitation,
SemanticTextContentWrapper,
SemanticTitle,
SemanticVolume
)
from sciencebeam_parser.document.tei.common import (
TEI_E,
create_tei_note_element,
extend_element,
get_default_attributes_for_semantic_content,
iter_layout_block_tei_children,
parse_tag_expression
)
from sciencebeam_parser.document.tei.factory import (
T_ElementChildrenList,
TeiElementFactory,
TeiElementFactoryContext
)
from sciencebeam_parser.document.tei.misc import (
SemanticMixedNoteTeiElementFactory
)
from sciencebeam_parser.document.tei.citation import (
CitationTeiElementFactory
)
from sciencebeam_parser.document.tei.external_identifiers import (
ExternalIdentifierTeiElementFactory
)
from sciencebeam_parser.document.tei.equation import (
RawEquationTeiElementFactory
)
from sciencebeam_parser.document.tei.figure_table import (
FigureTeiElementFactory,
TableTeiElementFactory
)
from sciencebeam_parser.document.tei.graphic import (
GraphicTeiElementFactory
)
from sciencebeam_parser.document.tei.page_range import (
TeiBiblScopeForPageRangeElementFactory
)
from sciencebeam_parser.document.tei.section import (
HeadingTeiElementFactory,
ParagraphTeiElementFactory,
SectionTeiElementFactory
)
LOGGER = logging.getLogger(__name__)
SIMPLE_TAG_EXPRESSION_BY_SEMANTIC_CONTENT_CLASS = {
SemanticNameTitle: 'roleName',
SemanticGivenName: 'forename[@type="first"]',
SemanticMiddleName: 'forename[@type="middle"]',
SemanticSurname: 'surname',
SemanticNameSuffix: 'genName',
SemanticRawEditors: 'editor',
SemanticLabel: 'label',
SemanticMarker: 'note[@type="marker"]',
SemanticInstitution: 'orgName[@type="institution"]',
SemanticDepartment: 'orgName[@type="department"]',
SemanticLaboratory: 'orgName[@type="laboratory"]',
SemanticAddressLine: 'addrLine',
SemanticPostCode: 'postCode',
SemanticPostBox: 'postBox',
SemanticSettlement: 'settlement',
SemanticRegion: 'region',
SemanticCountry: 'country',
SemanticJournal: 'title[@level="j"]',
SemanticVolume: 'biblScope[@unit="volume"]',
SemanticIssue: 'biblScope[@unit="issue"]',
SemanticPublisher: 'publisher',
SemanticLocation: 'addrLine',
SemanticExternalUrl: 'ref[@type="url"]'
}
PARSED_TAG_EXPRESSION_BY_SEMANTIC_CONTENT_CLASS: Dict[type, TagExpression] = {
key: parse_tag_expression(value)
for key, value in SIMPLE_TAG_EXPRESSION_BY_SEMANTIC_CONTENT_CLASS.items()
}
PARENT_PATH_BY_SEMANTIC_CONTENT_CLASS = {
SemanticTitle: ['analytic'],
SemanticAuthor: ['analytic'],
SemanticRawEditors: ['monogr'],
SemanticExternalIdentifier: ['analytic'],
SemanticJournal: ['monogr'],
SemanticVolume: ['monogr', 'imprint'],
SemanticIssue: ['monogr', 'imprint'],
SemanticPageRange: ['monogr', 'imprint'],
SemanticPublisher: ['monogr', 'imprint'],
SemanticLocation: ['monogr', 'meeting', 'address']
}
ELEMENT_FACTORY_BY_SEMANTIC_CONTENT_CLASS: Mapping[
Type[Any],
TeiElementFactory
] = {
SemanticMixedNote: SemanticMixedNoteTeiElementFactory(),
SemanticPageRange: TeiBiblScopeForPageRangeElementFactory(),
SemanticExternalIdentifier: ExternalIdentifierTeiElementFactory(),
SemanticFigure: FigureTeiElementFactory(),
SemanticTable: TableTeiElementFactory(),
SemanticFigureCitation: CitationTeiElementFactory(),
SemanticTableCitation: CitationTeiElementFactory(),
SemanticReferenceCitation: CitationTeiElementFactory(),
SemanticHeading: HeadingTeiElementFactory(),
SemanticParagraph: ParagraphTeiElementFactory(),
SemanticSection: SectionTeiElementFactory(),
SemanticRawEquation: RawEquationTeiElementFactory(),
SemanticGraphic: GraphicTeiElementFactory()
}
def get_tei_note_for_semantic_content(
semantic_content: SemanticContentWrapper
) -> etree.ElementBase:
note_type = 'other'
if isinstance(semantic_content, SemanticNote):
note_type = semantic_content.note_type
else:
note_type = 'other:' + type(semantic_content).__name__
return create_tei_note_element(note_type, semantic_content.merged_block)
class DefaultTeiElementFactoryContext(TeiElementFactoryContext):
def get_default_attributes_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
**kwargs
) -> Dict[str, str]:
return get_default_attributes_for_semantic_content(semantic_content, **kwargs)
def iter_layout_block_tei_children(
self,
layout_block: LayoutBlock,
enable_coordinates: bool = True
) -> Iterable[Union[str, etree.ElementBase]]:
return iter_layout_block_tei_children(
layout_block,
enable_coordinates=enable_coordinates
)
def get_tei_child_elements_for_semantic_content(
self,
semantic_content: SemanticContentWrapper
) -> List[etree.ElementBase]:
return get_tei_child_elements_for_semantic_content(
semantic_content,
context=self
)
def append_tei_children_list_and_get_whitespace(
self,
children: T_ElementChildrenList,
semantic_content: SemanticContentWrapper,
pending_whitespace: str
) -> str:
return append_tei_children_list_and_get_whitespace(
children,
semantic_content=semantic_content,
pending_whitespace=pending_whitespace,
context=self
)
def get_parent_path_for_semantic_content(
self,
semantic_content: SemanticContentWrapper
) -> Optional[List[str]]:
return PARENT_PATH_BY_SEMANTIC_CONTENT_CLASS.get(
type(semantic_content)
)
DEFAULT_TEI_ELEMENT_FACTORY_CONTEXT = DefaultTeiElementFactoryContext()
def get_tei_child_elements_for_semantic_content(
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
semantic_type = type(semantic_content)
element_factory = ELEMENT_FACTORY_BY_SEMANTIC_CONTENT_CLASS.get(
semantic_type
)
if element_factory is not None:
return element_factory.get_tei_children_for_semantic_content(
semantic_content,
context=context
)
parsed_tag_expression = PARSED_TAG_EXPRESSION_BY_SEMANTIC_CONTENT_CLASS.get(
semantic_type
)
if parsed_tag_expression:
if (
isinstance(semantic_content, SemanticOptionalValueSemanticMixedContentWrapper)
and semantic_content.value is not None
):
return [parsed_tag_expression.create_node(
get_default_attributes_for_semantic_content(semantic_content),
semantic_content.value,
element_maker=TEI_E
)]
return [parsed_tag_expression.create_node(
*iter_layout_block_tei_children(semantic_content.merged_block),
element_maker=TEI_E
)]
return [get_tei_note_for_semantic_content(semantic_content)]
def get_tei_children_and_whitespace_for_semantic_content(
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> Tuple[List[Union[dict, str, etree.ElementBase]], str]:
layout_block = semantic_content.merged_block
if isinstance(semantic_content, SemanticTextContentWrapper):
return (
list(iter_layout_block_tei_children(layout_block)),
layout_block.whitespace
)
return (
get_tei_child_elements_for_semantic_content(
semantic_content,
context=context
),
layout_block.whitespace
)
def append_tei_children_and_get_whitespace(
parent: etree.ElementBase,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> str:
children, whitespace = get_tei_children_and_whitespace_for_semantic_content(
semantic_content,
context=context
)
extend_element(parent, children)
return whitespace
def append_tei_children_list_and_get_whitespace(
children: T_ElementChildrenList,
semantic_content: SemanticContentWrapper,
pending_whitespace: str,
context: TeiElementFactoryContext
) -> str:
tail_children, tail_whitespace = get_tei_children_and_whitespace_for_semantic_content(
semantic_content,
context=context
)
if not tail_children:
return pending_whitespace
if pending_whitespace:
children.append(pending_whitespace)
children.extend(tail_children)
return tail_whitespace
| quality_prob: 0.640299 | learning_prob: 0.131368 |
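The mapping above drives TEI element creation through parsed tag expressions. As a minimal, hedged sketch of that mechanism (using only parse_tag_expression and TEI_E, which the module above imports from sciencebeam_parser.document.tei.common), the snippet below builds a single volume element; the exact serialised output, including any TEI namespace, may differ from the comment.

from lxml import etree
from sciencebeam_parser.document.tei.common import TEI_E, parse_tag_expression

# parse the same expression used for SemanticVolume in the mapping above
volume_expression = parse_tag_expression('biblScope[@unit="volume"]')
# create_node accepts child text/elements plus an element maker, as in the code above
volume_node = volume_expression.create_node('5', element_maker=TEI_E)
print(etree.tostring(volume_node))  # roughly: <biblScope unit="volume">5</biblScope>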
from abc import ABC, abstractmethod
import logging
from typing import (
Dict,
Iterable,
List,
Optional,
Union
)
from lxml import etree
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper
)
LOGGER = logging.getLogger(__name__)
T_ElementChildrenListItem = Union[dict, str, etree.ElementBase]
T_ElementChildrenList = List[T_ElementChildrenListItem]
class TeiElementFactoryContext(ABC):
@abstractmethod
def get_default_attributes_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
**kwargs
) -> Dict[str, str]:
pass
@abstractmethod
def iter_layout_block_tei_children(
self,
layout_block: LayoutBlock,
enable_coordinates: bool = True
) -> Iterable[Union[str, etree.ElementBase]]:
pass
@abstractmethod
def get_tei_child_elements_for_semantic_content(
self,
semantic_content: SemanticContentWrapper
) -> List[etree.ElementBase]:
pass
@abstractmethod
def append_tei_children_list_and_get_whitespace(
self,
children: T_ElementChildrenList,
semantic_content: SemanticContentWrapper,
pending_whitespace: str
) -> str:
pass
@abstractmethod
def get_parent_path_for_semantic_content(
self,
semantic_content: SemanticContentWrapper
) -> Optional[List[str]]:
pass
class TeiElementFactory(ABC):
@abstractmethod
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> T_ElementChildrenList:
pass
class SingleElementTeiElementFactory(TeiElementFactory):
@abstractmethod
def get_tei_element_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> etree.ElementBase:
pass
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> T_ElementChildrenList:
return [self.get_tei_element_for_semantic_content(
semantic_content, context=context
)]
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/factory.py | factory.py |
| quality_prob: 0.755817 | learning_prob: 0.108401 |
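To illustrate the abstract interfaces in factory.py above, here is a hedged sketch of a custom single-element factory; the choice of a 'note' wrapper element and the use of TEI_E from sciencebeam_parser.document.tei.common are assumptions made for the example, not something factory.py itself prescribes.

from lxml import etree
from sciencebeam_parser.document.semantic_document import SemanticContentWrapper
from sciencebeam_parser.document.tei.common import TEI_E
from sciencebeam_parser.document.tei.factory import (
    SingleElementTeiElementFactory,
    TeiElementFactoryContext
)

class ExampleNoteTeiElementFactory(SingleElementTeiElementFactory):
    # wrap the semantic content's merged layout block in a simple <note> element
    def get_tei_element_for_semantic_content(
        self,
        semantic_content: SemanticContentWrapper,
        context: TeiElementFactoryContext
    ) -> etree.ElementBase:
        return TEI_E(
            'note',
            context.get_default_attributes_for_semantic_content(semantic_content),
            *context.iter_layout_block_tei_children(semantic_content.merged_block)
        )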
import logging
from lxml import etree
from sciencebeam_parser.document.semantic_document import (
SemanticCaption,
SemanticContentWrapper,
SemanticFigure,
SemanticLabel,
SemanticTable
)
from sciencebeam_parser.document.tei.common import (
TEI_E
)
from sciencebeam_parser.document.tei.factory import (
SingleElementTeiElementFactory,
TeiElementFactoryContext
)
LOGGER = logging.getLogger(__name__)
class FigureTeiElementFactory(SingleElementTeiElementFactory):
def get_tei_element_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticFigure)
semantic_figure = semantic_content
children = [context.get_default_attributes_for_semantic_content(semantic_figure)]
for child_semantic_content in semantic_figure:
if isinstance(child_semantic_content, SemanticLabel):
layout_block = child_semantic_content.merged_block
children.append(TEI_E(
'head', *context.iter_layout_block_tei_children(layout_block)
))
children.append(TEI_E(
'label', *context.iter_layout_block_tei_children(layout_block)
))
continue
if isinstance(child_semantic_content, SemanticCaption):
children.append(TEI_E(
'figDesc', *context.iter_layout_block_tei_children(
child_semantic_content.merged_block
)
))
continue
children.extend(context.get_tei_child_elements_for_semantic_content(
child_semantic_content
))
return TEI_E('figure', *children)
class TableTeiElementFactory(SingleElementTeiElementFactory):
def get_tei_element_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticTable)
semantic_table = semantic_content
children = [context.get_default_attributes_for_semantic_content(semantic_table)]
for child_semantic_content in semantic_table:
if isinstance(child_semantic_content, SemanticLabel):
layout_block = child_semantic_content.merged_block
children.append(TEI_E(
'head', *context.iter_layout_block_tei_children(layout_block)
))
children.append(TEI_E(
'label', *context.iter_layout_block_tei_children(layout_block)
))
continue
if isinstance(child_semantic_content, SemanticCaption):
children.append(TEI_E(
'figDesc', *context.iter_layout_block_tei_children(
child_semantic_content.merged_block
)
))
continue
children.extend(
context.get_tei_child_elements_for_semantic_content(child_semantic_content)
)
return TEI_E('figure', {'type': 'table'}, *children)
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/figure_table.py | figure_table.py |
| quality_prob: 0.44071 | learning_prob: 0.141133 |
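For orientation, the sketch below builds by hand the element shape that FigureTeiElementFactory and TableTeiElementFactory above emit for a label plus caption: a label child becomes both head and label, a caption child becomes figDesc, and tables are figure elements with type="table". Real output additionally carries the attributes contributed by the context (for example coordinates), which are omitted here.

from lxml import etree
from sciencebeam_parser.document.tei.common import TEI_E

expected_figure = TEI_E(
    'figure',
    TEI_E('head', 'Figure 1'),
    TEI_E('label', 'Figure 1'),
    TEI_E('figDesc', 'Example caption text')
)
expected_table = TEI_E(
    'figure', {'type': 'table'},
    TEI_E('head', 'Table 1'),
    TEI_E('label', 'Table 1'),
    TEI_E('figDesc', 'Example table caption')
)
print(etree.tostring(expected_figure, pretty_print=True).decode())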
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Callable, Sequence
import PIL.Image
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.utils.lazy import LazyLoaded, Preloadable
class ComputerVisionModelInstance(ABC):
@abstractmethod
def get_bounding_box(self) -> BoundingBox:
pass
@abstractmethod
def get_type_name(self) -> str:
pass
@dataclass
class SimpleComputerVisionModelInstance(ComputerVisionModelInstance):
bounding_box: BoundingBox
type_name: str
def get_bounding_box(self) -> BoundingBox:
return self.bounding_box
def get_type_name(self) -> str:
return self.type_name
class ComputerVisionModelResult(ABC):
@abstractmethod
def get_instances_by_type_names(
self,
type_names: Sequence[str]
) -> Sequence[ComputerVisionModelInstance]:
pass
def get_instances_by_type_name(
self,
type_name: str
) -> Sequence[ComputerVisionModelInstance]:
return self.get_instances_by_type_names([type_name])
class ComputerVisionModel(ABC, Preloadable):
@abstractmethod
def predict_single(self, image: PIL.Image.Image) -> ComputerVisionModelResult:
pass
T_ComputerVisionModelFactory = Callable[[], ComputerVisionModel]
class LazyComputerVisionModel(ComputerVisionModel):
def __init__(self, factory: T_ComputerVisionModelFactory) -> None:
super().__init__()
self._lazy_model = LazyLoaded[ComputerVisionModel](factory)
def __repr__(self) -> str:
return '%s(factory=%r, loaded=%r)' % (
type(self).__name__, self._lazy_model.factory, self._lazy_model.is_loaded
)
@property
def cv_model(self) -> ComputerVisionModel:
return self._lazy_model.get()
def preload(self):
self.cv_model.preload()
def predict_single(self, image: PIL.Image.Image) -> ComputerVisionModelResult:
return self.cv_model.predict_single(image)
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/cv_models/cv_model.py | cv_model.py |
| quality_prob: 0.917185 | learning_prob: 0.199152 |
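A minimal sketch against the cv_model.py interfaces above: a stub model that always reports one fixed 'figure' region, wrapped in LazyComputerVisionModel so the underlying model is only constructed on first use. It assumes Preloadable requires nothing beyond a preload() method and that BoundingBox accepts the x/y/width/height keywords used elsewhere in the package.

from typing import Sequence
import PIL.Image
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.cv_models.cv_model import (
    ComputerVisionModel,
    ComputerVisionModelInstance,
    ComputerVisionModelResult,
    LazyComputerVisionModel,
    SimpleComputerVisionModelInstance
)

class FixedComputerVisionModelResult(ComputerVisionModelResult):
    # always report a single fixed "figure" region
    def get_instances_by_type_names(
        self,
        type_names: Sequence[str]
    ) -> Sequence[ComputerVisionModelInstance]:
        if 'figure' not in type_names:
            return []
        return [SimpleComputerVisionModelInstance(
            bounding_box=BoundingBox(x=0, y=0, width=100, height=50),
            type_name='figure'
        )]

class StubComputerVisionModel(ComputerVisionModel):
    def preload(self):
        pass  # nothing to load for the stub

    def predict_single(self, image: PIL.Image.Image) -> ComputerVisionModelResult:
        return FixedComputerVisionModelResult()

lazy_model = LazyComputerVisionModel(StubComputerVisionModel)
result = lazy_model.predict_single(PIL.Image.new('RGB', (100, 50)))
print(result.get_instances_by_type_name('figure'))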
import logging
from typing import List, Sequence, Tuple
import PIL.Image
from layoutparser.elements.layout import Layout
from layoutparser.models.auto_layoutmodel import AutoLayoutModel
from layoutparser.models.base_layoutmodel import BaseLayoutModel
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.cv_models.cv_model import (
ComputerVisionModel,
ComputerVisionModelInstance,
ComputerVisionModelResult,
SimpleComputerVisionModelInstance
)
from sciencebeam_parser.utils.lazy import LazyLoaded
LOGGER = logging.getLogger(__name__)
DEFAULT_MODEL_PATH = 'lp://efficientdet/PubLayNet'
DEFAULT_SCORE_THRESHOLD = 0.0
def load_model(model_path: str) -> BaseLayoutModel:
LOGGER.info('loading model: %r', model_path)
return AutoLayoutModel(model_path)
def get_bounding_box_for_layout_parser_coordinates(
coordinates: Tuple[float, float, float, float]
) -> BoundingBox:
x1, y1, x2, y2 = coordinates
return BoundingBox(x=x1, y=y1, width=x2 - x1, height=y2 - y1)
def is_bounding_box_overlapping_with_any_bounding_boxes(
bounding_box: BoundingBox,
other_bounding_boxes: Sequence[BoundingBox],
max_overlap_ratio: float = 0.1
) -> bool:
bounding_box_area = bounding_box.area
for other_bounding_box in other_bounding_boxes:
intersection_bounding_box = bounding_box.intersection(
other_bounding_box
)
if not intersection_bounding_box:
continue
if intersection_bounding_box.area / bounding_box_area >= max_overlap_ratio:
return True
return False
class LayoutParserComputerVisionModelResult(ComputerVisionModelResult):
def __init__(
self,
layout: Layout,
score_threshold: float,
avoid_overlapping: bool,
max_overlap_ratio: float = 0.1
):
super().__init__()
self.layout = layout
self.score_threshold = score_threshold
self.avoid_overlapping = avoid_overlapping
self.max_overlap_ratio = max_overlap_ratio
LOGGER.debug('layout: %r', layout)
def get_instances_by_type_names(
self,
type_names: Sequence[str]
) -> Sequence[ComputerVisionModelInstance]:
instances = [
SimpleComputerVisionModelInstance(
bounding_box=get_bounding_box_for_layout_parser_coordinates(block.coordinates),
type_name=block.type
)
for block in self.layout
if (
block.type in type_names
and block.score >= self.score_threshold
)
]
instances = [
instance
for instance in instances
if instance.get_bounding_box()
]
if self.avoid_overlapping:
_instances = instances
instances = []
prev_bounding_boxes: List[BoundingBox] = []
for instance in _instances:
bounding_box = instance.get_bounding_box()
if is_bounding_box_overlapping_with_any_bounding_boxes(
bounding_box,
prev_bounding_boxes,
max_overlap_ratio=self.max_overlap_ratio
):
LOGGER.debug(
'bounding box overlapping with prev: %r ~ %r',
bounding_box, prev_bounding_boxes
)
continue
instances.append(instance)
prev_bounding_boxes.append(bounding_box)
return instances
class LayoutParserComputerVisionModel(ComputerVisionModel):
def __init__(
self,
config: dict,
model_path: str = DEFAULT_MODEL_PATH,
):
super().__init__()
self.score_threshold = float(config.get('score_threshold', DEFAULT_SCORE_THRESHOLD))
self.avoid_overlapping = bool(config.get('avoid_overlapping', True))
self.model_path = model_path
self._lazy_model = LazyLoaded[BaseLayoutModel](self._load_model)
def _load_model(self) -> BaseLayoutModel:
model = load_model(self.model_path)
LOGGER.info('loaded layout model: %r', self.model_path)
return model
@property
def layout_model(self) -> BaseLayoutModel:
return self._lazy_model.get()
def preload(self):
self._lazy_model.get()
def predict_single(self, image: PIL.Image.Image) -> ComputerVisionModelResult:
return LayoutParserComputerVisionModelResult(
self.layout_model.detect(image),
score_threshold=self.score_threshold,
avoid_overlapping=self.avoid_overlapping
)
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/cv_models/layout_parser_cv_model.py | layout_parser_cv_model.py |
| quality_prob: 0.854354 | learning_prob: 0.19787 |
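A hedged usage sketch for the LayoutParser-backed model above. It assumes the layoutparser dependency and the default EfficientDet/PubLayNet weights are available, that 'Figure' and 'Table' are among the type names reported by that model (the labels come from layoutparser, not from this file), and that 'page-1.png' stands in for a rendered page image.

import PIL.Image
from sciencebeam_parser.cv_models.layout_parser_cv_model import (
    LayoutParserComputerVisionModel
)

# score_threshold and avoid_overlapping are the two config keys read above
cv_model = LayoutParserComputerVisionModel(config={
    'score_threshold': 0.5,
    'avoid_overlapping': True
})
image = PIL.Image.open('page-1.png')  # hypothetical rendered page image
result = cv_model.predict_single(image)
for instance in result.get_instances_by_type_names(['Figure', 'Table']):
    print(instance.get_type_name(), instance.get_bounding_box())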
import logging
import math
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Iterable, List, NamedTuple, Optional
from lxml import etree
from sciencebeam_parser.lookup import TextLookUp
from sciencebeam_parser.document.layout_document import LayoutDocument, LayoutToken, LayoutLine
from sciencebeam_parser.external.pdfalto.parser import parse_alto_root
LOGGER = logging.getLogger(__name__)
class AppFeaturesContext(NamedTuple):
country_lookup: Optional[TextLookUp] = None
first_name_lookup: Optional[TextLookUp] = None
last_name_lookup: Optional[TextLookUp] = None
DEFAULT_APP_FEATURES_CONTEXT = AppFeaturesContext()
class DocumentFeaturesContext(NamedTuple):
app_features_context: AppFeaturesContext = DEFAULT_APP_FEATURES_CONTEXT
DEFAULT_DOCUMENT_FEATURES_CONTEXT = DocumentFeaturesContext()
class NewDocumentMarker:
pass
NEW_DOCUMENT_MARKER = NewDocumentMarker()
class LabeledLayoutToken(NamedTuple):
label: str
layout_token: LayoutToken
@dataclass
class LayoutModelData:
data_line: str
layout_line: Optional[LayoutLine] = field(repr=False, default=None)
layout_token: Optional[LayoutToken] = field(repr=False, default=None)
@property
def label_token_text(self):
return self.data_line.split(' ', maxsplit=1)[0]
@dataclass
class LabeledLayoutModelData(LayoutModelData):
label: Optional[str] = None
@staticmethod
def from_model_data(
model_data: LayoutModelData,
label: Optional[str] = None
) -> 'LabeledLayoutModelData':
return LabeledLayoutModelData(
data_line=model_data.data_line,
layout_line=model_data.layout_line,
layout_token=model_data.layout_token,
label=label
)
class ModelDataGenerator(ABC):
def iter_data_lines_for_xml_root(
self,
root: etree.ElementBase
) -> Iterable[str]:
return self.iter_data_lines_for_layout_document(
parse_alto_root(root)
)
@abstractmethod
def iter_model_data_for_layout_document(
self,
layout_document: LayoutDocument
) -> Iterable[LayoutModelData]:
pass
def iter_data_lines_for_layout_document( # pylint: disable=too-many-locals
self,
layout_document: LayoutDocument
) -> Iterable[str]:
return (
model_data.data_line
for model_data in self.iter_model_data_for_layout_document(
layout_document
)
)
def iter_data_lines_for_layout_documents( # pylint: disable=too-many-locals
self,
layout_documents: Iterable[LayoutDocument]
) -> Iterable[str]:
for index, layout_document in enumerate(layout_documents):
LOGGER.debug('generating data lines for document: index=%d', index)
if index > 0:
LOGGER.debug('adding document separator')
yield from ['\n']
yield from (
model_data.data_line
for model_data in self.iter_model_data_for_layout_document(
layout_document
)
)
def feature_linear_scaling_int(pos: int, total: int, bin_count: int) -> int:
"""
Given an integer value between 0 and total, discretized into nbBins following a linear scale
Adapted from:
grobid-core/src/main/java/org/grobid/core/features/FeatureFactory.java
"""
if pos >= total:
return bin_count
if pos <= 0:
return 0
return math.floor((pos / total) * bin_count)
def get_token_font_status(previous_token: Optional[LayoutToken], current_token: LayoutToken):
if not previous_token:
return 'NEWFONT'
return (
'SAMEFONT' if current_token.font.font_family == previous_token.font.font_family
else 'NEWFONT'
)
def get_token_font_size_feature(
previous_token: Optional[LayoutToken],
current_token: LayoutToken
):
if not previous_token:
return 'HIGHERFONT'
previous_font_size = previous_token.font.font_size
current_font_size = current_token.font.font_size
if not previous_font_size or not current_font_size:
return 'HIGHERFONT'
if previous_font_size < current_font_size:
return 'HIGHERFONT'
if previous_font_size > current_font_size:
return 'LOWERFONT'
return 'SAMEFONTSIZE'
def get_digit_feature(text: str) -> str:
if text.isdigit():
return 'ALLDIGIT'
for c in text:
if c.isdigit():
return 'CONTAINSDIGITS'
return 'NODIGIT'
def get_capitalisation_feature(text: str) -> str:
if text and all(not c.islower() for c in text):
return 'ALLCAP'
if text and text[0].isupper():
return 'INITCAP'
return 'NOCAPS'
class PunctuationProfileValues:
OPENBRACKET = 'OPENBRACKET'
ENDBRACKET = 'ENDBRACKET'
DOT = 'DOT'
COMMA = 'COMMA'
HYPHEN = 'HYPHEN'
QUOTE = 'QUOTE'
PUNCT = 'PUNCT'
NOPUNCT = 'NOPUNCT'
PUNCTUATION_PROFILE_MAP = {
'(': PunctuationProfileValues.OPENBRACKET,
'[': PunctuationProfileValues.OPENBRACKET,
')': PunctuationProfileValues.ENDBRACKET,
']': PunctuationProfileValues.ENDBRACKET,
'.': PunctuationProfileValues.DOT,
',': PunctuationProfileValues.COMMA,
'-': PunctuationProfileValues.HYPHEN,
'–': PunctuationProfileValues.HYPHEN,
'"': PunctuationProfileValues.QUOTE,
'\'': PunctuationProfileValues.QUOTE,
'`': PunctuationProfileValues.QUOTE,
'’': PunctuationProfileValues.QUOTE
}
IS_PUNCT_PATTERN = r"^[\,\:;\?\.]+$"
PUNCTUATION_PROFILE_CHARACTERS = (
"(([ •*,:;?.!/))-−–‐«»„\"“”‘’'`$#@]*\u2666\u2665\u2663\u2660\u00A0"
)
def get_line_status_with_lineend_for_single_token(token_index: int, token_count: int) -> str:
return (
'LINEEND' if token_index == token_count - 1
else (
'LINESTART' if token_index == 0
else 'LINEIN'
)
)
def get_line_status_with_linestart_for_single_token(
token_index: int, token_count: int
) -> str:
return (
'LINESTART' if token_index == 0
else (
'LINEEND' if token_index == token_count - 1
else 'LINEIN'
)
)
def get_block_status_with_blockend_for_single_token(
line_index: int,
line_count: int,
line_status: str
) -> str:
return (
'BLOCKEND'
if line_index == line_count - 1 and line_status == 'LINEEND'
else (
'BLOCKSTART'
if line_index == 0 and line_status == 'LINESTART'
else 'BLOCKIN'
)
)
def get_block_status_with_blockstart_for_single_token(
line_index: int,
line_count: int,
line_status: str
) -> str:
return (
'BLOCKSTART'
if line_index == 0 and line_status == 'LINESTART'
else (
'BLOCKEND'
if line_index == line_count - 1 and line_status == 'LINEEND'
else 'BLOCKIN'
)
)
class RelativeFontSizeFeature:
def __init__(self, layout_tokens: Iterable[LayoutToken]):
font_sizes = [
layout_token.font.font_size
for layout_token in layout_tokens
if layout_token.font.font_size
]
LOGGER.debug('font_sizes (%d): %r', len(font_sizes), font_sizes)
self.largest_font_size = max(font_sizes) if font_sizes else 0.0
self.smallest_font_size = min(font_sizes) if font_sizes else 0.0
self.mean_font_size = sum(font_sizes) / len(font_sizes) if font_sizes else 0.0
LOGGER.debug('relative font size: %r', self)
def __repr__(self) -> str:
return (
'%s(largest_font_size=%f, smallest_font_size=%f, mean_font_size=%f)'
) % (
type(self).__name__,
self.largest_font_size,
self.smallest_font_size,
self.mean_font_size
)
def is_largest_font_size(self, layout_token: LayoutToken):
return layout_token.font.font_size == self.largest_font_size
def is_smallest_font_size(self, layout_token: LayoutToken):
return layout_token.font.font_size == self.smallest_font_size
def is_larger_than_average_font_size(self, layout_token: LayoutToken):
if not layout_token.font.font_size:
return False
return layout_token.font.font_size > self.mean_font_size
class LineIndentationStatusFeature:
def __init__(self):
self._line_start_x = None
self._is_new_line = True
self._is_indented = False
def on_new_block(self):
pass
def on_new_line(self):
self._is_new_line = True
def get_is_indented_and_update(self, layout_token: LayoutToken):
if self._is_new_line and layout_token.coordinates and layout_token.text:
previous_line_start_x = self._line_start_x
self._line_start_x = layout_token.coordinates.x
character_width = layout_token.coordinates.width / len(layout_token.text)
if previous_line_start_x is not None:
if self._line_start_x - previous_line_start_x > character_width:
self._is_indented = True
if previous_line_start_x - self._line_start_x > character_width:
self._is_indented = False
self._is_new_line = False
return self._is_indented
def get_punctuation_type_feature(text: str) -> str:
result = PUNCTUATION_PROFILE_MAP.get(text)
if not result and re.match(IS_PUNCT_PATTERN, text):
return PunctuationProfileValues.PUNCT
if not result:
return PunctuationProfileValues.NOPUNCT
return result
def get_raw_punctuation_profile_feature(text: str) -> str:
if not text:
return ''
return ''.join((
c
for c in text
if not c.isspace() and c in PUNCTUATION_PROFILE_CHARACTERS
))
def get_punctuation_profile_feature_for_raw_punctuation_profile_feature(
raw_punctuation_profile: str
) -> str:
if not raw_punctuation_profile:
return 'no'
return raw_punctuation_profile
def get_punctuation_profile_length_for_raw_punctuation_profile_feature(
raw_punctuation_profile: str,
max_length: Optional[int] = None
) -> str:
if max_length:
return str(min(10, len(raw_punctuation_profile)))
return str(len(raw_punctuation_profile))
def get_char_shape_feature(ch: str) -> str:
if ch.isdigit():
return 'd'
if ch.isalpha():
if ch.isupper():
return 'X'
return 'x'
return ch
def get_word_shape_feature(text: str) -> str:
shape = [
get_char_shape_feature(ch)
for ch in text
]
prefix = shape[:1]
middle = shape[1:-2]
suffix = shape[1:][-2:]
middle_without_consequitive_duplicates = middle[:1].copy()
for ch in middle[1:]:
if ch != middle_without_consequitive_duplicates[-1]:
middle_without_consequitive_duplicates.append(ch)
return ''.join(prefix + middle_without_consequitive_duplicates + suffix)
def get_str_bool_feature_value(value: Optional[bool]) -> str:
return '1' if value else '0'
class CommonLayoutTokenFeatures(ABC): # pylint: disable=too-many-public-methods
def __init__(self, layout_token: LayoutToken) -> None:
self.layout_token = layout_token
self.token_text = layout_token.text or ''
def get_lower_token_text(self) -> str:
return self.token_text.lower()
def get_prefix(self, n: int) -> str:
return self.token_text[:n]
def get_suffix(self, n: int) -> str:
return self.token_text[-n:]
def get_str_is_bold(self) -> str:
return get_str_bool_feature_value(self.layout_token.font.is_bold)
def get_str_is_italic(self) -> str:
return get_str_bool_feature_value(self.layout_token.font.is_italics)
def get_str_is_superscript(self) -> str:
return get_str_bool_feature_value(self.layout_token.font.is_superscript)
def get_str_is_single_char(self) -> str:
return get_str_bool_feature_value(len(self.token_text) == 1)
def get_digit_status_using_containsdigits(self) -> str:
return get_digit_feature(self.token_text)
def get_digit_status_using_containdigit(self) -> str:
digit_status = get_digit_feature(self.token_text)
if digit_status == 'CONTAINSDIGITS':
digit_status = 'CONTAINDIGIT'
return digit_status
def get_capitalisation_status_using_allcap(self) -> str:
if self.get_digit_status_using_containsdigits() == 'ALLDIGIT':
return 'NOCAPS'
return get_capitalisation_feature(self.token_text)
def get_capitalisation_status_using_allcaps(self) -> str:
capitalisation_status = self.get_capitalisation_status_using_allcap()
if capitalisation_status == 'ALLCAP':
return 'ALLCAPS'
return capitalisation_status
def get_punctuation_type_feature(self) -> str:
return get_punctuation_type_feature(self.token_text)
def get_word_shape_feature(self) -> str:
return get_word_shape_feature(self.token_text)
def get_dummy_str_is_proper_name(self) -> str:
return '0'
def get_dummy_str_is_common_name(self) -> str:
return '0'
def get_dummy_str_is_first_name(self) -> str:
return '0'
def get_dummy_str_is_last_name(self) -> str:
return '0'
def get_dummy_str_is_known_title(self) -> str:
return '0'
def get_dummy_str_is_known_suffix(self) -> str:
return '0'
def get_dummy_str_is_location_name(self) -> str:
return '0'
def get_dummy_str_is_country_name(self) -> str:
return '0'
def get_dummy_str_is_year(self) -> str:
return '0'
def get_dummy_str_is_month(self) -> str:
return '0'
def get_dummy_str_is_email(self) -> str:
return '0'
def get_dummy_str_is_http(self) -> str:
return '0'
def get_dummy_str_is_known_collaboration(self) -> str:
return '0'
def get_dummy_str_is_known_journal_title(self) -> str:
return '0'
def get_dummy_str_is_known_conference_title(self) -> str:
return '0'
def get_dummy_str_is_known_publisher(self) -> str:
return '0'
def get_dummy_str_is_known_identifier(self) -> str:
return '0'
def get_dummy_label(self) -> str:
return '0'
_LINESCALE = 10
class ContextAwareLayoutTokenFeatures( # pylint: disable=too-many-public-methods
CommonLayoutTokenFeatures
):
def __init__( # pylint: disable=too-many-locals
self,
layout_token: LayoutToken,
layout_line: LayoutLine,
document_features_context: DocumentFeaturesContext,
previous_layout_token: Optional[LayoutToken] = None,
token_index: int = 0,
token_count: int = 0,
document_token_index: int = 0,
document_token_count: int = 0,
line_index: int = 0,
line_count: int = 0,
concatenated_line_tokens_text: str = '',
max_concatenated_line_tokens_length: int = 0,
line_token_position: int = 0,
relative_font_size_feature: Optional[RelativeFontSizeFeature] = None,
line_indentation_status_feature: Optional[LineIndentationStatusFeature] = None
) -> None:
super().__init__(layout_token)
self.layout_line = layout_line
self.previous_layout_token = previous_layout_token
self.document_features_context = document_features_context
self.token_index = token_index
self.token_count = token_count
self.document_token_index = document_token_index
self.document_token_count = document_token_count
self.line_index = line_index
self.line_count = line_count
self.concatenated_line_tokens_text = concatenated_line_tokens_text
self.max_concatenated_line_tokens_length = max_concatenated_line_tokens_length
self.line_token_position = line_token_position
self.relative_font_size_feature = relative_font_size_feature
self.line_indentation_status_feature = line_indentation_status_feature
def get_layout_model_data(self, features: List[str]) -> LayoutModelData:
return LayoutModelData(
layout_line=self.layout_line,
layout_token=self.layout_token,
data_line=' '.join(features)
)
def get_line_status_with_lineend_for_single_token(self) -> str:
return get_line_status_with_lineend_for_single_token(
token_index=self.token_index, token_count=self.token_count
)
def get_line_status_with_linestart_for_single_token(self) -> str:
return get_line_status_with_linestart_for_single_token(
token_index=self.token_index, token_count=self.token_count
)
def get_block_status_with_blockend_for_single_token(self) -> str:
return get_block_status_with_blockend_for_single_token(
line_index=self.line_index,
line_count=self.line_count,
line_status=self.get_line_status_with_lineend_for_single_token()
)
def get_block_status_with_blockstart_for_single_token(self) -> str:
return get_block_status_with_blockstart_for_single_token(
line_index=self.line_index,
line_count=self.line_count,
line_status=self.get_line_status_with_linestart_for_single_token()
)
def get_dummy_page_status(self) -> str:
return 'PAGEIN'
def get_is_indented_and_update(self) -> bool:
assert self.line_indentation_status_feature
return self.line_indentation_status_feature.get_is_indented_and_update(
self.layout_token
)
def get_alignment_status(self) -> str:
indented = self.get_is_indented_and_update()
return 'LINEINDENT' if indented else 'ALIGNEDLEFT'
def get_token_font_status(self) -> str:
return get_token_font_status(self.previous_layout_token, self.layout_token)
def get_token_font_size_feature(self) -> str:
return get_token_font_size_feature(self.previous_layout_token, self.layout_token)
def get_str_is_largest_font_size(self) -> str:
assert self.relative_font_size_feature
return get_str_bool_feature_value(
self.relative_font_size_feature.is_largest_font_size(
self.layout_token
)
)
def get_dummy_str_is_smallest_font_size(self) -> str:
return '0'
def get_str_is_smallest_font_size(self) -> str:
assert self.relative_font_size_feature
return get_str_bool_feature_value(
self.relative_font_size_feature.is_smallest_font_size(
self.layout_token
)
)
def get_dummy_str_is_larger_than_average_font_size(self, value: str = '0') -> str:
return value
def get_str_is_larger_than_average_font_size(self) -> str:
assert self.relative_font_size_feature
return get_str_bool_feature_value(
self.relative_font_size_feature.is_larger_than_average_font_size(
self.layout_token
)
)
def get_raw_line_punctuation_profile(self) -> str:
return get_raw_punctuation_profile_feature(self.concatenated_line_tokens_text)
def get_line_punctuation_profile(self) -> str:
return get_punctuation_profile_feature_for_raw_punctuation_profile_feature(
self.get_raw_line_punctuation_profile()
)
def get_line_punctuation_profile_length_feature(self) -> str:
return get_punctuation_profile_length_for_raw_punctuation_profile_feature(
self.get_raw_line_punctuation_profile()
)
def get_truncated_line_punctuation_profile_length_feature(self) -> str:
return get_punctuation_profile_length_for_raw_punctuation_profile_feature(
self.get_raw_line_punctuation_profile(),
max_length=10
)
def get_str_line_token_relative_position(self) -> str:
return str(feature_linear_scaling_int(
self.line_token_position,
len(self.concatenated_line_tokens_text),
_LINESCALE
))
def get_str_line_relative_length(self) -> str:
return str(feature_linear_scaling_int(
len(self.concatenated_line_tokens_text),
self.max_concatenated_line_tokens_length,
_LINESCALE
))
def get_str_sentence_token_relative_position(self) -> str:
return str(feature_linear_scaling_int(
# the document is currently the sentence view
self.document_token_index,
self.document_token_count,
12
))
def _get_str_lookup(self, lookup: Optional[TextLookUp]) -> str:
if not lookup:
return get_str_bool_feature_value(False)
return get_str_bool_feature_value(
lookup.contains(self.token_text)
)
def get_str_is_country(self) -> str:
return self._get_str_lookup(
self.document_features_context.app_features_context.country_lookup
)
def get_str_is_first_name(self) -> str:
return self._get_str_lookup(
self.document_features_context.app_features_context.first_name_lookup
)
def get_str_is_last_name(self) -> str:
return self._get_str_lookup(
self.document_features_context.app_features_context.last_name_lookup
)
def get_dummy_str_relative_document_position(self):
# position within whole document
return '0'
def get_dummy_str_relative_page_position(self):
return '0'
def get_dummy_str_is_bitmap_around(self) -> str:
return '0'
def get_dummy_str_is_vector_around(self) -> str:
return '0'
def get_dummy_callout_type(self) -> str:
return 'UNKNOWN' # one of UNKNOWN, NUMBER, AUTHOR
def get_dummy_str_is_callout_known(self) -> str:
return '0'
class ContextAwareLayoutTokenModelDataGenerator(ModelDataGenerator):
def __init__(
self,
document_features_context: DocumentFeaturesContext
):
self.document_features_context = document_features_context
@abstractmethod
def iter_model_data_for_context_layout_token_features(
self,
token_features: ContextAwareLayoutTokenFeatures
) -> Iterable[LayoutModelData]:
pass
def iter_model_data_for_layout_document( # pylint: disable=too-many-locals
self,
layout_document: LayoutDocument
) -> Iterable[LayoutModelData]:
relative_font_size_feature = RelativeFontSizeFeature(
layout_document.iter_all_tokens()
)
line_indentation_status_feature = LineIndentationStatusFeature()
previous_layout_token: Optional[LayoutToken] = None
concatenated_line_tokens_length_by_line_id = {
id(line): sum((len(token.text) for token in line.tokens))
for block in layout_document.iter_all_blocks()
for line in block.lines
}
if not concatenated_line_tokens_length_by_line_id:
LOGGER.debug('empty layout document')
return
max_concatenated_line_tokens_length = max(
concatenated_line_tokens_length_by_line_id.values()
)
document_token_count = sum((
1
for _ in layout_document.iter_all_tokens()
))
document_token_index = 0
for block in layout_document.iter_all_blocks():
block_lines = block.lines
line_count = len(block_lines)
for line_index, line in enumerate(block_lines):
line_indentation_status_feature.on_new_line()
line_tokens = line.tokens
token_count = len(line_tokens)
concatenated_line_tokens_text = ''.join([
token.text for token in line_tokens
])
line_token_position = 0
for token_index, token in enumerate(line_tokens):
yield from self.iter_model_data_for_context_layout_token_features(
ContextAwareLayoutTokenFeatures(
token,
layout_line=line,
previous_layout_token=previous_layout_token,
document_features_context=self.document_features_context,
token_index=token_index,
token_count=token_count,
document_token_index=document_token_index,
document_token_count=document_token_count,
line_index=line_index,
line_count=line_count,
concatenated_line_tokens_text=concatenated_line_tokens_text,
max_concatenated_line_tokens_length=max_concatenated_line_tokens_length,
line_token_position=line_token_position,
relative_font_size_feature=relative_font_size_feature,
line_indentation_status_feature=line_indentation_status_feature
)
)
previous_layout_token = token
line_token_position += len(token.text)
document_token_index += 1
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/data.py | data.py |
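Most of the feature helpers in data.py above are pure string functions, so they can be exercised directly; the expected values in the comments below are worked out from the code above rather than taken from a recorded run.

from sciencebeam_parser.models.data import (
    feature_linear_scaling_int,
    get_capitalisation_feature,
    get_digit_feature,
    get_punctuation_type_feature,
    get_word_shape_feature
)

print(feature_linear_scaling_int(5, 10, 10))   # 5 (position 5 of 10, over 10 bins)
print(get_digit_feature('2021'))               # ALLDIGIT
print(get_digit_feature('Covid19'))            # CONTAINSDIGITS
print(get_capitalisation_feature('Title'))     # INITCAP
print(get_punctuation_type_feature('('))       # OPENBRACKET
print(get_word_shape_feature('Parsing2021'))   # Xxddd (repeated shapes in the middle collapse)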
)
def get_dummy_page_status(self) -> str:
return 'PAGEIN'
def get_is_indented_and_update(self) -> bool:
assert self.line_indentation_status_feature
return self.line_indentation_status_feature.get_is_indented_and_update(
self.layout_token
)
def get_alignment_status(self) -> str:
indented = self.get_is_indented_and_update()
return 'LINEINDENT' if indented else 'ALIGNEDLEFT'
def get_token_font_status(self) -> str:
return get_token_font_status(self.previous_layout_token, self.layout_token)
def get_token_font_size_feature(self) -> str:
return get_token_font_size_feature(self.previous_layout_token, self.layout_token)
def get_str_is_largest_font_size(self) -> str:
assert self.relative_font_size_feature
return get_str_bool_feature_value(
self.relative_font_size_feature.is_largest_font_size(
self.layout_token
)
)
def get_dummy_str_is_smallest_font_size(self) -> str:
return '0'
def get_str_is_smallest_font_size(self) -> str:
assert self.relative_font_size_feature
return get_str_bool_feature_value(
self.relative_font_size_feature.is_smallest_font_size(
self.layout_token
)
)
def get_dummy_str_is_larger_than_average_font_size(self, value: str = '0') -> str:
return value
def get_str_is_larger_than_average_font_size(self) -> str:
assert self.relative_font_size_feature
return get_str_bool_feature_value(
self.relative_font_size_feature.is_larger_than_average_font_size(
self.layout_token
)
)
def get_raw_line_punctuation_profile(self) -> str:
return get_raw_punctuation_profile_feature(self.concatenated_line_tokens_text)
def get_line_punctuation_profile(self) -> str:
return get_punctuation_profile_feature_for_raw_punctuation_profile_feature(
self.get_raw_line_punctuation_profile()
)
def get_line_punctuation_profile_length_feature(self) -> str:
return get_punctuation_profile_length_for_raw_punctuation_profile_feature(
self.get_raw_line_punctuation_profile()
)
def get_truncated_line_punctuation_profile_length_feature(self) -> str:
return get_punctuation_profile_length_for_raw_punctuation_profile_feature(
self.get_raw_line_punctuation_profile(),
max_length=10
)
def get_str_line_token_relative_position(self) -> str:
return str(feature_linear_scaling_int(
self.line_token_position,
len(self.concatenated_line_tokens_text),
_LINESCALE
))
def get_str_line_relative_length(self) -> str:
return str(feature_linear_scaling_int(
len(self.concatenated_line_tokens_text),
self.max_concatenated_line_tokens_length,
_LINESCALE
))
def get_str_sentence_token_relative_position(self) -> str:
return str(feature_linear_scaling_int(
# the document is currently the sentence view
self.document_token_index,
self.document_token_count,
12
))
def _get_str_lookup(self, lookup: Optional[TextLookUp]) -> str:
if not lookup:
return get_str_bool_feature_value(False)
return get_str_bool_feature_value(
lookup.contains(self.token_text)
)
def get_str_is_country(self) -> str:
return self._get_str_lookup(
self.document_features_context.app_features_context.country_lookup
)
def get_str_is_first_name(self) -> str:
return self._get_str_lookup(
self.document_features_context.app_features_context.first_name_lookup
)
def get_str_is_last_name(self) -> str:
return self._get_str_lookup(
self.document_features_context.app_features_context.last_name_lookup
)
def get_dummy_str_relative_document_position(self):
# position within whole document
return '0'
def get_dummy_str_relative_page_position(self):
return '0'
def get_dummy_str_is_bitmap_around(self) -> str:
return '0'
def get_dummy_str_is_vector_around(self) -> str:
return '0'
def get_dummy_callout_type(self) -> str:
return 'UNKNOWN' # one of UNKNOWN, NUMBER, AUTHOR
def get_dummy_str_is_callout_known(self) -> str:
return '0'
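# Illustrative sketch (not part of the original module): how a model-specific
# data generator typically assembles a whitespace-separated feature row from
# the getters above. The selection and order of features here is arbitrary and
# only demonstrates the calling pattern.
def _example_feature_row(token_features: ContextAwareLayoutTokenFeatures) -> str:
    features = [
        token_features.token_text,
        token_features.get_lower_token_text(),
        token_features.get_capitalisation_status_using_allcap(),
        token_features.get_digit_status_using_containsdigits(),
        token_features.get_str_is_bold(),
        token_features.get_word_shape_feature(),
        token_features.get_line_status_with_lineend_for_single_token()
    ]
    return ' '.join(features)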
class ContextAwareLayoutTokenModelDataGenerator(ModelDataGenerator):
def __init__(
self,
document_features_context: DocumentFeaturesContext
):
self.document_features_context = document_features_context
@abstractmethod
def iter_model_data_for_context_layout_token_features(
self,
token_features: ContextAwareLayoutTokenFeatures
) -> Iterable[LayoutModelData]:
pass
def iter_model_data_for_layout_document( # pylint: disable=too-many-locals
self,
layout_document: LayoutDocument
) -> Iterable[LayoutModelData]:
relative_font_size_feature = RelativeFontSizeFeature(
layout_document.iter_all_tokens()
)
line_indentation_status_feature = LineIndentationStatusFeature()
previous_layout_token: Optional[LayoutToken] = None
concatenated_line_tokens_length_by_line_id = {
id(line): sum((len(token.text) for token in line.tokens))
for block in layout_document.iter_all_blocks()
for line in block.lines
}
if not concatenated_line_tokens_length_by_line_id:
LOGGER.debug('empty layout document')
return
max_concatenated_line_tokens_length = max(
concatenated_line_tokens_length_by_line_id.values()
)
document_token_count = sum((
1
for _ in layout_document.iter_all_tokens()
))
document_token_index = 0
for block in layout_document.iter_all_blocks():
block_lines = block.lines
line_count = len(block_lines)
for line_index, line in enumerate(block_lines):
line_indentation_status_feature.on_new_line()
line_tokens = line.tokens
token_count = len(line_tokens)
concatenated_line_tokens_text = ''.join([
token.text for token in line_tokens
])
line_token_position = 0
for token_index, token in enumerate(line_tokens):
yield from self.iter_model_data_for_context_layout_token_features(
ContextAwareLayoutTokenFeatures(
token,
layout_line=line,
previous_layout_token=previous_layout_token,
document_features_context=self.document_features_context,
token_index=token_index,
token_count=token_count,
document_token_index=document_token_index,
document_token_count=document_token_count,
line_index=line_index,
line_count=line_count,
concatenated_line_tokens_text=concatenated_line_tokens_text,
max_concatenated_line_tokens_length=max_concatenated_line_tokens_length,
line_token_position=line_token_position,
relative_font_size_feature=relative_font_size_feature,
line_indentation_status_feature=line_indentation_status_feature
)
)
previous_layout_token = token
line_token_position += len(token.text)
document_token_index += 1
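# Illustrative sketch (not part of the original module): a minimal concrete
# data generator. Real generators typically emit a much richer feature set;
# this only demonstrates how iter_model_data_for_context_layout_token_features
# is expected to be implemented on top of ContextAwareLayoutTokenFeatures.
class _ExampleLayoutTokenModelDataGenerator(ContextAwareLayoutTokenModelDataGenerator):
    def iter_model_data_for_context_layout_token_features(
        self,
        token_features: ContextAwareLayoutTokenFeatures
    ) -> Iterable[LayoutModelData]:
        yield token_features.get_layout_model_data([
            token_features.token_text,
            token_features.get_lower_token_text(),
            token_features.get_punctuation_type_feature(),
            token_features.get_alignment_status(),
            token_features.get_dummy_label()
        ])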
quality_prob: 0.80837 | learning_prob: 0.138841
import logging
from typing import Optional, List, Tuple
import tensorflow as tf
from sciencebeam_trainer_delft.embedding.manager import EmbeddingManager
from sciencebeam_trainer_delft.sequence_labelling.wrapper import (
DEFAULT_EMBEDDINGS_PATH,
Sequence
)
from sciencebeam_parser.app.context import AppContext
from sciencebeam_parser.models.model_impl import ModelImpl
from sciencebeam_parser.utils.lazy import LazyLoaded
LOGGER = logging.getLogger(__name__)
class SeparateSessionSequenceWrapper(Sequence):
def __init__(self, *args, **kwargs):
self._graph = tf.Graph()
self._session = tf.Session(graph=self._graph)
super().__init__(*args, **kwargs)
def load_from(self, *args, **kwargs):
with self._graph.as_default():
with self._session.as_default():
return super().load_from(*args, **kwargs)
def tag(self, *args, **kwargs):
with self._graph.as_default():
with self._session.as_default():
return super().tag(*args, **kwargs)
class DelftModelImpl(ModelImpl):
def __init__(self, model_url: str, app_context: AppContext):
self.model_url = model_url
self.app_context = app_context
self._lazy_model = LazyLoaded[Sequence](self._load_model)
def __repr__(self) -> str:
return '%s(%r, loaded=%r)' % (
type(self).__name__, self.model_url, self._lazy_model.is_loaded
)
def _load_model(self) -> Sequence:
embedding_registry_path = DEFAULT_EMBEDDINGS_PATH
embedding_manager = EmbeddingManager(
path=embedding_registry_path,
download_manager=self.app_context.download_manager
)
model = SeparateSessionSequenceWrapper(
'dummy-model',
embedding_manager=embedding_manager
)
model.load_from(self.model_url)
LOGGER.info('loaded delft model: %r', self.model_url)
return model
@property
def model(self) -> Sequence:
return self._lazy_model.get()
def preload(self):
self._lazy_model.get()
def predict_labels(
self,
texts: List[List[str]],
features: List[List[List[str]]],
output_format: Optional[str] = None
) -> List[List[Tuple[str, str]]]:
model = self.model
return model.tag(texts, features=features, output_format=output_format)
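# Illustrative sketch (not part of the original module): the expected calling
# convention for DelftModelImpl. The model URL and app_context below are
# hypothetical placeholders; each wrapped model gets its own TensorFlow graph
# and session (see SeparateSessionSequenceWrapper above), so several DELFT
# models can be loaded side by side.
#
#   model_impl = DelftModelImpl('https://example.org/models/header.tar.gz', app_context)
#   tag_result = model_impl.predict_labels(
#       texts=[['Title', 'of', 'the', 'paper']],
#       features=[[...one feature row per token...]]
#   )
#   # tag_result: one list of (token_text, label) tuples per input sequence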
package: sciencebeam-parser | path: /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/delft_model_impl.py | filename: delft_model_impl.py | quality_prob: 0.870446 | learning_prob: 0.14013
from abc import ABC, abstractmethod
import logging
import re
from typing import Iterable, Mapping, Optional, Tuple
from sciencebeam_parser.document.semantic_document import (
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticNote
)
from sciencebeam_parser.document.layout_document import EMPTY_BLOCK, LayoutBlock, LayoutTokensText
LOGGER = logging.getLogger(__name__)
class ModelSemanticExtractor(ABC):
@abstractmethod
def iter_semantic_content_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
**kwargs
) -> Iterable[SemanticContentWrapper]:
pass
def get_regex_cleaned_layout_block_with_prefix_suffix(
layout_block: LayoutBlock,
regex_pattern: Optional[str]
) -> Tuple[LayoutBlock, LayoutBlock, LayoutBlock]:
if not layout_block or not layout_block.lines or not regex_pattern:
return EMPTY_BLOCK, layout_block, EMPTY_BLOCK
layout_tokens_text = LayoutTokensText(layout_block)
text = str(layout_tokens_text)
m = re.match(regex_pattern, text, re.IGNORECASE)
if not m:
LOGGER.debug('text does not match regex: %r', text)
return EMPTY_BLOCK, layout_block, EMPTY_BLOCK
start = m.start(1)
end = m.end(1)
LOGGER.debug('start: %d, end: %d, len: %d (text: %r)', start, end, len(text), text)
return (
LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(0, start)
)),
LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(start, end)
)),
LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(end, len(text))
))
)
class SimpleModelSemanticExtractor(ModelSemanticExtractor):
def __init__(
self,
semantic_content_class_by_tag: Optional[
Mapping[str, SemanticContentFactoryProtocol]
] = None
):
super().__init__()
self.semantic_content_class_by_tag = semantic_content_class_by_tag or {}
def get_semantic_content_for_entity_name(
self,
name: str,
layout_block: LayoutBlock
) -> SemanticContentWrapper:
semantic_content_class = self.semantic_content_class_by_tag.get(name)
if semantic_content_class:
return semantic_content_class(layout_block=layout_block)
return SemanticNote(
layout_block=layout_block,
note_type=name
)
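# Illustrative sketch (not part of the original module): the fallback behaviour
# of SimpleModelSemanticExtractor. For a label without a registered factory in
# semantic_content_class_by_tag, get_semantic_content_for_entity_name wraps the
# layout block in a SemanticNote with note_type set to the entity name, so
# unmapped labels are preserved rather than dropped.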
package: sciencebeam-parser | path: /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/extract.py | filename: extract.py | quality_prob: 0.817684 | learning_prob: 0.124719
from abc import ABC, abstractmethod
from dataclasses import dataclass
import logging
from typing import Iterable, List, Mapping, NamedTuple, Optional, Sequence, Tuple, TypeVar, Union
from lxml import etree
from lxml.builder import ElementMaker
from sciencebeam_parser.utils.xml_writer import XmlTreeWriter
from sciencebeam_parser.utils.labels import get_split_prefix_label
from sciencebeam_parser.utils.tokenizer import get_tokenized_tokens
from sciencebeam_parser.document.tei.common import TEI_E, TEI_NS_PREFIX, tei_xpath
from sciencebeam_parser.document.layout_document import (
LayoutLine,
LayoutLineMeta,
LayoutToken
)
from sciencebeam_parser.models.data import (
NEW_DOCUMENT_MARKER,
LabeledLayoutModelData,
LabeledLayoutToken,
LayoutModelData,
NewDocumentMarker
)
LOGGER = logging.getLogger(__name__)
NO_NS_TEI_E = ElementMaker()
OTHER_LABELS = {'<other>', 'O'}
class ExtractInstruction:
pass
class NewLineExtractInstruction(ExtractInstruction):
pass
@dataclass
class ResetExtractInstruction(ExtractInstruction):
reset_element_path: List[str]
def get_model_data_label(model_data: LayoutModelData) -> Optional[str]:
if isinstance(model_data, LabeledLayoutModelData):
return model_data.label
return None
def is_same_layout_line(
layout_line_1: Optional[LayoutLine],
layout_line_2: Optional[LayoutLine]
) -> bool:
assert layout_line_1 is not None
assert layout_line_2 is not None
return id(layout_line_1) == id(layout_line_2)
def is_same_model_data_layout_line(
model_data_1: LayoutModelData,
model_data_2: LayoutModelData
) -> bool:
return is_same_layout_line(model_data_1.layout_line, model_data_2.layout_line)
def iter_group_model_data_by_line(
model_data_iterable: Iterable[LayoutModelData]
) -> Iterable[Sequence[LayoutModelData]]:
line_model_data_list: List[LayoutModelData] = []
for model_data in model_data_iterable:
if not line_model_data_list:
line_model_data_list.append(model_data)
continue
previous_model_data = line_model_data_list[-1]
if is_same_model_data_layout_line(
model_data,
previous_model_data
):
LOGGER.debug('same line: %r - %r', model_data, previous_model_data)
line_model_data_list.append(model_data)
continue
yield line_model_data_list
line_model_data_list = [model_data]
if line_model_data_list:
yield line_model_data_list
def iter_model_data_with_new_line_instruction(
model_data_iterable: Iterable[LayoutModelData]
) -> Iterable[Union[LayoutModelData, ExtractInstruction]]:
line_model_data_list: List[LayoutModelData] = []
for model_data in model_data_iterable:
if not line_model_data_list:
line_model_data_list.append(model_data)
continue
previous_model_data = line_model_data_list[-1]
if is_same_model_data_layout_line(
model_data,
previous_model_data
):
LOGGER.debug('same line: %r - %r', model_data, previous_model_data)
line_model_data_list.append(model_data)
continue
yield from line_model_data_list
yield NewLineExtractInstruction()
line_model_data_list = [model_data]
if line_model_data_list:
yield from line_model_data_list
yield NewLineExtractInstruction()
def get_default_note_type_for_label(label: str) -> str:
return label.strip('<>')
def is_parent_path_of(
parent_path: Sequence[str],
child_path: Sequence[str]
) -> bool:
if len(parent_path) >= len(child_path):
return False
return tuple(child_path[:len(parent_path)]) == tuple(parent_path)
def is_same_or_parent_path_of(
parent_path: Sequence[str],
child_path: Sequence[str]
) -> bool:
return (
tuple(parent_path) == tuple(child_path)
or is_parent_path_of(parent_path, child_path)
)
class TeiTrainingDataGenerator(ABC):
@abstractmethod
def get_training_tei_xml_for_multiple_model_data_iterables(
self,
model_data_iterables: Iterable[Iterable[LayoutModelData]]
) -> etree.ElementBase:
pass
@abstractmethod
def get_training_tei_xml_for_model_data_iterable(
self,
model_data_iterable: Iterable[LayoutModelData]
) -> etree.ElementBase:
pass
@abstractmethod
def get_default_tei_filename_suffix(self) -> Optional[str]:
pass
def get_default_data_filename_suffix(self) -> Optional[str]:
return None
def get_default_tei_sub_directory(self) -> Optional[str]:
pass
def get_default_data_sub_directory(self) -> Optional[str]:
pass
class AbstractTeiTrainingDataGenerator(TeiTrainingDataGenerator):
def __init__(
self,
root_training_xml_element_path: Sequence[str],
training_xml_element_path_by_label: Mapping[str, Sequence[str]],
root_tag: str = 'tei',
use_tei_namespace: bool = True,
element_maker: Optional[ElementMaker] = None,
reset_training_xml_element_path_by_label: Optional[Mapping[str, Sequence[str]]] = None,
default_tei_filename_suffix: Optional[str] = None,
default_data_filename_suffix: Optional[str] = None,
default_tei_sub_directory: Optional[str] = None,
default_data_sub_directory: Optional[str] = None
):
self.root_training_xml_element_path = root_training_xml_element_path
self.root_parent_training_xml_element_path = root_training_xml_element_path[:-1]
self.training_xml_element_path_by_label = training_xml_element_path_by_label
self.reset_training_xml_element_path_by_label = (
reset_training_xml_element_path_by_label or {}
)
self._training_xml_element_paths = {
tuple(element_path)
for label, element_path in training_xml_element_path_by_label.items()
if (
label not in OTHER_LABELS
and tuple(element_path) != tuple(root_training_xml_element_path)
)
}
self.other_element_path = training_xml_element_path_by_label.get('<other>')
if element_maker is None:
element_maker = TEI_E if use_tei_namespace else NO_NS_TEI_E
self.element_maker = element_maker
self.root_tag = root_tag
self.default_tei_filename_suffix = default_tei_filename_suffix
self.default_data_filename_suffix = default_data_filename_suffix
self.default_tei_sub_directory = default_tei_sub_directory
self.default_data_sub_directory = default_data_sub_directory
def get_default_tei_filename_suffix(self) -> Optional[str]:
return self.default_tei_filename_suffix
def get_default_data_filename_suffix(self) -> Optional[str]:
return self.default_data_filename_suffix
def get_default_tei_sub_directory(self) -> Optional[str]:
return self.default_tei_sub_directory
def get_default_data_sub_directory(self) -> Optional[str]:
return self.default_data_sub_directory
def get_training_xml_path_for_label(
self,
label: Optional[str],
current_path: Sequence[str]
) -> Sequence[str]:
if not label or label in OTHER_LABELS:
if label and self.other_element_path is not None:
return self.other_element_path
if tuple(current_path) in self._training_xml_element_paths:
LOGGER.debug(
'found current path in element paths, returning parent: %r', current_path
)
return current_path[:-1]
LOGGER.debug(
'not found current path in element paths, returning current: %r', current_path
)
return current_path
training_xml_path = self.training_xml_element_path_by_label.get(label or '')
if not training_xml_path:
note_type = get_default_note_type_for_label(label)
LOGGER.info('label not mapped, creating note: %r', label)
training_xml_path = (
list(self.root_training_xml_element_path) + [f'note[@type="{note_type}"]']
)
return training_xml_path
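    # Illustrative sketch (not part of the original module): a label that has no
    # entry in training_xml_element_path_by_label falls back to a <note> element
    # under the root training path, e.g. a hypothetical unmapped '<funding>' label
    # would resolve to:
    #   list(self.root_training_xml_element_path) + ['note[@type="funding"]']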
def get_reset_training_xml_path_for_label(
self,
label: Optional[str],
prefix: Optional[str]
) -> Optional[Sequence[str]]:
if prefix != 'B' or not label:
return None
return self.reset_training_xml_element_path_by_label.get(label)
def write_xml_for_model_data_with_instructions_iterable(
self,
xml_writer: XmlTreeWriter,
model_data_or_instruction_iterable: Iterable[Union[LayoutModelData, ExtractInstruction]]
):
default_path = xml_writer.current_path
LOGGER.debug('default_path: %r', default_path)
pending_whitespace = ''
prev_label: str = ''
pending_reset_path: Optional[List[str]] = None
for model_data_or_instruction in model_data_or_instruction_iterable:
if isinstance(model_data_or_instruction, LayoutModelData):
model_data = model_data_or_instruction
layout_token = model_data.layout_token
assert layout_token is not None
prefixed_label = get_model_data_label(model_data)
prefix, label = get_split_prefix_label(prefixed_label or '')
xml_element_path = self.get_training_xml_path_for_label(
label,
current_path=xml_writer.current_path
)
reset_path = self.get_reset_training_xml_path_for_label(
label=label,
prefix=prefix
)
if pending_reset_path is not None:
reset_path = pending_reset_path
pending_reset_path = None
LOGGER.debug(
'label: %r (%r: %r; reset_path=%r)',
label, prefix, xml_element_path, reset_path
)
if reset_path is not None:
xml_writer.require_path(reset_path)
elif (
prev_label not in OTHER_LABELS
and pending_whitespace
and not is_same_or_parent_path_of(xml_writer.current_path, xml_element_path)
):
LOGGER.debug(
'closing element before adding whitespace, %r -> %r',
xml_writer.current_path, xml_element_path
)
xml_writer.require_path(xml_writer.current_path[:-1])
elif prefix == 'B' and label not in OTHER_LABELS:
xml_writer.require_path(xml_element_path[:-1])
xml_writer.require_path_or_below(xml_element_path)
xml_writer.append_text(pending_whitespace)
pending_whitespace = ''
xml_writer.require_path(xml_element_path)
xml_writer.append_text(layout_token.text)
pending_whitespace = layout_token.whitespace
prev_label = label
elif isinstance(model_data_or_instruction, ResetExtractInstruction):
pending_reset_path = model_data_or_instruction.reset_element_path
elif isinstance(model_data_or_instruction, NewLineExtractInstruction):
xml_writer.append(self.element_maker('lb'))
pending_whitespace = '\n'
xml_writer.require_path(default_path)
xml_writer.append_text(pending_whitespace)
def iter_model_data_or_instruction_for_model_data_iterable(
self,
model_data_iterable: Iterable[LayoutModelData]
) -> Iterable[Union[LayoutModelData, ExtractInstruction]]:
return iter_model_data_with_new_line_instruction(
model_data_iterable
)
def write_xml_for_model_data_iterable(
self,
xml_writer: XmlTreeWriter,
model_data_iterable: Iterable[LayoutModelData]
):
self.write_xml_for_model_data_with_instructions_iterable(
xml_writer,
self.iter_model_data_or_instruction_for_model_data_iterable(
model_data_iterable
)
)
def _get_xml_writer(self) -> XmlTreeWriter:
return XmlTreeWriter(
self.element_maker(self.root_tag),
element_maker=self.element_maker
)
def get_post_processed_xml_root(self, xml_root: etree.ElementBase):
return xml_root
def get_training_tei_xml_for_multiple_model_data_iterables(
self,
model_data_iterables: Iterable[Iterable[LayoutModelData]]
) -> etree.ElementBase:
xml_writer = self._get_xml_writer()
xml_writer.require_path(self.root_parent_training_xml_element_path)
for model_data_iterable in model_data_iterables:
xml_writer.require_path(self.root_parent_training_xml_element_path)
xml_writer.require_path(self.root_training_xml_element_path)
self.write_xml_for_model_data_iterable(
xml_writer,
model_data_iterable=model_data_iterable
)
return self.get_post_processed_xml_root(xml_writer.root)
def get_training_tei_xml_for_model_data_iterable(
self,
model_data_iterable: Iterable[LayoutModelData]
) -> etree.ElementBase:
return self.get_training_tei_xml_for_multiple_model_data_iterables(
[model_data_iterable]
)
TEI_LB = 'lb'
LINE_BREAK_TAGS = {
TEI_LB,
TEI_NS_PREFIX + TEI_LB
}
def _get_tag_expression_for_element(element: etree.ElementBase) -> str:
if not element.attrib:
return element.tag
if len(element.attrib) > 1:
raise ValueError('only supporting up to one attribute')
key, value = list(element.attrib.items())[0]
return '{tag}[@{key}="{value}"]'.format(tag=element.tag, key=key, value=value)
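# Illustrative sketch (not part of the original module): the tag expression
# produced for an element with a single attribute, e.g. an element written as
# <note type="abstract"/> yields 'note[@type="abstract"]', matching the path
# syntax used by get_training_xml_path_for_label above.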
class TeiTrainingElementPath(NamedTuple):
element_list: Sequence[etree.ElementBase] = tuple([])
def get_path(self) -> Sequence[str]:
return [
_get_tag_expression_for_element(element)
for element in self.element_list
]
def append(self, element: etree.ElementBase) -> 'TeiTrainingElementPath':
return TeiTrainingElementPath(
list(self.element_list) + [element]
)
EMPTY_TEI_TRAINING_ELEMENT_PATH = TeiTrainingElementPath()
class TeiTrainingText(NamedTuple):
text: str
path: TeiTrainingElementPath
is_start: bool
class TeiTrainingLine(NamedTuple):
text_list: Sequence[TeiTrainingText]
def is_line_break_element(element: etree.ElementBase) -> bool:
return element.tag in LINE_BREAK_TAGS
def _iter_flat_tei_training_text_from_element(
parent_element: etree.ElementBase,
current_path: TeiTrainingElementPath = EMPTY_TEI_TRAINING_ELEMENT_PATH
) -> Iterable[Union[TeiTrainingText, ExtractInstruction]]:
LOGGER.debug('current_path: %s', current_path)
is_start = True
if parent_element.text:
yield TeiTrainingText(
text=parent_element.text,
path=current_path,
is_start=is_start
)
is_start = False
for child_element in parent_element:
if is_line_break_element(child_element):
yield NewLineExtractInstruction()
else:
child_path = current_path.append(child_element)
yield from _iter_flat_tei_training_text_from_element(
child_element,
child_path
)
if child_element.tail:
yield TeiTrainingText(
text=child_element.tail,
path=current_path,
is_start=is_start
)
is_start = False
def _iter_tei_training_lines_from_element(
parent_element: etree.ElementBase,
current_path: TeiTrainingElementPath = EMPTY_TEI_TRAINING_ELEMENT_PATH
) -> Iterable[TeiTrainingLine]:
line_text_list = []
for item in _iter_flat_tei_training_text_from_element(
parent_element,
current_path
):
if isinstance(item, TeiTrainingText):
line_text_list.append(item)
elif isinstance(item, NewLineExtractInstruction):
yield TeiTrainingLine(line_text_list)
line_text_list = []
else:
raise RuntimeError('unrecognised item: %r' % item)
if line_text_list:
yield TeiTrainingLine(line_text_list)
T = TypeVar('T')
def iter_group_doc_items_with_new_doc_marker(
flat_item_iterable: Iterable[Union[T, NewDocumentMarker]]
) -> Iterable[List[T]]:
doc_items: List[T] = []
for item in flat_item_iterable:
if isinstance(item, NewDocumentMarker):
yield doc_items
doc_items = []
continue
doc_items.append(item)
def iter_tag_result_for_flat_tag_result(
flat_tag_result_iterable: Iterable[Union[Tuple[str, str], NewDocumentMarker]]
) -> Iterable[List[Tuple[str, str]]]:
doc_tag_result: List[Tuple[str, str]] = []
for token_tag_result in flat_tag_result_iterable:
if isinstance(token_tag_result, NewDocumentMarker):
yield doc_tag_result
doc_tag_result = []
continue
doc_tag_result.append(token_tag_result)
def get_tag_result_for_flat_tag_result(
flat_tag_result_iterable: Iterable[Union[Tuple[str, str], NewDocumentMarker]]
) -> List[List[Tuple[str, str]]]:
return list(iter_tag_result_for_flat_tag_result(flat_tag_result_iterable))
class TrainingTeiParser(ABC):
@abstractmethod
def parse_training_tei_to_tag_result(
self,
tei_root: etree.ElementBase
) -> List[List[Tuple[str, str]]]:
pass
@abstractmethod
def parse_training_tei_to_labeled_layout_tokens_list(
self,
tei_root: etree.ElementBase
) -> Sequence[Sequence[LabeledLayoutToken]]:
pass
def get_element_path_with_prefix(
element_path: Sequence[str],
prefix: str
) -> Sequence[str]:
return [
prefix + item
for item in element_path
]
class AbstractTrainingTeiParser(TrainingTeiParser):
def __init__(
self,
root_training_xml_element_path: Sequence[str],
training_xml_element_path_by_label: Mapping[str, Sequence[str]],
use_tei_namespace: bool,
line_as_token: bool = False,
) -> None:
tag_namespace_prefix = TEI_NS_PREFIX if use_tei_namespace else ''
if use_tei_namespace:
root_training_xml_element_path = get_element_path_with_prefix(
root_training_xml_element_path,
'tei:'
)
self.label_by_relative_element_path_map = {
tuple(
get_element_path_with_prefix(
element_path[len(root_training_xml_element_path):],
tag_namespace_prefix
)
): label
for label, element_path in training_xml_element_path_by_label.items()
}
for element_path in list(self.label_by_relative_element_path_map.keys()):
if len(element_path) < 2:
continue
parent_element_path = element_path[:-1]
if parent_element_path not in self.label_by_relative_element_path_map:
self.label_by_relative_element_path_map[parent_element_path] = 'O'
self.root_training_xml_xpath = './' + '/'.join(root_training_xml_element_path)
self.line_as_token = line_as_token
def _get_label_for_element_path(
self,
tei_training_element_path: TeiTrainingElementPath,
text: str
) -> str:
element_path = tei_training_element_path.get_path()
label = self.label_by_relative_element_path_map.get(tuple(element_path))
if not label:
raise RuntimeError(
'label not found for %r (available: %r; for text: %r)' % (
element_path,
self.label_by_relative_element_path_map.keys(),
text
)
)
return label
def iter_parse_training_tei_to_flat_labeled_layout_tokens(
self,
tei_root: etree.ElementBase
) -> Iterable[Union[LabeledLayoutToken, NewDocumentMarker]]:
for text_node in tei_xpath(tei_root, self.root_training_xml_xpath):
tei_training_lines = list(
_iter_tei_training_lines_from_element(
text_node, EMPTY_TEI_TRAINING_ELEMENT_PATH
)
)
LOGGER.debug('tei_training_lines: %r', tei_training_lines)
prefix = ''
prev_label = ''
for line_index, line in enumerate(tei_training_lines):
line_meta = LayoutLineMeta(line_id=1 + line_index)
for text in line.text_list:
if text.text.isspace():
continue
token_count = 0
if text.path.element_list:
label = self._get_label_for_element_path(text.path, text=text.text)
if prev_label != label:
prefix = 'B-' if text.is_start else 'I-'
else:
label = 'O'
prefix = ''
if label in OTHER_LABELS:
prefix = ''
prev_label = label
for token_text in get_tokenized_tokens(text.text):
yield LabeledLayoutToken(
label=prefix + label,
layout_token=LayoutToken(
text=token_text,
line_meta=line_meta
)
)
token_count += 1
if prefix:
prefix = 'I-'
if self.line_as_token:
break
if token_count and self.line_as_token:
# we are only outputting the first token of each line
break
yield NEW_DOCUMENT_MARKER
def iter_parse_training_tei_to_flat_tag_result(
self,
tei_root: etree.ElementBase
) -> Iterable[Union[Tuple[str, str], NewDocumentMarker]]:
for item in self.iter_parse_training_tei_to_flat_labeled_layout_tokens(
tei_root
):
if isinstance(item, NewDocumentMarker):
yield item
continue
assert isinstance(item, LabeledLayoutToken)
yield item.layout_token.text, item.label
def parse_training_tei_to_tag_result(
self,
tei_root: etree.ElementBase
) -> List[List[Tuple[str, str]]]:
return list(iter_group_doc_items_with_new_doc_marker(
self.iter_parse_training_tei_to_flat_tag_result(
tei_root
)
))
def parse_training_tei_to_labeled_layout_tokens_list(
self,
tei_root: etree.ElementBase
) -> Sequence[Sequence[LabeledLayoutToken]]:
return list(iter_group_doc_items_with_new_doc_marker(
self.iter_parse_training_tei_to_flat_labeled_layout_tokens(
tei_root
)
))
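# Illustrative sketch (not part of the original module): the generator and
# parser above are intended to round-trip. A hypothetical pairing, where
# `generator` is an AbstractTeiTrainingDataGenerator subclass and `parser` an
# AbstractTrainingTeiParser subclass configured with the same element path
# mappings:
#
#   tei_root = generator.get_training_tei_xml_for_model_data_iterable(model_data)
#   tag_result = parser.parse_training_tei_to_tag_result(tei_root)
#   # tag_result: one list of (token_text, prefixed_label) tuples per document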
package: sciencebeam-parser | path: /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/training_data.py | filename: training_data.py | quality_prob: 0.843444 | learning_prob: 0.222605
import logging
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass, field
from typing import (
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union
)
from sciencebeam_trainer_delft.sequence_labelling.reader import load_data_crf_lines
from sciencebeam_parser.utils.labels import get_split_prefix_label, strip_tag_prefix
from sciencebeam_parser.document.layout_document import (
LayoutToken,
LayoutLine,
LayoutBlock,
LayoutPage,
LayoutDocument
)
from sciencebeam_parser.models.data import (
NEW_DOCUMENT_MARKER,
AppFeaturesContext,
DocumentFeaturesContext,
LabeledLayoutModelData,
LabeledLayoutToken,
LayoutModelData,
ModelDataGenerator,
NewDocumentMarker
)
from sciencebeam_parser.models.extract import ModelSemanticExtractor
from sciencebeam_parser.models.training_data import TeiTrainingDataGenerator, TrainingTeiParser
from sciencebeam_parser.document.semantic_document import SemanticContentWrapper
from sciencebeam_parser.models.model_impl import ModelImpl, T_ModelImplFactory
from sciencebeam_parser.utils.lazy import LazyLoaded, Preloadable
LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
U = TypeVar('U')
@dataclass
class LayoutModelLabel:
label: str
label_token_text: str
layout_line: Optional[LayoutLine] = field(repr=False, default=None)
layout_token: Optional[LayoutToken] = field(repr=False, default=None)
def iter_entities_including_other(seq: List[str]) -> Iterable[Tuple[str, int, int]]:
"""
    Similar to get_entities, but also includes the other (`O`) tag
"""
prev_tag = 'O'
prev_start = 0
for index, prefixed_tag in enumerate(seq):
prefix, tag = get_split_prefix_label(prefixed_tag)
if prefix == 'B' or tag != prev_tag:
if prev_start < index:
yield prev_tag, prev_start, index - 1
prev_tag = tag
prev_start = index
if prev_start < len(seq):
yield prev_tag, prev_start, len(seq) - 1
def get_entities_including_other(seq: List[str]) -> List[Tuple[str, int, int]]:
return list(iter_entities_including_other(seq))
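# Example (illustrative; assumes get_split_prefix_label splits the 'B-' / 'I-' prefix):
#   get_entities_including_other(['B-<title>', 'I-<title>', 'O', 'O', 'B-<author>'])
#   == [('<title>', 0, 1), ('O', 2, 3), ('<author>', 4, 4)]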
class LayoutDocumentLabelResult:
def __init__(
self,
layout_document: LayoutDocument,
layout_model_label_iterable: Iterable[LayoutModelLabel]
):
self.layout_document = layout_document
self.layout_model_label_list = list(layout_model_label_iterable)
self.layout_document_labels_by_label: Dict[str, List[LayoutModelLabel]] = (
defaultdict(list)
)
for layout_model_label in self.layout_model_label_list:
tag_without_prefix = strip_tag_prefix(layout_model_label.label)
self.layout_document_labels_by_label[tag_without_prefix].append(
layout_model_label
)
def get_available_labels(self) -> Set[str]:
return set(self.layout_document_labels_by_label.keys())
def get_layout_document_labels_by_labels(self, labels: List[str]) -> List[LayoutModelLabel]:
if not labels:
return []
if len(labels) == 1:
return self.layout_document_labels_by_label.get(labels[0], [])
result: List[LayoutModelLabel] = []
for label in labels:
result.extend(self.layout_document_labels_by_label.get(label, []))
return result
def get_filtered_document_by_label(self, label: str) -> LayoutDocument:
return self.get_filtered_document_by_labels([label])
def get_filtered_document_by_labels(
self,
labels: List[str]
): # pylint: disable=too-many-branches
layout_document = LayoutDocument(pages=[])
layout_document_labels = self.get_layout_document_labels_by_labels(labels)
if not layout_document_labels:
LOGGER.warning(
'no layout_lines_to_include found for: %r, available keys=%r',
labels, self.layout_document_labels_by_label.keys()
)
return layout_document
layout_token_ids_to_include = {
id(layout_document_label.layout_token)
for layout_document_label in layout_document_labels
if layout_document_label.layout_token
}
LOGGER.debug('layout_tokens_to_include: %s', layout_token_ids_to_include)
layout_line_ids_to_include: Set[int] = set()
if not layout_token_ids_to_include:
layout_line_ids_to_include = {
id(layout_document_label.layout_line)
for layout_document_label in layout_document_labels
if layout_document_label.layout_line
}
LOGGER.debug('layout_line_ids_to_include: %s', layout_line_ids_to_include)
result_page: Optional[LayoutPage] = None
for page in self.layout_document.pages: # pylint: disable=too-many-nested-blocks
result_page = None
result_block: Optional[LayoutBlock] = None
for block in page.blocks:
result_block = None
for line in block.lines:
accepted_line: Optional[LayoutLine] = None
if layout_token_ids_to_include:
accepted_tokens: List[LayoutToken] = []
for token in line.tokens:
if id(token) in layout_token_ids_to_include:
accepted_tokens.append(token)
if not accepted_tokens:
continue
                        if len(line.tokens) == len(accepted_tokens):
accepted_line = line
else:
accepted_line = LayoutLine(tokens=accepted_tokens)
else:
if id(line) not in layout_line_ids_to_include:
continue
accepted_line = line
if result_page is None:
result_page = LayoutPage(blocks=[])
layout_document.pages.append(result_page)
if result_block is None:
result_block = LayoutBlock(lines=[])
result_page.blocks.append(result_block)
result_block.lines.append(accepted_line)
return layout_document
def iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens: Iterable[LabeledLayoutToken]
) -> Iterable[Tuple[str, LayoutBlock]]:
layout_tokens = [result.layout_token for result in labeled_layout_tokens]
labels = [result.label for result in labeled_layout_tokens]
LOGGER.debug('layout_tokens: %s', layout_tokens)
LOGGER.debug('labels: %s', labels)
for tag, start, end in get_entities_including_other(list(labels)):
yield tag, LayoutBlock.for_tokens(layout_tokens[start:end + 1])
def iter_entity_values_predicted_labels(
tag_result: List[Tuple[str, str]]
) -> Iterable[Tuple[str, str]]:
tokens, labels = zip(*tag_result)
LOGGER.debug('tokens: %s', tokens)
LOGGER.debug('labels: %s', labels)
for tag, start, end in get_entities_including_other(list(labels)):
yield tag, ' '.join(tokens[start:end + 1])
def iter_labeled_layout_token_for_layout_model_label(
layout_model_label_iterable: Iterable[LayoutModelLabel]
) -> Iterable[LabeledLayoutToken]:
for layout_model_label in layout_model_label_iterable:
layout_token = layout_model_label.layout_token
assert layout_token is not None
yield LabeledLayoutToken(
layout_model_label.label,
layout_token
)
def iter_data_lines_for_model_data_iterables(
model_data_iterables: Iterable[Iterable[LayoutModelData]]
) -> Iterable[str]:
for index, model_data_list in enumerate(model_data_iterables):
if index > 0:
yield ''
for model_data in model_data_list:
yield model_data.data_line
class Model(ABC, Preloadable):
def __init__(
self,
model_impl_factory: Optional[T_ModelImplFactory],
model_config: Optional[dict] = None
) -> None:
self._model_impl_factory = model_impl_factory
self._lazy_model_impl = LazyLoaded[ModelImpl](self._load_model_impl)
self.model_config = model_config or {}
def __repr__(self) -> str:
return '%s(model_config=%r, loaded=%r)' % (
type(self).__name__, self.model_config, self._lazy_model_impl.is_loaded
)
@abstractmethod
def get_data_generator(
self,
document_features_context: DocumentFeaturesContext
) -> ModelDataGenerator:
pass
# @abstractmethod
def get_semantic_extractor(self) -> ModelSemanticExtractor:
raise NotImplementedError()
# @abstractmethod
def get_tei_training_data_generator(self) -> TeiTrainingDataGenerator:
raise NotImplementedError()
# @abstractmethod
def get_training_tei_parser(self) -> TrainingTeiParser:
raise NotImplementedError()
def _load_model_impl(self) -> ModelImpl:
assert self._model_impl_factory, 'model impl factory required'
LOGGER.info('creating model impl: %r', self._model_impl_factory)
model_impl = self._model_impl_factory()
if not isinstance(model_impl, ModelImpl):
raise TypeError('invalid model impl type: %r' % model_impl)
return model_impl
@property
def model_impl(self) -> ModelImpl:
was_loaded = self._lazy_model_impl.is_loaded
model_impl = self._lazy_model_impl.get()
if was_loaded:
LOGGER.info('model impl already loaded: %r', model_impl)
return model_impl
def preload(self):
model_impl = self.model_impl
model_impl.preload()
def iter_semantic_content_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
**kwargs
) -> Iterable[SemanticContentWrapper]:
return self.get_semantic_extractor().iter_semantic_content_for_entity_blocks(
entity_tokens,
**kwargs
)
def predict_labels(
self,
texts: List[List[str]],
features: List[List[List[str]]],
output_format: Optional[str] = None
) -> List[List[Tuple[str, str]]]:
return self.model_impl.predict_labels(texts, features, output_format)
def _iter_flat_label_model_data_lists_to( # pylint: disable=too-many-locals
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]],
item_factory: Callable[[str, LayoutModelData], T]
) -> Iterable[Union[T, NewDocumentMarker]]:
# Note: currently we do need a list
model_data_lists = list(model_data_list_iterable)
if not model_data_lists:
return
data_lines = list(iter_data_lines_for_model_data_iterables(
model_data_lists
))
texts, features = load_data_crf_lines(data_lines)
texts = texts.tolist()
tag_result = self.predict_labels(
texts=texts, features=features, output_format=None
)
if not tag_result:
return
if len(tag_result) != len(model_data_lists):
raise AssertionError('tag result does not match number of docs: %d != %d' % (
len(tag_result), len(model_data_lists)
))
for index, (doc_tag_result, model_data_list) in enumerate(
zip(tag_result, model_data_lists)
):
if index > 0:
yield NEW_DOCUMENT_MARKER
if len(doc_tag_result) != len(model_data_list):
raise AssertionError('doc tag result does not match data: %d != %d' % (
len(doc_tag_result), len(model_data_list)
))
for token_tag_result, token_model_data in zip(doc_tag_result, model_data_list):
label_token_text, token_label = token_tag_result
if label_token_text != token_model_data.label_token_text:
raise AssertionError(
f'actual: {repr(label_token_text)}'
f', expected: {repr(token_model_data.label_token_text)}'
)
yield item_factory(
token_label,
token_model_data
)
def _iter_stacked_label_model_data_lists_to(
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]],
item_factory: Callable[[str, LayoutModelData], T]
) -> Iterable[Sequence[T]]:
# Note: currently we do need a list
model_data_lists = list(model_data_list_iterable)
if not model_data_lists:
return
doc_items: List[T] = []
result_doc_count = 0
for item in self._iter_flat_label_model_data_lists_to(
model_data_lists,
item_factory=item_factory
):
if isinstance(item, NewDocumentMarker):
yield doc_items
doc_items = []
result_doc_count += 1
continue
doc_items.append(item)
if result_doc_count < len(model_data_lists):
yield doc_items
def iter_label_layout_documents(
self,
layout_documents: List[LayoutDocument],
app_features_context: AppFeaturesContext
) -> Iterable[List[LayoutModelLabel]]:
doc_layout_model_labels: List[LayoutModelLabel] = []
result_doc_count = 0
for layout_model_label in self._iter_label_layout_documents(
layout_documents,
app_features_context=app_features_context
):
if isinstance(layout_model_label, NewDocumentMarker):
yield doc_layout_model_labels
doc_layout_model_labels = []
result_doc_count += 1
continue
doc_layout_model_labels.append(layout_model_label)
if result_doc_count < len(layout_documents):
yield doc_layout_model_labels
def iter_label_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> Iterable[LayoutModelLabel]:
for layout_model_label in self._iter_label_layout_documents(
[layout_document],
app_features_context=app_features_context
):
assert isinstance(layout_model_label, LayoutModelLabel)
yield layout_model_label
def _iter_label_layout_documents( # pylint: disable=too-many-locals
self,
layout_documents: Iterable[LayoutDocument],
app_features_context: AppFeaturesContext
) -> Iterable[Union[LayoutModelLabel, NewDocumentMarker]]:
data_generator = self.get_data_generator(
document_features_context=DocumentFeaturesContext(
app_features_context=app_features_context
)
)
model_data_lists = [
list(data_generator.iter_model_data_for_layout_document(
layout_document
))
for layout_document in layout_documents
]
return self._iter_flat_label_model_data_lists_to(
model_data_lists,
lambda label, model_data: LayoutModelLabel(
label=label,
label_token_text=model_data.label_token_text,
layout_line=model_data.layout_line,
layout_token=model_data.layout_token
)
)
def iter_labeled_model_data_list_for_model_data_list_iterable(
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]]
) -> Iterable[Sequence[LabeledLayoutModelData]]:
return self._iter_stacked_label_model_data_lists_to(
model_data_list_iterable,
lambda label, model_data: LabeledLayoutModelData.from_model_data(
model_data,
label=label
)
)
def get_label_layout_document_result(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> LayoutDocumentLabelResult:
return LayoutDocumentLabelResult(
layout_document=layout_document,
layout_model_label_iterable=self.iter_label_layout_document(
layout_document,
app_features_context=app_features_context
)
)
def iter_predict_labels_for_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> Iterable[LabeledLayoutToken]:
# Note: this should get merged with Model.iter_label_layout_document
yield from iter_labeled_layout_token_for_layout_model_label(
self.iter_label_layout_document(
layout_document,
app_features_context=app_features_context
)
)
def predict_labels_for_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> List[LabeledLayoutToken]:
return list(self.iter_predict_labels_for_layout_document(
layout_document,
app_features_context=app_features_context
))
def predict_labels_for_layout_documents(
self,
layout_documents: List[LayoutDocument],
app_features_context: AppFeaturesContext
) -> List[List[LabeledLayoutToken]]:
return [
list(iter_labeled_layout_token_for_layout_model_label(
layout_model_labels
))
for layout_model_labels in self.iter_label_layout_documents(
layout_documents,
app_features_context=app_features_context
)
]
def iter_entity_layout_blocks_for_labeled_layout_tokens(
self,
labeled_layout_tokens: Iterable[LabeledLayoutToken]
) -> Iterable[Tuple[str, LayoutBlock]]:
return iter_entity_layout_blocks_for_labeled_layout_tokens(labeled_layout_tokens)
def iter_semantic_content_for_labeled_layout_tokens(
self,
labeled_layout_tokens: Iterable[LabeledLayoutToken],
**kwargs
) -> Iterable[SemanticContentWrapper]:
return self.iter_semantic_content_for_entity_blocks(
self.iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens
),
**kwargs
)
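# Illustrative usage sketch (not part of this module): `model`, `layout_document` and
# `app_features_context` are assumed to be created elsewhere by a concrete Model subclass
# and the application setup.
#
#     labeled_layout_tokens = model.predict_labels_for_layout_document(
#         layout_document,
#         app_features_context=app_features_context
#     )
#     semantic_content_iterable = model.iter_semantic_content_for_labeled_layout_tokens(
#         labeled_layout_tokens
#     )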
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/model.py
|
model.py
|
| 0.782164 | 0.146118 |
import os
import logging
import threading
from typing import Iterable, Optional, List, Tuple
import numpy as np
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti import WapitiWrapper
from sciencebeam_trainer_delft.utils.io import copy_file
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti_adapters import (
    WapitiModelAdapter
)
from sciencebeam_parser.app.context import AppContext
from sciencebeam_parser.models.model_impl import ModelImpl
from sciencebeam_parser.utils.download import download_if_url_from_alternatives
from sciencebeam_parser.utils.lazy import LazyLoaded
LOGGER = logging.getLogger(__name__)
class WapitiServiceModelAdapter(WapitiModelAdapter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._lock = threading.Lock()
self._wapiti_timeout = 20.0
self._wapiti_timeout_counter = 0
self._wapiti_trial_count = 10
@staticmethod
def load_from(
model_path: str,
download_manager: DownloadManager,
            wapiti_binary_path: Optional[str] = None) -> 'WapitiModelAdapter':
# overriding method to return WapitiServiceModelAdapter
model_file_path = os.path.join(model_path, 'model.wapiti.gz')
model_file_paths = [model_file_path, os.path.splitext(model_file_path)[0]]
LOGGER.debug('checking for existing local model files: %r', model_file_paths)
local_model_file_path = download_if_url_from_alternatives(
download_manager=download_manager,
alternative_file_url_or_path_list=model_file_paths
)
LOGGER.debug('local_model_file_path: %s', local_model_file_path)
if local_model_file_path.endswith('.gz'):
local_uncompressed_file_path = os.path.splitext(local_model_file_path)[0]
copy_file(local_model_file_path, local_uncompressed_file_path, overwrite=False)
local_model_file_path = local_uncompressed_file_path
return WapitiServiceModelAdapter(
WapitiWrapper(
wapiti_binary_path=wapiti_binary_path
),
model_file_path=local_model_file_path,
model_path=model_path
)
def stop(self):
wapiti_model = self._wapiti_model
if wapiti_model is None:
return
self._wapiti_model = None
LOGGER.info('stopping wapiti process: %s', wapiti_model.process.pid)
wapiti_model.process.kill()
def on_wapiti_timeout(self):
self._wapiti_timeout_counter += 1
LOGGER.info(
'wapiti timeout (%s, counter=%d)',
self._wapiti_timeout, self._wapiti_timeout_counter
)
self.stop()
def _get_tag_results_with_timeout(
self,
x: np.ndarray,
features: np.ndarray,
        output_format: Optional[str] = None
) -> List[List[Tuple[str, str]]]:
prev_wapiti_timeout_counter = self._wapiti_timeout_counter
timer = threading.Timer(self._wapiti_timeout, self.on_wapiti_timeout)
timer.start()
result = list(self.iter_tag_using_model(x, features, output_format))
timer.cancel()
if self._wapiti_timeout_counter != prev_wapiti_timeout_counter:
raise TimeoutError('wapiti timeout received during processing')
return result
def _get_tag_results_with_timeout_and_retry(
self,
x: np.ndarray,
features: np.ndarray,
        output_format: Optional[str] = None
) -> List[List[Tuple[str, str]]]:
attempt = 0
while True:
try:
return self._get_tag_results_with_timeout(x, features, output_format)
except Exception as exc: # pylint: disable=broad-except
attempt += 1
LOGGER.warning(
'received error processing data: %r, attempt=%d/%d, texts=%r',
exc, attempt, self._wapiti_trial_count, list(x), exc_info=True
)
if attempt >= self._wapiti_trial_count:
LOGGER.warning('final attempt, re-raising exception')
raise
def iter_tag(
self,
x: np.ndarray,
features: np.ndarray,
        output_format: Optional[str] = None
) -> Iterable[List[Tuple[str, str]]]:
# by default, WapitiModelAdapter will run the binary for each call
# using "iter_tag_using_model" will result in a wapiti process
# that we communicate with via stdin / stdout
with self._lock:
yield from self._get_tag_results_with_timeout_and_retry(x, features, output_format)
class WapitiModelImpl(ModelImpl):
def __init__(self, model_url: str, app_context: AppContext):
self.model_url = model_url
self.app_context = app_context
self._lazy_model = LazyLoaded[WapitiModelAdapter](self._load_model)
def __repr__(self) -> str:
return '%s(%r, loaded=%r)' % (
type(self).__name__, self.model_url, self._lazy_model.is_loaded
)
    def _load_model(self) -> WapitiModelAdapter:
model = WapitiServiceModelAdapter.load_from(
self.model_url,
wapiti_binary_path=self.app_context.lazy_wapiti_binary_wrapper.get_binary_path(),
download_manager=self.app_context.download_manager
)
LOGGER.info('loaded wapiti model: %r', self.model_url)
return model
@property
    def model(self) -> WapitiModelAdapter:
return self._lazy_model.get()
def preload(self):
self._lazy_model.get()
def predict_labels(
self,
texts: List[List[str]],
features: List[List[List[str]]],
output_format: Optional[str] = None
) -> List[List[Tuple[str, str]]]:
model = self.model
result = model.tag(texts, features=features, output_format=output_format)
token_count = sum(len(text) for text in texts)
LOGGER.info(
'predicted labels using wapiti model (document count: %d, token count: %d)',
len(texts), token_count
)
return result
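# Illustrative usage sketch (assumptions: `model_url` and `app_context` are provided by the
# application configuration; the feature values below are placeholders only):
#
#     model_impl = WapitiModelImpl(model_url, app_context=app_context)
#     model_impl.preload()
#     tag_result = model_impl.predict_labels(
#         texts=[['Some', 'tokens']],
#         features=[[['f0'], ['f0']]]
#     )
#     # tag_result contains one list of (token_text, label) tuples per input document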
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/wapiti_model_impl.py
|
wapiti_model_impl.py
|
| 0.719482 | 0.154887 |
import logging
import re
from typing import Counter, Iterable, List, Optional, Set
from sciencebeam_parser.document.layout_document import (
LayoutBlock,
LayoutDocument,
LayoutLine,
LayoutToken
)
from sciencebeam_parser.models.data import (
ContextAwareLayoutTokenFeatures,
DocumentFeaturesContext,
ModelDataGenerator,
LayoutModelData,
feature_linear_scaling_int,
_LINESCALE,
get_str_bool_feature_value
)
LOGGER = logging.getLogger(__name__)
NBSP = '\u00A0'
def format_feature_text(text: str) -> str:
return re.sub(" |\t", NBSP, text.strip())
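# Note (assumed rationale): replacing spaces/tabs with NBSP keeps the whole-line text as a
# single space-separated column when feature values are later joined with ' ' into a data line.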
NBBINS_POSITION = 12
EMPTY_LAYOUT_TOKEN = LayoutToken('')
EMPTY_LAYOUT_LINE = LayoutLine([])
def get_block_status(line_index: int, line_count: int) -> str:
return (
'BLOCKSTART' if line_index == 0
else (
'BLOCKEND'
if line_index == line_count - 1
else 'BLOCKIN'
)
)
def get_page_status(
block_index: int, block_count: int,
is_first_block_token: bool,
is_last_block_token: bool
) -> str:
return (
'PAGESTART' if block_index == 0 and is_first_block_token
else (
'PAGEEND'
if block_index == block_count - 1 and is_last_block_token
else 'PAGEIN'
)
)
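# Examples (illustrative):
#   get_block_status(0, 3) == 'BLOCKSTART'; get_block_status(1, 3) == 'BLOCKIN'
#   get_block_status(2, 3) == 'BLOCKEND'
#   get_page_status(0, 2, is_first_block_token=True, is_last_block_token=False) == 'PAGESTART'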
# based on:
# https://github.com/kermitt2/grobid/blob/0.6.2/grobid-core/src/main/java/org/grobid/core/features/FeatureFactory.java#L359-L367
def get_text_pattern(text: str) -> str:
    # Note: the original code is meant to shadow digits, but they are actually removed here
return re.sub(r'[^a-zA-Z ]', '', text).lower()
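# Example (illustrative): get_text_pattern('Page 12') == 'page '
# (digits and punctuation are removed, remaining letters are lower-cased, spaces are kept)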
class SegmentationLineFeatures(ContextAwareLayoutTokenFeatures):
def __init__(
self,
document_features_context: DocumentFeaturesContext,
layout_token: LayoutToken = EMPTY_LAYOUT_TOKEN
):
super().__init__(
layout_token,
document_features_context=document_features_context,
layout_line=EMPTY_LAYOUT_LINE
)
self.line_text = ''
self.second_token_text = ''
self.page_blocks: List[LayoutBlock] = []
self.page_block_index: int = 0
self.block_lines: List[LayoutLine] = []
self.block_line_index: int = 0
self.previous_layout_token: Optional[LayoutToken] = None
self.max_block_line_text_length = 0
self.document_token_count = 0
self.document_token_index = 0
self.is_repetitive_pattern: bool = False
self.is_first_repetitive_pattern: bool = False
def get_block_status(self) -> str:
return get_block_status(self.block_line_index, len(self.block_lines))
def get_page_status(self) -> str:
return get_page_status(
self.page_block_index, len(self.page_blocks),
is_first_block_token=self.block_line_index == 0,
is_last_block_token=self.block_line_index == len(self.block_lines) - 1
)
def get_formatted_whole_line_feature(self) -> str:
return format_feature_text(self.line_text)
def get_str_is_repetitive_pattern(self) -> str:
return get_str_bool_feature_value(self.is_repetitive_pattern)
def get_str_is_first_repetitive_pattern(self) -> str:
return get_str_bool_feature_value(self.is_first_repetitive_pattern)
def get_dummy_str_is_repetitive_pattern(self) -> str:
return '0'
def get_dummy_str_is_first_repetitive_pattern(self) -> str:
return '0'
def get_dummy_str_is_main_area(self) -> str:
# whether the block's bounding box intersects with the page bounding box
return '1'
def get_str_block_relative_line_length_feature(self) -> str:
return str(feature_linear_scaling_int(
len(self.line_text),
self.max_block_line_text_length,
_LINESCALE
))
def get_str_relative_document_position(self) -> str:
return str(feature_linear_scaling_int(
self.document_token_index,
self.document_token_count,
NBBINS_POSITION
))
class SegmentationLineFeaturesProvider:
def __init__(
self,
document_features_context: DocumentFeaturesContext,
use_first_token_of_block: bool
):
self.document_features_context = document_features_context
self.use_first_token_of_block = use_first_token_of_block
def iter_line_features( # pylint: disable=too-many-locals
self,
layout_document: LayoutDocument
) -> Iterable[SegmentationLineFeatures]:
segmentation_line_features = SegmentationLineFeatures(
document_features_context=self.document_features_context
)
previous_token: Optional[LayoutToken] = None
segmentation_line_features.document_token_count = sum(
len(line.tokens)
for block in layout_document.iter_all_blocks()
for line in block.lines
)
        pattern_candidate_block_iterable = (
            block
            for page in layout_document.pages
            for block_index, block in enumerate(page.blocks)
            if block_index < 2 or block_index > len(page.blocks) - 2
        )
        pattern_candidate_line_iterable = (
            block.lines[0]
            for block in pattern_candidate_block_iterable
            if block.lines and block.lines[0].tokens
        )
        all_pattern_by_line_id = {
            id(line): get_text_pattern(line.text)
            for line in pattern_candidate_line_iterable
        }
LOGGER.debug('all_pattern_by_line_id: %s', all_pattern_by_line_id)
pattern_by_line_id = {
key: value
for key, value in all_pattern_by_line_id.items()
if len(value) >= 8 # Java GROBID sometimes counts an additional trailing space
}
pattern_counter = Counter(pattern_by_line_id.values())
LOGGER.debug('pattern_counter: %s', pattern_counter)
seen_repetitive_patterns: Set[str] = set()
document_token_index = 0
for page in layout_document.pages:
blocks = page.blocks
segmentation_line_features.page_blocks = blocks
for block_index, block in enumerate(blocks):
segmentation_line_features.page_block_index = block_index
block_lines = block.lines
segmentation_line_features.block_lines = block_lines
block_line_texts = [line.text for line in block_lines]
max_block_line_text_length = max(len(text) for text in block_line_texts)
first_block_token = next(iter(block.iter_all_tokens()), None)
assert first_block_token
for line_index, line in enumerate(block_lines):
segmentation_line_features.document_token_index = document_token_index
document_token_index += len(line.tokens)
segmentation_line_features.layout_line = line
segmentation_line_features.block_line_index = line_index
segmentation_line_features.max_block_line_text_length = (
max_block_line_text_length
)
line_text = block_line_texts[line_index]
retokenized_token_texts = re.split(r" |\t|\f|\u00A0", line_text)
if not retokenized_token_texts:
continue
if self.use_first_token_of_block:
# Java GROBID uses the first token in the block
token = first_block_token
else:
token = line.tokens[0]
segmentation_line_features.layout_token = token
segmentation_line_features.line_text = line_text
segmentation_line_features.concatenated_line_tokens_text = line_text
segmentation_line_features.token_text = retokenized_token_texts[0].strip()
segmentation_line_features.second_token_text = (
retokenized_token_texts[1] if len(retokenized_token_texts) >= 2 else ''
)
segmentation_line_features.previous_layout_token = previous_token
line_pattern = pattern_by_line_id.get(id(line), '')
LOGGER.debug('line_pattern: %r', line_pattern)
segmentation_line_features.is_repetitive_pattern = (
pattern_counter[line_pattern] > 1
)
segmentation_line_features.is_first_repetitive_pattern = (
segmentation_line_features.is_repetitive_pattern
and line_pattern not in seen_repetitive_patterns
)
if segmentation_line_features.is_first_repetitive_pattern:
seen_repetitive_patterns.add(line_pattern)
yield segmentation_line_features
previous_token = token
class SegmentationDataGenerator(ModelDataGenerator):
def __init__(
self,
document_features_context: DocumentFeaturesContext,
use_first_token_of_block: bool
):
self.document_features_context = document_features_context
self.use_first_token_of_block = use_first_token_of_block
def iter_model_data_for_layout_document(
self,
layout_document: LayoutDocument
) -> Iterable[LayoutModelData]:
features_provider = SegmentationLineFeaturesProvider(
document_features_context=self.document_features_context,
use_first_token_of_block=self.use_first_token_of_block
)
for features in features_provider.iter_line_features(
layout_document
):
line_features: List[str] = [
features.token_text,
features.second_token_text or features.token_text,
features.get_lower_token_text(),
features.get_prefix(1),
features.get_prefix(2),
features.get_prefix(3),
features.get_prefix(4),
features.get_block_status(),
features.get_page_status(),
features.get_token_font_status(),
features.get_token_font_size_feature(),
features.get_str_is_bold(),
features.get_str_is_italic(),
features.get_capitalisation_status_using_allcap(),
features.get_digit_status_using_containsdigits(),
features.get_str_is_single_char(),
features.get_dummy_str_is_proper_name(),
features.get_dummy_str_is_common_name(),
features.get_dummy_str_is_first_name(),
features.get_dummy_str_is_year(),
features.get_dummy_str_is_month(),
features.get_dummy_str_is_email(),
features.get_dummy_str_is_http(),
features.get_str_relative_document_position(),
features.get_dummy_str_relative_page_position(),
features.get_line_punctuation_profile(),
features.get_line_punctuation_profile_length_feature(),
features.get_str_block_relative_line_length_feature(),
features.get_dummy_str_is_bitmap_around(),
features.get_dummy_str_is_vector_around(),
features.get_str_is_repetitive_pattern(),
features.get_str_is_first_repetitive_pattern(),
features.get_dummy_str_is_main_area(),
features.get_formatted_whole_line_feature()
]
if len(line_features) != 34:
raise AssertionError(
                    'expected 34 features, but got %d (features=%s)' % (
len(line_features), line_features
)
)
yield LayoutModelData(
layout_line=features.layout_line,
data_line=' '.join(line_features)
)
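# Illustrative usage sketch (assumptions: `document_features_context` and `layout_document`
# are created elsewhere):
#
#     data_generator = SegmentationDataGenerator(
#         document_features_context=document_features_context,
#         use_first_token_of_block=True
#     )
#     for model_data in data_generator.iter_model_data_for_layout_document(layout_document):
#         print(model_data.data_line)  # one 34-column feature line per layout line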
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/segmentation/data.py
|
data.py
|
| 0.855323 | 0.146087 |
import logging
from typing import Iterable, List
from lxml.builder import ElementMaker
from sciencebeam_parser.utils.xml_writer import XmlTreeWriter
from sciencebeam_parser.document.layout_document import (
LayoutLine,
LayoutToken,
join_layout_tokens
)
from sciencebeam_parser.models.data import LayoutModelData
from sciencebeam_parser.models.model import (
get_split_prefix_label
)
from sciencebeam_parser.models.training_data import (
AbstractTeiTrainingDataGenerator,
AbstractTrainingTeiParser,
get_model_data_label
)
LOGGER = logging.getLogger(__name__)
TEI_E = ElementMaker()
# based on:
# https://github.com/kermitt2/grobid/blob/0.7.0/grobid-core/src/main/java/org/grobid/core/engines/Segmentation.java
ROOT_TRAINING_XML_ELEMENT_PATH = ['text']
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
'<other>': ROOT_TRAINING_XML_ELEMENT_PATH,
'O': ROOT_TRAINING_XML_ELEMENT_PATH,
'<header>': ROOT_TRAINING_XML_ELEMENT_PATH + ['front'],
'<headnote>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note[@place="headnote"]'],
'<footnote>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note[@place="footnote"]'],
'<marginnote>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note[@place="marginnote"]'],
'<page>': ROOT_TRAINING_XML_ELEMENT_PATH + ['page'],
'<references>': ROOT_TRAINING_XML_ELEMENT_PATH + ['listBibl'],
'<body>': ROOT_TRAINING_XML_ELEMENT_PATH + ['body'],
'<cover>': ROOT_TRAINING_XML_ELEMENT_PATH + ['titlePage'],
'<toc>': ROOT_TRAINING_XML_ELEMENT_PATH + ['div[@type="toc"]'],
'<annex>': ROOT_TRAINING_XML_ELEMENT_PATH + ['div[@type="annex"]'],
'<acknowledgement>': ROOT_TRAINING_XML_ELEMENT_PATH + ['div[@type="acknowledgement"]'],
}
def iter_tokens_from_model_data(model_data: LayoutModelData) -> Iterable[LayoutToken]:
if model_data.layout_token is not None:
yield model_data.layout_token
return
assert model_data.layout_line is not None
yield from model_data.layout_line.tokens
def iter_layout_lines_from_layout_tokens(
layout_tokens: Iterable[LayoutToken]
) -> Iterable[LayoutLine]:
line_layout_tokens: List[LayoutToken] = []
for layout_token in layout_tokens:
if not line_layout_tokens:
line_layout_tokens.append(layout_token)
continue
if (
layout_token.line_meta.line_id
== line_layout_tokens[0].line_meta.line_id
):
LOGGER.debug('line id matching: %r - %r', layout_token, line_layout_tokens[0])
line_layout_tokens.append(layout_token)
continue
yield LayoutLine(tokens=line_layout_tokens)
line_layout_tokens = [layout_token]
if line_layout_tokens:
yield LayoutLine(tokens=line_layout_tokens)
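# Illustrative, standalone sketch of the line-grouping logic above: consecutive
# tokens sharing the same line id are collected into one line. _DemoToken is a
# stand-in for LayoutToken, used here only for demonstration.
from typing import NamedTuple

class _DemoToken(NamedTuple):
    text: str
    line_id: int

def _demo_group_tokens_by_line_id(tokens):
    group: List[_DemoToken] = []
    for token in tokens:
        if group and token.line_id != group[0].line_id:
            yield [t.text for t in group]
            group = []
        group.append(token)
    if group:
        yield [t.text for t in group]

# list(_demo_group_tokens_by_line_id([
#     _DemoToken('A', 1), _DemoToken('B', 1), _DemoToken('C', 2)
# ]))
# -> [['A', 'B'], ['C']]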
class SegmentationTeiTrainingDataGenerator(AbstractTeiTrainingDataGenerator):
DEFAULT_TEI_FILENAME_SUFFIX = '.segmentation.tei.xml'
DEFAULT_DATA_FILENAME_SUFFIX = '.segmentation'
def __init__(self):
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
element_maker=TEI_E,
default_tei_filename_suffix=(
SegmentationTeiTrainingDataGenerator.DEFAULT_TEI_FILENAME_SUFFIX
),
default_data_filename_suffix=(
SegmentationTeiTrainingDataGenerator.DEFAULT_DATA_FILENAME_SUFFIX
),
default_tei_sub_directory='segmentation/corpus/tei',
default_data_sub_directory='segmentation/corpus/raw'
)
def write_xml_line_for_layout_tokens(
self,
xml_writer: XmlTreeWriter,
layout_tokens: Iterable[LayoutToken]
):
xml_writer.append_text(join_layout_tokens(layout_tokens))
xml_writer.append(TEI_E('lb'))
def write_xml_for_model_data_iterable(
self,
xml_writer: XmlTreeWriter,
model_data_iterable: Iterable[LayoutModelData]
):
default_path = xml_writer.current_path
pending_whitespace = ''
for model_data in model_data_iterable:
prefixed_label = get_model_data_label(model_data)
_prefix, label = get_split_prefix_label(prefixed_label or '')
xml_element_path = self.get_training_xml_path_for_label(
label,
current_path=xml_writer.current_path
)
LOGGER.debug('label: %r (%r)', label, xml_element_path)
if xml_writer.current_path != xml_element_path:
xml_writer.require_path(default_path)
xml_writer.append_text(pending_whitespace)
pending_whitespace = ''
xml_writer.require_path(xml_element_path)
for layout_line in iter_layout_lines_from_layout_tokens(
iter_tokens_from_model_data(model_data)
):
self.write_xml_line_for_layout_tokens(
xml_writer,
layout_line.tokens
)
pending_whitespace = '\n'
xml_writer.require_path(default_path)
xml_writer.append_text(pending_whitespace)
class SegmentationTrainingTeiParser(AbstractTrainingTeiParser):
def __init__(self) -> None:
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
use_tei_namespace=False,
line_as_token=True
)
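# Illustrative sketch of the rough shape of the generated training XML:
# labelled lines end up as text (followed by <lb/>) inside the element mapped
# from their label, e.g. '<header>' -> front, '<body>' -> body. This is a
# hand-built approximation of the generator's output, not its exact behaviour.
def _demo_training_xml() -> bytes:
    from lxml import etree
    root = TEI_E(
        'text',
        TEI_E('front', 'A sample header line', TEI_E('lb')),
        TEI_E('body', 'A sample body line', TEI_E('lb'))
    )
    return etree.tostring(root, pretty_print=True)

# print(_demo_training_xml().decode('utf-8'))
# <text>
#   <front>A sample header line<lb/></front>
#   <body>A sample body line<lb/></body>
# </text>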
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/segmentation/training_data.py
|
training_data.py
|
| 0.664323 | 0.118717 |
from typing import Iterable
from sciencebeam_parser.models.data import (
ContextAwareLayoutTokenFeatures,
ContextAwareLayoutTokenModelDataGenerator,
LayoutModelData
)
class CitationDataGenerator(ContextAwareLayoutTokenModelDataGenerator):
def iter_model_data_for_context_layout_token_features(
self,
token_features: ContextAwareLayoutTokenFeatures
) -> Iterable[LayoutModelData]:
yield token_features.get_layout_model_data([
token_features.token_text,
token_features.get_lower_token_text(),
token_features.get_prefix(1),
token_features.get_prefix(2),
token_features.get_prefix(3),
token_features.get_prefix(4),
token_features.get_suffix(1),
token_features.get_suffix(2),
token_features.get_suffix(3),
token_features.get_suffix(4),
token_features.get_line_status_with_lineend_for_single_token(),
token_features.get_capitalisation_status_using_allcap(),
token_features.get_digit_status_using_containsdigits(),
token_features.get_str_is_single_char(),
token_features.get_dummy_str_is_proper_name(),
token_features.get_dummy_str_is_common_name(),
token_features.get_str_is_first_name(),
token_features.get_str_is_last_name(),
token_features.get_dummy_str_is_location_name(),
token_features.get_dummy_str_is_year(),
token_features.get_dummy_str_is_month(),
token_features.get_dummy_str_is_http(),
token_features.get_dummy_str_is_known_collaboration(),
token_features.get_dummy_str_is_known_journal_title(),
token_features.get_dummy_str_is_known_conference_title(),
token_features.get_dummy_str_is_known_publisher(),
token_features.get_dummy_str_is_known_identifier(),
token_features.get_punctuation_type_feature(),
token_features.get_str_sentence_token_relative_position(),
token_features.get_dummy_label()
])
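# Illustrative, standalone sketch of the character prefix/suffix features in
# the vector above. The real values come from ContextAwareLayoutTokenFeatures;
# treating get_prefix(n)/get_suffix(n) as the first/last n characters is an
# assumption made here for illustration (short-token handling may differ).
def _demo_prefix_suffix_features(token_text: str) -> list:
    return (
        [token_text[:n] for n in (1, 2, 3, 4)]
        + [token_text[-n:] for n in (1, 2, 3, 4)]
    )

# _demo_prefix_suffix_features('Nature')
# -> ['N', 'Na', 'Nat', 'Natu', 'e', 're', 'ure', 'ture']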
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/citation/data.py
|
data.py
|
| 0.763836 | 0.180467 |
import logging
import re
from typing import Iterable, Mapping, Optional, Set, Tuple, Type, Union
from sciencebeam_parser.utils.misc import iter_ids
from sciencebeam_parser.document.semantic_document import (
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticDate,
SemanticExternalIdentifier,
SemanticExternalIdentifierTypes,
SemanticExternalUrl,
SemanticInvalidReference,
SemanticIssue,
SemanticJournal,
SemanticLocation,
SemanticPageRange,
SemanticPublisher,
SemanticRawAuthors,
SemanticRawEditors,
SemanticRawReference,
SemanticRawReferenceText,
SemanticReference,
SemanticTitle,
SemanticVolume
)
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.models.extract import SimpleModelSemanticExtractor
LOGGER = logging.getLogger(__name__)
# https://en.wikipedia.org/wiki/Digital_Object_Identifier
# https://www.doi.org/doi_handbook/2_Numbering.html
DOI_PATTERN = r'\b(10\.\d{4,}(?:\.\d{1,})*/.+)'
# copied and adapted from:
# https://github.com/kermitt2/grobid/blob/0.6.2/grobid-core/src/main/java/org/grobid/core/utilities/TextUtilities.java#L66
PMID_PATTERN = r"(?:(?:PMID)|(?:Pub(?:\s)?Med(?:\s)?(?:ID)?))(?:\s)?(?:\:)?(?:\s)*(\d{1,8})"
PMCID_PATTERN = r"(?:PMC)(\d{1,})"
# copied and adapted from:
# https://github.com/kermitt2/grobid/blob/0.6.2/grobid-core/src/main/java/org/grobid/core/utilities/TextUtilities.java#L62-L63
ARXIV_PATTERN = (
r"(?:arXiv\s?(?:\.org)?\s?\:\s?(\d{4}\s?\.\s?\d{4,5}(?:v\d+)?))"
r"|(?:arXiv\s?(?:\.org)?\s?\:\s?([ a-zA-Z\-\.]*\s?/\s?\d{7}(?:v\d+)?))"
)
# https://en.wikipedia.org/wiki/Publisher_Item_Identifier
PII_PATTERN = r'\b([S,B]\W*(?:[0-9xX]\W*){15,}[0-9xX])'
SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<author>': SemanticRawAuthors,
'<editor>': SemanticRawEditors,
'<title>': SemanticTitle,
'<journal>': SemanticJournal,
'<volume>': SemanticVolume,
'<issue>': SemanticIssue,
'<publisher>': SemanticPublisher,
'<location>': SemanticLocation
}
VALID_REFERENCE_TYPES: Set[Type[SemanticContentWrapper]] = {
SemanticTitle,
SemanticJournal,
SemanticRawAuthors,
SemanticRawEditors,
SemanticExternalIdentifier,
SemanticExternalUrl
}
def parse_page_range(layout_block: LayoutBlock) -> SemanticPageRange:
page_range_text = layout_block.text
page_parts = page_range_text.split('-')
if len(page_parts) == 2:
from_page = page_parts[0].strip()
to_page = page_parts[1].strip()
if to_page and len(to_page) < len(from_page):
to_page = from_page[:-(len(to_page))] + to_page
return SemanticPageRange(
layout_block=layout_block,
from_page=from_page,
to_page=to_page
)
return SemanticPageRange(layout_block=layout_block)
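# Illustrative, standalone check of the page-range expansion above: when the
# second page number is shorter than the first, the missing leading digits are
# borrowed from the first page, e.g. "123-7" is read as pages 123 to 127.
def _demo_expand_page_range(page_range_text: str):
    parts = page_range_text.split('-')
    if len(parts) != 2:
        return page_range_text
    from_page, to_page = parts[0].strip(), parts[1].strip()
    if to_page and len(to_page) < len(from_page):
        to_page = from_page[:-(len(to_page))] + to_page
    return from_page, to_page

# _demo_expand_page_range('123-7')   -> ('123', '127')
# _demo_expand_page_range('101-109') -> ('101', '109')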
def parse_web(layout_block: LayoutBlock) -> Union[SemanticExternalUrl, SemanticExternalIdentifier]:
value = re.sub(r'\s', '', layout_block.text)
m = re.search(DOI_PATTERN, value)
if m:
return SemanticExternalIdentifier(
layout_block=layout_block,
value=m.group(1),
external_identifier_type=SemanticExternalIdentifierTypes.DOI
)
return SemanticExternalUrl(
layout_block=layout_block,
value=value
)
def get_detected_external_identifier_type_and_value_for_text(
text: str
) -> Tuple[Optional[str], str]:
value = re.sub(r'\s', '', text)
m = re.search(DOI_PATTERN, value)
if m:
value = m.group(1)
return SemanticExternalIdentifierTypes.DOI, value
m = re.search(PMCID_PATTERN, value)
if m:
value = 'PMC' + m.group(1)
return SemanticExternalIdentifierTypes.PMCID, value
m = re.search(ARXIV_PATTERN, value)
if m:
value = m.group(1) or m.group(2)
return SemanticExternalIdentifierTypes.ARXIV, value
m = re.match(PMID_PATTERN, value)
if m:
value = m.group(1)
return SemanticExternalIdentifierTypes.PMID, value
m = re.search(PII_PATTERN, value)
if m:
value = m.group(1)
return SemanticExternalIdentifierTypes.PII, value
return None, value
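def _demo_detected_identifier_types() -> None:
    # Illustrative usage of the detection above. The identifier values are
    # made up for demonstration; only the detected type is being shown.
    examples = {
        'doi:10.1000/xyz123': SemanticExternalIdentifierTypes.DOI,
        'PMID: 12345678': SemanticExternalIdentifierTypes.PMID,
        'PMC1234567': SemanticExternalIdentifierTypes.PMCID,
        'arXiv:2101.01234': SemanticExternalIdentifierTypes.ARXIV
    }
    for text, expected_type in examples.items():
        detected_type, _value = get_detected_external_identifier_type_and_value_for_text(text)
        assert detected_type == expected_type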
def get_detected_external_identifier_type_for_text(text: str) -> Optional[str]:
external_identifier_type, _ = get_detected_external_identifier_type_and_value_for_text(
text
)
return external_identifier_type
def parse_pubnum(layout_block: LayoutBlock) -> SemanticExternalIdentifier:
external_identifier_type, value = get_detected_external_identifier_type_and_value_for_text(
layout_block.text
)
return SemanticExternalIdentifier(
layout_block=layout_block,
value=value,
external_identifier_type=external_identifier_type
)
def parse_date(layout_block: LayoutBlock) -> SemanticDate:
value = re.sub(r'\s', '', layout_block.text)
year: Optional[int] = None
m = re.search(r'(\d{4})', value)
if m:
year = int(m.group(1))
return SemanticDate(
layout_block=layout_block,
year=year
)
def is_reference_valid(ref: SemanticReference) -> bool:
for semantic_content in ref:
if type(semantic_content) in VALID_REFERENCE_TYPES:
return True
return False
def get_invalid_reference(ref: SemanticReference) -> SemanticInvalidReference:
return SemanticInvalidReference(
mixed_content=[
semantic_content
for semantic_content in ref
if not isinstance(semantic_content, SemanticRawReferenceText)
]
)
class CitationSemanticExtractor(SimpleModelSemanticExtractor):
def __init__(self):
super().__init__(semantic_content_class_by_tag=SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG)
def get_semantic_content_for_entity_name( # pylint: disable=too-many-return-statements
self,
name: str,
layout_block: LayoutBlock
) -> SemanticContentWrapper:
if name == '<pages>':
return parse_page_range(layout_block)
if name == '<web>':
return parse_web(layout_block)
if name == '<pubnum>':
return parse_pubnum(layout_block)
if name == '<date>':
return parse_date(layout_block)
return super().get_semantic_content_for_entity_name(name, layout_block)
def iter_semantic_content_for_entity_blocks( # pylint: disable=arguments-differ
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
semantic_raw_reference: Optional[SemanticRawReference] = None,
**kwargs
) -> Iterable[SemanticContentWrapper]:
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
ids_iterator = iter(iter_ids('b'))
ref: Optional[SemanticReference] = None
for name, layout_block in entity_tokens:
if not ref:
ref = SemanticReference()
if semantic_raw_reference:
ref.content_id = semantic_raw_reference.content_id
for semantic_content in semantic_raw_reference:
ref.add_content(semantic_content)
if not ref.content_id:
ref.content_id = next(ids_iterator, '?')
semantic_content = self.get_semantic_content_for_entity_name(
name, layout_block=layout_block
)
ref.add_content(semantic_content)
if ref and not is_reference_valid(ref):
yield get_invalid_reference(ref)
elif ref:
yield ref
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/citation/extract.py
|
extract.py
|
| 0.799521 | 0.208259 |
import logging
from lxml import etree
from sciencebeam_parser.document.tei.common import tei_xpath
from sciencebeam_parser.models.training_data import (
AbstractTeiTrainingDataGenerator,
AbstractTrainingTeiParser
)
from sciencebeam_parser.models.citation.extract import (
get_detected_external_identifier_type_for_text
)
from sciencebeam_parser.utils.xml import get_text_content
LOGGER = logging.getLogger(__name__)
# based on:
# https://github.com/kermitt2/grobid/blob/0.7.0/grobid-core/src/main/java/org/grobid/core/engines/CitationParser.java
ROOT_TRAINING_XML_ELEMENT_PATH = ['text', 'back', 'listBibl', 'bibl']
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
'<title>': ROOT_TRAINING_XML_ELEMENT_PATH + ['title[@level="a"]'],
'<author>': ROOT_TRAINING_XML_ELEMENT_PATH + ['author'],
'<editor>': ROOT_TRAINING_XML_ELEMENT_PATH + ['editor'],
'<institution>': ROOT_TRAINING_XML_ELEMENT_PATH + ['orgName'],
'<collaboration>': ROOT_TRAINING_XML_ELEMENT_PATH + ['orgName[@type="collaboration"]'],
'<journal>': ROOT_TRAINING_XML_ELEMENT_PATH + ['title[@level="j"]'],
'<series>': ROOT_TRAINING_XML_ELEMENT_PATH + ['title[@level="s"]'],
'<booktitle>': ROOT_TRAINING_XML_ELEMENT_PATH + ['title[@level="m"]'],
'<date>': ROOT_TRAINING_XML_ELEMENT_PATH + ['date'],
'<volume>': ROOT_TRAINING_XML_ELEMENT_PATH + ['biblScope[@unit="volume"]'],
'<issue>': ROOT_TRAINING_XML_ELEMENT_PATH + ['biblScope[@unit="issue"]'],
'<pages>': ROOT_TRAINING_XML_ELEMENT_PATH + ['biblScope[@unit="page"]'],
'<publisher>': ROOT_TRAINING_XML_ELEMENT_PATH + ['publisher'],
'<location>': ROOT_TRAINING_XML_ELEMENT_PATH + ['pubPlace'],
'<tech>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note[@type="report"]'],
'<web>': ROOT_TRAINING_XML_ELEMENT_PATH + ['ptr[@type="web"]'],
'<pubnum>': ROOT_TRAINING_XML_ELEMENT_PATH + ['idno'],
'<note>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note']
}
class CitationTeiTrainingDataGenerator(AbstractTeiTrainingDataGenerator):
DEFAULT_TEI_FILENAME_SUFFIX = '.references.tei.xml'
def __init__(self):
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
use_tei_namespace=True,
root_tag='TEI',
default_tei_filename_suffix=(
CitationTeiTrainingDataGenerator.DEFAULT_TEI_FILENAME_SUFFIX
),
default_data_filename_suffix=None,
default_tei_sub_directory='citation/corpus'
)
def get_post_processed_xml_root(self, xml_root: etree.ElementBase):
for idno_element in tei_xpath(xml_root, '//tei:idno'):
external_identifier_type = get_detected_external_identifier_type_for_text(
get_text_content(idno_element)
)
if not external_identifier_type:
continue
idno_element.attrib['type'] = external_identifier_type
return xml_root
class CitationTrainingTeiParser(AbstractTrainingTeiParser):
def __init__(self) -> None:
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=(
TRAINING_XML_ELEMENT_PATH_BY_LABEL
),
use_tei_namespace=True
)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/citation/training_data.py
|
training_data.py
|
| 0.514644 | 0.089256 |
import logging
import re
from typing import Iterable, List, Mapping, Optional, Tuple, Type, Union, cast
from sciencebeam_parser.document.semantic_document import (
SemanticAuthor,
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticMarker,
SemanticMiddleName,
SemanticMixedContentWrapper,
SemanticNamePart,
SemanticNameSuffix,
SemanticNameTitle,
SemanticNote,
SemanticGivenName,
SemanticSurname,
T_SemanticName
)
from sciencebeam_parser.document.layout_document import LayoutBlock, LayoutDocument, LayoutToken
from sciencebeam_parser.models.extract import SimpleModelSemanticExtractor
LOGGER = logging.getLogger(__name__)
SPLIT_ON_SECOND_ENTIY_NAME = {'<title>', '<forename>', '<surname>'}
SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<title>': SemanticNameTitle,
'<forename>': SemanticGivenName,
'<middlename>': SemanticMiddleName,
'<surname>': SemanticSurname,
'<suffix>': SemanticNameSuffix
}
def tokenize_individual_characters(text: str) -> List[str]:
return list(text)
def convert_two_letter_uppercase_given_name_to_given_middle_name(
name: T_SemanticName
):
given_names = list(name.iter_by_type(SemanticGivenName))
middle_names = list(name.iter_by_type(SemanticMiddleName))
if middle_names:
LOGGER.debug('already has a middle name: %r', middle_names)
return
if len(given_names) != 1:
LOGGER.debug('no or too many given names: %r', given_names)
return
given_name_text = given_names[0].get_text()
if len(given_name_text) != 2 or not given_name_text.isupper():
LOGGER.debug('not two uppercase characters: %r', given_name_text)
return
layout_document = LayoutDocument.for_blocks(list(given_names[0].iter_blocks()))
retokenized_layout_document = layout_document.retokenize(
tokenize_fn=tokenize_individual_characters
)
LOGGER.debug('retokenized_layout_document: %r', retokenized_layout_document)
split_name_parts = [
(
SemanticGivenName(layout_block=LayoutBlock.for_tokens([token])) if index == 0
else SemanticMiddleName(layout_block=LayoutBlock.for_tokens([token]))
)
for index, token in enumerate(retokenized_layout_document.iter_all_tokens())
]
LOGGER.debug('split_name_parts: %r', split_name_parts)
name.flat_map_inplace_by_type(
SemanticGivenName,
lambda _: split_name_parts
)
def convert_name_parts_to_title_case(name: T_SemanticName):
for semantic_content in name:
if not isinstance(semantic_content, SemanticNamePart):
continue
semantic_content.value = semantic_content.get_text().title()
# based on:
# https://github.com/kermitt2/grobid/blob/0.6.2/grobid-core/src/main/java/org/grobid/core/data/Person.java#L375-L391
# and:
# https://github.com/kermitt2/grobid/blob/0.6.2/grobid-core/src/main/java/org/grobid/core/data/Person.java#L756-L775
def normalize_name_parts(name: T_SemanticName):
if not list(name.iter_by_type(SemanticSurname)):
return SemanticNote(
layout_block=LayoutBlock.merge_blocks(name.iter_blocks()),
note_type='invalid_author_name'
)
convert_two_letter_uppercase_given_name_to_given_middle_name(name)
convert_name_parts_to_title_case(name)
return name
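# Illustrative, standalone sketch of the normalisation above, using plain
# strings instead of semantic content objects: a two-letter all-uppercase
# given name is split into a given and a middle initial, and the name parts
# are converted to title case.
def _demo_normalize_name(given: str, surname: str) -> dict:
    middle = ''
    if len(given) == 2 and given.isupper():
        given, middle = given[0], given[1]
    return {
        'given': given.title(),
        'middle': middle.title(),
        'surname': surname.title()
    }

# _demo_normalize_name('JA', 'SMITH')  -> {'given': 'J', 'middle': 'A', 'surname': 'Smith'}
# _demo_normalize_name('jane', 'doe')  -> {'given': 'Jane', 'middle': '', 'surname': 'Doe'}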
def iter_semantic_markers_for_layout_block(
layout_block: LayoutBlock
) -> Iterable[Union[SemanticMarker, SemanticContentWrapper]]:
for text in re.split(r'(\D)', layout_block.text):
if not text:
continue
local_block = LayoutBlock.for_tokens([
LayoutToken(text, whitespace='')
])
if text == ',' or text.isspace():
yield SemanticNote(
layout_block=local_block,
note_type='marker_delimiter'
)
continue
yield SemanticMarker(layout_block=local_block)
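# The re.split(r'(\D)', ...) call above keeps every non-digit character as its
# own delimiter token, so a marker block such as '1,2' is emitted as marker,
# delimiter, marker; empty strings produced by adjacent delimiters are skipped
# by the "if not text" guard. For example:
# re.split(r'(\D)', '1,2')  -> ['1', ',', '2']
# re.split(r'(\D)', '1, 2') -> ['1', ',', '', ' ', '2']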
def append_semantic_markers_for_layout_block(
parent_semantic_content: SemanticMixedContentWrapper,
layout_block: LayoutBlock
) -> None:
semantic_markers = list(iter_semantic_markers_for_layout_block(layout_block))
for semantic_marker in semantic_markers:
parent_semantic_content.add_content(semantic_marker)
class NameSemanticExtractor(SimpleModelSemanticExtractor):
def __init__(self):
super().__init__(semantic_content_class_by_tag=SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG)
def iter_semantic_content_for_entity_blocks( # type: ignore # pylint: disable=arguments-differ
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
name_type: Optional[Type[T_SemanticName]] = None,
**kwargs
) -> Iterable[T_SemanticName]:
_name_type: Type[T_SemanticName] = cast(
Type[T_SemanticName],
name_type if name_type is not None else SemanticAuthor
)
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
semantic_name: Optional[T_SemanticName] = None
seen_entity_tokens: List[Tuple[str, LayoutBlock]] = []
seen_name_labels: List[str] = []
has_tail_marker: bool = False
for name, layout_block in entity_tokens:
seen_entity_tokens.append((name, layout_block,))
if name == '<marker>':
if not semantic_name:
LOGGER.debug('new semantic_name with marker in the beginning')
semantic_name = _name_type()
append_semantic_markers_for_layout_block(semantic_name, layout_block)
continue
if len(seen_entity_tokens) >= 2 and seen_name_labels and not has_tail_marker:
previous_layout_block = seen_entity_tokens[-2][1]
if previous_layout_block.text.strip().endswith(','):
LOGGER.debug(
'new semantic_name marker after comma, seen_name_labels=%s',
seen_name_labels
)
yield normalize_name_parts(semantic_name)
seen_name_labels = []
semantic_name = _name_type()
append_semantic_markers_for_layout_block(semantic_name, layout_block)
continue
append_semantic_markers_for_layout_block(semantic_name, layout_block)
has_tail_marker = True
continue
if semantic_name and name in SPLIT_ON_SECOND_ENTIY_NAME and name in seen_name_labels:
LOGGER.debug(
'starting new semantic_name after having seen name part again, name=%r',
name
)
yield normalize_name_parts(semantic_name)
seen_name_labels = []
has_tail_marker = False
semantic_name = None
semantic_content = self.get_semantic_content_for_entity_name(
name, layout_block
)
if not isinstance(semantic_content, SemanticNote):
if has_tail_marker and semantic_name:
LOGGER.debug('starting new semantic_name after tail markers, name=%r', name)
yield normalize_name_parts(semantic_name)
seen_name_labels = []
has_tail_marker = False
semantic_name = None
seen_name_labels.append(name)
if not semantic_name:
semantic_name = _name_type()
semantic_name.add_content(semantic_content)
if semantic_name:
yield normalize_name_parts(semantic_name)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/name/extract.py
|
extract.py
|
| 0.7324 | 0.162912 |
import logging
from typing import Iterable, Set, Union
from sciencebeam_parser.document.semantic_document import SemanticAuthor
from sciencebeam_parser.models.data import LayoutModelData
from sciencebeam_parser.models.model import (
LabeledLayoutToken,
iter_entity_layout_blocks_for_labeled_layout_tokens
)
from sciencebeam_parser.models.name.extract import NameSemanticExtractor
from sciencebeam_parser.models.training_data import (
AbstractTeiTrainingDataGenerator,
AbstractTrainingTeiParser,
ExtractInstruction,
ResetExtractInstruction,
get_model_data_label
)
LOGGER = logging.getLogger(__name__)
# based on:
# https://github.com/kermitt2/grobid/blob/0.7.0/grobid-core/src/main/java/org/grobid/core/engines/AuthorParser.java
ROOT_TRAINING_XML_ELEMENT_PATH = [
'teiHeader', 'fileDesc', 'sourceDesc', 'biblStruct', 'analytic', 'author', 'persName'
]
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
'<marker>': ROOT_TRAINING_XML_ELEMENT_PATH + ['marker'],
'<title>': ROOT_TRAINING_XML_ELEMENT_PATH + ['roleName'],
'<forename>': ROOT_TRAINING_XML_ELEMENT_PATH + ['forename'],
'<middlename>': ROOT_TRAINING_XML_ELEMENT_PATH + ['middlename'],
'<surname>': ROOT_TRAINING_XML_ELEMENT_PATH + ['surname'],
'<suffix>': ROOT_TRAINING_XML_ELEMENT_PATH + ['suffix']
}
def iter_model_data_with_reset_instruction_iterable(
model_data_or_instruction_iterable: Iterable[Union[LayoutModelData, ExtractInstruction]]
) -> Iterable[Union[LayoutModelData, ExtractInstruction]]:
# using extractor to re-use logic to split author names
# here we will split on the first token of the extracted semantic content
extractor = NameSemanticExtractor()
model_data_or_instruction_list = list(
model_data_or_instruction_iterable
)
entity_tokens = iter_entity_layout_blocks_for_labeled_layout_tokens([
LabeledLayoutToken(
label=get_model_data_label(model_data) or '',
layout_token=model_data.layout_token
)
for model_data in model_data_or_instruction_list
if (
isinstance(model_data, LayoutModelData)
and model_data.layout_token is not None
)
])
LOGGER.debug('entity_tokens: %r', entity_tokens)
reset_token_ids: Set[int] = set()
for index, semantic_content in enumerate(extractor.iter_semantic_content_for_entity_blocks(
entity_tokens=entity_tokens,
name_type=SemanticAuthor
)):
if index == 0:
continue
for semantic_token in semantic_content.iter_tokens():
reset_token_ids.add(id(semantic_token))
break
for model_data_or_instruction in model_data_or_instruction_list:
if isinstance(model_data_or_instruction, LayoutModelData):
model_data = model_data_or_instruction
if id(model_data.layout_token) in reset_token_ids:
yield ResetExtractInstruction(
ROOT_TRAINING_XML_ELEMENT_PATH[:-1]
)
yield model_data_or_instruction
class NameTeiTrainingDataGenerator(AbstractTeiTrainingDataGenerator):
DEFAULT_TEI_FILENAME_SUFFIX = '.authors.tei.xml'
def __init__(self):
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
use_tei_namespace=True,
root_tag='TEI',
default_tei_filename_suffix=(
NameTeiTrainingDataGenerator.DEFAULT_TEI_FILENAME_SUFFIX
),
default_data_filename_suffix=None
)
def iter_model_data_or_instruction_for_model_data_iterable(
self,
model_data_iterable: Iterable[LayoutModelData]
) -> Iterable[Union[LayoutModelData, ExtractInstruction]]:
parent_model_data_or_instruction_iterable = (
super().iter_model_data_or_instruction_for_model_data_iterable(
model_data_iterable
)
)
return iter_model_data_with_reset_instruction_iterable(
parent_model_data_or_instruction_iterable
)
class NameTrainingTeiParser(AbstractTrainingTeiParser):
def __init__(self) -> None:
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH[:-1],
training_xml_element_path_by_label=(
TRAINING_XML_ELEMENT_PATH_BY_LABEL
),
use_tei_namespace=True
)
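# Illustrative, standalone sketch of the reset logic above, using plain values
# instead of layout tokens: the id() of the first item of every group after the
# first is recorded, and a reset marker is emitted just before those items so
# that each author starts a fresh persName element.
def _demo_emit_with_resets(groups):
    items = [item for group in groups for item in group]
    reset_ids = {id(group[0]) for group in groups[1:] if group}
    for item in items:
        if id(item) in reset_ids:
            yield 'RESET'
        yield item

# list(_demo_emit_with_resets([['John', 'Smith'], ['Jane', 'Doe']]))
# -> ['John', 'Smith', 'RESET', 'Jane', 'Doe']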
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/name/training_data.py
|
training_data.py
|
| 0.74826 | 0.187058 |
from typing import Iterable
from sciencebeam_parser.models.data import (
ContextAwareLayoutTokenFeatures,
ContextAwareLayoutTokenModelDataGenerator,
LayoutModelData
)
class ReferenceSegmenterDataGenerator(ContextAwareLayoutTokenModelDataGenerator):
def iter_model_data_for_context_layout_token_features(
self,
token_features: ContextAwareLayoutTokenFeatures
) -> Iterable[LayoutModelData]:
yield token_features.get_layout_model_data([
token_features.token_text,
token_features.get_lower_token_text(),
token_features.get_prefix(1),
token_features.get_prefix(2),
token_features.get_prefix(3),
token_features.get_prefix(4),
token_features.get_suffix(1),
token_features.get_suffix(2),
token_features.get_suffix(3),
token_features.get_suffix(4),
token_features.get_line_status_with_lineend_for_single_token(),
token_features.get_alignment_status(),
token_features.get_capitalisation_status_using_allcap(),
token_features.get_digit_status_using_containsdigits(),
token_features.get_str_is_single_char(),
token_features.get_dummy_str_is_proper_name(),
token_features.get_dummy_str_is_common_name(),
token_features.get_str_is_first_name(),
token_features.get_dummy_str_is_location_name(),
token_features.get_dummy_str_is_year(),
token_features.get_dummy_str_is_month(),
token_features.get_dummy_str_is_http(),
token_features.get_line_punctuation_profile(),
token_features.get_str_line_token_relative_position(),
token_features.get_str_line_relative_length(),
token_features.get_block_status_with_blockend_for_single_token(),
token_features.get_truncated_line_punctuation_profile_length_feature(),
token_features.get_dummy_label()
])
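# Illustrative, standalone sketch of what a line punctuation profile feature
# could capture (an assumption about the feature's intent, not the library's
# exact implementation): the sequence of punctuation characters in a line.
def _demo_line_punctuation_profile(line_text: str) -> str:
    import string
    return ''.join(c for c in line_text if c in string.punctuation)

# _demo_line_punctuation_profile('Smith, J. (2020). A title.')  -> ',.()..'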
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/reference_segmenter/data.py
|
data.py
|
| 0.774626 | 0.155976 |
import logging
from typing import Iterable, Optional, Tuple
from sciencebeam_parser.utils.misc import iter_ids
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper,
SemanticHeading,
SemanticLabel,
SemanticNote,
SemanticRawReference,
SemanticRawReferenceText
)
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.models.extract import ModelSemanticExtractor
LOGGER = logging.getLogger(__name__)
def is_looks_like_reference(layout_block: LayoutBlock) -> bool:
# a quick and dirty check whether this remotely looks like a reference
return len(list(layout_block.iter_all_tokens())) > 3
class ReferenceSegmenterSemanticExtractor(ModelSemanticExtractor):
def iter_semantic_content_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
**kwargs
) -> Iterable[SemanticContentWrapper]:
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
ids_iterator = iter(iter_ids('b'))
ref: Optional[SemanticRawReference] = None
is_first_ref = True
for name, layout_block in entity_tokens:
if name == '<label>':
if not ref:
ref = SemanticRawReference(content_id=next(ids_iterator, '?'))
ref.add_content(SemanticLabel(layout_block=layout_block))
continue
if name == '<reference>':
if not ref and is_first_ref and not is_looks_like_reference(layout_block):
yield SemanticHeading(layout_block=layout_block)
is_first_ref = False
continue
if not ref:
ref = SemanticRawReference(content_id=next(ids_iterator, '?'))
ref.add_content(SemanticRawReferenceText(layout_block=layout_block))
yield ref
ref = None
is_first_ref = False
continue
yield SemanticNote(layout_block=layout_block, note_type=name)
if ref:
yield ref
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/reference_segmenter/extract.py
|
extract.py
|
| 0.797399 | 0.186299 |
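The extractor above groups a <label> block with the following <reference> block into a single raw reference, and treats a very short first <reference> block (the is_looks_like_reference check, more than 3 tokens) as a section heading instead. Below is a simplified, string-level sketch of the same grouping idea; it operates on plain (tag, text) pairs and does not use the real semantic document classes.

# Simplified sketch of the label/reference grouping logic above, operating on
# plain (tag, text) pairs instead of layout blocks. Illustrative only.
from typing import Iterable, List, Optional, Tuple

def group_references(entity_tokens: Iterable[Tuple[str, str]]) -> List[dict]:
    results: List[dict] = []
    current: Optional[dict] = None
    is_first = True
    for name, text in entity_tokens:
        if name == '<label>':
            if current is None:
                current = {}
            current['label'] = text
            continue
        if name == '<reference>':
            # a very short first "reference" is more likely a section heading
            if current is None and is_first and len(text.split()) <= 3:
                results.append({'heading': text})
                is_first = False
                continue
            if current is None:
                current = {}
            current['text'] = text
            results.append(current)
            current = None
            is_first = False
            continue
        results.append({'note': text, 'type': name})
    if current is not None:
        results.append(current)
    return results

print(group_references([
    ('<reference>', 'References'),
    ('<label>', '[1]'),
    ('<reference>', 'Smith J. et al. Example article title. Journal, 2020.')
]))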
import logging
from sciencebeam_parser.models.training_data import (
NO_NS_TEI_E,
AbstractTeiTrainingDataGenerator,
AbstractTrainingTeiParser
)
LOGGER = logging.getLogger(__name__)
# based on:
# https://github.com/kermitt2/grobid/blob/0.7.0/grobid-core/src/main/java/org/grobid/core/engines/ReferenceSegmenterParser.java
ROOT_TRAINING_XML_ELEMENT_PATH = ['text', 'listBibl']
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
'<reference>': ROOT_TRAINING_XML_ELEMENT_PATH + ['bibl'],
'<label>': ROOT_TRAINING_XML_ELEMENT_PATH + ['bibl', 'label']
}
RESET_TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
'<label>': ROOT_TRAINING_XML_ELEMENT_PATH
}
class ReferenceSegmenterTeiTrainingDataGenerator(AbstractTeiTrainingDataGenerator):
DEFAULT_TEI_FILENAME_SUFFIX = '.references.referenceSegmenter.tei.xml'
DEFAULT_DATA_FILENAME_SUFFIX = '.references.referenceSegmenter'
def __init__(self):
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
reset_training_xml_element_path_by_label=RESET_TRAINING_XML_ELEMENT_PATH_BY_LABEL,
element_maker=NO_NS_TEI_E,
default_tei_filename_suffix=(
ReferenceSegmenterTeiTrainingDataGenerator.DEFAULT_TEI_FILENAME_SUFFIX
),
default_data_filename_suffix=(
ReferenceSegmenterTeiTrainingDataGenerator.DEFAULT_DATA_FILENAME_SUFFIX
),
default_tei_sub_directory='reference-segmenter/corpus/tei',
default_data_sub_directory='reference-segmenter/corpus/raw'
)
class ReferenceSegmenterTrainingTeiParser(AbstractTrainingTeiParser):
def __init__(self) -> None:
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=(
TRAINING_XML_ELEMENT_PATH_BY_LABEL
),
use_tei_namespace=False
)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/reference_segmenter/training_data.py
|
training_data.py
|
| 0.575469 | 0.080647 |
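Both the reference-segmenter training data generator and parser above work from label-to-path mappings rooted at ['text', 'listBibl']. As a rough illustration of how such a path list can be turned into nested TEI-style elements, here is a hedged sketch using lxml.builder; the build_nested helper is hypothetical and ignores XPath-style predicates such as [@type=...].

# Hypothetical helper: build nested elements from a simple path list such as
# ['text', 'listBibl', 'bibl', 'label']. Predicates like figure[@type="table"]
# are not handled here; this only illustrates the nesting idea.
from lxml import etree
from lxml.builder import ElementMaker

E = ElementMaker()

def build_nested(path, text=''):
    # create the innermost element first, then wrap it in its ancestors
    element = E(path[-1], text)
    for name in reversed(path[:-1]):
        element = E(name, element)
    return element

tree = build_nested(['text', 'listBibl', 'bibl', 'label'], '[1]')
print(etree.tostring(tree, pretty_print=True).decode())
# prints the nested <text><listBibl><bibl><label>[1]</label>... structure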
import logging
from sciencebeam_parser.models.training_data import (
AbstractTeiTrainingDataGenerator,
AbstractTrainingTeiParser
)
LOGGER = logging.getLogger(__name__)
# based on:
# https://github.com/kermitt2/grobid/blob/0.7.0/grobid-core/src/main/java/org/grobid/core/engines/TableParser.java
ROOT_TRAINING_XML_ELEMENT_PATH = ['text', 'figure[@type="table"]']
# Note:
# The table training data generation is different to figures in
# how the following labels are mapped: `content`, `other`, `note`
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
'<figure_head>': ROOT_TRAINING_XML_ELEMENT_PATH + ['head'],
'<label>': ROOT_TRAINING_XML_ELEMENT_PATH + ['head', 'label'],
'<figDesc>': ROOT_TRAINING_XML_ELEMENT_PATH + ['figDesc'],
'<content>': ROOT_TRAINING_XML_ELEMENT_PATH + ['table'],
'<other>': ROOT_TRAINING_XML_ELEMENT_PATH + ['other'],
'<note>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note']
}
class TableTeiTrainingDataGenerator(AbstractTeiTrainingDataGenerator):
DEFAULT_TEI_FILENAME_SUFFIX = '.table.tei.xml'
DEFAULT_DATA_FILENAME_SUFFIX = '.table'
def __init__(self):
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
use_tei_namespace=False,
root_tag='tei',
default_tei_filename_suffix=(
TableTeiTrainingDataGenerator.DEFAULT_TEI_FILENAME_SUFFIX
),
default_data_filename_suffix=(
TableTeiTrainingDataGenerator.DEFAULT_DATA_FILENAME_SUFFIX
),
default_tei_sub_directory='table/corpus/tei',
default_data_sub_directory='table/corpus/raw'
)
class TableTrainingTeiParser(AbstractTrainingTeiParser):
def __init__(self) -> None:
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=(
TRAINING_XML_ELEMENT_PATH_BY_LABEL
),
use_tei_namespace=False
)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/table/training_data.py
|
training_data.py
|
| 0.681091 | 0.135775 |
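The table mapping above relies on XPath-style predicates such as figure[@type="table"] to distinguish tables from plain figures. Here is a small hedged sketch of how such a path segment could be split into a tag name and attribute mapping; the regex and helper below are illustrative and are not the library's actual parsing code.

# Illustrative only: split an XPath-like path segment such as
# 'figure[@type="table"]' into a tag name and attribute mapping.
import re
from typing import Dict, Tuple

SEGMENT_RE = re.compile(r'^(?P<tag>[^\[]+)(?:\[@(?P<name>\w+)="(?P<value>[^"]*)"\])?$')

def parse_path_segment(segment: str) -> Tuple[str, Dict[str, str]]:
    m = SEGMENT_RE.match(segment)
    if not m:
        raise ValueError(f'unsupported segment: {segment!r}')
    attributes = {}
    if m.group('name'):
        attributes[m.group('name')] = m.group('value')
    return m.group('tag'), attributes

print(parse_path_segment('figure[@type="table"]'))  # ('figure', {'type': 'table'})
print(parse_path_segment('head'))                   # ('head', {})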
import logging
import re
from typing import Iterable, Mapping, Optional, Tuple
from sciencebeam_parser.document.semantic_document import (
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticFigureCitation,
SemanticHeading,
SemanticLabel,
SemanticNote,
SemanticParagraph,
SemanticRawEquation,
SemanticRawEquationContent,
SemanticRawFigure,
SemanticRawTable,
SemanticReferenceCitation,
SemanticSection,
SemanticSectionTypes,
SemanticTableCitation,
SemanticTitle
)
from sciencebeam_parser.document.layout_document import LayoutBlock, LayoutTokensText
from sciencebeam_parser.models.extract import SimpleModelSemanticExtractor
LOGGER = logging.getLogger(__name__)
SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<figure>': SemanticRawFigure,
'<table>': SemanticRawTable
}
PARAGRAPH_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<figure_marker>': SemanticFigureCitation,
'<table_marker>': SemanticTableCitation,
'<citation_marker>': SemanticReferenceCitation
}
HEADER_LABEL_REGEX = r'(\d+\.?(?:\d+\.?)*)\s*(\D.*)'
def get_section_label_and_title_from_layout_block(
layout_block: LayoutBlock
) -> Tuple[Optional[LayoutBlock], LayoutBlock]:
if not layout_block:
return None, layout_block
layout_tokens_text = LayoutTokensText(layout_block)
text = str(layout_tokens_text)
m = re.match(HEADER_LABEL_REGEX, text, re.IGNORECASE)
if not m:
return None, layout_block
label_end = m.end(1)
title_start = m.start(2)
LOGGER.debug('label_end: %d, title_start: %d (text: %r)', label_end, title_start, text)
section_label_layout_block = LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(0, label_end)
))
section_title_layout_block = LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(title_start, len(text))
))
return section_label_layout_block, section_title_layout_block
class FullTextSemanticExtractor(SimpleModelSemanticExtractor):
def __init__(self):
super().__init__(semantic_content_class_by_tag=SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG)
def add_paragraph_content(
self,
paragraph: SemanticParagraph,
name: str,
layout_block: LayoutBlock
):
semantic_content_class = PARAGRAPH_SEMANTIC_CONTENT_CLASS_BY_TAG.get(name)
if semantic_content_class:
paragraph.add_content(semantic_content_class(layout_block=layout_block))
return
paragraph.add_block_content(layout_block)
def get_semantic_heading(self, layout_block: LayoutBlock):
section_label_layout_block, section_title_layout_block = (
get_section_label_and_title_from_layout_block(layout_block)
)
if section_label_layout_block:
return SemanticHeading([
SemanticLabel(layout_block=section_label_layout_block),
SemanticTitle(layout_block=section_title_layout_block)
])
return SemanticHeading([
SemanticTitle(layout_block=section_title_layout_block)
])
def get_raw_equation_child_semantic_content(
self,
name: str,
layout_block: LayoutBlock
):
if name == '<equation_label>':
return SemanticLabel(layout_block=layout_block)
if name == '<equation>':
return SemanticRawEquationContent(layout_block=layout_block)
return self.get_semantic_content_for_entity_name(
name, layout_block=layout_block
)
def iter_semantic_content_for_entity_blocks( # noqa pylint: disable=arguments-differ, too-many-branches
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
section_type: str = SemanticSectionTypes.OTHER,
**kwargs
) -> Iterable[SemanticContentWrapper]:
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
section: Optional[SemanticSection] = None
paragraph: Optional[SemanticParagraph] = None
raw_equation: Optional[SemanticRawEquation] = None
_previous_tag: Optional[str] = None
for name, layout_block in entity_tokens:
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('entity_block: %r, %r', name, layout_block.text)
previous_tag = _previous_tag
_previous_tag = name
if name in {'O'}:
LOGGER.debug('ignoring content (%r): %r', name, layout_block)
note_type = 'fulltext:other' if name == 'O' else name
if section:
section.add_note(layout_block, note_type=note_type)
else:
yield SemanticNote(
layout_block=layout_block,
note_type=note_type
)
continue
if name == '<section>':
paragraph = None
raw_equation = None
if section:
yield section
section = SemanticSection(section_type=section_type)
section.add_content(self.get_semantic_heading(layout_block))
continue
if not section:
section = SemanticSection(section_type=section_type)
if name in SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG:
section.add_content(self.get_semantic_content_for_entity_name(
name, layout_block=layout_block
))
continue
# treat everything else as paragraph content
if (
not paragraph
or (
name == '<paragraph>'
and previous_tag == '<paragraph>'
)
):
paragraph = section.add_new_paragraph()
if name in {'<equation>', '<equation_label>'}:
semantic_content = self.get_raw_equation_child_semantic_content(
name, layout_block=layout_block
)
if (
isinstance(semantic_content, SemanticRawEquationContent)
and raw_equation
and raw_equation.has_type(SemanticRawEquationContent)
):
LOGGER.debug('already has equation content, start new one')
raw_equation = None
if not raw_equation:
raw_equation = SemanticRawEquation()
paragraph.add_content(raw_equation)
raw_equation.add_content(semantic_content)
continue
raw_equation = None
self.add_paragraph_content(
paragraph, name, layout_block
)
if section:
yield section
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/fulltext/extract.py
|
extract.py
|
| 0.727298 | 0.152095 |
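HEADER_LABEL_REGEX above is what splits a numbered section heading into its label and title parts. A quick standalone demonstration of the same regex on plain strings follows; the regex literal is copied from the module and the surrounding loop is only a demo.

# Demo of HEADER_LABEL_REGEX from the fulltext extractor, applied to plain
# strings instead of layout blocks.
import re

HEADER_LABEL_REGEX = r'(\d+\.?(?:\d+\.?)*)\s*(\D.*)'

for heading in ['2.1 Materials and Methods', 'Introduction']:
    m = re.match(HEADER_LABEL_REGEX, heading, re.IGNORECASE)
    if m:
        print(repr(m.group(1)), repr(m.group(2)))
    else:
        print('no label:', repr(heading))
# '2.1' 'Materials and Methods'
# no label: 'Introduction'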
import logging
from sciencebeam_parser.models.training_data import (
NO_NS_TEI_E,
AbstractTeiTrainingDataGenerator,
AbstractTrainingTeiParser
)
LOGGER = logging.getLogger(__name__)
# based on:
# https://github.com/kermitt2/grobid/blob/0.7.0/grobid-core/src/main/java/org/grobid/core/engines/FullTextParser.java
ROOT_TRAINING_XML_ELEMENT_PATH = ['text']
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
'<other>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note[@type="other"]'],
'<section>': ROOT_TRAINING_XML_ELEMENT_PATH + ['head'],
'<paragraph>': ROOT_TRAINING_XML_ELEMENT_PATH + ['p'],
'<citation_marker>': ROOT_TRAINING_XML_ELEMENT_PATH + ['p', 'ref[@type="biblio"]'],
'<figure_marker>': ROOT_TRAINING_XML_ELEMENT_PATH + ['p', 'ref[@type="figure"]'],
'<table_marker>': ROOT_TRAINING_XML_ELEMENT_PATH + ['p', 'ref[@type="table"]'],
'<equation_marker>': ROOT_TRAINING_XML_ELEMENT_PATH + ['p', 'ref[@type="formula"]'],
'<section_marker>': ROOT_TRAINING_XML_ELEMENT_PATH + ['p', 'ref[@type="section"]'],
'<figure>': ROOT_TRAINING_XML_ELEMENT_PATH + ['figure'],
'<table>': ROOT_TRAINING_XML_ELEMENT_PATH + ['figure[@type="table"]'],
'<equation>': ROOT_TRAINING_XML_ELEMENT_PATH + ['formula'],
'<equation_label>': ROOT_TRAINING_XML_ELEMENT_PATH + ['formula', 'label'],
'<item>': ROOT_TRAINING_XML_ELEMENT_PATH + ['item']
}
class FullTextTeiTrainingDataGenerator(AbstractTeiTrainingDataGenerator):
DEFAULT_TEI_FILENAME_SUFFIX = '.fulltext.tei.xml'
DEFAULT_DATA_FILENAME_SUFFIX = '.fulltext'
def __init__(self):
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
element_maker=NO_NS_TEI_E,
default_tei_filename_suffix=(
FullTextTeiTrainingDataGenerator.DEFAULT_TEI_FILENAME_SUFFIX
),
default_data_filename_suffix=(
FullTextTeiTrainingDataGenerator.DEFAULT_DATA_FILENAME_SUFFIX
),
default_tei_sub_directory='fulltext/corpus/tei',
default_data_sub_directory='fulltext/corpus/raw'
)
class FullTextTrainingTeiParser(AbstractTrainingTeiParser):
def __init__(self) -> None:
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=(
TRAINING_XML_ELEMENT_PATH_BY_LABEL
),
use_tei_namespace=False
)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/fulltext/training_data.py
|
training_data.py
|
| 0.63624 | 0.093927 |
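Every label in the full-text mapping above resolves to an element path relative to the text root element. When inspecting or debugging such mappings it can help to join the path lists into XPath-like strings; the sketch below does only that, and the '/'-joined representation is purely for display, not how the library stores paths.

# Join a few of the label-to-path entries above into XPath-like strings.
ROOT_TRAINING_XML_ELEMENT_PATH = ['text']
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
    '<paragraph>': ROOT_TRAINING_XML_ELEMENT_PATH + ['p'],
    '<citation_marker>': ROOT_TRAINING_XML_ELEMENT_PATH + ['p', 'ref[@type="biblio"]'],
    '<figure>': ROOT_TRAINING_XML_ELEMENT_PATH + ['figure'],
}

for label, path in TRAINING_XML_ELEMENT_PATH_BY_LABEL.items():
    print(f'{label:20s} -> /{"/".join(path)}')
# prints e.g.: '<paragraph>          -> /text/p'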
import logging
from typing import Iterable, Tuple
from sciencebeam_parser.document.layout_document import (
LayoutBlock
)
from sciencebeam_parser.document.semantic_document import (
SemanticSection,
SemanticSectionTypes
)
from sciencebeam_parser.models.fulltext.training_data import (
FullTextTeiTrainingDataGenerator,
FullTextTrainingTeiParser
)
from sciencebeam_parser.models.model import Model
from sciencebeam_parser.models.data import (
DocumentFeaturesContext
)
from sciencebeam_parser.models.fulltext.data import FullTextDataGenerator
from sciencebeam_parser.models.fulltext.extract import FullTextSemanticExtractor
LOGGER = logging.getLogger(__name__)
class FullTextModel(Model):
def get_data_generator(
self,
document_features_context: DocumentFeaturesContext
) -> FullTextDataGenerator:
return FullTextDataGenerator(
document_features_context=document_features_context
)
def get_semantic_extractor(self) -> FullTextSemanticExtractor:
return FullTextSemanticExtractor()
def get_tei_training_data_generator(self) -> FullTextTeiTrainingDataGenerator:
return FullTextTeiTrainingDataGenerator()
def get_training_tei_parser(self) -> FullTextTrainingTeiParser:
return FullTextTrainingTeiParser()
def update_section_with_entity_blocks(
self,
parent_section: SemanticSection,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
section_type: str = SemanticSectionTypes.OTHER
):
semantic_extractor = self.get_semantic_extractor()
for semantic_content in semantic_extractor.iter_semantic_content_for_entity_blocks(
entity_tokens=entity_tokens,
section_type=section_type
):
parent_section.add_content(semantic_content)
def get_section_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]]
) -> SemanticSection:
parent_section = SemanticSection()
self.update_section_with_entity_blocks(parent_section, entity_tokens)
return parent_section
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/fulltext/model.py
|
model.py
|
| 0.671686 | 0.161056 |
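FullTextModel above is mostly wiring: small factory methods hand out the matching data generator, semantic extractor, and training-data components, and update_section_with_entity_blocks simply feeds extractor output into a parent section. The following is a schematic sketch of that delegation pattern with stand-in classes; none of these classes are the real sciencebeam_parser types.

# Schematic sketch of the factory/delegation pattern used by FullTextModel.
# All class names here are stand-ins, not the real sciencebeam_parser types.
class DummyExtractor:
    def iter_items(self, entity_tokens):
        for name, text in entity_tokens:
            yield {'tag': name, 'text': text}

class DummySection:
    def __init__(self):
        self.content = []

    def add_content(self, item):
        self.content.append(item)

class DummyModel:
    def get_semantic_extractor(self) -> DummyExtractor:
        # factory method: the pipeline asks the model for its extractor
        return DummyExtractor()

    def get_section_for_entity_blocks(self, entity_tokens) -> DummySection:
        section = DummySection()
        for item in self.get_semantic_extractor().iter_items(entity_tokens):
            section.add_content(item)
        return section

section = DummyModel().get_section_for_entity_blocks([('<paragraph>', 'Some text.')])
print(section.content)  # [{'tag': '<paragraph>', 'text': 'Some text.'}]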
from typing import Iterable
from sciencebeam_parser.models.data import (
ContextAwareLayoutTokenFeatures,
ContextAwareLayoutTokenModelDataGenerator,
LayoutModelData
)
class HeaderDataGenerator(ContextAwareLayoutTokenModelDataGenerator):
def iter_model_data_for_context_layout_token_features(
self,
token_features: ContextAwareLayoutTokenFeatures
) -> Iterable[LayoutModelData]:
yield token_features.get_layout_model_data([
token_features.token_text,
token_features.get_lower_token_text(),
token_features.get_prefix(1),
token_features.get_prefix(2),
token_features.get_prefix(3),
token_features.get_prefix(4),
token_features.get_suffix(1),
token_features.get_suffix(2),
token_features.get_suffix(3),
token_features.get_suffix(4),
token_features.get_block_status_with_blockend_for_single_token(),
token_features.get_line_status_with_lineend_for_single_token(),
token_features.get_alignment_status(),
token_features.get_token_font_status(),
token_features.get_token_font_size_feature(),
token_features.get_str_is_bold(),
token_features.get_str_is_italic(),
token_features.get_capitalisation_status_using_allcap(),
token_features.get_digit_status_using_containsdigits(),
token_features.get_str_is_single_char(),
token_features.get_dummy_str_is_proper_name(),
token_features.get_dummy_str_is_common_name(),
token_features.get_dummy_str_is_year(),
token_features.get_dummy_str_is_month(),
token_features.get_dummy_str_is_location_name(),
token_features.get_dummy_str_is_email(),
token_features.get_dummy_str_is_http(),
token_features.get_punctuation_type_feature(),
token_features.get_str_is_largest_font_size(),
# bug in GROBID #795
token_features.get_dummy_str_is_smallest_font_size(),
# due to bug, usually larger than mean
token_features.get_dummy_str_is_larger_than_average_font_size('1'),
token_features.get_dummy_label()
])
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/header/data.py
|
data.py
|
| 0.760028 | 0.161155 |
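The header feature set above adds font-related signals, including whether a token has the largest font size on the page (with a couple of neighbouring features intentionally dummied out to mirror GROBID bug #795). Below is a small standalone sketch of how a largest-font-size flag could be computed from (token, font size) pairs; it is illustrative only and not the library's implementation.

# Illustrative computation of a "largest font size" flag per token.
# Input is a list of (token, font_size) pairs; not the library's data model.
from typing import Iterable, List, Tuple

def largest_font_size_flags(tokens: Iterable[Tuple[str, float]]) -> List[Tuple[str, str]]:
    tokens = list(tokens)
    if not tokens:
        return []
    max_font_size = max(font_size for _, font_size in tokens)
    return [
        (token, '1' if font_size == max_font_size else '0')
        for token, font_size in tokens
    ]

print(largest_font_size_flags([
    ('A', 14.0), ('Title', 14.0), ('Some', 9.0), ('Author', 9.0)
]))
# [('A', '1'), ('Title', '1'), ('Some', '0'), ('Author', '0')]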
import logging
import re
from typing import Iterable, Mapping, Optional, Tuple
from sciencebeam_parser.document.semantic_document import (
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticRawAddress,
SemanticRawAffiliation,
SemanticRawAffiliationAddress,
SemanticTitle,
SemanticAbstract,
SemanticRawAuthors
)
from sciencebeam_parser.document.layout_document import LayoutBlock, LayoutTokensText
from sciencebeam_parser.models.extract import SimpleModelSemanticExtractor
LOGGER = logging.getLogger(__name__)
# based on:
# grobid-core/src/main/java/org/grobid/core/data/BiblioItem.java
ABSTRACT_REGEX = r'^(?:(?:abstract|summary|résumé|abrégé|a b s t r a c t)(?:[.:])?)?\s*(.*)'
SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<author>': SemanticRawAuthors,
'<affiliation>': SemanticRawAffiliation,
'<address>': SemanticRawAddress
}
def get_cleaned_abstract_text(text: str) -> str:
if not text:
return text
m = re.match(ABSTRACT_REGEX, text, re.IGNORECASE)
if not m:
LOGGER.debug('text does not match regex: %r', text)
return text
return m.group(1)
def get_cleaned_abstract_layout_block(layout_block: LayoutBlock) -> LayoutBlock:
if not layout_block or not layout_block.lines:
return layout_block
layout_tokens_text = LayoutTokensText(layout_block)
text = str(layout_tokens_text)
m = re.match(ABSTRACT_REGEX, text, re.IGNORECASE)
if not m:
LOGGER.debug('text does not match regex: %r', text)
return layout_block
start = m.start(1)
LOGGER.debug('start: %d (text: %r)', start, text)
return LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(start, len(text))
))
class HeaderSemanticExtractor(SimpleModelSemanticExtractor):
def __init__(self):
super().__init__(semantic_content_class_by_tag=SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG)
def iter_semantic_content_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
**kwargs
) -> Iterable[SemanticContentWrapper]:
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
has_title: bool = False
has_abstract: bool = False
aff_address: Optional[SemanticRawAffiliationAddress] = None
next_previous_label: str = ''
for name, layout_block in entity_tokens:
previous_label = next_previous_label
next_previous_label = name
if name == '<title>' and not has_title:
yield SemanticTitle(layout_block=layout_block)
has_title = True
continue
if name == '<abstract>' and not has_abstract:
abstract_layout_block = get_cleaned_abstract_layout_block(
layout_block
)
yield SemanticAbstract(layout_block=abstract_layout_block)
has_abstract = True
continue
if name in {'<affiliation>', '<address>'}:
if (
aff_address is not None
and name == '<affiliation>'
and previous_label in {'<affiliation>', '<address>'}
):
yield aff_address
aff_address = None
if aff_address is None:
aff_address = SemanticRawAffiliationAddress()
aff_address.add_content(self.get_semantic_content_for_entity_name(
name, layout_block
))
continue
if aff_address is not None:
yield aff_address
aff_address = None
yield self.get_semantic_content_for_entity_name(
name, layout_block
)
if aff_address is not None:
yield aff_address
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/header/extract.py
|
extract.py
|
| 0.7696 | 0.099164 |
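ABSTRACT_REGEX above strips a leading "Abstract"/"Summary" style marker (in several languages) before the abstract text is kept. A quick plain-string demonstration of that regex follows; the regex literal is copied from the module and the loop is only a demo.

# Demo of ABSTRACT_REGEX from the header extractor on plain strings.
import re

ABSTRACT_REGEX = r'^(?:(?:abstract|summary|résumé|abrégé|a b s t r a c t)(?:[.:])?)?\s*(.*)'

for text in ['Abstract: We propose a method.', 'We propose a method.']:
    m = re.match(ABSTRACT_REGEX, text, re.IGNORECASE)
    print(repr(m.group(1) if m else text))
# 'We propose a method.'
# 'We propose a method.'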
import logging
from lxml.builder import ElementMaker
from sciencebeam_parser.models.training_data import (
AbstractTeiTrainingDataGenerator,
AbstractTrainingTeiParser
)
LOGGER = logging.getLogger(__name__)
TEI_E = ElementMaker()
# based on:
# https://github.com/kermitt2/grobid/blob/0.7.0/grobid-core/src/main/java/org/grobid/core/engines/HeaderParser.java
ROOT_TRAINING_XML_ELEMENT_PATH = ['text', 'front']
TRAINING_XML_ELEMENT_PATH_BY_LABEL_WITHOUT_ALIAS = {
'<title>': ROOT_TRAINING_XML_ELEMENT_PATH + ['docTitle', 'titlePart'],
'<author>': ROOT_TRAINING_XML_ELEMENT_PATH + ['byline', 'docAuthor'],
'<address>': ROOT_TRAINING_XML_ELEMENT_PATH + ['address'],
'<date>': ROOT_TRAINING_XML_ELEMENT_PATH + ['date'],
'<page>': ROOT_TRAINING_XML_ELEMENT_PATH + ['page'],
'<publisher>': ROOT_TRAINING_XML_ELEMENT_PATH + ['publisher'],
'<journal>': ROOT_TRAINING_XML_ELEMENT_PATH + ['journal'],
'<affiliation>': ROOT_TRAINING_XML_ELEMENT_PATH + ['byline', 'affiliation'],
'<note>': ROOT_TRAINING_XML_ELEMENT_PATH,
'<abstract>': ROOT_TRAINING_XML_ELEMENT_PATH + ['div[@type="abstract"]'],
'<email>': ROOT_TRAINING_XML_ELEMENT_PATH + ['email'],
'<pubnum>': ROOT_TRAINING_XML_ELEMENT_PATH + ['idno'],
'<keyword>': ROOT_TRAINING_XML_ELEMENT_PATH + ['keyword'],
'<phone>': ROOT_TRAINING_XML_ELEMENT_PATH + ['phone'],
'<web>': ROOT_TRAINING_XML_ELEMENT_PATH + ['ptr[@type="web"]'],
'<meeting>': ROOT_TRAINING_XML_ELEMENT_PATH + ['meeting'],
'<submission>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note[@type="submission"]'],
'<reference>': ROOT_TRAINING_XML_ELEMENT_PATH + ['reference'],
'<copyright>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note[@type="copyright"]'],
'<funding>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note[@type="funding"]'],
'<doctype>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note[@type="doctype"]'],
'<group>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note[@type="group"]']
}
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
**TRAINING_XML_ELEMENT_PATH_BY_LABEL_WITHOUT_ALIAS,
'<location>': ROOT_TRAINING_XML_ELEMENT_PATH + ['address'],
'<institution>': ROOT_TRAINING_XML_ELEMENT_PATH + ['byline', 'affiliation']
}
class HeaderTeiTrainingDataGenerator(AbstractTeiTrainingDataGenerator):
DEFAULT_TEI_FILENAME_SUFFIX = '.header.tei.xml'
DEFAULT_DATA_FILENAME_SUFFIX = '.header'
def __init__(self):
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
element_maker=TEI_E,
default_tei_filename_suffix=(
HeaderTeiTrainingDataGenerator.DEFAULT_TEI_FILENAME_SUFFIX
),
default_data_filename_suffix=(
HeaderTeiTrainingDataGenerator.DEFAULT_DATA_FILENAME_SUFFIX
),
default_tei_sub_directory='header/corpus/tei',
default_data_sub_directory='header/corpus/raw'
)
class HeaderTrainingTeiParser(AbstractTrainingTeiParser):
def __init__(self) -> None:
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=(
TRAINING_XML_ELEMENT_PATH_BY_LABEL_WITHOUT_ALIAS
),
use_tei_namespace=False
)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/header/training_data.py
|
training_data.py
|
| 0.590071 | 0.072505 |
import logging
from typing import Iterable, Mapping, Optional, Tuple
from sciencebeam_parser.utils.misc import iter_ids
from sciencebeam_parser.document.semantic_document import (
SemanticAddressLine,
SemanticAffiliationAddress,
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticCountry,
SemanticDepartment,
SemanticInstitution,
SemanticLaboratory,
SemanticMarker,
SemanticNote,
SemanticPostBox,
SemanticPostCode,
SemanticRegion,
SemanticSettlement
)
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.models.extract import (
SimpleModelSemanticExtractor,
get_regex_cleaned_layout_block_with_prefix_suffix
)
LOGGER = logging.getLogger(__name__)
SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<institution>': SemanticInstitution,
'<department>': SemanticDepartment,
'<laboratory>': SemanticLaboratory,
'<addrLine>': SemanticAddressLine,
'<postCode>': SemanticPostCode,
'<postBox>': SemanticPostBox,
'<region>': SemanticRegion,
'<settlement>': SemanticSettlement,
'<country>': SemanticCountry
}
CLEAN_REGEX_BY_TAG: Mapping[str, str] = {
'<country>': r'(.*[^.]).*'
}
class AffiliationAddressSemanticExtractor(SimpleModelSemanticExtractor):
def __init__(self):
super().__init__(semantic_content_class_by_tag=SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG)
def iter_semantic_content_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
**kwargs
) -> Iterable[SemanticContentWrapper]:
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
ids_iterator = iter(iter_ids('aff'))
aff: Optional[SemanticAffiliationAddress] = None
for name, layout_block in entity_tokens:
if name == '<marker>':
if aff:
yield aff
aff = SemanticAffiliationAddress(content_id=next(ids_iterator, '?'))
aff.add_content(SemanticMarker(layout_block=layout_block))
continue
prefix_block, cleaned_block, suffix_block = (
get_regex_cleaned_layout_block_with_prefix_suffix(
layout_block,
CLEAN_REGEX_BY_TAG.get(name)
)
)
semantic_content = self.get_semantic_content_for_entity_name(
name, cleaned_block
)
if (
aff is not None
and isinstance(semantic_content, SemanticInstitution)
and aff.has_type(SemanticInstitution)
):
yield aff
aff = None
if not aff:
if isinstance(semantic_content, SemanticNote):
yield semantic_content
continue
aff = SemanticAffiliationAddress(content_id=next(ids_iterator, '?'))
if prefix_block:
aff.add_content(SemanticNote(layout_block=prefix_block, note_type=f'{name}-prefix'))
aff.add_content(semantic_content)
if suffix_block:
aff.add_content(SemanticNote(layout_block=suffix_block, note_type=f'{name}-suffix'))
if aff:
yield aff
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/affiliation_address/extract.py
|
extract.py
|
| 0.741861 | 0.151467 |
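The <country> value above is run through the cleaning regex r'(.*[^.]).*', which in effect drops a trailing period (and anything after it) while get_regex_cleaned_layout_block_with_prefix_suffix keeps the removed parts as prefix/suffix notes. A plain-string demonstration of the regex itself follows; the helper function is only for the demo.

# Plain-string demo of the '<country>' cleaning regex used above.
import re

COUNTRY_CLEAN_REGEX = r'(.*[^.]).*'

def cleaned_with_suffix(value: str):
    m = re.match(COUNTRY_CLEAN_REGEX, value)
    if not m:
        return value, ''
    # group 1 is the cleaned value; the rest of the string is the suffix
    return m.group(1), value[m.end(1):]

print(cleaned_with_suffix('France.'))        # ('France', '.')
print(cleaned_with_suffix('United Kingdom')) # ('United Kingdom', '')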
import logging
from sciencebeam_parser.document.tei.common import TEI_E
from sciencebeam_parser.models.training_data import (
AbstractTeiTrainingDataGenerator,
AbstractTrainingTeiParser
)
LOGGER = logging.getLogger(__name__)
# based on:
# https://github.com/kermitt2/grobid/blob/0.7.0/grobid-core/src/main/java/org/grobid/core/engines/AffiliationAddressParser.java
ROOT_TRAINING_XML_ELEMENT_PATH = [
'teiHeader', 'fileDesc', 'sourceDesc', 'biblStruct', 'analytic', 'author',
'affiliation'
]
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
'<marker>': ROOT_TRAINING_XML_ELEMENT_PATH + ['marker'],
'<institution>': ROOT_TRAINING_XML_ELEMENT_PATH + ['orgName[@type="institution"]'],
'<department>': ROOT_TRAINING_XML_ELEMENT_PATH + ['orgName[@type="department"]'],
'<laboratory>': ROOT_TRAINING_XML_ELEMENT_PATH + ['orgName[@type="laboratory"]'],
'<addrLine>': ROOT_TRAINING_XML_ELEMENT_PATH + ['address', 'addrLine'],
'<postCode>': ROOT_TRAINING_XML_ELEMENT_PATH + ['address', 'postCode'],
'<postBox>': ROOT_TRAINING_XML_ELEMENT_PATH + ['address', 'postBox'],
'<region>': ROOT_TRAINING_XML_ELEMENT_PATH + ['address', 'region'],
'<settlement>': ROOT_TRAINING_XML_ELEMENT_PATH + ['address', 'settlement'],
'<country>': ROOT_TRAINING_XML_ELEMENT_PATH + ['address', 'country']
}
class AffiliationAddressTeiTrainingDataGenerator(AbstractTeiTrainingDataGenerator):
DEFAULT_TEI_FILENAME_SUFFIX = '.affiliation.tei.xml'
def __init__(self):
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
element_maker=TEI_E,
default_tei_filename_suffix=(
AffiliationAddressTeiTrainingDataGenerator.DEFAULT_TEI_FILENAME_SUFFIX
),
default_data_filename_suffix=None,
default_tei_sub_directory='affiliation-address/corpus'
)
class AffiliationAddressTrainingTeiParser(AbstractTrainingTeiParser):
def __init__(self) -> None:
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=(
TRAINING_XML_ELEMENT_PATH_BY_LABEL
),
use_tei_namespace=True
)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/models/affiliation_address/training_data.py
|
training_data.py
|
| 0.644449 | 0.09556 |
import os
import logging
from time import monotonic
from typing import Dict, Iterable, Mapping, Optional, Sequence, Set
import PIL.Image
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.document.semantic_document import SemanticGraphic
from sciencebeam_parser.document.layout_document import (
DEFAULT_LAYOUT_PAGE_META,
LayoutDocument,
LayoutGraphic,
LayoutPage,
LayoutPageCoordinates
)
from sciencebeam_parser.cv_models.cv_model import ComputerVisionModel
from sciencebeam_parser.processors.document_page_image import DocumentPageImage
from sciencebeam_parser.processors.graphic_provider import (
DocumentGraphicProvider,
get_semantic_graphic_for_layout_graphic,
get_semantic_graphic_list_for_layout_graphic_list
)
LOGGER = logging.getLogger(__name__)
def get_cropped_image(image: PIL.Image.Image, bounding_box: BoundingBox) -> PIL.Image.Image:
return image.crop((
bounding_box.x,
bounding_box.y,
bounding_box.right,
bounding_box.bottom
))
def get_bounding_box_intersection_area_ratio(
bounding_box_1: BoundingBox,
bounding_box_2: BoundingBox,
empty_ratio: float = 0.0
) -> float:
max_area = max(bounding_box_1.area, bounding_box_2.area)
if not max_area:
return empty_ratio
intersection_area = bounding_box_1.intersection(bounding_box_2).area
return intersection_area / max_area
def get_layout_graphic_with_similar_coordinates(
page_graphics: Sequence[LayoutGraphic],
bounding_box: BoundingBox,
threshold: float = 0.80,
    ignored_graphic_types: Optional[Set[str]] = None
) -> Optional[LayoutGraphic]:
sorted_area_intersection_bounding_boxes = sorted((
(
get_bounding_box_intersection_area_ratio(
bounding_box,
graphic.coordinates.bounding_box
),
graphic
)
for graphic in page_graphics
if graphic.coordinates and (
not ignored_graphic_types or graphic.graphic_type not in ignored_graphic_types
)
), key=lambda t: -t[0])
if not sorted_area_intersection_bounding_boxes:
return None
LOGGER.debug(
'sorted_area_intersection_bounding_boxes: %r',
sorted_area_intersection_bounding_boxes
)
best_area_ratio, best_matching_graphic = sorted_area_intersection_bounding_boxes[0]
if best_area_ratio < threshold:
LOGGER.debug('best_area_ratio below threshold: %.3f < %.3f', best_area_ratio, threshold)
return None
return best_matching_graphic
class ComputerVisionDocumentGraphicProvider(DocumentGraphicProvider):
def __init__(
self,
computer_vision_model: ComputerVisionModel,
page_image_iterable: Iterable[DocumentPageImage],
temp_dir: str
):
super().__init__()
self.computer_vision_model = computer_vision_model
self.page_image_iterable = page_image_iterable
self.temp_dir = temp_dir
        # ignoring SVG for now because we also ignore it when matching graphics;
        # an SVG image may not be standalone and may need the surrounding text to be complete
self.ignored_graphic_types = {'svg'}
def iter_semantic_graphic_for_image( # pylint: disable=too-many-locals
self,
image: PIL.Image.Image,
extract_graphic_assets: bool,
page_number: int,
page: Optional[LayoutPage]
) -> Iterable[SemanticGraphic]:
LOGGER.debug('image size: %d x %d', image.width, image.height)
page_meta = page.meta if page is not None else DEFAULT_LAYOUT_PAGE_META
page_coordinates = (
page.meta.coordinates if page is not None else None
)
page_graphics = (
page.graphics if page is not None else []
)
cv_start = monotonic()
cv_result = self.computer_vision_model.predict_single(image)
cv_end = monotonic()
cv_instances = cv_result.get_instances_by_type_names(['Figure', 'Table'])
cv_type_name_and_coordinates_list = [
(instance.get_type_name(), instance.get_bounding_box())
for instance in cv_instances
]
LOGGER.info(
(
'cv result, took=%.3fs, page_number=%d, image_size=%dx%d'
', cv_type_name_and_coordinates_list=%r'
),
cv_end - cv_start,
page_number,
image.width,
image.height,
cv_type_name_and_coordinates_list
)
count_by_type_name_map: Dict[str, int] = {}
for type_name, cv_coordinates in cv_type_name_and_coordinates_list:
lower_type_name = type_name.lower()
count_by_type_name_map[type_name] = count_by_type_name_map.get(type_name, 0) + 1
item_number = count_by_type_name_map[type_name]
local_image_path: Optional[str] = None
relative_image_path: Optional[str] = None
scaled_item_coordinates = cv_coordinates
if page_coordinates:
scaled_item_coordinates = (
cv_coordinates
.scale_by(
page_coordinates.width / image.width,
page_coordinates.height / image.height
)
)
matching_layout_graphic = get_layout_graphic_with_similar_coordinates(
page_graphics=page_graphics,
bounding_box=scaled_item_coordinates,
ignored_graphic_types=self.ignored_graphic_types
)
if matching_layout_graphic is not None:
yield get_semantic_graphic_for_layout_graphic(
matching_layout_graphic,
extract_graphic_assets=extract_graphic_assets
)
continue
if extract_graphic_assets:
local_image_path = os.path.join(
self.temp_dir, f'{lower_type_name}-{page_number}-{item_number}.png'
)
relative_image_path = os.path.basename(local_image_path)
cropped_image = get_cropped_image(image, cv_coordinates)
cropped_image.save(local_image_path)
layout_graphic = LayoutGraphic(
coordinates=LayoutPageCoordinates(
x=scaled_item_coordinates.x,
y=scaled_item_coordinates.y,
width=scaled_item_coordinates.width,
height=scaled_item_coordinates.height,
page_number=page_number
),
page_meta=page_meta,
graphic_type=f'cv-{lower_type_name}',
local_file_path=local_image_path
)
semantic_graphic = SemanticGraphic(
layout_graphic=layout_graphic,
relative_path=relative_image_path
)
yield semantic_graphic
def get_page_by_page_number_map(
self,
layout_document: LayoutDocument
) -> Mapping[int, Optional[LayoutPage]]:
return {
page.meta.page_number: page
for page in layout_document.pages
}
def iter_semantic_graphic_for_layout_document(
self,
layout_document: LayoutDocument,
extract_graphic_assets: bool
) -> Iterable[SemanticGraphic]:
page_by_page_number_map = self.get_page_by_page_number_map(
layout_document
)
LOGGER.debug(
'cv model: page_by_page_number_map=%r',
page_by_page_number_map
)
has_cv_semantic_graphic: bool = False
for page_image in self.page_image_iterable:
LOGGER.debug('page_image: %r', page_image)
page_number = page_image.page_number
with PIL.Image.open(page_image.page_image_path) as image:
for semantic_graphic in self.iter_semantic_graphic_for_image(
image,
extract_graphic_assets=extract_graphic_assets,
page_number=page_number,
page=page_by_page_number_map.get(page_number)
):
has_cv_semantic_graphic = True
yield semantic_graphic
if not has_cv_semantic_graphic:
LOGGER.info('no graphics detected using cv model, falling back to regular graphics')
yield from get_semantic_graphic_list_for_layout_graphic_list(
layout_document.iter_all_graphics(),
extract_graphic_assets=extract_graphic_assets
)
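# Illustrative usage sketch (not part of the original module; `cv_model`,
# `page_images`, `temp_dir` and `layout_document` are hypothetical arguments,
# only calls visible in this module are used):
def _example_extract_semantic_graphics(
    cv_model: ComputerVisionModel,
    page_images: Iterable[DocumentPageImage],
    temp_dir: str,
    layout_document: LayoutDocument
) -> Sequence[SemanticGraphic]:
    provider = ComputerVisionDocumentGraphicProvider(
        computer_vision_model=cv_model,
        page_image_iterable=page_images,
        temp_dir=temp_dir
    )
    # materialize the lazy iterable as a list for illustration
    return list(provider.iter_semantic_graphic_for_layout_document(
        layout_document,
        extract_graphic_assets=True
    ))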
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/processors/cv_graphic_provider.py | cv_graphic_provider.py |
| 0.835618 | 0.210685 |
import logging
import re
from abc import ABC, abstractmethod
from collections import Counter, defaultdict
from typing import Dict, List, Mapping, Optional, Sequence
from sciencebeam_parser.utils.tokenizer import iter_tokenized_tokens
LOGGER = logging.getLogger(__name__)
class ContentIdMatcher(ABC):
@abstractmethod
def get_id_by_text(self, text: str) -> Optional[str]:
pass
def get_normalized_key_text(text: str) -> str:
return re.sub(
r'[^a-z0-9]',
'',
text.lower()
)
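# Tokens containing a digit are kept in full (after normalization), while purely
# alphabetic tokens are reduced to their first `prefix_length` characters,
# e.g. both "Figure 1" and "Fig. 1" normalize to the key "f1".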
def get_token_prefix_normalized_key_text(text: str, prefix_length: int = 1) -> str:
return ''.join([
get_normalized_key_text(
token if re.search(r'\d', token) else token[:prefix_length]
)
for token in re.split(r'\s', text)
])
def get_normalized_key_tokens(text: str) -> List[str]:
return [
get_normalized_key_text(token)
for token in iter_tokenized_tokens(text)
if token.strip()
]
class SimpleContentIdMatcher(ContentIdMatcher):
def __init__(self, text_by_content_id: Mapping[str, str], prefix_length: int = 1):
self.text_by_content_id = text_by_content_id
self.content_id_by_text = {
get_normalized_key_text(value): key
for key, value in text_by_content_id.items()
}
self.content_id_by_token_prefix_text = {
get_token_prefix_normalized_key_text(value, prefix_length=prefix_length): key
for key, value in text_by_content_id.items()
}
self.prefix_length = prefix_length
def get_id_by_text(self, text: str) -> Optional[str]:
content_id = self.content_id_by_text.get(get_normalized_key_text(text))
if content_id:
return content_id
content_id = self.content_id_by_token_prefix_text.get(
get_token_prefix_normalized_key_text(text, prefix_length=self.prefix_length)
)
return content_id
class PartialContentIdMatcher(ContentIdMatcher):
def __init__(self, text_by_content_id: Mapping[str, str]):
self.content_ids_by_token_text: Dict[str, List[str]] = defaultdict(list)
for content_id, text in text_by_content_id.items():
for token in get_normalized_key_tokens(text):
self.content_ids_by_token_text[token].append(content_id)
def get_id_by_text(self, text: str) -> Optional[str]:
tokens = get_normalized_key_tokens(text)
LOGGER.debug('tokens: %r (text: %r)', tokens, text)
if not tokens:
return None
content_id_counts = Counter((
content_id
for token in tokens
for content_id in self.content_ids_by_token_text.get(token, [])
))
LOGGER.debug('content_id_counts: %s', content_id_counts)
if not content_id_counts:
return None
        # pick the content id with the most matching tokens; a tie between the two
        # best candidates is treated as ambiguous
        most_common = content_id_counts.most_common(2)
        if len(most_common) >= 2 and most_common[0][1] == most_common[1][1]:
            return None
        return most_common[0][0]
class ChainedContentIdMatcher(ContentIdMatcher):
def __init__(self, content_id_matchers: Sequence[ContentIdMatcher]):
self.content_id_matchers = content_id_matchers
def get_id_by_text(self, text: str) -> Optional[str]:
for content_id_matcher in self.content_id_matchers:
content_id = content_id_matcher.get_id_by_text(text)
if content_id:
return content_id
return None
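# Illustrative usage sketch (hypothetical content ids and label texts):
def _example_reference_matching() -> None:
    matcher = SimpleContentIdMatcher({'b0': 'Figure 1', 'b1': 'Table 2'})
    assert matcher.get_id_by_text('Figure 1') == 'b0'  # exact normalized match
    assert matcher.get_id_by_text('Fig. 1') == 'b0'    # via token prefix key 'f1'
    assert matcher.get_id_by_text('Tab. 2') == 'b1'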
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/processors/ref_matching.py | ref_matching.py |
| 0.826712 | 0.097219 |
import functools
import logging
import os
from abc import ABC, abstractmethod
from typing import Counter, Iterable, List, Optional, Sequence
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.document.layout_document import (
LayoutBlock,
LayoutDocument,
LayoutGraphic,
LayoutPage,
LayoutPageCoordinates,
LayoutToken
)
from sciencebeam_parser.document.semantic_document import SemanticContentWrapper, SemanticGraphic
LOGGER = logging.getLogger(__name__)
class DocumentGraphicProvider(ABC):
@abstractmethod
def iter_semantic_graphic_for_layout_document(
self,
layout_document: LayoutDocument,
extract_graphic_assets: bool
) -> Iterable[SemanticGraphic]:
pass
def get_semantic_graphic_for_layout_graphic(
layout_graphic: LayoutGraphic,
extract_graphic_assets: bool
) -> SemanticGraphic:
relative_path: Optional[str] = None
if layout_graphic.local_file_path and extract_graphic_assets:
relative_path = os.path.basename(layout_graphic.local_file_path)
return SemanticGraphic(
layout_graphic=layout_graphic,
relative_path=relative_path
)
def get_semantic_graphic_list_for_layout_graphic_list(
layout_graphic_iterable: Iterable[LayoutGraphic],
extract_graphic_assets: bool
) -> List[SemanticGraphic]:
return [
get_semantic_graphic_for_layout_graphic(
layout_graphic,
extract_graphic_assets=extract_graphic_assets
)
for layout_graphic in layout_graphic_iterable
if layout_graphic.coordinates
]
def get_page_numbers_for_semantic_content_list(
semantic_content_list: Sequence[SemanticContentWrapper]
) -> Sequence[int]:
return sorted({
coordinates.page_number
for semantic_content in semantic_content_list
for coordinates in semantic_content.merged_block.get_merged_coordinates_list()
})
def get_all_page_numbers_of_layout_document(
layout_document: LayoutDocument
) -> Sequence[int]:
return sorted({
page.meta.page_number
for page in layout_document.pages
})
def get_graphic_matching_candidate_page_numbers_for_semantic_content_list(
semantic_content_list: Sequence[SemanticContentWrapper],
layout_document: Optional[LayoutDocument] = None
) -> Sequence[int]:
page_numbers = get_page_numbers_for_semantic_content_list(semantic_content_list)
if layout_document:
document_page_numbers = set(get_all_page_numbers_of_layout_document(layout_document))
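        # also include the page directly after each candidate page (when it exists
        # in the document), so that graphics rendered on the following page can
        # still be considered for matching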
page_numbers = sorted(
set(page_numbers).union({
page_number + 1
for page_number in page_numbers
if page_number + 1 in document_page_numbers
})
)
return page_numbers
def get_page_numbers_with_uncommon_page_dimension(
layout_document: LayoutDocument
) -> Sequence[int]:
page_dimension_counter = Counter((
page.meta.coordinates.bounding_box
for page in layout_document.pages
if page.meta and page.meta.coordinates
))
LOGGER.debug('page_dimension_counter: %r', page_dimension_counter)
if len(page_dimension_counter) < 2:
return []
most_common_page_dimension = page_dimension_counter.most_common(1)[0][0]
LOGGER.debug('most_common_page_dimension: %r', most_common_page_dimension)
return sorted({
page.meta.page_number
for page in layout_document.pages
if (
page.meta
and page.meta.coordinates
and page.meta.coordinates.bounding_box != most_common_page_dimension
)
})
def is_page_with_mostly_bitmap_graphics(
layout_page: LayoutPage
) -> bool:
if not layout_page.meta or not layout_page.meta.coordinates:
LOGGER.debug('page has no coordinates')
return False
page_area = layout_page.meta.coordinates.bounding_box.area
if not page_area:
LOGGER.debug('page has no area')
return False
bitmap_graphics_with_area_ratio = [
(graphic, graphic.coordinates.bounding_box.area / page_area)
for graphic in layout_page.graphics
if (
graphic.graphic_type != 'svg'
and graphic.coordinates
)
]
LOGGER.debug('bitmap_graphics_with_area_ratio: %r', bitmap_graphics_with_area_ratio)
if not bitmap_graphics_with_area_ratio:
LOGGER.debug('no bitmap images')
return False
accepted_bitmap_graphics = [
bitmap_graphics
for bitmap_graphics, area_ratio in bitmap_graphics_with_area_ratio
if area_ratio > 0.5
]
if not accepted_bitmap_graphics:
        LOGGER.debug('no sufficiently large bitmap images')
return False
return True
def get_page_numbers_with_mostly_bitmap_graphics(
layout_document: LayoutDocument
) -> Sequence[int]:
return [
page.meta.page_number
for page in layout_document.pages
if (
page.meta
and is_page_with_mostly_bitmap_graphics(page)
)
]
def are_page_coordinates_within_bounding_box(
page_coordinates: Optional[LayoutPageCoordinates],
bounding_box: BoundingBox,
min_area_ratio: float = 0.5
) -> bool:
if not page_coordinates:
return False
item_bounding_box = page_coordinates.bounding_box
item_bounding_box_area = item_bounding_box.area
if not item_bounding_box_area:
return False
intersection_bounding_box = item_bounding_box.intersection(bounding_box)
if not intersection_bounding_box:
return False
if intersection_bounding_box.area / item_bounding_box_area < min_area_ratio:
return False
return True
def is_layout_token_within_bounding_box(layout_token: LayoutToken, **kwargs) -> bool:
return are_page_coordinates_within_bounding_box(layout_token.coordinates, **kwargs)
def is_layout_graphic_within_bounding_box(layout_graphic: LayoutGraphic, **kwargs) -> bool:
return are_page_coordinates_within_bounding_box(layout_graphic.coordinates, **kwargs)
def _remove_tokens_within_bounding_box_flatmap_fn(
layout_token: LayoutToken,
**kwargs
) -> List[LayoutToken]:
if not is_layout_token_within_bounding_box(layout_token, **kwargs):
return [layout_token]
return []
def get_layout_page_with_text_or_graphic_replaced_by_graphic(
layout_page: LayoutPage,
semantic_graphic: SemanticGraphic,
is_only_semantic_graphic_on_page: bool,
is_replace_overlapping_text: bool
) -> LayoutPage:
layout_graphic = semantic_graphic.layout_graphic
assert layout_graphic
assert layout_graphic.coordinates
graphic_bounding_box = layout_graphic.coordinates.bounding_box
if is_only_semantic_graphic_on_page:
layout_graphic = layout_graphic._replace(
related_block=LayoutBlock.for_tokens(list(layout_page.iter_all_tokens()))
)
modified_layout_page = (
layout_page.replace(
graphics=[
_layout_graphic
for _layout_graphic in layout_page.graphics
if not is_layout_graphic_within_bounding_box(
_layout_graphic,
bounding_box=graphic_bounding_box
)
] + [layout_graphic]
)
)
if is_replace_overlapping_text:
modified_layout_page = (
modified_layout_page
.flat_map_layout_tokens(functools.partial(
_remove_tokens_within_bounding_box_flatmap_fn,
bounding_box=graphic_bounding_box
)).remove_empty_blocks()
)
return modified_layout_page
def get_layout_document_with_text_or_graphic_replaced_by_graphics(
layout_document: LayoutDocument,
semantic_graphics: Iterable[SemanticGraphic],
is_replace_overlapping_text: bool
) -> LayoutDocument:
page_by_page_number = {
page.meta.page_number: page
for page in layout_document.pages
if page.meta
}
LOGGER.debug('page_by_page_number.keys: %r', page_by_page_number.keys())
has_changes = False
semantic_graphics_list = list(semantic_graphics)
semantic_graphic_count_by_page = Counter((
semantic_graphic.layout_graphic.coordinates.page_number
for semantic_graphic in semantic_graphics_list
if (
semantic_graphic.layout_graphic
and semantic_graphic.layout_graphic.coordinates
)
))
for semantic_graphic in semantic_graphics_list:
layout_graphic = semantic_graphic.layout_graphic
assert layout_graphic
if not layout_graphic.coordinates:
continue
page_number = layout_graphic.coordinates.page_number
page_by_page_number[page_number] = (
get_layout_page_with_text_or_graphic_replaced_by_graphic(
page_by_page_number[page_number],
semantic_graphic,
is_only_semantic_graphic_on_page=(
semantic_graphic_count_by_page[page_number] < 2
),
is_replace_overlapping_text=is_replace_overlapping_text
)
)
has_changes = True
if not has_changes:
return layout_document
pages = [
(
page_by_page_number[page.meta.page_number]
if page.meta
else page
)
for page in layout_document.pages
]
return layout_document.replace(pages=pages)
def get_layout_document_with_text_and_graphics_replaced_by_graphics(
layout_document: LayoutDocument,
semantic_graphics: Iterable[SemanticGraphic]
) -> LayoutDocument:
return get_layout_document_with_text_or_graphic_replaced_by_graphics(
layout_document,
semantic_graphics=semantic_graphics,
is_replace_overlapping_text=True
)
def get_layout_document_with_graphics_replaced_by_graphics(
layout_document: LayoutDocument,
semantic_graphics: Iterable[SemanticGraphic]
) -> LayoutDocument:
return get_layout_document_with_text_or_graphic_replaced_by_graphics(
layout_document,
semantic_graphics=semantic_graphics,
is_replace_overlapping_text=False
)
class SimpleDocumentGraphicProvider(DocumentGraphicProvider):
def iter_semantic_graphic_for_layout_document(
self,
layout_document: LayoutDocument,
extract_graphic_assets: bool
) -> Iterable[SemanticGraphic]:
return get_semantic_graphic_list_for_layout_graphic_list(
layout_document.iter_all_graphics(),
extract_graphic_assets=extract_graphic_assets
)
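# Illustrative usage sketch (hypothetical `layout_document` argument):
def _example_simple_graphic_extraction(
    layout_document: LayoutDocument
) -> List[SemanticGraphic]:
    return list(
        SimpleDocumentGraphicProvider().iter_semantic_graphic_for_layout_document(
            layout_document,
            extract_graphic_assets=False
        )
    )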
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/processors/graphic_provider.py | graphic_provider.py |
| 0.816077 | 0.184859 |
from typing import NamedTuple, Set
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.processors.document_page_image import (
DEFAULT_PDF_RENDER_DPI
)
from sciencebeam_parser.processors.graphic_matching import DEFAULT_MAX_GRAPHIC_DISTANCE
class RequestFieldNames:
"""
"Abstract" field names that should be independent from the model architecture.
"""
TITLE = 'title'
ABSTRACT = 'abstract'
AUTHORS = 'authors'
AFFILIATIONS = 'affiliations'
REFERENCES = 'references'
FRONT_FIELDS = {
RequestFieldNames.TITLE,
RequestFieldNames.ABSTRACT,
RequestFieldNames.AUTHORS,
RequestFieldNames.AFFILIATIONS
}
class FullTextProcessorConfig(NamedTuple):
extract_front: bool = True
extract_authors: bool = True
extract_affiliations: bool = True
extract_body_sections: bool = True
extract_acknowledgements: bool = True
extract_back_sections: bool = True
extract_references: bool = True
extract_citation_fields: bool = True
extract_citation_authors: bool = True
extract_citation_editors: bool = False
extract_figure_fields: bool = True
extract_table_fields: bool = True
merge_raw_authors: bool = False
extract_graphic_bounding_boxes: bool = True
extract_graphic_assets: bool = False
use_cv_model: bool = False
cv_render_dpi: float = DEFAULT_PDF_RENDER_DPI
use_ocr_model: bool = False
replace_text_by_cv_graphic: bool = False
max_graphic_distance: float = DEFAULT_MAX_GRAPHIC_DISTANCE
@staticmethod
def from_app_config(app_config: AppConfig) -> 'FullTextProcessorConfig':
return FullTextProcessorConfig()._replace(
**app_config.get('processors', {}).get('fulltext', {})
)
def get_for_requested_field_names(
self,
request_field_names: Set[str]
) -> 'FullTextProcessorConfig':
if not request_field_names:
return self
remaining_field_names = request_field_names - FRONT_FIELDS - {RequestFieldNames.REFERENCES}
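        # any requested field outside the front matter and reference fields handled
        # here means the full (unrestricted) configuration is kept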
if remaining_field_names:
return self
extract_front = bool(FRONT_FIELDS & request_field_names)
extract_authors = RequestFieldNames.AUTHORS in request_field_names
extract_affiliations = RequestFieldNames.AFFILIATIONS in request_field_names
extract_references = RequestFieldNames.REFERENCES in request_field_names
return self._replace( # pylint: disable=no-member
extract_front=extract_front,
extract_authors=extract_authors,
extract_affiliations=extract_affiliations,
extract_body_sections=False,
extract_acknowledgements=False,
extract_back_sections=False,
extract_references=extract_references,
extract_graphic_bounding_boxes=False
)
def get_for_header_document(self) -> 'FullTextProcessorConfig':
return self.get_for_requested_field_names(FRONT_FIELDS)
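# Illustrative sketch (hypothetical requested field names): asking only for front
# matter fields disables body, acknowledgement, back-section and reference extraction.
def _example_front_matter_only_config() -> 'FullTextProcessorConfig':
    config = FullTextProcessorConfig().get_for_requested_field_names(
        {RequestFieldNames.TITLE, RequestFieldNames.ABSTRACT}
    )
    assert config.extract_front
    assert not config.extract_body_sections
    assert not config.extract_references
    return config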
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/processors/fulltext/config.py | config.py |
| 0.818954 | 0.185929 |
import logging
import multiprocessing
from typing import (
Iterable,
Iterator,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Tuple,
Type,
Union
)
from sciencebeam_parser.models.data import AppFeaturesContext, DEFAULT_APP_FEATURES_CONTEXT
from sciencebeam_parser.models.model import LayoutDocumentLabelResult, Model
from sciencebeam_parser.cv_models.cv_model import ComputerVisionModel
from sciencebeam_parser.processors.fulltext.models import FullTextModels
from sciencebeam_parser.utils.misc import iter_ids
from sciencebeam_parser.document.semantic_document import (
SemanticAffiliationAddress,
SemanticAuthor,
SemanticCitation,
SemanticContentWrapper,
SemanticDocument,
SemanticEditor,
SemanticFigure,
SemanticFigureCitation,
SemanticGraphic,
SemanticInvalidReference,
SemanticLabel,
SemanticMixedContentWrapper,
SemanticMixedNote,
SemanticRawAffiliationAddress,
SemanticRawAuthors,
SemanticRawEditors,
SemanticRawFigure,
SemanticRawReference,
SemanticRawReferenceText,
SemanticRawTable,
SemanticReference,
SemanticReferenceCitation,
SemanticReferenceList,
SemanticSection,
SemanticSectionTypes,
SemanticTable,
SemanticTableCitation,
T_SemanticContentWrapper,
T_SemanticName,
T_SemanticRawNameList
)
from sciencebeam_parser.document.tei_document import TeiDocument, get_tei_for_semantic_document
from sciencebeam_parser.document.layout_document import LayoutDocument
from sciencebeam_parser.models.segmentation.model import SegmentationModel
from sciencebeam_parser.models.header.model import HeaderModel
from sciencebeam_parser.models.name.model import NameModel
from sciencebeam_parser.models.affiliation_address.model import AffiliationAddressModel
from sciencebeam_parser.models.fulltext.model import FullTextModel
from sciencebeam_parser.models.figure.model import FigureModel
from sciencebeam_parser.models.table.model import TableModel
from sciencebeam_parser.models.reference_segmenter.model import ReferenceSegmenterModel
from sciencebeam_parser.models.citation.model import CitationModel
from sciencebeam_parser.processors.ref_matching import (
ChainedContentIdMatcher,
ContentIdMatcher,
PartialContentIdMatcher,
SimpleContentIdMatcher
)
from sciencebeam_parser.processors.document_page_image import (
iter_pdf_document_page_images
)
from sciencebeam_parser.processors.graphic_matching import (
BoundingBoxDistanceGraphicMatcher,
ChainedGraphicMatcher,
GraphicMatcher,
GraphicRelatedBlockTextGraphicMatcher,
OpticalCharacterRecognitionGraphicMatcher
)
from sciencebeam_parser.processors.graphic_provider import (
DocumentGraphicProvider,
SimpleDocumentGraphicProvider,
get_graphic_matching_candidate_page_numbers_for_semantic_content_list,
get_layout_document_with_graphics_replaced_by_graphics,
get_layout_document_with_text_and_graphics_replaced_by_graphics,
get_page_numbers_with_mostly_bitmap_graphics,
get_page_numbers_with_uncommon_page_dimension
)
from sciencebeam_parser.processors.fulltext.config import (
FullTextProcessorConfig
)
LOGGER = logging.getLogger(__name__)
class FullTextProcessorDocumentContext(NamedTuple):
pdf_path: Optional[str] = None
temp_dir: Optional[str] = None
def get_cv_document_graphic_provider(
cv_model: ComputerVisionModel,
context: FullTextProcessorDocumentContext,
page_numbers: Optional[Sequence[int]],
cv_render_dpi: float
) -> DocumentGraphicProvider:
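    # imported inside the function so that the computer vision related module is
    # only loaded when a CV-based graphic provider is actually requested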
from sciencebeam_parser.processors.cv_graphic_provider import ( # noqa pylint: disable=import-outside-toplevel
ComputerVisionDocumentGraphicProvider
)
assert context.pdf_path
assert context.temp_dir
return ComputerVisionDocumentGraphicProvider(
cv_model,
iter_pdf_document_page_images(
pdf_path=context.pdf_path,
output_dir=context.temp_dir,
page_numbers=page_numbers,
dpi=cv_render_dpi,
thread_count=multiprocessing.cpu_count()
),
temp_dir=context.temp_dir
)
class FullTextProcessor:
def __init__(
self,
fulltext_models: FullTextModels,
config: Optional[FullTextProcessorConfig] = None,
app_features_context: AppFeaturesContext = DEFAULT_APP_FEATURES_CONTEXT
) -> None:
self.fulltext_models = fulltext_models
self.app_features_context = app_features_context
if not config:
config = FullTextProcessorConfig()
self.config = config
@property
def segmentation_model(self) -> SegmentationModel:
return self.fulltext_models.segmentation_model
@property
def header_model(self) -> HeaderModel:
return self.fulltext_models.header_model
@property
def affiliation_address_model(self) -> AffiliationAddressModel:
return self.fulltext_models.affiliation_address_model
@property
def name_header_model(self) -> NameModel:
return self.fulltext_models.name_header_model
@property
def name_citation_model(self) -> NameModel:
return self.fulltext_models.name_citation_model
@property
def fulltext_model(self) -> FullTextModel:
return self.fulltext_models.fulltext_model
@property
def figure_model(self) -> FigureModel:
return self.fulltext_models.figure_model
@property
def table_model(self) -> TableModel:
return self.fulltext_models.table_model
@property
def reference_segmenter_model(self) -> ReferenceSegmenterModel:
return self.fulltext_models.reference_segmenter_model
@property
def citation_model(self) -> CitationModel:
return self.fulltext_models.citation_model
def get_semantic_document_for_layout_document(
self,
layout_document: LayoutDocument,
context: Optional[FullTextProcessorDocumentContext] = None
) -> SemanticDocument:
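        # overall flow: optionally pre-process graphics (CV model), run the
        # segmentation model, then extract header, body, acknowledgement, annex
        # and reference sections, parse citations, figures and tables, and finally
        # match graphics against figures/tables, all subject to the config flags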
if context is None:
context = FullTextProcessorDocumentContext()
layout_document = self._preprocess_layout_graphics(
layout_document,
context=context
)
segmentation_label_result = self.segmentation_model.get_label_layout_document_result(
layout_document,
app_features_context=self.app_features_context
)
header_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<header>'
).remove_empty_blocks()
document = SemanticDocument()
if self.config.extract_front:
self._process_header_layout_document(
header_layout_document=header_layout_document,
semantic_document=document
)
if self.config.extract_body_sections:
self._update_semantic_section_using_segmentation_result_and_fulltext_model(
document.body_section,
segmentation_label_result,
'<body>',
SemanticSectionTypes.OTHER
)
if self.config.extract_acknowledgements:
self._update_semantic_section_using_segmentation_result_and_fulltext_model(
document.back_section,
segmentation_label_result,
'<acknowledgement>',
SemanticSectionTypes.ACKNOWLEDGEMENT
)
if self.config.extract_back_sections:
self._update_semantic_section_using_segmentation_result_and_fulltext_model(
document.back_section,
segmentation_label_result,
'<annex>',
SemanticSectionTypes.OTHER
)
if self.config.extract_references:
self._extract_raw_references_from_segmentation(
semantic_document=document,
segmentation_label_result=segmentation_label_result
)
if self.config.extract_citation_fields:
self._extract_reference_fields_from_raw_references(
semantic_document=document
)
if self.config.extract_citation_authors or self.config.extract_citation_editors:
self._extract_reference_name_lists_from_raw_references(
semantic_document=document
)
references = list(document.iter_by_type_recursively(SemanticReference))
ref_citations = list(document.iter_by_type_recursively(SemanticReferenceCitation))
self._assign_content_ids(references, iter(iter_ids('b')))
self._assign_target_content_ids(ref_citations, ChainedContentIdMatcher([
SimpleContentIdMatcher(
self._get_semantic_content_text_by_content_id(references, SemanticLabel)
),
PartialContentIdMatcher(
self._get_semantic_content_text_by_content_id(
references, SemanticRawReferenceText
)
)
]))
if self.config.extract_figure_fields:
self._extract_figure_fields_from_raw_figures(semantic_document=document)
figures = list(document.iter_by_type_recursively(SemanticFigure))
figure_citations = list(document.iter_by_type_recursively(SemanticFigureCitation))
self._assign_content_ids(figures, iter(iter_ids('fig_')))
self._assign_target_content_ids(figure_citations, SimpleContentIdMatcher(
self._get_semantic_content_text_by_content_id(figures, SemanticLabel)
))
if self.config.extract_table_fields:
self._extract_table_fields_from_raw_tables(semantic_document=document)
tables = list(document.iter_by_type_recursively(SemanticTable))
table_citations = list(document.iter_by_type_recursively(SemanticTableCitation))
self._assign_content_ids(tables, iter(iter_ids('tab_')))
self._assign_target_content_ids(table_citations, SimpleContentIdMatcher(
self._get_semantic_content_text_by_content_id(tables, SemanticLabel)
))
if self.config.extract_graphic_bounding_boxes:
self._process_graphics(
document=document,
layout_document=layout_document,
context=context
)
return document
def _process_header_layout_document(
self,
header_layout_document: LayoutDocument,
semantic_document: SemanticDocument
):
LOGGER.debug('header_layout_document: %s', header_layout_document)
if not header_layout_document.pages:
return
labeled_layout_tokens = self.header_model.predict_labels_for_layout_document(
header_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
entity_blocks = self.header_model.iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens
)
self.header_model.update_semantic_document_with_entity_blocks(
semantic_document, entity_blocks
)
if self.config.extract_authors:
self._process_raw_authors(semantic_document.front)
if self.config.extract_affiliations:
self._process_raw_affiliations(semantic_document)
def _preprocess_layout_graphics(
self,
layout_document: LayoutDocument,
context: FullTextProcessorDocumentContext
) -> LayoutDocument:
if not self.config.use_cv_model:
return layout_document
candidate_page_numbers = sorted(
set(get_page_numbers_with_uncommon_page_dimension(layout_document))
- set(get_page_numbers_with_mostly_bitmap_graphics(layout_document))
)
LOGGER.debug('candidate_page_numbers: %r', candidate_page_numbers)
if not candidate_page_numbers:
return layout_document
document_graphic_provider = self._get_document_graphic_provider(
context=context,
page_numbers=candidate_page_numbers
)
semantic_graphics = list(
document_graphic_provider.iter_semantic_graphic_for_layout_document(
layout_document,
extract_graphic_assets=self.config.extract_graphic_assets
)
)
if not semantic_graphics:
LOGGER.info('no semantic graphics found on pages %r', candidate_page_numbers)
return layout_document
if not self.config.replace_text_by_cv_graphic:
return get_layout_document_with_graphics_replaced_by_graphics(
layout_document,
semantic_graphics
)
return get_layout_document_with_text_and_graphics_replaced_by_graphics(
layout_document,
semantic_graphics
)
def _process_graphics(
self,
document: SemanticDocument,
layout_document: LayoutDocument,
context: FullTextProcessorDocumentContext
):
unmatched_graphics_container = SemanticMixedNote(note_type='unmatched_graphics')
candidate_semantic_content_list = list(
document.iter_by_types_recursively((SemanticFigure, SemanticTable,))
)
self._match_graphic_elements(
semantic_graphic_list=list(
self._get_document_graphic_provider(
context=context,
page_numbers=(
get_graphic_matching_candidate_page_numbers_for_semantic_content_list(
candidate_semantic_content_list,
layout_document=layout_document
)
)
).iter_semantic_graphic_for_layout_document(
layout_document,
extract_graphic_assets=self.config.extract_graphic_assets
)
),
candidate_semantic_content_list=candidate_semantic_content_list,
unmatched_graphics_container=unmatched_graphics_container
)
if not unmatched_graphics_container.is_empty():
LOGGER.debug('unmatched_graphics_container: %r', unmatched_graphics_container)
document.back_section.add_content(unmatched_graphics_container)
else:
LOGGER.debug('no unmatched graphics')
def _get_document_graphic_provider(
self,
context: FullTextProcessorDocumentContext,
page_numbers: Optional[Sequence[int]]
) -> DocumentGraphicProvider:
if self.config.use_cv_model:
assert self.fulltext_models.cv_model is not None
return get_cv_document_graphic_provider(
cv_model=self.fulltext_models.cv_model,
context=context,
page_numbers=page_numbers,
cv_render_dpi=self.config.cv_render_dpi
)
return SimpleDocumentGraphicProvider()
def _match_graphic_elements(
self,
semantic_graphic_list: Sequence[SemanticGraphic],
candidate_semantic_content_list: Sequence[SemanticContentWrapper],
unmatched_graphics_container: SemanticMixedContentWrapper
):
_graphic_matchers: List[GraphicMatcher] = [
BoundingBoxDistanceGraphicMatcher(
max_distance=self.config.max_graphic_distance
),
GraphicRelatedBlockTextGraphicMatcher()
]
if self.config.use_ocr_model:
assert self.fulltext_models.ocr_model
_graphic_matchers.append(
OpticalCharacterRecognitionGraphicMatcher(
ocr_model=self.fulltext_models.ocr_model
)
)
graphic_matcher = ChainedGraphicMatcher(_graphic_matchers)
graphic_match_result = graphic_matcher.get_graphic_matches(
semantic_graphic_list=semantic_graphic_list,
candidate_semantic_content_list=candidate_semantic_content_list
)
for graphic_match in graphic_match_result.graphic_matches:
if isinstance(graphic_match.candidate_semantic_content, SemanticMixedContentWrapper):
graphic_match.candidate_semantic_content.add_content(
graphic_match.semantic_graphic
)
LOGGER.info('unmatched_graphics: %r', graphic_match_result.unmatched_graphics)
for unmatched_graphic in graphic_match_result.unmatched_graphics:
unmatched_graphics_container.add_content(unmatched_graphic)
def _assign_content_ids(
self,
semantic_content_iterable: Iterable[SemanticMixedContentWrapper],
content_id_iterator: Iterator[str]
):
for semantic_content in semantic_content_iterable:
semantic_content.content_id = next(content_id_iterator)
def _get_semantic_content_text_by_content_id(
self,
semantic_content_iterable: Iterable[SemanticMixedContentWrapper],
type_: Type[SemanticContentWrapper]
) -> Mapping[str, str]:
d = {}
for semantic_content in semantic_content_iterable:
if not semantic_content.content_id:
continue
text = semantic_content.get_text_by_type(type_)
if not text:
continue
d[semantic_content.content_id] = text
return d
def _assign_target_content_ids(
self,
semantic_content_iterable: Iterable[SemanticCitation],
content_id_matcher: ContentIdMatcher
):
for citation in semantic_content_iterable:
content_id = content_id_matcher.get_id_by_text(citation.get_text())
if content_id:
citation.target_content_id = content_id
def _process_raw_authors(self, semantic_parent: SemanticMixedContentWrapper):
result_content: List[SemanticContentWrapper] = []
raw_authors: List[SemanticRawAuthors] = []
for semantic_content in semantic_parent:
if isinstance(semantic_content, SemanticRawAuthors):
raw_authors.append(semantic_content)
continue
result_content.append(semantic_content)
if raw_authors:
if self.config.merge_raw_authors:
raw_authors_layout_documents = [
LayoutDocument.for_blocks([
block
for raw_author in raw_authors
for block in raw_author.iter_blocks()
])
]
else:
raw_authors_layout_documents = [
LayoutDocument.for_blocks(list(raw_author.iter_blocks()))
for raw_author in raw_authors
]
labeled_layout_tokens_list = self.name_header_model.predict_labels_for_layout_documents(
raw_authors_layout_documents,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens_list (author): %r', labeled_layout_tokens_list)
authors_iterable = (
author
for labeled_layout_tokens in labeled_layout_tokens_list
for author in (
self.name_header_model.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)
)
for author in authors_iterable:
result_content.append(author)
semantic_parent.mixed_content = result_content
def _process_raw_affiliations(self, semantic_document: SemanticDocument):
result_content: List[SemanticContentWrapper] = []
raw_aff_address_list: List[SemanticRawAffiliationAddress] = []
for semantic_content in semantic_document.front:
if isinstance(semantic_content, SemanticRawAffiliationAddress):
raw_aff_address_list.append(semantic_content)
continue
result_content.append(semantic_content)
if raw_aff_address_list:
raw_aff_layout_documents = [
LayoutDocument.for_blocks(list(raw_aff_or_address.iter_blocks()))
for raw_aff_or_address in raw_aff_address_list
]
labeled_layout_tokens_list = (
self.affiliation_address_model
.predict_labels_for_layout_documents(
raw_aff_layout_documents,
app_features_context=self.app_features_context
)
)
LOGGER.debug('labeled_layout_tokens_list (aff): %r', labeled_layout_tokens_list)
aff_iterable = (
aff
for labeled_layout_tokens in labeled_layout_tokens_list
for aff in (
self.affiliation_address_model
.iter_semantic_content_for_labeled_layout_tokens(labeled_layout_tokens)
)
)
for aff in aff_iterable:
result_content.append(aff)
semantic_document.front.mixed_content = result_content
self._assign_content_ids(
semantic_document.front.iter_by_type(SemanticAffiliationAddress),
iter(iter_ids('aff'))
)
def _extract_raw_references_from_segmentation(
self,
semantic_document: SemanticDocument,
segmentation_label_result: LayoutDocumentLabelResult
):
references_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<references>'
).remove_empty_blocks()
LOGGER.debug('references_layout_document: %s', references_layout_document)
if not references_layout_document:
return
labeled_layout_tokens = self.reference_segmenter_model.predict_labels_for_layout_document(
references_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
semantic_content_iterable = (
self.reference_segmenter_model
.iter_semantic_content_for_labeled_layout_tokens(labeled_layout_tokens)
)
reference_list = SemanticReferenceList(list(semantic_content_iterable))
semantic_document.back_section.add_content(reference_list)
def _iter_parse_semantic_references(
self,
semantic_raw_references: List[SemanticRawReference]
) -> Iterable[Union[SemanticReference, SemanticInvalidReference]]:
layout_documents = [
LayoutDocument.for_blocks([semantic_raw_reference.merged_block])
for semantic_raw_reference in semantic_raw_references
]
labeled_layout_tokens_list = (
self.citation_model
.predict_labels_for_layout_documents(
layout_documents,
app_features_context=self.app_features_context
)
)
LOGGER.debug('labeled_layout_tokens_list: %r', labeled_layout_tokens_list)
for labeled_layout_tokens, semantic_raw_reference in zip(
labeled_layout_tokens_list, semantic_raw_references
):
semantic_content_iterable = (
self.citation_model
.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens,
semantic_raw_reference=semantic_raw_reference
)
)
ref: Optional[Union[SemanticReference, SemanticInvalidReference]] = None
for semantic_content in semantic_content_iterable:
if isinstance(semantic_content, (SemanticReference, SemanticInvalidReference)):
ref = semantic_content
if not ref:
raise AssertionError('no semantic reference extracted')
yield ref
def _extract_reference_fields_from_raw_references(
self,
semantic_document: SemanticDocument
):
reference_lists = list(semantic_document.back_section.iter_by_type(
SemanticReferenceList
))
semantic_raw_references = [
raw_reference
for reference_list in reference_lists
for raw_reference in reference_list.iter_by_type(SemanticRawReference)
]
semantic_references = list(self._iter_parse_semantic_references(
semantic_raw_references
))
LOGGER.debug('semantic_references: %r', semantic_references)
semantic_reference_by_semantic_raw_reference_id = {
id(semantic_raw_reference): semantic_reference
for semantic_raw_reference, semantic_reference in zip(
semantic_raw_references, semantic_references
)
}
LOGGER.debug(
'semantic_reference_by_semantic_raw_reference_id keys: %s',
semantic_reference_by_semantic_raw_reference_id.keys()
)
for reference_list in reference_lists:
updated_content: List[SemanticContentWrapper] = []
for semantic_content in reference_list:
if isinstance(semantic_content, SemanticRawReference):
semantic_reference = semantic_reference_by_semantic_raw_reference_id[
id(semantic_content)
]
updated_content.append(semantic_reference)
continue
updated_content.append(semantic_content)
reference_list.mixed_content = updated_content
def _iter_parse_semantic_name_lists(
self,
semantic_raw_name_lists: Sequence[T_SemanticRawNameList],
name_type: Type[T_SemanticName]
) -> Iterable[Tuple[T_SemanticRawNameList, List[SemanticContentWrapper]]]:
layout_documents = [
LayoutDocument.for_blocks([semantic_raw_name_list.merged_block])
for semantic_raw_name_list in semantic_raw_name_lists
]
labeled_layout_tokens_list = (
self.name_citation_model
.predict_labels_for_layout_documents(
layout_documents,
app_features_context=self.app_features_context
)
)
LOGGER.debug('labeled_layout_tokens_list: %r', labeled_layout_tokens_list)
for labeled_layout_tokens, semantic_raw_name_list in zip(
labeled_layout_tokens_list, semantic_raw_name_lists
):
semantic_content_iterable = (
self.name_citation_model
.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens,
name_type=name_type
)
)
yield semantic_raw_name_list, list(semantic_content_iterable)
def _extract_reference_name_lists_from_raw_references(
self,
semantic_document: SemanticDocument
):
reference_lists = list(semantic_document.back_section.iter_by_type(
SemanticReferenceList
))
ref_list = [
ref
for reference_list in reference_lists
for ref in reference_list.iter_by_type(SemanticReference)
]
if self.config.extract_citation_authors:
raw_authors = [
raw_author
for ref in ref_list
for raw_author in ref.iter_by_type(SemanticRawAuthors)
]
else:
raw_authors = []
if self.config.extract_citation_editors:
raw_editors = [
raw_author
for ref in ref_list
for raw_author in ref.iter_by_type(SemanticRawEditors)
]
else:
raw_editors = []
content_list_by_raw_author_id = {
id(raw_author): content_list
for raw_author, content_list in (
self._iter_parse_semantic_name_lists(raw_authors, name_type=SemanticAuthor)
)
}
content_list_by_raw_editor_id = {
id(raw_author): content_list
for raw_author, content_list in (
self._iter_parse_semantic_name_lists(raw_editors, name_type=SemanticEditor)
)
}
LOGGER.debug(
'content_list_by_raw_author_id keys: %s',
content_list_by_raw_author_id.keys()
)
LOGGER.debug(
'content_list_by_raw_editor_id keys: %s',
content_list_by_raw_editor_id.keys()
)
for reference_list in reference_lists:
for semantic_content in reference_list:
if isinstance(semantic_content, SemanticReference):
if self.config.extract_citation_authors:
semantic_content.flat_map_inplace_by_type(
SemanticRawAuthors,
lambda raw_author: content_list_by_raw_author_id[
id(raw_author)
]
)
if self.config.extract_citation_editors:
semantic_content.flat_map_inplace_by_type(
SemanticRawEditors,
lambda raw_editor: content_list_by_raw_editor_id[
id(raw_editor)
]
)
def _iter_parse_semantic_content_lists(
self,
semantic_raw_content_lists: Sequence[T_SemanticContentWrapper],
model: Model
) -> Iterable[Tuple[T_SemanticContentWrapper, List[SemanticContentWrapper]]]:
layout_documents = [
LayoutDocument.for_blocks([semantic_raw_name_list.merged_block])
for semantic_raw_name_list in semantic_raw_content_lists
]
labeled_layout_tokens_list = (
model
.predict_labels_for_layout_documents(
layout_documents,
app_features_context=self.app_features_context
)
)
LOGGER.debug('labeled_layout_tokens_list: %r', labeled_layout_tokens_list)
for labeled_layout_tokens, semantic_raw_name_list in zip(
labeled_layout_tokens_list, semantic_raw_content_lists
):
semantic_content_iterable = (
model
.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)
yield semantic_raw_name_list, list(semantic_content_iterable)
def _extract_semantic_content_from_raw_content(
self,
semantic_document: SemanticDocument,
semantic_type: Type[T_SemanticContentWrapper],
model: Model
):
parents = [
parent
for root in [
semantic_document.body_section,
semantic_document.back_section
]
for parent in root.iter_parent_by_semantic_type_recursively(
semantic_type
)
]
raw_content_lists = [
raw_content
for parent in parents
for raw_content in parent.iter_by_type(semantic_type)
]
content_list_by_raw_content_id = {
id(raw_content): content_list
for raw_content, content_list in (
self._iter_parse_semantic_content_lists(
raw_content_lists,
model
)
)
}
LOGGER.debug(
'content_list_by_raw_content_id keys: %s',
content_list_by_raw_content_id.keys()
)
for parent in parents:
parent.flat_map_inplace_by_type(
semantic_type,
lambda raw_content: content_list_by_raw_content_id[
id(raw_content)
]
)
def _extract_figure_fields_from_raw_figures(
self,
semantic_document: SemanticDocument
):
self._extract_semantic_content_from_raw_content(
semantic_document,
SemanticRawFigure,
self.figure_model
)
def _extract_table_fields_from_raw_tables(
self,
semantic_document: SemanticDocument
):
self._extract_semantic_content_from_raw_content(
semantic_document,
SemanticRawTable,
self.table_model
)
def _update_semantic_section_using_segmentation_result_and_fulltext_model(
self,
semantic_section: SemanticSection,
segmentation_label_result: LayoutDocumentLabelResult,
segmentation_tag: str,
section_type: str
):
layout_document = segmentation_label_result.get_filtered_document_by_label(
segmentation_tag
).remove_empty_blocks()
self._update_semantic_section_using_layout_document_and_fulltext_model(
semantic_section,
layout_document,
section_name=segmentation_tag,
section_type=section_type
)
def _update_semantic_section_using_layout_document_and_fulltext_model(
self,
semantic_section: SemanticSection,
layout_document: LayoutDocument,
section_name: str,
section_type: str
):
LOGGER.debug('layout_document (%r): %s', section_name, layout_document)
if not layout_document.pages:
return
labeled_layout_tokens = self.fulltext_model.predict_labels_for_layout_document(
layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens (%r): %r', section_name, labeled_layout_tokens)
entity_blocks = self.fulltext_model.iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens
)
self.fulltext_model.update_section_with_entity_blocks(
semantic_section,
entity_blocks,
section_type=section_type
)
def get_tei_document_for_layout_document(
self,
layout_document: LayoutDocument
) -> TeiDocument:
return get_tei_for_semantic_document(
self.get_semantic_document_for_layout_document(
layout_document
)
)
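# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# Illustrates how the FullTextProcessor defined above is typically driven.
# `fulltext_models` is assumed to come from load_models() and `layout_document`
# from the earlier PDF/ALTO parsing step; neither is constructed here.
def _example_fulltext_processing(fulltext_models, layout_document):
    processor = FullTextProcessor(
        fulltext_models,
        config=FullTextProcessorConfig()
    )
    # SemanticDocument with front matter, body/back sections, references etc.
    semantic_document = processor.get_semantic_document_for_layout_document(
        layout_document
    )
    # Or directly as a TEI document, as implemented in
    # get_tei_document_for_layout_document above.
    tei_document = processor.get_tei_document_for_layout_document(layout_document)
    return semantic_document, tei_document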
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/processors/fulltext/processor.py | processor.py |
| 0.82029 | 0.169956 |
import dataclasses
import logging
from dataclasses import dataclass
from typing import (
Any,
Iterable,
Optional,
Type,
TypeVar
)
from sciencebeam_parser.app.context import AppContext
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.models.model import Model
from sciencebeam_parser.models.model_impl_factory import get_model_impl_factory_for_config
from sciencebeam_parser.cv_models.cv_model_factory import get_lazy_cv_model_for_config
from sciencebeam_parser.cv_models.cv_model import ComputerVisionModel
from sciencebeam_parser.ocr_models.ocr_model_factory import get_lazy_ocr_model_for_config
from sciencebeam_parser.ocr_models.ocr_model import OpticalCharacterRecognitionModel
from sciencebeam_parser.models.segmentation.model import SegmentationModel
from sciencebeam_parser.models.header.model import HeaderModel
from sciencebeam_parser.models.name.model import NameModel
from sciencebeam_parser.models.affiliation_address.model import AffiliationAddressModel
from sciencebeam_parser.models.fulltext.model import FullTextModel
from sciencebeam_parser.models.figure.model import FigureModel
from sciencebeam_parser.models.table.model import TableModel
from sciencebeam_parser.models.reference_segmenter.model import ReferenceSegmenterModel
from sciencebeam_parser.models.citation.model import CitationModel
from sciencebeam_parser.processors.fulltext.config import FullTextProcessorConfig
LOGGER = logging.getLogger(__name__)
@dataclass
class FullTextModels:
segmentation_model: SegmentationModel
header_model: HeaderModel
name_header_model: NameModel
name_citation_model: NameModel
affiliation_address_model: AffiliationAddressModel
fulltext_model: FullTextModel
figure_model: FigureModel
table_model: TableModel
reference_segmenter_model: ReferenceSegmenterModel
citation_model: CitationModel
cv_model: Optional[ComputerVisionModel] = None
ocr_model: Optional[OpticalCharacterRecognitionModel] = None
def _iter_models(self) -> Iterable[Any]:
for field in dataclasses.fields(self):
model = getattr(self, field.name)
if model is not None:
yield model
def preload(self):
LOGGER.info('preloading models...')
models = list(self._iter_models())
for model_index, model in enumerate(models):
LOGGER.info('preloading: %d/%d: %r', 1 + model_index, len(models), model)
try:
model.preload()
except AttributeError:
pass
LOGGER.info('preloaded models')
def get_sequence_model_by_name(self, model_name: str) -> Model:
model = getattr(self, model_name + '_model')
assert isinstance(model, Model)
return model
T_Model = TypeVar('T_Model', bound=Model)
def load_model(
app_config: AppConfig,
app_context: AppContext,
model_name: str,
model_class: Type[T_Model]
) -> T_Model:
models_config = app_config['models']
model_config = models_config[model_name]
model = model_class(
get_model_impl_factory_for_config(
model_config,
app_context=app_context
),
model_config=model_config
)
return model
def get_cv_model_for_app_config(
app_config: AppConfig,
enabled: bool = True
) -> Optional[ComputerVisionModel]:
cv_model_config = app_config.get('cv_models', {}).get('default')
if enabled and cv_model_config:
return get_lazy_cv_model_for_config(cv_model_config)
return None
def get_ocr_model_for_app_config(
app_config: AppConfig,
enabled: bool = True
) -> Optional[OpticalCharacterRecognitionModel]:
ocr_model_config = app_config.get('ocr_models', {}).get('default')
if enabled and ocr_model_config:
return get_lazy_ocr_model_for_config(ocr_model_config)
return None
def load_models(
app_config: AppConfig,
app_context: AppContext,
fulltext_processor_config: FullTextProcessorConfig
) -> FullTextModels:
segmentation_model = load_model(
app_config, app_context, 'segmentation', SegmentationModel
)
header_model = load_model(
app_config, app_context, 'header', HeaderModel
)
name_header_model = load_model(
app_config, app_context, 'name_header', NameModel
)
name_citation_model = load_model(
app_config, app_context, 'name_citation', NameModel
)
affiliation_address_model = load_model(
app_config, app_context, 'affiliation_address', AffiliationAddressModel
)
fulltext_model = load_model(
app_config, app_context, 'fulltext', FullTextModel
)
figure_model = load_model(
app_config, app_context, 'figure', FigureModel
)
table_model = load_model(
app_config, app_context, 'table', TableModel
)
reference_segmenter_model = load_model(
app_config, app_context, 'reference_segmenter', ReferenceSegmenterModel
)
citation_model = load_model(
app_config, app_context, 'citation', CitationModel
)
cv_model = get_cv_model_for_app_config(
app_config,
enabled=fulltext_processor_config.use_cv_model
)
ocr_model = get_ocr_model_for_app_config(
app_config,
enabled=fulltext_processor_config.use_ocr_model
)
return FullTextModels(
segmentation_model=segmentation_model,
header_model=header_model,
name_header_model=name_header_model,
name_citation_model=name_citation_model,
affiliation_address_model=affiliation_address_model,
fulltext_model=fulltext_model,
figure_model=figure_model,
table_model=table_model,
reference_segmenter_model=reference_segmenter_model,
citation_model=citation_model,
cv_model=cv_model,
ocr_model=ocr_model
)
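# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# Illustrates how load_models() above is typically wired up. `app_config` and
# `app_context` are assumed to be constructed elsewhere (see AppConfig in
# sciencebeam_parser.config.config and AppContext in sciencebeam_parser.app.context).
def _example_load_and_preload_models(app_config, app_context):
    fulltext_models = load_models(
        app_config,
        app_context,
        fulltext_processor_config=FullTextProcessorConfig()
    )
    # Optionally load all model implementations eagerly (mirrors the
    # `preload_on_startup` behaviour used by the parser entry point).
    fulltext_models.preload()
    # Individual sequence models can also be retrieved by name.
    segmentation_model = fulltext_models.get_sequence_model_by_name('segmentation')
    return fulltext_models, segmentation_model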
| sciencebeam-parser | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/processors/fulltext/models.py | models.py |
| 0.77373 | 0.138841 |
import logging
import os
from contextlib import ExitStack
from dataclasses import dataclass
from pathlib import Path
from tempfile import TemporaryDirectory
from time import monotonic
from typing import List, Optional, Set
from zipfile import ZipFile
from lxml import etree
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_parser.app.context import AppContext
from sciencebeam_parser.config.config import AppConfig, get_download_dir
from sciencebeam_parser.external.pdfalto.wrapper import PdfAltoWrapper
from sciencebeam_parser.external.pdfalto.parser import parse_alto_root
from sciencebeam_parser.external.wapiti.wrapper import LazyWapitiBinaryWrapper
from sciencebeam_parser.lookup.loader import load_lookup_from_config
from sciencebeam_parser.models.data import AppFeaturesContext
from sciencebeam_parser.document.layout_document import LayoutDocument
from sciencebeam_parser.document.semantic_document import (
SemanticDocument,
SemanticGraphic
)
from sciencebeam_parser.document.tei_document import get_tei_for_semantic_document
from sciencebeam_parser.processors.fulltext.models import FullTextModels
from sciencebeam_parser.resources.xslt import TEI_TO_JATS_XSLT_FILE
from sciencebeam_parser.transformers.doc_converter_wrapper import DocConverterWrapper
from sciencebeam_parser.transformers.xslt import XsltTransformerWrapper
from sciencebeam_parser.utils.lazy import LazyLoaded
from sciencebeam_parser.utils.media_types import (
MediaTypes
)
from sciencebeam_parser.utils.text import normalize_text
from sciencebeam_parser.utils.tokenizer import get_tokenized_tokens
from sciencebeam_parser.processors.fulltext.api import (
FullTextProcessor,
FullTextProcessorConfig,
FullTextProcessorDocumentContext,
load_models
)
LOGGER = logging.getLogger(__name__)
TEMP_ALTO_XML_FILENAME = 'temp.lxml'
DOC_TO_PDF_SUPPORTED_MEDIA_TYPES = {
MediaTypes.DOCX,
MediaTypes.DOTX,
MediaTypes.DOC,
MediaTypes.RTF
}
JATS_MEDIA_TYPES = {MediaTypes.JATS_XML, MediaTypes.JATS_ZIP}
ASSET_ZIP_MEDIA_TYPES = {MediaTypes.TEI_ZIP, MediaTypes.JATS_ZIP}
def normalize_and_tokenize_text(text: str) -> List[str]:
return get_tokenized_tokens(
normalize_text(text),
keep_whitespace=True
)
def normalize_layout_document(
layout_document: LayoutDocument,
**kwargs
) -> LayoutDocument:
return (
layout_document
.retokenize(tokenize_fn=normalize_and_tokenize_text)
.remove_empty_blocks(**kwargs)
)
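# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# normalize_and_tokenize_text() above normalizes the text via normalize_text
# (the exact normalisation rules live in sciencebeam_parser.utils.text) and
# tokenizes it while keeping whitespace tokens, which is the shape
# LayoutDocument.retokenize expects from its tokenize_fn.
def _example_retokenize(layout_document: LayoutDocument) -> LayoutDocument:
    # `layout_document` is assumed to come from the ALTO parsing step
    # (parse_alto_root) used elsewhere in this module.
    tokens = normalize_and_tokenize_text('Fig.  1: Results')
    LOGGER.debug('example tokens (including whitespace tokens): %r', tokens)
    return normalize_layout_document(layout_document)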
def load_app_features_context(
config: AppConfig,
download_manager: DownloadManager
):
return AppFeaturesContext(
country_lookup=load_lookup_from_config(
config.get('lookup', {}).get('country'),
download_manager=download_manager
),
first_name_lookup=load_lookup_from_config(
config.get('lookup', {}).get('first_name'),
download_manager=download_manager
),
last_name_lookup=load_lookup_from_config(
config.get('lookup', {}).get('last_name'),
download_manager=download_manager
)
)
def create_asset_zip_for_semantic_document(
zip_filename: str,
semantic_document: SemanticDocument,
relative_xml_filename: str,
local_xml_filename: str
):
semantic_graphic_list = list(semantic_document.iter_by_type_recursively(
SemanticGraphic
))
LOGGER.debug('semantic_graphic_list: %r', semantic_graphic_list)
with ZipFile(zip_filename, 'w') as zip_file:
zip_file.write(
local_xml_filename,
relative_xml_filename
)
for semantic_graphic in semantic_graphic_list:
assert semantic_graphic.relative_path, \
"graphic relative_path missing, ensure extract_graphic_assets was enabled"
layout_graphic = semantic_graphic.layout_graphic
assert layout_graphic
assert layout_graphic.local_file_path
zip_file.write(
layout_graphic.local_file_path,
semantic_graphic.relative_path
)
LOGGER.debug('response_content (bytes): %d', Path(zip_filename).stat().st_size)
def get_xml_tree(xml_root: etree.ElementBase) -> etree._ElementTree:
if isinstance(xml_root, etree._ElementTree): # pylint: disable=protected-access
# Note: _XSLTResultTree is extending _ElementTree
return xml_root
return etree.ElementTree(xml_root)
def serialize_xml_to_file(
xml_root: etree.ElementBase,
filename: str
):
get_xml_tree(xml_root).write(
filename,
encoding='utf-8',
pretty_print=False
)
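# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# Demonstrates serialize_xml_to_file() above with a minimal lxml element;
# get_xml_tree() wraps a plain element in an ElementTree before writing.
# The output filename is purely illustrative.
def _example_serialize_xml(output_filename: str) -> str:
    root = etree.Element('example')
    etree.SubElement(root, 'child').text = 'value'
    serialize_xml_to_file(root, filename=output_filename)
    return output_filename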
@dataclass
class DocumentRequestParameters:
first_page: Optional[int] = None
last_page: Optional[int] = None
class ScienceBeamParserError(RuntimeError):
pass
class BadRequestScienceBeamParserError(ScienceBeamParserError):
pass
class UnsupportedRequestMediaTypeScienceBeamParserError(BadRequestScienceBeamParserError):
pass
class UnsupportedResponseMediaTypeScienceBeamParserError(BadRequestScienceBeamParserError):
pass
class ScienceBeamBaseParser:
def __init__(self, config: AppConfig):
self.config = config
self.download_manager = DownloadManager(
download_dir=get_download_dir(config)
)
self.pdfalto_wrapper = PdfAltoWrapper(
self.download_manager.download_if_url(config['pdfalto']['path'])
)
self.pdfalto_wrapper.ensure_executable()
self.app_context = AppContext(
app_config=config,
download_manager=self.download_manager,
lazy_wapiti_binary_wrapper=LazyWapitiBinaryWrapper(
install_url=config.get('wapiti', {}).get('install_source'),
download_manager=self.download_manager
)
)
self.fulltext_processor_config = FullTextProcessorConfig.from_app_config(app_config=config)
self.fulltext_models = load_models(
config,
app_context=self.app_context,
fulltext_processor_config=self.fulltext_processor_config
)
if config.get('preload_on_startup'):
self.fulltext_models.preload()
self.app_features_context = load_app_features_context(
config,
download_manager=self.download_manager
)
tei_to_jats_config = config.get('xslt', {}).get('tei_to_jats', {})
self.tei_to_jats_xslt_transformer = XsltTransformerWrapper.from_template_file(
TEI_TO_JATS_XSLT_FILE,
xslt_template_parameters=tei_to_jats_config.get('parameters', {})
)
self.doc_to_pdf_enabled = config.get('doc_to_pdf', {}).get('enabled', True)
self.doc_to_pdf_convert_parameters = config.get('doc_to_pdf', {}).get('convert', {})
self.doc_converter_wrapper = DocConverterWrapper(
**config.get('doc_to_pdf', {}).get('listener', {})
)
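# --- Hedged configuration sketch (added by the editor; not part of the original module) ---
# ScienceBeamBaseParser.__init__ above reads at least the config entries below
# (further keys such as 'models', 'cv_models', 'ocr_models' and 'lookup' are
# consumed by load_models() and load_app_features_context()). The dict only
# illustrates the key structure; real deployments use the packaged AppConfig,
# and the values shown here are placeholders, not defaults.
_EXAMPLE_PARSER_CONFIG_KEYS = {
    'pdfalto': {'path': '<pdfalto binary path or download URL>'},
    'wapiti': {'install_source': '<optional wapiti install source>'},
    'preload_on_startup': False,
    'xslt': {'tei_to_jats': {'parameters': {}}},
    'doc_to_pdf': {'enabled': True, 'convert': {}, 'listener': {}}
}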
class ScienceBeamParserBaseSession:
def __init__(
self,
parser: 'ScienceBeamParser',
temp_dir: Optional[str] = None,
fulltext_processor_config: Optional[FullTextProcessorConfig] = None,
document_request_parameters: Optional[DocumentRequestParameters] = None
):
self.parser = parser
self.exit_stack = ExitStack()
self._temp_dir: Optional[str] = temp_dir
if fulltext_processor_config is None:
fulltext_processor_config = parser.fulltext_processor_config
self.fulltext_processor_config = fulltext_processor_config
if document_request_parameters is None:
document_request_parameters = DocumentRequestParameters()
self.document_request_parameters = document_request_parameters
def __enter__(self) -> 'ScienceBeamParserBaseSession':
return self
def close(self):
self.exit_stack.close()
def __exit__(self, exc, value, tb):
self.close()
@property
def temp_dir(self) -> str:
if not self._temp_dir:
temp_dir_context = TemporaryDirectory( # pylint: disable=consider-using-with
suffix='-sb-parser'
)
self.exit_stack.push(temp_dir_context)
self._temp_dir = temp_dir_context.__enter__()
return self._temp_dir
@property
def temp_path(self) -> Path:
return Path(self.temp_dir)
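# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# The session above manages a lazily created temporary directory via an
# ExitStack and is intended to be used as a context manager. `parser` is
# assumed to be an already constructed ScienceBeamParser instance.
def _example_session_usage(parser) -> None:
    with ScienceBeamParserBaseSession(parser) as session:
        # temp_dir is only created on first access and cleaned up on close().
        LOGGER.debug('session temp dir: %s', session.temp_dir)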
class _ScienceBeamParserSessionDerivative:
def __init__(
self,
session: 'ScienceBeamParserBaseSession'
):
self.session = session
@property
def parser(self) -> ScienceBeamBaseParser:
return self.session.parser
@property
def temp_dir(self) -> str:
return self.session.temp_dir
@property
def temp_path(self) -> Path:
return self.session.temp_path
class ScienceBeamParserSessionParsedSemanticDocument(_ScienceBeamParserSessionDerivative):
def __init__(
self,
session: 'ScienceBeamParserBaseSession',
semantic_document: SemanticDocument
):
super().__init__(session=session)
self.semantic_document = semantic_document
@property
def tei_to_jats_xslt_transformer(self) -> XsltTransformerWrapper:
return self.parser.tei_to_jats_xslt_transformer
def _get_tei_to_jats_xml_root(self, xml_root: etree.ElementBase) -> etree.ElementBase:
start = monotonic()
xml_root = self.tei_to_jats_xslt_transformer(xml_root)
end = monotonic()
LOGGER.info('tei to jats, took=%.3fs', end - start)
return xml_root
def _serialize_xml_to_file(
self,
xml_root: etree.ElementBase,
filename: str
) -> str:
start = monotonic()
serialize_xml_to_file(xml_root, filename=filename)
end = monotonic()
LOGGER.info('serializing xml, took=%.3fs', end - start)
return filename
def get_supported_response_media_type(self) -> Set[str]:
return {
MediaTypes.TEI_XML,
MediaTypes.TEI_ZIP,
MediaTypes.JATS_XML,
MediaTypes.JATS_ZIP
}
def get_local_file_for_response_media_type(
self,
response_media_type: str
) -> str:
if response_media_type not in self.get_supported_response_media_type():
raise UnsupportedResponseMediaTypeScienceBeamParserError()
tei_document = get_tei_for_semantic_document(
self.semantic_document
)
xml_root = tei_document.root
relative_xml_filename = 'tei.xml'
if response_media_type in JATS_MEDIA_TYPES:
xml_root = self._get_tei_to_jats_xml_root(xml_root)
relative_xml_filename = 'jats.xml'
local_xml_filename = os.path.join(self.temp_dir, relative_xml_filename)
self._serialize_xml_to_file(xml_root, local_xml_filename)
LOGGER.debug('local_xml_filename: %r', local_xml_filename)
if response_media_type in ASSET_ZIP_MEDIA_TYPES:
zip_filename = os.path.join(self.temp_dir, 'results.zip')
create_asset_zip_for_semantic_document(
zip_filename,
semantic_document=self.semantic_document,
local_xml_filename=local_xml_filename,
relative_xml_filename=relative_xml_filename
)
return zip_filename
return local_xml_filename
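# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# get_local_file_for_response_media_type() above serializes the parsed semantic
# document to TEI (and optionally JATS via the packaged XSLT) into the session's
# temp dir; the zip variants additionally bundle extracted graphic assets.
def _example_get_jats_path(
    parsed_semantic_document: ScienceBeamParserSessionParsedSemanticDocument
) -> str:
    # MediaTypes.TEI_XML, TEI_ZIP, JATS_XML and JATS_ZIP are supported here.
    return parsed_semantic_document.get_local_file_for_response_media_type(
        MediaTypes.JATS_XML
    )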
class ScienceBeamParserSessionParsedLayoutDocument(_ScienceBeamParserSessionDerivative):
def __init__(
self,
session: 'ScienceBeamParserBaseSession',
layout_document: LayoutDocument,
pdf_path: str
):
super().__init__(session=session)
self.layout_document = layout_document
self.pdf_path = pdf_path
@property
def fulltext_models(self) -> FullTextModels:
return self.parser.fulltext_models
@property
def app_features_context(self) -> AppFeaturesContext:
return self.parser.app_features_context
def _get_semantic_document(
self,
fulltext_processor: FullTextProcessor
) -> SemanticDocument:
context = FullTextProcessorDocumentContext(
pdf_path=self.pdf_path,
temp_dir=self.temp_dir
)
semantic_document = (
fulltext_processor
.get_semantic_document_for_layout_document(
self.layout_document,
context=context
)
)
return semantic_document
def get_parsed_semantic_document(
self,
fulltext_processor_config: Optional[FullTextProcessorConfig] = None
) -> ScienceBeamParserSessionParsedSemanticDocument:
if fulltext_processor_config is None:
fulltext_processor_config = self.session.fulltext_processor_config
fulltext_processor = FullTextProcessor(
self.fulltext_models,
app_features_context=self.app_features_context,
config=fulltext_processor_config
)
return ScienceBeamParserSessionParsedSemanticDocument(
self.session,
self._get_semantic_document(fulltext_processor)
)
def get_local_file_for_response_media_type(
self,
response_media_type: str
) -> str:
if response_media_type == MediaTypes.PDF:
return self.pdf_path
fulltext_processor_config = self.session.fulltext_processor_config
if response_media_type in ASSET_ZIP_MEDIA_TYPES:
fulltext_processor_config = (
fulltext_processor_config
._replace(
extract_graphic_assets=True,
extract_graphic_bounding_boxes=True
)
)
assert fulltext_processor_config.extract_graphic_assets, \
"extract_graphic_assets required for asset zip"
return (
self.get_parsed_semantic_document(
fulltext_processor_config
).get_local_file_for_response_media_type(
response_media_type
)
)
class ScienceBeamParserSessionSource(_ScienceBeamParserSessionDerivative):
def __init__(
self,
session: 'ScienceBeamParserBaseSession',
source_path: str,
source_media_type: str
):
super().__init__(session=session)
self.source_path = source_path
self.source_media_type = source_media_type
self.lazy_pdf_path = LazyLoaded[str](self._get_or_convert_to_pdf_path)
self.lazy_alto_xml_path = LazyLoaded[str](self._parse_to_alto_xml)
self.lazy_parsed_layout_document = LazyLoaded[
ScienceBeamParserSessionParsedLayoutDocument
](self._parse_to_parsed_layout_document)
@property
def parser(self) -> ScienceBeamBaseParser:
return self.session.parser
@property
def doc_to_pdf_enabled(self) -> bool:
return self.parser.doc_to_pdf_enabled
@property
def doc_converter_wrapper(self) -> DocConverterWrapper:
return self.parser.doc_converter_wrapper
@property
def doc_to_pdf_convert_parameters(self) -> dict:
return self.parser.doc_to_pdf_convert_parameters
@property
def pdfalto_wrapper(self) -> PdfAltoWrapper:
return self.parser.pdfalto_wrapper
@property
def document_request_parameters(self) -> DocumentRequestParameters:
return self.session.document_request_parameters
def _get_or_convert_to_pdf_path(
self
) -> str:
LOGGER.info(
'media_type=%r (filename=%r)',
self.source_media_type,
self.source_path
)
if self.source_media_type in DOC_TO_PDF_SUPPORTED_MEDIA_TYPES:
if not self.doc_to_pdf_enabled:
LOGGER.info('doc to pdf not enabled')
raise UnsupportedRequestMediaTypeScienceBeamParserError(
'doc to pdf not enabled'
)
target_temp_file = self.doc_converter_wrapper.convert(
self.source_path,
**self.doc_to_pdf_convert_parameters
)
return target_temp_file
if self.source_media_type != MediaTypes.PDF:
raise UnsupportedRequestMediaTypeScienceBeamParserError(
'unsupported media type: %r' % self.source_media_type
)
return self.source_path
def _parse_to_alto_xml(self) -> str:
output_path = os.path.join(self.temp_dir, TEMP_ALTO_XML_FILENAME)
self.pdfalto_wrapper.convert_pdf_to_pdfalto_xml(
str(self.lazy_pdf_path.get()),
str(output_path),
first_page=self.document_request_parameters.first_page,
last_page=self.document_request_parameters.last_page
)
return output_path
def _parse_to_parsed_layout_document(
self
) -> ScienceBeamParserSessionParsedLayoutDocument:
pdf_path = self.lazy_pdf_path.get()
root = etree.parse(self.lazy_alto_xml_path.get())
layout_document = normalize_layout_document(
parse_alto_root(root),
preserve_empty_pages=True
)
return ScienceBeamParserSessionParsedLayoutDocument(
self.session,
layout_document=layout_document,
pdf_path=pdf_path
)
def get_parsed_layout_document(self) -> ScienceBeamParserSessionParsedLayoutDocument:
return self.lazy_parsed_layout_document.get()
def get_layout_document(self) -> LayoutDocument:
return self.get_parsed_layout_document().layout_document
def get_local_file_for_response_media_type(
self,
response_media_type: str
) -> str:
if response_media_type == MediaTypes.PDF:
return self.lazy_pdf_path.get()
if response_media_type == MediaTypes.ALTO_XML:
return self.lazy_alto_xml_path.get()
return self.get_parsed_layout_document().get_local_file_for_response_media_type(
response_media_type
)
class ScienceBeamParserSession(ScienceBeamParserBaseSession):
def __enter__(self) -> 'ScienceBeamParserSession':
super().__enter__()
return self
def get_source(
self,
source_path: str,
source_media_type: str
) -> ScienceBeamParserSessionSource:
return ScienceBeamParserSessionSource(
self,
source_path=source_path,
source_media_type=source_media_type
)
class ScienceBeamParser(ScienceBeamBaseParser):
@staticmethod
def from_config(config: AppConfig) -> 'ScienceBeamParser':
return ScienceBeamParser(config)
def get_new_session(self, **kwargs) -> ScienceBeamParserSession:
return ScienceBeamParserSession(self, **kwargs)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/app/parser.py
|
parser.py
|
import logging
import os
from contextlib import ExitStack
from dataclasses import dataclass
from pathlib import Path
from tempfile import TemporaryDirectory
from time import monotonic
from typing import List, Optional, Set
from zipfile import ZipFile
from lxml import etree
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_parser.app.context import AppContext
from sciencebeam_parser.config.config import AppConfig, get_download_dir
from sciencebeam_parser.external.pdfalto.wrapper import PdfAltoWrapper
from sciencebeam_parser.external.pdfalto.parser import parse_alto_root
from sciencebeam_parser.external.wapiti.wrapper import LazyWapitiBinaryWrapper
from sciencebeam_parser.lookup.loader import load_lookup_from_config
from sciencebeam_parser.models.data import AppFeaturesContext
from sciencebeam_parser.document.layout_document import LayoutDocument
from sciencebeam_parser.document.semantic_document import (
SemanticDocument,
SemanticGraphic
)
from sciencebeam_parser.document.tei_document import get_tei_for_semantic_document
from sciencebeam_parser.processors.fulltext.models import FullTextModels
from sciencebeam_parser.resources.xslt import TEI_TO_JATS_XSLT_FILE
from sciencebeam_parser.transformers.doc_converter_wrapper import DocConverterWrapper
from sciencebeam_parser.transformers.xslt import XsltTransformerWrapper
from sciencebeam_parser.utils.lazy import LazyLoaded
from sciencebeam_parser.utils.media_types import (
MediaTypes
)
from sciencebeam_parser.utils.text import normalize_text
from sciencebeam_parser.utils.tokenizer import get_tokenized_tokens
from sciencebeam_parser.processors.fulltext.api import (
FullTextProcessor,
FullTextProcessorConfig,
FullTextProcessorDocumentContext,
load_models
)
LOGGER = logging.getLogger(__name__)
TEMP_ALTO_XML_FILENAME = 'temp.lxml'
DOC_TO_PDF_SUPPORTED_MEDIA_TYPES = {
MediaTypes.DOCX,
MediaTypes.DOTX,
MediaTypes.DOC,
MediaTypes.RTF
}
JATS_MEDIA_TYPES = {MediaTypes.JATS_XML, MediaTypes.JATS_ZIP}
ASSET_ZIP_MEDIA_TYPES = {MediaTypes.TEI_ZIP, MediaTypes.JATS_ZIP}
def normalize_and_tokenize_text(text: str) -> List[str]:
return get_tokenized_tokens(
normalize_text(text),
keep_whitespace=True
)
def normalize_layout_document(
layout_document: LayoutDocument,
**kwargs
) -> LayoutDocument:
return (
layout_document
.retokenize(tokenize_fn=normalize_and_tokenize_text)
.remove_empty_blocks(**kwargs)
)
def load_app_features_context(
config: AppConfig,
download_manager: DownloadManager
):
return AppFeaturesContext(
country_lookup=load_lookup_from_config(
config.get('lookup', {}).get('country'),
download_manager=download_manager
),
first_name_lookup=load_lookup_from_config(
config.get('lookup', {}).get('first_name'),
download_manager=download_manager
),
last_name_lookup=load_lookup_from_config(
config.get('lookup', {}).get('last_name'),
download_manager=download_manager
)
)
def create_asset_zip_for_semantic_document(
zip_filename: str,
semantic_document: SemanticDocument,
relative_xml_filename: str,
local_xml_filename: str
):
semantic_graphic_list = list(semantic_document.iter_by_type_recursively(
SemanticGraphic
))
LOGGER.debug('semantic_graphic_list: %r', semantic_graphic_list)
with ZipFile(zip_filename, 'w') as zip_file:
zip_file.write(
local_xml_filename,
relative_xml_filename
)
for semantic_graphic in semantic_graphic_list:
assert semantic_graphic.relative_path, \
"graphic relative_path missing, ensure extract_graphic_assets was enabled"
layout_graphic = semantic_graphic.layout_graphic
assert layout_graphic
assert layout_graphic.local_file_path
zip_file.write(
layout_graphic.local_file_path,
semantic_graphic.relative_path
)
LOGGER.debug('response_content (bytes): %d', Path(zip_filename).stat().st_size)
def get_xml_tree(xml_root: etree.ElementBase) -> etree._ElementTree:
if isinstance(xml_root, etree._ElementTree): # pylint: disable=protected-access
# Note: _XSLTResultTree is extending _ElementTree
return xml_root
return etree.ElementTree(xml_root)
def serialize_xml_to_file(
xml_root: etree.ElementBase,
filename: str
):
get_xml_tree(xml_root).write(
filename,
encoding='utf-8',
pretty_print=False
)
@dataclass
class DocumentRequestParameters:
first_page: Optional[int] = None
last_page: Optional[int] = None
class ScienceBeamParserError(RuntimeError):
pass
class BadRequestScienceBeamParserError(ScienceBeamParserError):
pass
class UnsupportedRequestMediaTypeScienceBeamParserError(BadRequestScienceBeamParserError):
pass
class UnsupportedResponseMediaTypeScienceBeamParserError(BadRequestScienceBeamParserError):
pass
class ScienceBeamBaseParser:
def __init__(self, config: AppConfig):
self.config = config
self.download_manager = DownloadManager(
download_dir=get_download_dir(config)
)
self.pdfalto_wrapper = PdfAltoWrapper(
self.download_manager.download_if_url(config['pdfalto']['path'])
)
self.pdfalto_wrapper.ensure_executable()
self.app_context = AppContext(
app_config=config,
download_manager=self.download_manager,
lazy_wapiti_binary_wrapper=LazyWapitiBinaryWrapper(
install_url=config.get('wapiti', {}).get('install_source'),
download_manager=self.download_manager
)
)
self.fulltext_processor_config = FullTextProcessorConfig.from_app_config(app_config=config)
self.fulltext_models = load_models(
config,
app_context=self.app_context,
fulltext_processor_config=self.fulltext_processor_config
)
if config.get('preload_on_startup'):
self.fulltext_models.preload()
self.app_features_context = load_app_features_context(
config,
download_manager=self.download_manager
)
tei_to_jats_config = config.get('xslt', {}).get('tei_to_jats', {})
self.tei_to_jats_xslt_transformer = XsltTransformerWrapper.from_template_file(
TEI_TO_JATS_XSLT_FILE,
xslt_template_parameters=tei_to_jats_config.get('parameters', {})
)
self.doc_to_pdf_enabled = config.get('doc_to_pdf', {}).get('enabled', True)
self.doc_to_pdf_convert_parameters = config.get('doc_to_pdf', {}).get('convert', {})
self.doc_converter_wrapper = DocConverterWrapper(
**config.get('doc_to_pdf', {}).get('listener', {})
)
class ScienceBeamParserBaseSession:
def __init__(
self,
parser: 'ScienceBeamParser',
temp_dir: Optional[str] = None,
fulltext_processor_config: Optional[FullTextProcessorConfig] = None,
document_request_parameters: Optional[DocumentRequestParameters] = None
):
self.parser = parser
self.exit_stack = ExitStack()
self._temp_dir: Optional[str] = temp_dir
if fulltext_processor_config is None:
fulltext_processor_config = parser.fulltext_processor_config
self.fulltext_processor_config = fulltext_processor_config
if document_request_parameters is None:
document_request_parameters = DocumentRequestParameters()
self.document_request_parameters = document_request_parameters
def __enter__(self) -> 'ScienceBeamParserBaseSession':
return self
def close(self):
self.exit_stack.close()
def __exit__(self, exc, value, tb):
self.close()
@property
def temp_dir(self) -> str:
if not self._temp_dir:
temp_dir_context = TemporaryDirectory( # pylint: disable=consider-using-with
suffix='-sb-parser'
)
self.exit_stack.push(temp_dir_context)
self._temp_dir = temp_dir_context.__enter__()
return self._temp_dir
@property
def temp_path(self) -> Path:
return Path(self.temp_dir)
class _ScienceBeamParserSessionDerivative:
def __init__(
self,
session: 'ScienceBeamParserBaseSession'
):
self.session = session
@property
def parser(self) -> ScienceBeamBaseParser:
return self.session.parser
@property
def temp_dir(self) -> str:
return self.session.temp_dir
@property
def temp_path(self) -> Path:
return self.session.temp_path
class ScienceBeamParserSessionParsedSemanticDocument(_ScienceBeamParserSessionDerivative):
def __init__(
self,
session: 'ScienceBeamParserBaseSession',
semantic_document: SemanticDocument
):
super().__init__(session=session)
self.semantic_document = semantic_document
@property
def tei_to_jats_xslt_transformer(self) -> XsltTransformerWrapper:
return self.parser.tei_to_jats_xslt_transformer
def _get_tei_to_jats_xml_root(self, xml_root: etree.ElementBase) -> etree.ElementBase:
start = monotonic()
xml_root = self.tei_to_jats_xslt_transformer(xml_root)
end = monotonic()
LOGGER.info('tei to jats, took=%.3fs', end - start)
return xml_root
def _serialize_xml_to_file(
self,
xml_root: etree.ElementBase,
filename: str
) -> str:
start = monotonic()
serialize_xml_to_file(xml_root, filename=filename)
end = monotonic()
LOGGER.info('serializing xml, took=%.3fs', end - start)
return filename
def get_supported_response_media_type(self) -> Set[str]:
return {
MediaTypes.TEI_XML,
MediaTypes.TEI_ZIP,
MediaTypes.JATS_XML,
MediaTypes.JATS_ZIP
}
def get_local_file_for_response_media_type(
self,
response_media_type: str
) -> str:
if response_media_type not in self.get_supported_response_media_type():
raise UnsupportedResponseMediaTypeScienceBeamParserError()
tei_document = get_tei_for_semantic_document(
self.semantic_document
)
xml_root = tei_document.root
relative_xml_filename = 'tei.xml'
if response_media_type in JATS_MEDIA_TYPES:
xml_root = self._get_tei_to_jats_xml_root(xml_root)
relative_xml_filename = 'jats.xml'
local_xml_filename = os.path.join(self.temp_dir, relative_xml_filename)
self._serialize_xml_to_file(xml_root, local_xml_filename)
LOGGER.debug('local_xml_filename: %r', local_xml_filename)
if response_media_type in ASSET_ZIP_MEDIA_TYPES:
zip_filename = os.path.join(self.temp_dir, 'results.zip')
create_asset_zip_for_semantic_document(
zip_filename,
semantic_document=self.semantic_document,
local_xml_filename=local_xml_filename,
relative_xml_filename=relative_xml_filename
)
return zip_filename
return local_xml_filename
class ScienceBeamParserSessionParsedLayoutDocument(_ScienceBeamParserSessionDerivative):
def __init__(
self,
session: 'ScienceBeamParserBaseSession',
layout_document: LayoutDocument,
pdf_path: str
):
super().__init__(session=session)
self.layout_document = layout_document
self.pdf_path = pdf_path
@property
def fulltext_models(self) -> FullTextModels:
return self.parser.fulltext_models
@property
def app_features_context(self) -> AppFeaturesContext:
return self.parser.app_features_context
def _get_semantic_document(
self,
fulltext_processor: FullTextProcessor
) -> SemanticDocument:
context = FullTextProcessorDocumentContext(
pdf_path=self.pdf_path,
temp_dir=self.temp_dir
)
semantic_document = (
fulltext_processor
.get_semantic_document_for_layout_document(
self.layout_document,
context=context
)
)
return semantic_document
def get_parsed_semantic_document(
self,
fulltext_processor_config: Optional[FullTextProcessorConfig] = None
) -> ScienceBeamParserSessionParsedSemanticDocument:
if fulltext_processor_config is None:
fulltext_processor_config = self.session.fulltext_processor_config
fulltext_processor = FullTextProcessor(
self.fulltext_models,
app_features_context=self.app_features_context,
config=fulltext_processor_config
)
return ScienceBeamParserSessionParsedSemanticDocument(
self.session,
self._get_semantic_document(fulltext_processor)
)
def get_local_file_for_response_media_type(
self,
response_media_type: str
) -> str:
if response_media_type == MediaTypes.PDF:
return self.pdf_path
fulltext_processor_config = self.session.fulltext_processor_config
if response_media_type in ASSET_ZIP_MEDIA_TYPES:
fulltext_processor_config = (
fulltext_processor_config
._replace(
extract_graphic_assets=True,
extract_graphic_bounding_boxes=True
)
)
assert fulltext_processor_config.extract_graphic_assets, \
"extract_graphic_assets required for asset zip"
return (
self.get_parsed_semantic_document(
fulltext_processor_config
).get_local_file_for_response_media_type(
response_media_type
)
)
class ScienceBeamParserSessionSource(_ScienceBeamParserSessionDerivative):
def __init__(
self,
session: 'ScienceBeamParserBaseSession',
source_path: str,
source_media_type: str
):
super().__init__(session=session)
self.source_path = source_path
self.source_media_type = source_media_type
self.lazy_pdf_path = LazyLoaded[str](self._get_or_convert_to_pdf_path)
self.lazy_alto_xml_path = LazyLoaded[str](self._parse_to_alto_xml)
self.lazy_parsed_layout_document = LazyLoaded[
ScienceBeamParserSessionParsedLayoutDocument
](self._parse_to_parsed_layout_document)
@property
def parser(self) -> ScienceBeamBaseParser:
return self.session.parser
@property
def doc_to_pdf_enabled(self) -> bool:
return self.parser.doc_to_pdf_enabled
@property
def doc_converter_wrapper(self) -> DocConverterWrapper:
return self.parser.doc_converter_wrapper
@property
def doc_to_pdf_convert_parameters(self) -> dict:
return self.parser.doc_to_pdf_convert_parameters
@property
def pdfalto_wrapper(self) -> PdfAltoWrapper:
return self.parser.pdfalto_wrapper
@property
def document_request_parameters(self) -> DocumentRequestParameters:
return self.session.document_request_parameters
def _get_or_convert_to_pdf_path(
self
) -> str:
LOGGER.info(
'media_type=%r (filename=%r)',
self.source_media_type,
self.source_path
)
if self.source_media_type in DOC_TO_PDF_SUPPORTED_MEDIA_TYPES:
if not self.doc_to_pdf_enabled:
LOGGER.info('doc to pdf not enabled')
raise UnsupportedRequestMediaTypeScienceBeamParserError(
'doc to pdf not enabled'
)
target_temp_file = self.doc_converter_wrapper.convert(
self.source_path,
**self.doc_to_pdf_convert_parameters
)
return target_temp_file
if self.source_media_type != MediaTypes.PDF:
raise UnsupportedRequestMediaTypeScienceBeamParserError(
'unsupported media type: %r' % self.source_media_type
)
return self.source_path
def _parse_to_alto_xml(self) -> str:
output_path = os.path.join(self.temp_dir, TEMP_ALTO_XML_FILENAME)
self.pdfalto_wrapper.convert_pdf_to_pdfalto_xml(
str(self.lazy_pdf_path.get()),
str(output_path),
first_page=self.document_request_parameters.first_page,
last_page=self.document_request_parameters.last_page
)
return output_path
def _parse_to_parsed_layout_document(
self
) -> ScienceBeamParserSessionParsedLayoutDocument:
pdf_path = self.lazy_pdf_path.get()
root = etree.parse(self.lazy_alto_xml_path.get())
layout_document = normalize_layout_document(
parse_alto_root(root),
preserve_empty_pages=True
)
return ScienceBeamParserSessionParsedLayoutDocument(
self.session,
layout_document=layout_document,
pdf_path=pdf_path
)
def get_parsed_layout_document(self) -> ScienceBeamParserSessionParsedLayoutDocument:
return self.lazy_parsed_layout_document.get()
def get_layout_document(self) -> LayoutDocument:
return self.get_parsed_layout_document().layout_document
def get_local_file_for_response_media_type(
self,
response_media_type: str
) -> str:
if response_media_type == MediaTypes.PDF:
return self.lazy_pdf_path.get()
if response_media_type == MediaTypes.ALTO_XML:
return self.lazy_alto_xml_path.get()
return self.get_parsed_layout_document().get_local_file_for_response_media_type(
response_media_type
)
class ScienceBeamParserSession(ScienceBeamParserBaseSession):
def __enter__(self) -> 'ScienceBeamParserSession':
super().__enter__()
return self
def get_source(
self,
source_path: str,
source_media_type: str
) -> ScienceBeamParserSessionSource:
return ScienceBeamParserSessionSource(
self,
source_path=source_path,
source_media_type=source_media_type
)
class ScienceBeamParser(ScienceBeamBaseParser):
@staticmethod
def from_config(config: AppConfig) -> 'ScienceBeamParser':
return ScienceBeamParser(config)
def get_new_session(self, **kwargs) -> ScienceBeamParserSession:
return ScienceBeamParserSession(self, **kwargs)
| 0.703142 | 0.154951 |
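
A minimal usage sketch for the session API in parser.py above (not part of the package source): it assumes an AppConfig instance `config` has already been loaded elsewhere and converts a local PDF into TEI XML. The result file lives in the session's temporary directory, so it is copied out before the session closes.

import shutil

from sciencebeam_parser.app.parser import ScienceBeamParser
from sciencebeam_parser.utils.media_types import MediaTypes

def convert_pdf_to_tei(config, pdf_path: str, output_path: str) -> None:
    # `config` is assumed to be an AppConfig instance obtained elsewhere
    parser = ScienceBeamParser.from_config(config)
    with parser.get_new_session() as session:
        source = session.get_source(pdf_path, MediaTypes.PDF)
        local_result = source.get_local_file_for_response_media_type(
            MediaTypes.TEI_XML
        )
        # copy out of the session's temporary directory before it is cleaned up
        shutil.copyfile(local_result, output_path)
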
import logging
import threading
from typing import Optional, Union
import PIL.Image
import tesserocr
from tesserocr import PyTessBaseAPI
from sciencebeam_parser.ocr_models.ocr_model import (
OpticalCharacterRecognitionModel,
OpticalCharacterRecognitionModelResult,
SimpleOpticalCharacterRecognitionModelResult
)
from sciencebeam_parser.utils.lazy import LazyLoaded
LOGGER = logging.getLogger(__name__)
DEFAULT_OCR_LANG = 'eng'
def get_enum_value(enum_class, value: Optional[Union[int, str]], default_value: int) -> int:
if value is None:
return default_value
if isinstance(value, int):
return value
return getattr(enum_class, value)
class TesserComputerVisionModel(OpticalCharacterRecognitionModel):
def __init__(self, config: dict):
super().__init__()
self._lock = threading.Lock()
self.lang = str(config.get('lang') or DEFAULT_OCR_LANG)
self.oem = get_enum_value(tesserocr.OEM, config.get('oem'), tesserocr.OEM.DEFAULT)
self.psm = get_enum_value(tesserocr.PSM, config.get('psm'), tesserocr.PSM.AUTO)
self._lazy_tesser_api = LazyLoaded[PyTessBaseAPI](self._create_tesser_api)
def _create_tesser_api(self) -> PyTessBaseAPI:
LOGGER.info(
'creating tesser api with oem=%r, psm=%r, lang=%r',
self.oem, self.psm, self.lang
)
return PyTessBaseAPI(
oem=self.oem,
psm=self.psm,
lang=self.lang
).__enter__()
@property
def tesser_api(self) -> PyTessBaseAPI:
return self._lazy_tesser_api.get()
def preload(self):
with self._lock:
self._lazy_tesser_api.get()
def predict_single(self, image: PIL.Image.Image) -> OpticalCharacterRecognitionModelResult:
with self._lock:
tesser_api = self.tesser_api
LOGGER.info(
'setting ocr image: %dx%d (format=%r)',
image.width, image.height, image.format
)
tesser_api.SetImage(image)
            text = tesser_api.GetUTF8Text()
return SimpleOpticalCharacterRecognitionModelResult(
text=text
)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/ocr_models/tesserocr_ocr_model.py
|
tesserocr_ocr_model.py
|
import logging
import threading
from typing import Optional, Union
import PIL.Image
import tesserocr
from tesserocr import PyTessBaseAPI
from sciencebeam_parser.ocr_models.ocr_model import (
OpticalCharacterRecognitionModel,
OpticalCharacterRecognitionModelResult,
SimpleOpticalCharacterRecognitionModelResult
)
from sciencebeam_parser.utils.lazy import LazyLoaded
LOGGER = logging.getLogger(__name__)
DEFAULT_OCR_LANG = 'eng'
def get_enum_value(enum_class, value: Optional[Union[int, str]], default_value: int) -> int:
if value is None:
return default_value
if isinstance(value, int):
return value
return getattr(enum_class, value)
class TesserComputerVisionModel(OpticalCharacterRecognitionModel):
def __init__(self, config: dict):
super().__init__()
self._lock = threading.Lock()
self.lang = str(config.get('lang') or DEFAULT_OCR_LANG)
self.oem = get_enum_value(tesserocr.OEM, config.get('oem'), tesserocr.OEM.DEFAULT)
self.psm = get_enum_value(tesserocr.PSM, config.get('psm'), tesserocr.PSM.AUTO)
self._lazy_tesser_api = LazyLoaded[PyTessBaseAPI](self._create_tesser_api)
def _create_tesser_api(self) -> PyTessBaseAPI:
LOGGER.info(
'creating tesser api with oem=%r, psm=%r, lang=%r',
self.oem, self.psm, self.lang
)
return PyTessBaseAPI(
oem=self.oem,
psm=self.psm,
lang=self.lang
).__enter__()
@property
def tesser_api(self) -> PyTessBaseAPI:
return self._lazy_tesser_api.get()
def preload(self):
with self._lock:
self._lazy_tesser_api.get()
def predict_single(self, image: PIL.Image.Image) -> OpticalCharacterRecognitionModelResult:
with self._lock:
tesser_api = self.tesser_api
LOGGER.info(
'setting ocr image: %dx%d (format=%r)',
image.width, image.height, image.format
)
tesser_api.SetImage(image)
            text = tesser_api.GetUTF8Text()
return SimpleOpticalCharacterRecognitionModelResult(
text=text
)
| 0.87105 | 0.103477 |
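
A minimal usage sketch for the Tesseract-backed OCR model above (not part of the package source). The image filename is a placeholder, and the `text` attribute on the result is an assumption based on the keyword argument passed to SimpleOpticalCharacterRecognitionModelResult.

import PIL.Image

from sciencebeam_parser.ocr_models.tesserocr_ocr_model import TesserComputerVisionModel

# config values may be enum member names (resolved via get_enum_value) or plain ints
ocr_model = TesserComputerVisionModel({
    'lang': 'eng',
    'oem': 'DEFAULT',
    'psm': 'AUTO'
})
ocr_model.preload()  # optional: create the Tesseract API eagerly

image = PIL.Image.open('figure-1.png')  # hypothetical input image
result = ocr_model.predict_single(image)
print(result.text)  # assumed attribute name, see note above
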
import atexit
import logging
import signal
import subprocess
import time
from functools import partial
from threading import Timer, Thread
from typing import Iterable, Optional, Sequence, Union
LOGGER = logging.getLogger(__name__)
class ChildProcessReturnCodeError(ChildProcessError):
def __init__(self, *args, returncode: int, process=None):
super().__init__(*args)
self.returncode = returncode
self.process = process
class ChildProcessTimeoutError(ChildProcessReturnCodeError):
pass
class BackgroundProcess:
def __init__(self, process: subprocess.Popen):
self.process = process
self._stopped_by_timeout = False
self._created_time = time.monotonic()
self._returncode: Optional[int] = None
def __repr__(self) -> str:
return (
'{type_name}('
'pid={self.process.pid}'
', returncode={self.process.returncode}'
')'
).format(
type_name=type(self).__name__,
self=self
)
def __enter__(self):
return self
def __exit__(self, exc_type, value, traceback):
self.process.__exit__(exc_type, value, traceback)
def is_running(self) -> bool:
self.process.poll()
return self.returncode is None
@property
def returncode(self) -> Optional[int]:
if self.process.returncode != self._returncode:
self._returncode = self.process.returncode
LOGGER.debug('process(pid=%s).returncode: %s', self.process.pid, self._returncode)
return self._returncode
@property
def pid(self) -> Optional[int]:
return self.process.pid
def send_signal(self, sig: int) -> None:
if self.process.returncode is not None:
LOGGER.debug(
'not sending signal %r, process has already stopped: %s',
sig, self.process.pid
)
return
LOGGER.info('sending %s to process %s', sig, self.process.pid)
self.process.send_signal(sig)
def terminate(self) -> None:
self.send_signal(signal.SIGINT)
def kill(self) -> None:
self.send_signal(signal.SIGKILL)
def kill_if_runing(self) -> None:
if not self.is_running():
return
self.kill()
def wait(self) -> int:
self.process.wait()
        # Note: reading the returncode property (rather than the return value of process.wait()) so that its logging logic runs
returncode = self.returncode
assert returncode is not None
return returncode
def get_uptime(self) -> float:
return time.monotonic() - self._created_time
def stop(self, wait: bool = True, kill_timeout: int = 60) -> None:
self.terminate()
if kill_timeout:
Timer(kill_timeout, self.kill_if_runing).start()
if wait:
LOGGER.info('waiting for process(pid=%s) to stop', self.process.pid)
self.wait()
LOGGER.info(
'process(pid=%s) has stopped with returncode: %s',
self.process.pid, self.returncode
)
def stop_due_to_timeout(self, **kwargs) -> None:
LOGGER.info('process timeout, stopping: %s', self.process.pid)
self._stopped_by_timeout = True
self.stop(**kwargs)
def is_stopped_by_timeout(self) -> bool:
return self._stopped_by_timeout
def check_returncode(self) -> None:
returncode = self.process.returncode
if returncode is None:
return
if self.is_stopped_by_timeout():
LOGGER.debug('process stopped by timeout, return code: %s', returncode)
raise ChildProcessTimeoutError(
'process stopped by timeout, return code: %s' % returncode,
returncode=returncode
)
if returncode != 0:
LOGGER.debug('process failed with return code: %s', returncode)
raise ChildProcessReturnCodeError(
'process failed with return code: %s' % returncode,
returncode=returncode
)
def stop_if_running(self, **kwargs) -> None:
if not self.is_running():
return
self.stop(**kwargs)
def stream_lines_to_logger(
lines: Iterable[str],
logger: logging.Logger,
prefix: str = ''
):
for line in lines:
line = line.strip()
if line:
logger.info('%s%s', prefix, line)
def exec_with_logging(
command: Union[str, Sequence[str]],
logging_prefix: Optional[str] = None,
process_timeout: Optional[float] = None,
daemon: bool = False,
check_returncode: bool = True,
**kwargs
) -> BackgroundProcess:
p = BackgroundProcess(subprocess.Popen( # pylint: disable=consider-using-with
command,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
**kwargs
))
if logging_prefix is None:
logging_prefix = 'process'
logging_prefix += '[pid:%s]: ' % p.process.pid
if not daemon:
timer = None
if process_timeout:
timer = Timer(process_timeout, p.stop_due_to_timeout)
timer.start()
assert p.process.stdout is not None
stream_lines_to_logger(p.process.stdout, LOGGER, logging_prefix)
p.wait()
if timer:
timer.cancel()
if check_returncode:
p.check_returncode()
return p
t = Thread(target=partial(
stream_lines_to_logger,
lines=p.process.stdout,
logger=LOGGER,
prefix=logging_prefix
))
t.daemon = True
t.start()
return p
class CommandRestartableBackgroundProcess:
def __init__(
self,
command: Union[str, Sequence[str]],
        name: Optional[str] = None,
        logging_prefix: Optional[str] = None,
stop_at_exit: bool = False
):
self.command = command
self.name = name
self.logging_prefix = logging_prefix
self.process: Optional[BackgroundProcess] = None
self.stop_at_exit = stop_at_exit
self._atexit_registered = False
def stop(self, wait: bool = True) -> None:
if self.process:
self.process.stop(wait=wait)
def stop_if_running(self, wait: bool = True, **kwargs) -> None:
if self.process:
self.process.stop_if_running(wait=wait, **kwargs)
def start(self, stop: bool = True) -> None:
if stop:
self.stop_if_running(wait=True)
if self.stop_at_exit and not self._atexit_registered:
atexit.register(self.stop_if_running)
self._atexit_registered = True
LOGGER.info('starting %s', self.name)
LOGGER.debug('running background command: %s', self.command)
self.process = exec_with_logging(
self.command,
logging_prefix=self.logging_prefix or self.name,
daemon=True
)
def is_running(self) -> bool:
return self.process is not None and self.process.is_running()
def get_uptime(self) -> float:
assert self.process is not None
return self.process.get_uptime()
def start_if_not_running(self) -> None:
if not self.is_running():
if self.process:
LOGGER.info('process has stopped, restarting: %s', self.process.pid)
self.start()
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/utils/background_process.py
|
background_process.py
|
import atexit
import logging
import signal
import subprocess
import time
from functools import partial
from threading import Timer, Thread
from typing import Iterable, Optional, Sequence, Union
LOGGER = logging.getLogger(__name__)
class ChildProcessReturnCodeError(ChildProcessError):
def __init__(self, *args, returncode: int, process=None):
super().__init__(*args)
self.returncode = returncode
self.process = process
class ChildProcessTimeoutError(ChildProcessReturnCodeError):
pass
class BackgroundProcess:
def __init__(self, process: subprocess.Popen):
self.process = process
self._stopped_by_timeout = False
self._created_time = time.monotonic()
self._returncode: Optional[int] = None
def __repr__(self) -> str:
return (
'{type_name}('
'pid={self.process.pid}'
', returncode={self.process.returncode}'
')'
).format(
type_name=type(self).__name__,
self=self
)
def __enter__(self):
return self
def __exit__(self, exc_type, value, traceback):
self.process.__exit__(exc_type, value, traceback)
def is_running(self) -> bool:
self.process.poll()
return self.returncode is None
@property
def returncode(self) -> Optional[int]:
if self.process.returncode != self._returncode:
self._returncode = self.process.returncode
LOGGER.debug('process(pid=%s).returncode: %s', self.process.pid, self._returncode)
return self._returncode
@property
def pid(self) -> Optional[int]:
return self.process.pid
def send_signal(self, sig: int) -> None:
if self.process.returncode is not None:
LOGGER.debug(
'not sending signal %r, process has already stopped: %s',
sig, self.process.pid
)
return
LOGGER.info('sending %s to process %s', sig, self.process.pid)
self.process.send_signal(sig)
def terminate(self) -> None:
self.send_signal(signal.SIGINT)
def kill(self) -> None:
self.send_signal(signal.SIGKILL)
def kill_if_runing(self) -> None:
if not self.is_running():
return
self.kill()
def wait(self) -> int:
self.process.wait()
        # Note: reading the returncode property (rather than the return value of process.wait()) so that its logging logic runs
returncode = self.returncode
assert returncode is not None
return returncode
def get_uptime(self) -> float:
return time.monotonic() - self._created_time
def stop(self, wait: bool = True, kill_timeout: int = 60) -> None:
self.terminate()
if kill_timeout:
Timer(kill_timeout, self.kill_if_runing).start()
if wait:
LOGGER.info('waiting for process(pid=%s) to stop', self.process.pid)
self.wait()
LOGGER.info(
'process(pid=%s) has stopped with returncode: %s',
self.process.pid, self.returncode
)
def stop_due_to_timeout(self, **kwargs) -> None:
LOGGER.info('process timeout, stopping: %s', self.process.pid)
self._stopped_by_timeout = True
self.stop(**kwargs)
def is_stopped_by_timeout(self) -> bool:
return self._stopped_by_timeout
def check_returncode(self) -> None:
returncode = self.process.returncode
if returncode is None:
return
if self.is_stopped_by_timeout():
LOGGER.debug('process stopped by timeout, return code: %s', returncode)
raise ChildProcessTimeoutError(
'process stopped by timeout, return code: %s' % returncode,
returncode=returncode
)
if returncode != 0:
LOGGER.debug('process failed with return code: %s', returncode)
raise ChildProcessReturnCodeError(
'process failed with return code: %s' % returncode,
returncode=returncode
)
def stop_if_running(self, **kwargs) -> None:
if not self.is_running():
return
self.stop(**kwargs)
def stream_lines_to_logger(
lines: Iterable[str],
logger: logging.Logger,
prefix: str = ''
):
for line in lines:
line = line.strip()
if line:
logger.info('%s%s', prefix, line)
def exec_with_logging(
command: Union[str, Sequence[str]],
logging_prefix: Optional[str] = None,
process_timeout: Optional[float] = None,
daemon: bool = False,
check_returncode: bool = True,
**kwargs
) -> BackgroundProcess:
p = BackgroundProcess(subprocess.Popen( # pylint: disable=consider-using-with
command,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
**kwargs
))
if logging_prefix is None:
logging_prefix = 'process'
logging_prefix += '[pid:%s]: ' % p.process.pid
if not daemon:
timer = None
if process_timeout:
timer = Timer(process_timeout, p.stop_due_to_timeout)
timer.start()
assert p.process.stdout is not None
stream_lines_to_logger(p.process.stdout, LOGGER, logging_prefix)
p.wait()
if timer:
timer.cancel()
if check_returncode:
p.check_returncode()
return p
t = Thread(target=partial(
stream_lines_to_logger,
lines=p.process.stdout,
logger=LOGGER,
prefix=logging_prefix
))
t.daemon = True
t.start()
return p
class CommandRestartableBackgroundProcess:
def __init__(
self,
command: Union[str, Sequence[str]],
        name: Optional[str] = None,
        logging_prefix: Optional[str] = None,
stop_at_exit: bool = False
):
self.command = command
self.name = name
self.logging_prefix = logging_prefix
self.process: Optional[BackgroundProcess] = None
self.stop_at_exit = stop_at_exit
self._atexit_registered = False
def stop(self, wait: bool = True) -> None:
if self.process:
self.process.stop(wait=wait)
def stop_if_running(self, wait: bool = True, **kwargs) -> None:
if self.process:
self.process.stop_if_running(wait=wait, **kwargs)
def start(self, stop: bool = True) -> None:
if stop:
self.stop_if_running(wait=True)
if self.stop_at_exit and not self._atexit_registered:
atexit.register(self.stop_if_running)
self._atexit_registered = True
LOGGER.info('starting %s', self.name)
LOGGER.debug('running background command: %s', self.command)
self.process = exec_with_logging(
self.command,
logging_prefix=self.logging_prefix or self.name,
daemon=True
)
def is_running(self) -> bool:
return self.process is not None and self.process.is_running()
def get_uptime(self) -> float:
assert self.process is not None
return self.process.get_uptime()
def start_if_not_running(self) -> None:
if not self.is_running():
if self.process:
LOGGER.info('process has stopped, restarting: %s', self.process.pid)
self.start()
| 0.767167 | 0.128225 |
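
A minimal usage sketch for the process helpers above (not part of the package source), assuming a POSIX system where SIGINT/SIGKILL are available; the commands are placeholders.

from sciencebeam_parser.utils.background_process import (
    CommandRestartableBackgroundProcess,
    exec_with_logging
)

# run a short command to completion, streaming its output to the module logger;
# raises ChildProcessReturnCodeError on a non-zero exit code and
# ChildProcessTimeoutError if the timeout stopped the process first
exec_with_logging(['echo', 'hello'], logging_prefix='echo', process_timeout=10)

# keep a longer-running command in the background and restart it on demand
service = CommandRestartableBackgroundProcess(
    command=['sleep', '60'],
    name='sleep service',
    stop_at_exit=True
)
service.start()
service.start_if_not_running()  # no-op while the process is still running
service.stop_if_running(wait=True)
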
import mimetypes
from typing import Optional, Sequence
class MediaTypes:
"""
Media Types used by ScienceBeam Parser.
Where possible, these correspond to official media types.
In some instances, no official media type is defined yet.
"""
PDF = 'application/pdf'
DOC = 'application/msword'
DOCX = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
DOTX = 'application/vnd.openxmlformats-officedocument.wordprocessingml.template'
RTF = 'application/rtf'
XML = 'application/xml'
ZIP = 'application/zip'
TEI_XML = 'application/tei+xml'
JATS_XML = 'application/vnd.jats+xml'
TEI_ZIP = 'application/tei+xml+zip'
JATS_ZIP = 'application/vnd.jats+xml+zip'
ALTO_XML = 'application/vnd.alto+xml'
JSON = 'application/json'
OCTET_STREAM = 'application/octet-stream'
WILDCARD_MEDIA_TYPE = '*/*'
MEDIA_TYPE_SUFFIX_MAP = {
# fixed mime type suffix map (which may be incorrectly defined in Python 3.5)
MediaTypes.DOC: '.doc',
# additional types
MediaTypes.TEI_XML: '.tei.xml',
MediaTypes.JATS_XML: '.jats.xml',
MediaTypes.TEI_ZIP: '.tei.zip',
MediaTypes.JATS_ZIP: '.jats.zip'
}
def guess_extension_for_media_type(media_type: str) -> Optional[str]:
ext = MEDIA_TYPE_SUFFIX_MAP.get(media_type)
if not ext:
ext = mimetypes.guess_extension(media_type)
return ext
def guess_media_type_for_filename(filename: str) -> Optional[str]:
return mimetypes.guess_type(filename)[0]
def get_first_matching_media_type(
accept_media_types: Sequence[str],
available_media_types: Sequence[str]
) -> Optional[str]:
if not available_media_types:
return None
if not accept_media_types:
return available_media_types[0]
for accept_media_type in accept_media_types:
if accept_media_type == WILDCARD_MEDIA_TYPE:
return available_media_types[0]
for available_media_type in available_media_types:
if accept_media_type == available_media_type:
return available_media_type
return None
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/utils/media_types.py
|
media_types.py
|
import mimetypes
from typing import Optional, Sequence
class MediaTypes:
"""
Media Types used by ScienceBeam Parser.
Where possible, these correspond to official media types.
In some instances, no official media type is defined yet.
"""
PDF = 'application/pdf'
DOC = 'application/msword'
DOCX = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
DOTX = 'application/vnd.openxmlformats-officedocument.wordprocessingml.template'
RTF = 'application/rtf'
XML = 'application/xml'
ZIP = 'application/zip'
TEI_XML = 'application/tei+xml'
JATS_XML = 'application/vnd.jats+xml'
TEI_ZIP = 'application/tei+xml+zip'
JATS_ZIP = 'application/vnd.jats+xml+zip'
ALTO_XML = 'application/vnd.alto+xml'
JSON = 'application/json'
OCTET_STREAM = 'application/octet-stream'
WILDCARD_MEDIA_TYPE = '*/*'
MEDIA_TYPE_SUFFIX_MAP = {
# fixed mime type suffix map (which may be incorrectly defined in Python 3.5)
MediaTypes.DOC: '.doc',
# additional types
MediaTypes.TEI_XML: '.tei.xml',
MediaTypes.JATS_XML: '.jats.xml',
MediaTypes.TEI_ZIP: '.tei.zip',
MediaTypes.JATS_ZIP: '.jats.zip'
}
def guess_extension_for_media_type(media_type: str) -> Optional[str]:
ext = MEDIA_TYPE_SUFFIX_MAP.get(media_type)
if not ext:
ext = mimetypes.guess_extension(media_type)
return ext
def guess_media_type_for_filename(filename: str) -> Optional[str]:
return mimetypes.guess_type(filename)[0]
def get_first_matching_media_type(
accept_media_types: Sequence[str],
available_media_types: Sequence[str]
) -> Optional[str]:
if not available_media_types:
return None
if not accept_media_types:
return available_media_types[0]
for accept_media_type in accept_media_types:
if accept_media_type == WILDCARD_MEDIA_TYPE:
return available_media_types[0]
for available_media_type in available_media_types:
if accept_media_type == available_media_type:
return available_media_type
return None
| 0.699152 | 0.204501 |
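
A short usage sketch for the media type helpers above (not part of the package source), illustrating content negotiation and the suffix overrides in MEDIA_TYPE_SUFFIX_MAP.

from sciencebeam_parser.utils.media_types import (
    MediaTypes,
    get_first_matching_media_type,
    guess_extension_for_media_type,
    guess_media_type_for_filename
)

assert guess_media_type_for_filename('paper.pdf') == MediaTypes.PDF
assert guess_extension_for_media_type(MediaTypes.TEI_XML) == '.tei.xml'

# the first accepted media type that is also available wins; '*/*' matches anything
selected = get_first_matching_media_type(
    accept_media_types=[MediaTypes.JATS_XML, '*/*'],
    available_media_types=[MediaTypes.TEI_XML, MediaTypes.JATS_XML]
)
assert selected == MediaTypes.JATS_XML
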
import logging
import re
from itertools import zip_longest
from typing import Mapping, NamedTuple, Optional, Sequence, Tuple, Union
from lxml import etree
from lxml.builder import ElementMaker
LOGGER = logging.getLogger(__name__)
class TagExpression(NamedTuple):
tag: str
attrib: Mapping[str, str]
def create_node(self, *args, element_maker: ElementMaker):
try:
return element_maker(self.tag, self.attrib, *args)
except ValueError as exc:
raise ValueError(
'failed to create node with tag=%r, attrib=%r due to %s' % (
self.tag, self.attrib, exc
)
) from exc
def parse_tag_expression(tag_expression: str) -> TagExpression:
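    # accepts a plain tag name or a tag with a single attribute, e.g. 'div[@type="abstract"]' (the '@' is optional)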
match = re.match(r'^([^\[]+)(\[@?([^=]+)="(.+)"\])?$', tag_expression)
if not match:
raise ValueError('invalid tag expression: %s' % tag_expression)
LOGGER.debug('match: %s', match.groups())
tag_name = match.group(1)
if match.group(2):
attrib = {match.group(3): match.group(4)}
else:
attrib = {}
return TagExpression(tag=tag_name, attrib=attrib)
def _get_last_child_or_none(element: etree.ElementBase) -> Optional[etree.ElementBase]:
try:
return element[-1]
except IndexError:
return None
def _append_text(element: etree.ElementBase, text: Optional[str]) -> None:
if not text:
return
last_child = _get_last_child_or_none(element)
if last_child is not None and last_child.tail:
last_child.tail = last_child.tail + '' + text
elif last_child is not None:
last_child.tail = text
elif element.text:
element.text = element.text + '' + text
else:
element.text = text
def _get_common_path(path1: Sequence[str], path2: Sequence[str]) -> Sequence[str]:
if path1 == path2:
return path1
common_path = []
for path1_element, path2_element in zip_longest(path1, path2):
if path1_element != path2_element:
break
common_path.append(path1_element)
return common_path
def _get_element_at_path(
current_element: etree.ElementBase,
current_path: Sequence[str],
required_path: Sequence[str],
element_maker: ElementMaker
) -> Tuple[etree.ElementBase, Sequence[str]]:
if required_path != current_path:
common_path = _get_common_path(current_path, required_path)
LOGGER.debug(
'required element path: %s -> %s (common path: %s)',
current_path, required_path, common_path
)
for _ in range(len(current_path) - len(common_path)):
current_element = current_element.getparent()
current_path = list(common_path)
for path_fragment in required_path[len(common_path):]:
try:
parsed_path_fragment = parse_tag_expression(path_fragment)
child = parsed_path_fragment.create_node(
element_maker=element_maker
)
except ValueError as exc:
raise ValueError('failed to create node for %r due to %s' % (
path_fragment, exc
)) from exc
current_element.append(child)
current_element = child
current_path.append(path_fragment)
return current_element, current_path
class XmlTreeWriter:
def __init__(
self,
parent: etree.ElementBase,
element_maker: ElementMaker
):
self.current_element = parent
self.current_path: Sequence[str] = []
self.element_maker = element_maker
@property
def root(self) -> etree.ElementBase:
return self.current_element.getroottree().getroot()
def append_text(self, text: str):
_append_text(self.current_element, text)
def append(self, element_or_text: Union[etree.ElementBase, str]):
if isinstance(element_or_text, str):
self.append_text(element_or_text)
else:
self.current_element.append(element_or_text)
def append_all(self, *element_or_text_list: Sequence[Union[etree.ElementBase, str]]):
for element_or_text in element_or_text_list:
self.append(element_or_text)
def require_path(self, required_path: Sequence[str]):
self.current_element, self.current_path = _get_element_at_path(
self.current_element, self.current_path,
required_path,
element_maker=self.element_maker
)
def require_path_or_below(self, required_path: Sequence[str]):
self.require_path(
_get_common_path(self.current_path, required_path)
)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/utils/xml_writer.py
|
xml_writer.py
|
import logging
import re
from itertools import zip_longest
from typing import Mapping, NamedTuple, Optional, Sequence, Tuple, Union
from lxml import etree
from lxml.builder import ElementMaker
LOGGER = logging.getLogger(__name__)
class TagExpression(NamedTuple):
tag: str
attrib: Mapping[str, str]
def create_node(self, *args, element_maker: ElementMaker):
try:
return element_maker(self.tag, self.attrib, *args)
except ValueError as exc:
raise ValueError(
'failed to create node with tag=%r, attrib=%r due to %s' % (
self.tag, self.attrib, exc
)
) from exc
def parse_tag_expression(tag_expression: str) -> TagExpression:
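    # accepts a plain tag name or a tag with a single attribute, e.g. 'div[@type="abstract"]' (the '@' is optional)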
match = re.match(r'^([^\[]+)(\[@?([^=]+)="(.+)"\])?$', tag_expression)
if not match:
raise ValueError('invalid tag expression: %s' % tag_expression)
LOGGER.debug('match: %s', match.groups())
tag_name = match.group(1)
if match.group(2):
attrib = {match.group(3): match.group(4)}
else:
attrib = {}
return TagExpression(tag=tag_name, attrib=attrib)
def _get_last_child_or_none(element: etree.ElementBase) -> Optional[etree.ElementBase]:
try:
return element[-1]
except IndexError:
return None
def _append_text(element: etree.ElementBase, text: Optional[str]) -> None:
if not text:
return
last_child = _get_last_child_or_none(element)
if last_child is not None and last_child.tail:
last_child.tail = last_child.tail + '' + text
elif last_child is not None:
last_child.tail = text
elif element.text:
element.text = element.text + '' + text
else:
element.text = text
def _get_common_path(path1: Sequence[str], path2: Sequence[str]) -> Sequence[str]:
if path1 == path2:
return path1
common_path = []
for path1_element, path2_element in zip_longest(path1, path2):
if path1_element != path2_element:
break
common_path.append(path1_element)
return common_path
def _get_element_at_path(
current_element: etree.ElementBase,
current_path: Sequence[str],
required_path: Sequence[str],
element_maker: ElementMaker
) -> Tuple[etree.ElementBase, Sequence[str]]:
if required_path != current_path:
common_path = _get_common_path(current_path, required_path)
LOGGER.debug(
'required element path: %s -> %s (common path: %s)',
current_path, required_path, common_path
)
for _ in range(len(current_path) - len(common_path)):
current_element = current_element.getparent()
current_path = list(common_path)
for path_fragment in required_path[len(common_path):]:
try:
parsed_path_fragment = parse_tag_expression(path_fragment)
child = parsed_path_fragment.create_node(
element_maker=element_maker
)
except ValueError as exc:
raise ValueError('failed to create node for %r due to %s' % (
path_fragment, exc
)) from exc
current_element.append(child)
current_element = child
current_path.append(path_fragment)
return current_element, current_path
class XmlTreeWriter:
def __init__(
self,
parent: etree.ElementBase,
element_maker: ElementMaker
):
self.current_element = parent
self.current_path: Sequence[str] = []
self.element_maker = element_maker
@property
def root(self) -> etree.ElementBase:
return self.current_element.getroottree().getroot()
def append_text(self, text: str):
_append_text(self.current_element, text)
def append(self, element_or_text: Union[etree.ElementBase, str]):
if isinstance(element_or_text, str):
self.append_text(element_or_text)
else:
self.current_element.append(element_or_text)
def append_all(self, *element_or_text_list: Sequence[Union[etree.ElementBase, str]]):
for element_or_text in element_or_text_list:
self.append(element_or_text)
def require_path(self, required_path: Sequence[str]):
self.current_element, self.current_path = _get_element_at_path(
self.current_element, self.current_path,
required_path,
element_maker=self.element_maker
)
def require_path_or_below(self, required_path: Sequence[str]):
self.require_path(
_get_common_path(self.current_path, required_path)
)
| 0.72331 | 0.179064 |
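
A short usage sketch for XmlTreeWriter above (not part of the package source), building a small TEI-like tree; the element and attribute names are illustrative only.

from lxml import etree
from lxml.builder import ElementMaker

from sciencebeam_parser.utils.xml_writer import XmlTreeWriter

element_maker = ElementMaker()
writer = XmlTreeWriter(element_maker('TEI'), element_maker=element_maker)

# missing intermediate elements are created on demand;
# path fragments use the tag expression syntax parsed above
writer.require_path(['text', 'body', 'div[@type="abstract"]'])
writer.append_text('This is the abstract.')

# requiring a different path moves back up to the common ancestor first
writer.require_path(['text', 'body', 'div'])
writer.append_text('First body paragraph.')

print(etree.tostring(writer.root, pretty_print=True).decode())
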
import os
import codecs
from contextlib import contextmanager
from typing import Iterable, Sequence
from urllib.parse import urlparse
import fsspec
from sciencebeam_trainer_delft.utils.io import (
auto_uploading_output_file as _auto_uploading_output_file,
is_external_location,
open_file
)
def get_file_system_protocol_for_url(url: str) -> str:
parsed_url = urlparse(url)
return parsed_url.scheme or 'file'
def get_file_system_for_url(url: str) -> fsspec.AbstractFileSystem:
return fsspec.filesystem(get_file_system_protocol_for_url(url))
def get_file_system_protocols(
fs: fsspec.AbstractFileSystem
) -> Sequence[str]:
return (fs.protocol,) if isinstance(fs.protocol, str) else fs.protocol
def get_file_system_default_protocol(
fs: fsspec.AbstractFileSystem
) -> str:
return get_file_system_protocols(fs)[0]
def get_fully_qualified_path_for_protocol_and_path(
protocol: str,
path: str
) -> str:
if 'file' in protocol:
return path
return f'{protocol}://{path}'
def iter_fully_qualified_paths_for_protocol_and_paths(
protocol: str,
paths: Iterable[str]
) -> Iterable[str]:
return (
get_fully_qualified_path_for_protocol_and_path(protocol, path)
for path in paths
)
def get_fully_qualified_path_for_fs_and_path(
fs: fsspec.AbstractFileSystem,
path: str
) -> str:
return get_fully_qualified_path_for_protocol_and_path(
get_file_system_default_protocol(fs),
path
)
def glob(
glob_pattern: str
) -> Sequence[str]:
protocol = get_file_system_protocol_for_url(glob_pattern)
fs: fsspec.AbstractFileSystem = fsspec.filesystem(protocol)
return list(iter_fully_qualified_paths_for_protocol_and_paths(
protocol,
fs.glob(glob_pattern)
))
def makedirs(
path: str,
exist_ok: bool = False
):
get_file_system_for_url(path).makedirs(path, exist_ok=exist_ok)
@contextmanager
def auto_uploading_binary_output_file(filepath: str, **kwargs):
if not is_external_location(filepath):
# Note: the upstream implementation doesn't currently auto-compress local files
file_dirname = os.path.dirname(filepath)
if file_dirname:
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open_file(filepath, mode='wb', **kwargs) as fp:
yield fp
return
with _auto_uploading_output_file(filepath, 'wb', **kwargs) as fp:
yield fp
@contextmanager
def auto_uploading_text_output_file(filepath: str, encoding: str, **kwargs):
with auto_uploading_binary_output_file(filepath, **kwargs) as fp:
yield codecs.getwriter(encoding)(fp)
def auto_uploading_output_file(filepath: str, mode: str, encoding: str = 'utf-8', **kwargs):
if mode == 'w':
return auto_uploading_text_output_file(filepath, encoding=encoding, **kwargs)
if mode == 'wb':
return auto_uploading_binary_output_file(filepath, **kwargs)
raise ValueError('invalid mode: %r' % mode)
def write_bytes(filepath: str, data: bytes, **kwargs):
with auto_uploading_output_file(filepath, mode='wb', **kwargs) as fp:
fp.write(data)
def write_text(filepath: str, text: str, encoding: str, **kwargs):
# Note: the upstream implementation doesn't support encoding with compression
write_bytes(
filepath,
codecs.encode(text, encoding=encoding),
**kwargs
)
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/utils/io.py
|
io.py
|
import os
import codecs
from contextlib import contextmanager
from typing import Iterable, Sequence
from urllib.parse import urlparse
import fsspec
from sciencebeam_trainer_delft.utils.io import (
auto_uploading_output_file as _auto_uploading_output_file,
is_external_location,
open_file
)
def get_file_system_protocol_for_url(url: str) -> str:
parsed_url = urlparse(url)
return parsed_url.scheme or 'file'
def get_file_system_for_url(url: str) -> fsspec.AbstractFileSystem:
return fsspec.filesystem(get_file_system_protocol_for_url(url))
def get_file_system_protocols(
fs: fsspec.AbstractFileSystem
) -> Sequence[str]:
return (fs.protocol,) if isinstance(fs.protocol, str) else fs.protocol
def get_file_system_default_protocol(
fs: fsspec.AbstractFileSystem
) -> str:
return get_file_system_protocols(fs)[0]
def get_fully_qualified_path_for_protocol_and_path(
protocol: str,
path: str
) -> str:
if 'file' in protocol:
return path
return f'{protocol}://{path}'
def iter_fully_qualified_paths_for_protocol_and_paths(
protocol: str,
paths: Iterable[str]
) -> Iterable[str]:
return (
get_fully_qualified_path_for_protocol_and_path(protocol, path)
for path in paths
)
def get_fully_qualified_path_for_fs_and_path(
fs: fsspec.AbstractFileSystem,
path: str
) -> str:
return get_fully_qualified_path_for_protocol_and_path(
get_file_system_default_protocol(fs),
path
)
def glob(
glob_pattern: str
) -> Sequence[str]:
protocol = get_file_system_protocol_for_url(glob_pattern)
fs: fsspec.AbstractFileSystem = fsspec.filesystem(protocol)
return list(iter_fully_qualified_paths_for_protocol_and_paths(
protocol,
fs.glob(glob_pattern)
))
def makedirs(
path: str,
exist_ok: bool = False
):
get_file_system_for_url(path).makedirs(path, exist_ok=exist_ok)
@contextmanager
def auto_uploading_binary_output_file(filepath: str, **kwargs):
if not is_external_location(filepath):
# Note: the upstream implementation doesn't currently auto-compress local files
file_dirname = os.path.dirname(filepath)
if file_dirname:
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open_file(filepath, mode='wb', **kwargs) as fp:
yield fp
return
with _auto_uploading_output_file(filepath, 'wb', **kwargs) as fp:
yield fp
@contextmanager
def auto_uploading_text_output_file(filepath: str, encoding: str, **kwargs):
with auto_uploading_binary_output_file(filepath, **kwargs) as fp:
yield codecs.getwriter(encoding)(fp)
def auto_uploading_output_file(filepath: str, mode: str, encoding: str = 'utf-8', **kwargs):
if mode == 'w':
return auto_uploading_text_output_file(filepath, encoding=encoding, **kwargs)
if mode == 'wb':
return auto_uploading_binary_output_file(filepath, **kwargs)
raise ValueError('invalid mode: %r' % mode)
def write_bytes(filepath: str, data: bytes, **kwargs):
with auto_uploading_output_file(filepath, mode='wb', **kwargs) as fp:
fp.write(data)
def write_text(filepath: str, text: str, encoding: str, **kwargs):
# Note: the upstream implementation doesn't support encoding with compression
write_bytes(
filepath,
codecs.encode(text, encoding=encoding),
**kwargs
)
| 0.659076 | 0.151467 |
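
A short usage sketch for the I/O helpers above (not part of the package source). The local directory is a placeholder; the same calls also accept fsspec-supported URLs (for example an s3:// or gs:// path, provided the matching fsspec backend is installed).

from sciencebeam_parser.utils.io import glob, makedirs, write_text

output_dir = '/tmp/sciencebeam-io-example'  # hypothetical local path
makedirs(output_dir, exist_ok=True)

write_text(f'{output_dir}/example.txt', 'hello world', encoding='utf-8')

# glob returns fully qualified paths for the detected protocol
for path in glob(f'{output_dir}/*.txt'):
    print(path)
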
import logging
from typing import Callable, Optional, Sequence, TypeVar
from flask import request
from werkzeug.exceptions import BadRequest
from sciencebeam_parser.utils.media_types import (
get_first_matching_media_type
)
from sciencebeam_parser.utils.data_wrapper import MediaDataWrapper
LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
DEFAULT_FILENAME = 'file'
def get_optional_post_data_wrapper() -> MediaDataWrapper:
if not request.files:
return MediaDataWrapper(
data=request.data,
media_type=request.mimetype,
filename=request.args.get('filename')
)
supported_file_keys = ['file', 'input']
for name in supported_file_keys:
if name not in request.files:
continue
uploaded_file = request.files[name]
data = uploaded_file.stream.read()
return MediaDataWrapper(
data=data,
media_type=uploaded_file.mimetype,
filename=uploaded_file.filename
)
raise BadRequest(
        f'missing file named one of "{supported_file_keys}", found: {request.files.keys()}'
)
def get_required_post_data_wrapper() -> MediaDataWrapper:
data_wrapper = get_optional_post_data_wrapper()
if not data_wrapper.data:
raise BadRequest('no contents')
return data_wrapper
def get_required_post_data() -> bytes:
return get_required_post_data_wrapper().data
def get_request_accept_media_types() -> Sequence[str]:
accept_media_types = list(request.accept_mimetypes.values())
LOGGER.info('accept_media_types: %s', accept_media_types)
return accept_media_types
def assert_and_get_first_matching_media_type(
accept_media_types: Sequence[str],
available_media_types: Sequence[str]
) -> str:
media_type = get_first_matching_media_type(
accept_media_types,
available_media_types
)
if not media_type:
raise BadRequest(
f'unsupported accept media types: {accept_media_types},'
f' supported types are: {available_media_types}'
)
LOGGER.info('resolved media type: %r', media_type)
return media_type
def assert_and_get_first_accept_matching_media_type(
available_media_types: Sequence[str]
) -> str:
return assert_and_get_first_matching_media_type(
get_request_accept_media_types(),
available_media_types
)
def get_typed_request_arg(
name: str,
type_: Callable[[str], T],
default_value: Optional[T] = None,
required: bool = False
) -> Optional[T]:
value = request.args.get(name)
if value:
return type_(value)
if required:
raise ValueError(f'request arg {name} is required')
return default_value
def str_to_bool(value: str) -> bool:
value_lower = value.lower()
if value_lower in {'true', '1'}:
return True
if value_lower in {'false', '0'}:
return False
raise ValueError('unrecognised boolean value: %r' % value)
def get_bool_request_arg(
name: str,
default_value: Optional[bool] = None,
required: bool = False
) -> Optional[bool]:
return get_typed_request_arg(
name, str_to_bool, default_value=default_value, required=required
)
def get_int_request_arg(
name: str,
default_value: Optional[int] = None,
required: bool = False
) -> Optional[int]:
return get_typed_request_arg(
name, int, default_value=default_value, required=required
)
def get_str_request_arg(
name: str,
default_value: Optional[str] = None,
required: bool = False
) -> Optional[str]:
return get_typed_request_arg(
name, str, default_value=default_value, required=required
)
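# Usage sketch (hypothetical, not part of the module): exercising the typed request-arg
# helpers inside a Flask test request context; the route and query string are assumptions.
def _example_typed_request_args():
    from flask import Flask  # local import keeps the sketch self-contained
    app = Flask(__name__)
    with app.test_request_context('/api/convert?includeRaw=true&limit=10'):
        assert get_bool_request_arg('includeRaw') is True
        assert get_int_request_arg('limit') == 10
        assert get_str_request_arg('missing', default_value='fallback') == 'fallback'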
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/utils/flask.py
|
flask.py
|
import logging
from typing import Callable, Optional, Sequence, TypeVar
from flask import request
from werkzeug.exceptions import BadRequest
from sciencebeam_parser.utils.media_types import (
get_first_matching_media_type
)
from sciencebeam_parser.utils.data_wrapper import MediaDataWrapper
LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
DEFAULT_FILENAME = 'file'
def get_optional_post_data_wrapper() -> MediaDataWrapper:
if not request.files:
return MediaDataWrapper(
data=request.data,
media_type=request.mimetype,
filename=request.args.get('filename')
)
supported_file_keys = ['file', 'input']
for name in supported_file_keys:
if name not in request.files:
continue
uploaded_file = request.files[name]
data = uploaded_file.stream.read()
return MediaDataWrapper(
data=data,
media_type=uploaded_file.mimetype,
filename=uploaded_file.filename
)
raise BadRequest(
        f'missing file named one of "{supported_file_keys}", found: {request.files.keys()}'
)
def get_required_post_data_wrapper() -> MediaDataWrapper:
data_wrapper = get_optional_post_data_wrapper()
if not data_wrapper.data:
raise BadRequest('no contents')
return data_wrapper
def get_required_post_data() -> bytes:
return get_required_post_data_wrapper().data
def get_request_accept_media_types() -> Sequence[str]:
accept_media_types = list(request.accept_mimetypes.values())
LOGGER.info('accept_media_types: %s', accept_media_types)
return accept_media_types
def assert_and_get_first_matching_media_type(
accept_media_types: Sequence[str],
available_media_types: Sequence[str]
) -> str:
media_type = get_first_matching_media_type(
accept_media_types,
available_media_types
)
if not media_type:
raise BadRequest(
f'unsupported accept media types: {accept_media_types},'
f' supported types are: {available_media_types}'
)
LOGGER.info('resolved media type: %r', media_type)
return media_type
def assert_and_get_first_accept_matching_media_type(
available_media_types: Sequence[str]
) -> str:
return assert_and_get_first_matching_media_type(
get_request_accept_media_types(),
available_media_types
)
def get_typed_request_arg(
name: str,
type_: Callable[[str], T],
default_value: Optional[T] = None,
required: bool = False
) -> Optional[T]:
value = request.args.get(name)
if value:
return type_(value)
if required:
raise ValueError(f'request arg {name} is required')
return default_value
def str_to_bool(value: str) -> bool:
value_lower = value.lower()
if value_lower in {'true', '1'}:
return True
if value_lower in {'false', '0'}:
return False
raise ValueError('unrecognised boolean value: %r' % value)
def get_bool_request_arg(
name: str,
default_value: Optional[bool] = None,
required: bool = False
) -> Optional[bool]:
return get_typed_request_arg(
name, str_to_bool, default_value=default_value, required=required
)
def get_int_request_arg(
name: str,
default_value: Optional[int] = None,
required: bool = False
) -> Optional[int]:
return get_typed_request_arg(
name, int, default_value=default_value, required=required
)
def get_str_request_arg(
name: str,
default_value: Optional[str] = None,
required: bool = False
) -> Optional[str]:
return get_typed_request_arg(
name, str, default_value=default_value, required=required
)
| 0.839471 | 0.110856 |
import argparse
import logging
import os
from typing import Iterable, List, Optional, Sequence, Tuple
from lxml import etree
from sciencebeam_trainer_delft.utils.io import (
auto_download_input_file
)
from sciencebeam_trainer_delft.sequence_labelling.reader import (
load_data_crf_lines
)
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
iter_format_tag_result
)
from sciencebeam_parser.utils.io import (
auto_uploading_output_file,
glob
)
from sciencebeam_parser.document.layout_document import (
LayoutBlock,
LayoutDocument
)
from sciencebeam_parser.models.data import (
DocumentFeaturesContext,
LabeledLayoutToken,
ModelDataGenerator
)
from sciencebeam_parser.models.training_data import TrainingTeiParser
from sciencebeam_parser.resources.default_config import DEFAULT_CONFIG_FILE
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.app.parser import ScienceBeamParser
LOGGER = logging.getLogger(__name__)
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(
'ScienceBeam Parser: Generate DELFT Training Data'
)
parser.add_argument(
'--model-name',
type=str,
required=True
)
parser.add_argument(
'--tei-source-path',
type=str,
required=True
)
parser.add_argument(
'--raw-source-path',
type=str,
required=False
)
parser.add_argument(
'--delft-output-path',
type=str,
required=True
)
parser.add_argument(
'--debug',
action='store_true',
help='Enable debug logging'
)
return parser.parse_args(argv)
def translate_tags_IOB_to_grobid(tag: str) -> str:
"""
Convert labels from IOB2 to the ones used by GROBID (expected by the wapiti model)
"""
if tag == 'O':
# outside
return '<other>'
if tag.startswith('B-'):
# begin
return 'I-' + tag[2:]
if tag.startswith('I-'):
# inside
        return tag[2:]
return tag
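# For example (with an assumed '<title>' label):
#   translate_tags_IOB_to_grobid('O') == '<other>'
#   translate_tags_IOB_to_grobid('B-<title>') == 'I-<title>'
#   translate_tags_IOB_to_grobid('I-<title>') == '<title>'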
def translate_tag_result_tags_IOB_to_grobid(
tag_result: Sequence[Sequence[Tuple[str, str]]]
) -> List[List[Tuple[str, str]]]:
return [
[
(token_text, translate_tags_IOB_to_grobid(tag))
for token_text, tag in doc_tag_result
]
for doc_tag_result in tag_result
]
def get_tag_result_for_labeled_layout_tokens_list(
labeled_layout_tokens_list: Sequence[Sequence[LabeledLayoutToken]]
) -> List[List[Tuple[str, str]]]:
return [
[
(
labeled_layout_token.layout_token.text,
labeled_layout_token.label
)
for labeled_layout_token in labeled_layout_tokens
]
for labeled_layout_tokens in labeled_layout_tokens_list
]
def get_raw_file_for_tei_file(
tei_file: str,
raw_source_path: str
) -> str:
compression_suffix = ''
if tei_file.endswith('.gz'):
compression_suffix = '.gz'
tei_file = tei_file[:-len(compression_suffix)]
tei_suffix = '.tei.xml'
assert tei_file.endswith(tei_suffix)
return os.path.join(
raw_source_path,
os.path.basename(tei_file[:-len(tei_suffix)] + compression_suffix)
)
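# For example (assumed paths): a TEI file 'corpus/doc1.tei.xml.gz' together with
# raw_source_path '/data/raw' maps to the raw data file '/data/raw/doc1.gz'.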
def get_raw_file_list_for_tei_file_list(
tei_file_list: Iterable[str],
raw_source_path: str
) -> Sequence[str]:
return [
get_raw_file_for_tei_file(tei_file, raw_source_path=raw_source_path)
for tei_file in tei_file_list
]
def get_training_tei_parser_for_model_name(
model_name: str,
sciencebeam_parser: ScienceBeamParser
) -> TrainingTeiParser:
model = sciencebeam_parser.fulltext_models.get_sequence_model_by_name(model_name)
try:
training_tei_parser = model.get_training_tei_parser()
assert training_tei_parser is not None
return training_tei_parser
except NotImplementedError as exc:
raise RuntimeError('unsupported model: %r' % model_name) from exc
def get_data_generator_for_model_name(
model_name: str,
sciencebeam_parser: ScienceBeamParser
) -> ModelDataGenerator:
model = sciencebeam_parser.fulltext_models.get_sequence_model_by_name(model_name)
return model.get_data_generator(
document_features_context=DocumentFeaturesContext(
app_features_context=sciencebeam_parser.app_features_context
)
)
def iter_generate_delft_training_data_lines_for_document( # pylint: disable=too-many-locals
tei_file: str,
raw_file: Optional[str],
training_tei_parser: TrainingTeiParser,
data_generator: ModelDataGenerator
) -> Iterable[str]:
with auto_download_input_file(
tei_file,
auto_decompress=True
) as local_tei_file:
tei_root = etree.parse(local_tei_file).getroot()
labeled_layout_tokens_list = (
training_tei_parser.parse_training_tei_to_labeled_layout_tokens_list(
tei_root
)
)
LOGGER.debug('labeled_layout_tokens_list: %r', labeled_layout_tokens_list)
translated_tag_result = translate_tag_result_tags_IOB_to_grobid(
get_tag_result_for_labeled_layout_tokens_list(
labeled_layout_tokens_list
)
)
LOGGER.debug('translated_tag_result: %r', translated_tag_result)
if raw_file:
with auto_download_input_file(
raw_file,
auto_decompress=True
) as local_raw_file:
with open(local_raw_file, 'r', encoding='utf-8') as raw_fp:
texts, features = load_data_crf_lines(
raw_fp
)
assert len(texts) == len(translated_tag_result)
for doc_tokens, doc_tag_result in zip(texts, translated_tag_result):
assert len(doc_tokens) == len(doc_tag_result)
else:
layout_documents = [
LayoutDocument.for_blocks([
LayoutBlock.for_tokens([
labeled_layout_token.layout_token
for labeled_layout_token in labeled_layout_tokens
])
])
for labeled_layout_tokens in labeled_layout_tokens_list
]
LOGGER.debug('layout_documents: %r', layout_documents)
data_line_iterable = list(data_generator.iter_data_lines_for_layout_documents(
layout_documents
))
_texts, features = load_data_crf_lines(data_line_iterable)
LOGGER.debug('features: %r', features)
yield from iter_format_tag_result(
tag_result=translated_tag_result,
output_format=TagOutputFormats.DATA,
texts=None,
features=features
)
def generate_delft_training_data(
model_name: str,
tei_source_path: str,
raw_source_path: str,
delft_output_path: str,
sciencebeam_parser: ScienceBeamParser
):
training_tei_parser = get_training_tei_parser_for_model_name(
model_name,
sciencebeam_parser=sciencebeam_parser
)
data_generator = get_data_generator_for_model_name(
model_name,
sciencebeam_parser=sciencebeam_parser
)
LOGGER.debug('tei_source_path: %r', tei_source_path)
tei_file_list = glob(tei_source_path)
if not tei_file_list:
raise RuntimeError('no files found for file pattern %r' % tei_source_path)
LOGGER.info('tei_file_list: %r', tei_file_list)
if raw_source_path:
raw_file_list: Sequence[Optional[str]] = get_raw_file_list_for_tei_file_list(
tei_file_list,
raw_source_path=raw_source_path
)
else:
raw_file_list = [None] * len(tei_file_list)
LOGGER.info('raw_file_list: %r', raw_file_list)
    LOGGER.info('writing to: %r', delft_output_path)
with auto_uploading_output_file(
delft_output_path,
mode='w',
encoding='utf-8',
) as data_fp:
for document_index, (tei_file, raw_file) in enumerate(zip(tei_file_list, raw_file_list)):
if document_index > 0:
data_fp.write('\n\n')
data_fp.writelines(iter_generate_delft_training_data_lines_for_document(
tei_file=tei_file,
raw_file=raw_file,
training_tei_parser=training_tei_parser,
data_generator=data_generator
))
def run(args: argparse.Namespace):
LOGGER.info('args: %r', args)
config = AppConfig.load_yaml(
DEFAULT_CONFIG_FILE
)
sciencebeam_parser = ScienceBeamParser.from_config(config)
generate_delft_training_data(
model_name=args.model_name,
tei_source_path=args.tei_source_path,
raw_source_path=args.raw_source_path,
delft_output_path=args.delft_output_path,
sciencebeam_parser=sciencebeam_parser
)
def main(argv: Optional[List[str]] = None):
LOGGER.debug('argv: %r', argv)
args = parse_args(argv)
if args.debug:
for name in [__name__, 'sciencebeam_parser', 'sciencebeam_trainer_delft']:
logging.getLogger(name).setLevel('DEBUG')
run(args)
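# Usage sketch (hypothetical paths and model name): invoking the CLI entry point
# programmatically rather than from the command line.
def _example_generate_delft_header_data():
    main([
        '--model-name=header',
        '--tei-source-path=/data/grobid/header/corpus/*.tei.xml',
        '--delft-output-path=/data/delft/header.train',
    ])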
if __name__ == '__main__':
logging.basicConfig(level='INFO')
main()
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/training/cli/generate_delft_data.py
|
generate_delft_data.py
|
import argparse
import logging
import os
from typing import Iterable, List, Optional, Sequence, Tuple
from lxml import etree
from sciencebeam_trainer_delft.utils.io import (
auto_download_input_file
)
from sciencebeam_trainer_delft.sequence_labelling.reader import (
load_data_crf_lines
)
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
iter_format_tag_result
)
from sciencebeam_parser.utils.io import (
auto_uploading_output_file,
glob
)
from sciencebeam_parser.document.layout_document import (
LayoutBlock,
LayoutDocument
)
from sciencebeam_parser.models.data import (
DocumentFeaturesContext,
LabeledLayoutToken,
ModelDataGenerator
)
from sciencebeam_parser.models.training_data import TrainingTeiParser
from sciencebeam_parser.resources.default_config import DEFAULT_CONFIG_FILE
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.app.parser import ScienceBeamParser
LOGGER = logging.getLogger(__name__)
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(
'ScienceBeam Parser: Generate DELFT Training Data'
)
parser.add_argument(
'--model-name',
type=str,
required=True
)
parser.add_argument(
'--tei-source-path',
type=str,
required=True
)
parser.add_argument(
'--raw-source-path',
type=str,
required=False
)
parser.add_argument(
'--delft-output-path',
type=str,
required=True
)
parser.add_argument(
'--debug',
action='store_true',
help='Enable debug logging'
)
return parser.parse_args(argv)
def translate_tags_IOB_to_grobid(tag: str) -> str:
"""
Convert labels from IOB2 to the ones used by GROBID (expected by the wapiti model)
"""
if tag == 'O':
# outside
return '<other>'
if tag.startswith('B-'):
# begin
return 'I-' + tag[2:]
if tag.startswith('I-'):
# inside
        return tag[2:]
return tag
def translate_tag_result_tags_IOB_to_grobid(
tag_result: Sequence[Sequence[Tuple[str, str]]]
) -> List[List[Tuple[str, str]]]:
return [
[
(token_text, translate_tags_IOB_to_grobid(tag))
for token_text, tag in doc_tag_result
]
for doc_tag_result in tag_result
]
def get_tag_result_for_labeled_layout_tokens_list(
labeled_layout_tokens_list: Sequence[Sequence[LabeledLayoutToken]]
) -> List[List[Tuple[str, str]]]:
return [
[
(
labeled_layout_token.layout_token.text,
labeled_layout_token.label
)
for labeled_layout_token in labeled_layout_tokens
]
for labeled_layout_tokens in labeled_layout_tokens_list
]
def get_raw_file_for_tei_file(
tei_file: str,
raw_source_path: str
) -> str:
compression_suffix = ''
if tei_file.endswith('.gz'):
compression_suffix = '.gz'
tei_file = tei_file[:-len(compression_suffix)]
tei_suffix = '.tei.xml'
assert tei_file.endswith(tei_suffix)
return os.path.join(
raw_source_path,
os.path.basename(tei_file[:-len(tei_suffix)] + compression_suffix)
)
def get_raw_file_list_for_tei_file_list(
tei_file_list: Iterable[str],
raw_source_path: str
) -> Sequence[str]:
return [
get_raw_file_for_tei_file(tei_file, raw_source_path=raw_source_path)
for tei_file in tei_file_list
]
def get_training_tei_parser_for_model_name(
model_name: str,
sciencebeam_parser: ScienceBeamParser
) -> TrainingTeiParser:
model = sciencebeam_parser.fulltext_models.get_sequence_model_by_name(model_name)
try:
training_tei_parser = model.get_training_tei_parser()
assert training_tei_parser is not None
return training_tei_parser
except NotImplementedError as exc:
raise RuntimeError('unsupported model: %r' % model_name) from exc
def get_data_generator_for_model_name(
model_name: str,
sciencebeam_parser: ScienceBeamParser
) -> ModelDataGenerator:
model = sciencebeam_parser.fulltext_models.get_sequence_model_by_name(model_name)
return model.get_data_generator(
document_features_context=DocumentFeaturesContext(
app_features_context=sciencebeam_parser.app_features_context
)
)
def iter_generate_delft_training_data_lines_for_document( # pylint: disable=too-many-locals
tei_file: str,
raw_file: Optional[str],
training_tei_parser: TrainingTeiParser,
data_generator: ModelDataGenerator
) -> Iterable[str]:
with auto_download_input_file(
tei_file,
auto_decompress=True
) as local_tei_file:
tei_root = etree.parse(local_tei_file).getroot()
labeled_layout_tokens_list = (
training_tei_parser.parse_training_tei_to_labeled_layout_tokens_list(
tei_root
)
)
LOGGER.debug('labeled_layout_tokens_list: %r', labeled_layout_tokens_list)
translated_tag_result = translate_tag_result_tags_IOB_to_grobid(
get_tag_result_for_labeled_layout_tokens_list(
labeled_layout_tokens_list
)
)
LOGGER.debug('translated_tag_result: %r', translated_tag_result)
if raw_file:
with auto_download_input_file(
raw_file,
auto_decompress=True
) as local_raw_file:
with open(local_raw_file, 'r', encoding='utf-8') as raw_fp:
texts, features = load_data_crf_lines(
raw_fp
)
assert len(texts) == len(translated_tag_result)
for doc_tokens, doc_tag_result in zip(texts, translated_tag_result):
assert len(doc_tokens) == len(doc_tag_result)
else:
layout_documents = [
LayoutDocument.for_blocks([
LayoutBlock.for_tokens([
labeled_layout_token.layout_token
for labeled_layout_token in labeled_layout_tokens
])
])
for labeled_layout_tokens in labeled_layout_tokens_list
]
LOGGER.debug('layout_documents: %r', layout_documents)
data_line_iterable = list(data_generator.iter_data_lines_for_layout_documents(
layout_documents
))
_texts, features = load_data_crf_lines(data_line_iterable)
LOGGER.debug('features: %r', features)
yield from iter_format_tag_result(
tag_result=translated_tag_result,
output_format=TagOutputFormats.DATA,
texts=None,
features=features
)
def generate_delft_training_data(
model_name: str,
tei_source_path: str,
raw_source_path: str,
delft_output_path: str,
sciencebeam_parser: ScienceBeamParser
):
training_tei_parser = get_training_tei_parser_for_model_name(
model_name,
sciencebeam_parser=sciencebeam_parser
)
data_generator = get_data_generator_for_model_name(
model_name,
sciencebeam_parser=sciencebeam_parser
)
LOGGER.debug('tei_source_path: %r', tei_source_path)
tei_file_list = glob(tei_source_path)
if not tei_file_list:
raise RuntimeError('no files found for file pattern %r' % tei_source_path)
LOGGER.info('tei_file_list: %r', tei_file_list)
if raw_source_path:
raw_file_list: Sequence[Optional[str]] = get_raw_file_list_for_tei_file_list(
tei_file_list,
raw_source_path=raw_source_path
)
else:
raw_file_list = [None] * len(tei_file_list)
LOGGER.info('raw_file_list: %r', raw_file_list)
    LOGGER.info('writing to: %r', delft_output_path)
with auto_uploading_output_file(
delft_output_path,
mode='w',
encoding='utf-8',
) as data_fp:
for document_index, (tei_file, raw_file) in enumerate(zip(tei_file_list, raw_file_list)):
if document_index > 0:
data_fp.write('\n\n')
data_fp.writelines(iter_generate_delft_training_data_lines_for_document(
tei_file=tei_file,
raw_file=raw_file,
training_tei_parser=training_tei_parser,
data_generator=data_generator
))
def run(args: argparse.Namespace):
LOGGER.info('args: %r', args)
config = AppConfig.load_yaml(
DEFAULT_CONFIG_FILE
)
sciencebeam_parser = ScienceBeamParser.from_config(config)
generate_delft_training_data(
model_name=args.model_name,
tei_source_path=args.tei_source_path,
raw_source_path=args.raw_source_path,
delft_output_path=args.delft_output_path,
sciencebeam_parser=sciencebeam_parser
)
def main(argv: Optional[List[str]] = None):
LOGGER.debug('argv: %r', argv)
args = parse_args(argv)
if args.debug:
for name in [__name__, 'sciencebeam_parser', 'sciencebeam_trainer_delft']:
logging.getLogger(name).setLevel('DEBUG')
run(args)
if __name__ == '__main__':
logging.basicConfig(level='INFO')
main()
| 0.68215 | 0.190253 |
from abc import ABC, abstractmethod
import argparse
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Iterable, List, NamedTuple, Optional, Sequence
from lxml import etree
from sciencebeam_trainer_delft.utils.io import (
auto_download_input_file
)
from sciencebeam_parser.utils.io import glob, makedirs, write_bytes, write_text
from sciencebeam_parser.document.layout_document import LayoutDocument
from sciencebeam_parser.document.semantic_document import (
SemanticMixedContentWrapper,
SemanticRawAffiliationAddress,
SemanticRawAuthors,
SemanticRawFigure,
SemanticRawReference,
SemanticRawReferenceText,
SemanticRawTable
)
from sciencebeam_parser.models.data import (
DocumentFeaturesContext,
LabeledLayoutModelData,
LayoutModelData
)
from sciencebeam_parser.models.model import (
LabeledLayoutToken,
LayoutDocumentLabelResult,
LayoutModelLabel,
Model,
iter_data_lines_for_model_data_iterables,
iter_labeled_layout_token_for_layout_model_label
)
from sciencebeam_parser.models.training_data import TeiTrainingDataGenerator
from sciencebeam_parser.processors.fulltext.models import FullTextModels
from sciencebeam_parser.resources.default_config import DEFAULT_CONFIG_FILE
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.app.parser import ScienceBeamParser
from sciencebeam_parser.utils.media_types import MediaTypes
LOGGER = logging.getLogger(__name__)
@dataclass
class ModelResultCache:
model_data_lists_by_key_map: Dict[
str, Sequence[Sequence[LabeledLayoutModelData]]
] = field(default_factory=dict)
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(
'ScienceBeam Parser: Generate Training Data'
)
parser.add_argument(
'--source-path',
type=str,
required=True
)
parser.add_argument(
'--output-path',
type=str,
required=True
)
parser.add_argument(
'--limit',
type=int,
required=False
)
parser.add_argument(
'--use-model',
action='store_true',
help='Use configured models to pre-annotate training data'
)
parser.add_argument(
'--use-directory-structure',
action='store_true',
help='Output training data to a directory structure'
)
parser.add_argument(
'--gzip',
action='store_true',
help='Enable gzip compression for output files (with .gz suffix)'
)
parser.add_argument(
'--debug',
action='store_true',
help='Enable debug logging'
)
return parser.parse_args(argv)
def get_labeled_model_data_list_list(
model_data_list_list: Sequence[Sequence[LayoutModelData]],
model: Model
) -> Sequence[Sequence[LabeledLayoutModelData]]:
return list(
model.iter_labeled_model_data_list_for_model_data_list_iterable(
model_data_list_list
)
)
def get_labeled_model_data_list(
model_data_list: Sequence[LayoutModelData],
model: Model
) -> Sequence[LabeledLayoutModelData]:
return get_labeled_model_data_list_list(
[model_data_list],
model=model
)[0]
def get_labeled_model_data_list_for_layout_document(
layout_document: LayoutDocument,
model: Model,
document_features_context: DocumentFeaturesContext
) -> Sequence[LabeledLayoutModelData]:
data_generator = model.get_data_generator(
document_features_context=document_features_context
)
model_data_list: Sequence[LayoutModelData] = list(
data_generator.iter_model_data_for_layout_document(layout_document)
)
return get_labeled_model_data_list(
model_data_list,
model=model
)
def get_layout_model_label_for_labeled_model_data(
labeled_model_data: LabeledLayoutModelData
) -> LayoutModelLabel:
return LayoutModelLabel(
label=labeled_model_data.label or '',
label_token_text=labeled_model_data.label_token_text,
layout_line=labeled_model_data.layout_line,
layout_token=labeled_model_data.layout_token
)
def iter_layout_model_label_for_labeled_model_data_list(
labeled_model_data_iterable: Iterable[LabeledLayoutModelData],
) -> Iterable[LayoutModelLabel]:
return (
get_layout_model_label_for_labeled_model_data(labeled_model_data)
for labeled_model_data in labeled_model_data_iterable
)
def get_layout_document_label_result_for_labeled_model_data_list(
labeled_model_data_iterable: Iterable[LabeledLayoutModelData],
layout_document: LayoutDocument
) -> LayoutDocumentLabelResult:
return LayoutDocumentLabelResult(
layout_document=layout_document,
layout_model_label_iterable=iter_layout_model_label_for_labeled_model_data_list(
labeled_model_data_iterable
)
)
class TrainingDataDocumentContext(NamedTuple):
output_path: str
source_filename: str
document_features_context: DocumentFeaturesContext
fulltext_models: FullTextModels
use_model: bool
use_directory_structure: bool
model_result_cache: ModelResultCache
gzip_enabled: bool
@property
def source_name(self) -> str:
source_basename = os.path.basename(self.source_filename)
return os.path.splitext(source_basename)[0]
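# Note: TrainingDataDocumentContext.source_name strips directory and extension,
# e.g. (assumed path) '/data/pdfs/article1.pdf' -> 'article1'.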
def iter_unlabeled_model_data_list_for_model_and_layout_documents(
model: Model,
model_layout_documents: Sequence[LayoutDocument],
document_context: TrainingDataDocumentContext
) -> Iterable[Sequence[LayoutModelData]]:
if not model_layout_documents:
return []
data_generator = model.get_data_generator(
document_features_context=document_context.document_features_context
)
return [
list(
data_generator.iter_model_data_for_layout_document(model_layout_document)
)
for model_layout_document in model_layout_documents
]
def iter_labeled_model_data_list_for_model_and_layout_documents(
model: Model,
model_layout_documents: Sequence[LayoutDocument],
document_context: TrainingDataDocumentContext
) -> Iterable[Sequence[LabeledLayoutModelData]]:
if not model_layout_documents:
return []
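    # Labeling results are cached per model instance within the document context, so
    # generators sharing a model (e.g. the segmentation model) label the document only once.
    # Note the cache key does not include the layout documents, which assumes each model
    # is applied to the same inputs for a given source document.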
cache_key = f'{type(model).__name__}_{id(model)}'
LOGGER.debug('cache_key: %r', cache_key)
model_data_lists = document_context.model_result_cache.model_data_lists_by_key_map.get(
cache_key
)
if model_data_lists is not None:
return model_data_lists
unlabeled_model_data_lists = list(
iter_unlabeled_model_data_list_for_model_and_layout_documents(
model=model,
model_layout_documents=model_layout_documents,
document_context=document_context
)
)
model_data_lists = get_labeled_model_data_list_list(
unlabeled_model_data_lists,
model=model
)
document_context.model_result_cache.model_data_lists_by_key_map[cache_key] = (
model_data_lists
)
return model_data_lists
def iter_model_data_list_for_model_and_layout_documents(
model: Model,
model_layout_documents: Sequence[LayoutDocument],
document_context: TrainingDataDocumentContext
) -> Iterable[Sequence[LayoutModelData]]:
if not document_context.use_model:
return iter_unlabeled_model_data_list_for_model_and_layout_documents(
model=model,
model_layout_documents=model_layout_documents,
document_context=document_context
)
return iter_labeled_model_data_list_for_model_and_layout_documents(
model=model,
model_layout_documents=model_layout_documents,
document_context=document_context
)
def get_labeled_layout_tokens_list_for_model_and_layout_documents(
model: Model,
layout_documents: Sequence[LayoutDocument],
document_context: TrainingDataDocumentContext
) -> Sequence[Sequence[LabeledLayoutToken]]:
model_data_lists = list(
iter_labeled_model_data_list_for_model_and_layout_documents(
model=model,
model_layout_documents=layout_documents,
document_context=document_context
)
)
assert len(model_data_lists) == len(layout_documents)
return [
list(iter_labeled_layout_token_for_layout_model_label(
iter_layout_model_label_for_labeled_model_data_list(
model_data_list
)
))
for model_data_list in model_data_lists
]
def get_labeled_layout_tokens_for_model_and_layout_document(
model: Model,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Sequence[LabeledLayoutToken]:
labeled_layout_tokens_list = get_labeled_layout_tokens_list_for_model_and_layout_documents(
model,
[layout_document],
document_context
)
assert len(labeled_layout_tokens_list) == 1
return labeled_layout_tokens_list[0]
def get_segmentation_label_result(
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> LayoutDocumentLabelResult:
segmentation_label_model_data_lists = list(
iter_labeled_model_data_list_for_model_and_layout_documents(
model=document_context.fulltext_models.segmentation_model,
model_layout_documents=[layout_document],
document_context=document_context
)
)
assert len(segmentation_label_model_data_lists) == 1
LOGGER.debug('segmentation_label_model_data_lists: %r', segmentation_label_model_data_lists)
return get_layout_document_label_result_for_labeled_model_data_list(
labeled_model_data_iterable=segmentation_label_model_data_lists[0],
layout_document=layout_document
)
class AbstractModelTrainingDataGenerator(ABC):
def get_pre_file_path_suffix(self) -> str:
return ''
def _get_file_path_with_suffix(
self,
suffix: Optional[str],
document_context: TrainingDataDocumentContext,
sub_directory: Optional[str] = None
) -> Optional[str]:
if not suffix:
return None
output_path = document_context.output_path
if sub_directory and document_context.use_directory_structure:
output_path = os.path.join(output_path, sub_directory)
if document_context.gzip_enabled:
suffix += '.gz'
return os.path.join(
output_path,
document_context.source_name + self.get_pre_file_path_suffix() + suffix
)
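    # For example (assumed values): source 'article1.pdf', suffix '.header.tei.xml',
    # sub_directory 'header/corpus', with --use-directory-structure and --gzip, yields
    # '<output-path>/header/corpus/article1.header.tei.xml.gz'.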
@abstractmethod
def get_tei_training_data_generator(
self,
document_context: TrainingDataDocumentContext
) -> TeiTrainingDataGenerator:
pass
@abstractmethod
def iter_model_data_list(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[Sequence[LayoutModelData]]:
return []
def get_default_tei_sub_directory(
self,
tei_training_data_generator: TeiTrainingDataGenerator
) -> Optional[str]:
return tei_training_data_generator.get_default_tei_sub_directory()
def generate_data_for_layout_document(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
):
tei_training_data_generator = self.get_tei_training_data_generator(document_context)
tei_file_path = self._get_file_path_with_suffix(
tei_training_data_generator.get_default_tei_filename_suffix(),
document_context=document_context,
sub_directory=self.get_default_tei_sub_directory(tei_training_data_generator)
)
data_file_path = self._get_file_path_with_suffix(
tei_training_data_generator.get_default_data_filename_suffix(),
document_context=document_context,
sub_directory=tei_training_data_generator.get_default_data_sub_directory()
)
assert tei_file_path
model_data_list_list = list(self.iter_model_data_list(
layout_document=layout_document,
document_context=document_context
))
if not model_data_list_list:
LOGGER.info('no entities found, skipping (%r)', tei_file_path)
return
training_tei_root = (
tei_training_data_generator
.get_training_tei_xml_for_multiple_model_data_iterables(
model_data_list_list
)
)
LOGGER.info('writing training tei to: %r', tei_file_path)
write_bytes(
tei_file_path,
etree.tostring(training_tei_root, pretty_print=True)
)
if data_file_path:
LOGGER.info('writing training raw data to: %r', data_file_path)
write_text(
data_file_path,
'\n'.join(
iter_data_lines_for_model_data_iterables(model_data_list_list)
),
encoding='utf-8'
)
class AbstractDocumentModelTrainingDataGenerator(AbstractModelTrainingDataGenerator):
@abstractmethod
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
pass
def get_tei_training_data_generator(
self,
document_context: TrainingDataDocumentContext
) -> TeiTrainingDataGenerator:
return self.get_main_model(document_context).get_tei_training_data_generator()
@abstractmethod
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
pass
def iter_model_data_list(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[Sequence[LayoutModelData]]:
model = self.get_main_model(document_context)
model_layout_documents = list(self.iter_model_layout_documents(
layout_document,
document_context=document_context
))
return iter_model_data_list_for_model_and_layout_documents(
model=model,
model_layout_documents=model_layout_documents,
document_context=document_context
)
class SegmentationModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.segmentation_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
return [layout_document]
class HeaderModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.header_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
LOGGER.debug('segmentation_label_result: %r', segmentation_label_result)
header_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<header>'
).remove_empty_blocks()
LOGGER.debug('header_layout_document: %r', header_layout_document)
if not header_layout_document.pages:
return []
return [header_layout_document]
class AffiliationAddressModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.affiliation_address_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
header_model = document_context.fulltext_models.header_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
header_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<header>'
).remove_empty_blocks()
LOGGER.debug('header_layout_document: %r', header_layout_document)
if not header_layout_document.pages:
return []
header_labeled_layout_tokens = get_labeled_layout_tokens_for_model_and_layout_document(
model=header_model,
layout_document=header_layout_document,
document_context=document_context
)
semantic_raw_aff_address_list = list(
SemanticMixedContentWrapper(list(
header_model.iter_semantic_content_for_labeled_layout_tokens(
header_labeled_layout_tokens
)
)).iter_by_type(SemanticRawAffiliationAddress)
)
LOGGER.info('semantic_raw_aff_address_list count: %d', len(semantic_raw_aff_address_list))
if not semantic_raw_aff_address_list:
return []
return [
LayoutDocument.for_blocks(
list(semantic_raw_aff_address.iter_blocks())
)
for semantic_raw_aff_address in semantic_raw_aff_address_list
]
class NameHeaderModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.name_header_model
def get_default_tei_sub_directory(
self,
tei_training_data_generator: TeiTrainingDataGenerator
) -> str:
return 'name/header/corpus'
def get_pre_file_path_suffix(self) -> str:
return '.header'
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
header_model = document_context.fulltext_models.header_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
header_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<header>'
).remove_empty_blocks()
LOGGER.debug('header_layout_document: %r', header_layout_document)
if not header_layout_document.pages:
return []
header_labeled_layout_tokens = get_labeled_layout_tokens_for_model_and_layout_document(
model=header_model,
layout_document=header_layout_document,
document_context=document_context
)
semantic_raw_author_list = list(
SemanticMixedContentWrapper(list(
header_model.iter_semantic_content_for_labeled_layout_tokens(
header_labeled_layout_tokens
)
)).iter_by_type(SemanticRawAuthors)
)
LOGGER.info('semantic_raw_author_list count: %d', len(semantic_raw_author_list))
if not semantic_raw_author_list:
return []
return [
LayoutDocument.for_blocks([
block
for semantic_raw_author in semantic_raw_author_list
for block in semantic_raw_author.iter_blocks()
])
]
class NameCitationModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.name_citation_model
def get_default_tei_sub_directory(
self,
tei_training_data_generator: TeiTrainingDataGenerator
) -> str:
return 'name/citation/corpus'
def get_pre_file_path_suffix(self) -> str:
return '.citations'
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
reference_segmenter_model = document_context.fulltext_models.reference_segmenter_model
citation_model = document_context.fulltext_models.citation_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
references_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<references>'
).remove_empty_blocks()
reference_segmenter_labeled_layout_tokens = (
get_labeled_layout_tokens_for_model_and_layout_document(
model=reference_segmenter_model,
layout_document=references_layout_document,
document_context=document_context
)
)
raw_reference_text_list = [
raw_reference_text
for raw_reference in SemanticMixedContentWrapper(list(
reference_segmenter_model.iter_semantic_content_for_labeled_layout_tokens(
reference_segmenter_labeled_layout_tokens
)
)).iter_by_type(SemanticRawReference)
for raw_reference_text in raw_reference.iter_by_type(SemanticRawReferenceText)
]
LOGGER.info('raw_reference_text_list count: %d', len(raw_reference_text_list))
if not raw_reference_text_list:
return []
citation_layout_documents = [
LayoutDocument.for_blocks(
list(semantic_raw_reference_text.iter_blocks())
)
for semantic_raw_reference_text in raw_reference_text_list
]
citation_labeled_layout_tokens_list = (
get_labeled_layout_tokens_list_for_model_and_layout_documents(
model=citation_model,
layout_documents=citation_layout_documents,
document_context=document_context
)
)
semantic_raw_author_list = [
raw_author
for citation_labeled_layout_tokens in citation_labeled_layout_tokens_list
for raw_author in SemanticMixedContentWrapper(list(
citation_model.iter_semantic_content_for_labeled_layout_tokens(
citation_labeled_layout_tokens
)
)).iter_by_type_recursively(SemanticRawAuthors)
]
LOGGER.info('semantic_raw_author_list count: %d', len(semantic_raw_author_list))
if not semantic_raw_author_list:
return []
return [
LayoutDocument.for_blocks([
block
for semantic_raw_author in semantic_raw_author_list
for block in semantic_raw_author.iter_blocks()
])
]
class FullTextModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.fulltext_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
body_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<body>'
).remove_empty_blocks()
if not body_layout_document.pages:
return []
return [body_layout_document]
class FigureModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.figure_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
fulltext_model = document_context.fulltext_models.fulltext_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
body_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<body>'
).remove_empty_blocks()
if not body_layout_document.pages:
return []
fulltext_labeled_layout_tokens = get_labeled_layout_tokens_for_model_and_layout_document(
model=fulltext_model,
layout_document=body_layout_document,
document_context=document_context
)
raw_figure_list = list(
SemanticMixedContentWrapper(list(
fulltext_model.iter_semantic_content_for_labeled_layout_tokens(
fulltext_labeled_layout_tokens
)
)).iter_by_type_recursively(SemanticRawFigure)
)
LOGGER.info('raw_figure_list count: %d', len(raw_figure_list))
if not raw_figure_list:
return []
return [
LayoutDocument.for_blocks(list(raw_figure.iter_blocks()))
for raw_figure in raw_figure_list
]
class TableModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.table_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
fulltext_model = document_context.fulltext_models.fulltext_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
body_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<body>'
).remove_empty_blocks()
if not body_layout_document.pages:
return []
fulltext_labeled_layout_tokens = get_labeled_layout_tokens_for_model_and_layout_document(
model=fulltext_model,
layout_document=body_layout_document,
document_context=document_context
)
raw_table_list = list(
SemanticMixedContentWrapper(list(
fulltext_model.iter_semantic_content_for_labeled_layout_tokens(
fulltext_labeled_layout_tokens
)
)).iter_by_type_recursively(SemanticRawTable)
)
LOGGER.info('raw_table_list count: %d', len(raw_table_list))
if not raw_table_list:
return []
return [
LayoutDocument.for_blocks(list(raw_table.iter_blocks()))
for raw_table in raw_table_list
]
class ReferenceSegmenterModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.reference_segmenter_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
ref_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<references>'
).remove_empty_blocks()
if not ref_layout_document.pages:
return []
return [ref_layout_document]
class CitationModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.citation_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
reference_segmenter_model = document_context.fulltext_models.reference_segmenter_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
references_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<references>'
).remove_empty_blocks()
reference_segmenter_labeled_layout_tokens = (
get_labeled_layout_tokens_for_model_and_layout_document(
model=reference_segmenter_model,
layout_document=references_layout_document,
document_context=document_context
)
)
raw_reference_text_list = [
raw_reference_text
for raw_reference in SemanticMixedContentWrapper(list(
reference_segmenter_model.iter_semantic_content_for_labeled_layout_tokens(
reference_segmenter_labeled_layout_tokens
)
)).iter_by_type(SemanticRawReference)
for raw_reference_text in raw_reference.iter_by_type(SemanticRawReferenceText)
]
LOGGER.info('raw_reference_text_list count: %d', len(raw_reference_text_list))
if not raw_reference_text_list:
return []
return [
LayoutDocument.for_blocks(
list(semantic_raw_reference_text.iter_blocks())
)
for semantic_raw_reference_text in raw_reference_text_list
]
def generate_training_data_for_layout_document(
layout_document: LayoutDocument,
output_path: str,
source_filename: str,
document_features_context: DocumentFeaturesContext,
fulltext_models: FullTextModels,
use_model: bool,
use_directory_structure: bool,
gzip_enabled: bool = False
):
model_result_cache = ModelResultCache()
document_context = TrainingDataDocumentContext(
output_path=output_path,
source_filename=source_filename,
document_features_context=document_features_context,
fulltext_models=fulltext_models,
use_model=use_model,
use_directory_structure=use_directory_structure,
model_result_cache=model_result_cache,
gzip_enabled=gzip_enabled
)
training_data_generators = [
SegmentationModelTrainingDataGenerator(),
HeaderModelTrainingDataGenerator(),
AffiliationAddressModelTrainingDataGenerator(),
NameHeaderModelTrainingDataGenerator(),
FullTextModelTrainingDataGenerator(),
FigureModelTrainingDataGenerator(),
TableModelTrainingDataGenerator(),
ReferenceSegmenterModelTrainingDataGenerator(),
CitationModelTrainingDataGenerator(),
NameCitationModelTrainingDataGenerator()
]
for training_data_generator in training_data_generators:
training_data_generator.generate_data_for_layout_document(
layout_document=layout_document,
document_context=document_context
)
def get_layout_document_for_source_filename(
source_filename: str,
sciencebeam_parser: ScienceBeamParser,
) -> LayoutDocument:
with sciencebeam_parser.get_new_session() as session:
with auto_download_input_file(
source_filename,
auto_decompress=True
) as local_source_filename:
source = session.get_source(local_source_filename, MediaTypes.PDF)
layout_document = source.get_layout_document()
return layout_document
def generate_training_data_for_source_filename(
source_filename: str,
output_path: str,
sciencebeam_parser: ScienceBeamParser,
use_model: bool,
use_directory_structure: bool,
gzip_enabled: bool
):
LOGGER.debug('use_model: %r', use_model)
layout_document = get_layout_document_for_source_filename(
source_filename,
sciencebeam_parser=sciencebeam_parser
)
generate_training_data_for_layout_document(
layout_document=layout_document,
output_path=output_path,
source_filename=source_filename,
document_features_context=DocumentFeaturesContext(
sciencebeam_parser.app_features_context
),
fulltext_models=sciencebeam_parser.fulltext_models,
use_model=use_model,
use_directory_structure=use_directory_structure,
gzip_enabled=gzip_enabled
)
def get_source_file_list_or_fail(
source_path_pattern: str
) -> Sequence[str]:
source_file_list = list(glob(source_path_pattern))
if not source_file_list:
raise FileNotFoundError('no files found for file pattern: %r' % source_path_pattern)
return source_file_list
def run(args: argparse.Namespace):
LOGGER.info('args: %r', args)
source_file_list = get_source_file_list_or_fail(args.source_path)
if args.limit:
source_file_list = source_file_list[:args.limit]
LOGGER.info('source files: %d', len(source_file_list))
output_path = args.output_path
config = AppConfig.load_yaml(
DEFAULT_CONFIG_FILE
)
sciencebeam_parser = ScienceBeamParser.from_config(config)
LOGGER.info('output_path: %r', output_path)
# Note: creating the directory may not be necessary, but provides early feedback
makedirs(output_path, exist_ok=True)
for source_filename in source_file_list:
generate_training_data_for_source_filename(
source_filename,
output_path=output_path,
sciencebeam_parser=sciencebeam_parser,
use_model=args.use_model,
use_directory_structure=args.use_directory_structure,
gzip_enabled=args.gzip
)
def main(argv: Optional[List[str]] = None):
LOGGER.debug('argv: %r', argv)
args = parse_args(argv)
if args.debug:
for name in [__name__, 'sciencebeam_parser', 'sciencebeam_trainer_delft']:
logging.getLogger(name).setLevel('DEBUG')
run(args)
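# Usage sketch (hypothetical paths): generating training data for a set of PDFs,
# pre-annotated by the configured models and written into per-model sub-directories.
def _example_generate_training_data():
    main([
        '--source-path=/data/pdfs/*.pdf',
        '--output-path=/data/training-data',
        '--use-model',
        '--use-directory-structure',
        '--limit=10',
    ])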
if __name__ == '__main__':
logging.basicConfig(level='INFO')
main()
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/training/cli/generate_data.py
|
generate_data.py
|
from abc import ABC, abstractmethod
import argparse
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Iterable, List, NamedTuple, Optional, Sequence
from lxml import etree
from sciencebeam_trainer_delft.utils.io import (
auto_download_input_file
)
from sciencebeam_parser.utils.io import glob, makedirs, write_bytes, write_text
from sciencebeam_parser.document.layout_document import LayoutDocument
from sciencebeam_parser.document.semantic_document import (
SemanticMixedContentWrapper,
SemanticRawAffiliationAddress,
SemanticRawAuthors,
SemanticRawFigure,
SemanticRawReference,
SemanticRawReferenceText,
SemanticRawTable
)
from sciencebeam_parser.models.data import (
DocumentFeaturesContext,
LabeledLayoutModelData,
LayoutModelData
)
from sciencebeam_parser.models.model import (
LabeledLayoutToken,
LayoutDocumentLabelResult,
LayoutModelLabel,
Model,
iter_data_lines_for_model_data_iterables,
iter_labeled_layout_token_for_layout_model_label
)
from sciencebeam_parser.models.training_data import TeiTrainingDataGenerator
from sciencebeam_parser.processors.fulltext.models import FullTextModels
from sciencebeam_parser.resources.default_config import DEFAULT_CONFIG_FILE
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.app.parser import ScienceBeamParser
from sciencebeam_parser.utils.media_types import MediaTypes
LOGGER = logging.getLogger(__name__)
@dataclass
class ModelResultCache:
model_data_lists_by_key_map: Dict[
str, Sequence[Sequence[LabeledLayoutModelData]]
] = field(default_factory=dict)
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(
'ScienceBeam Parser: Generate Training Data'
)
parser.add_argument(
'--source-path',
type=str,
required=True
)
parser.add_argument(
'--output-path',
type=str,
required=True
)
parser.add_argument(
'--limit',
type=int,
required=False
)
parser.add_argument(
'--use-model',
action='store_true',
help='Use configured models to pre-annotate training data'
)
parser.add_argument(
'--use-directory-structure',
action='store_true',
help='Output training data to a directory structure'
)
parser.add_argument(
'--gzip',
action='store_true',
help='Enable gzip compression for output files (with .gz suffix)'
)
parser.add_argument(
'--debug',
action='store_true',
help='Enable debug logging'
)
return parser.parse_args(argv)
def get_labeled_model_data_list_list(
model_data_list_list: Sequence[Sequence[LayoutModelData]],
model: Model
) -> Sequence[Sequence[LabeledLayoutModelData]]:
return list(
model.iter_labeled_model_data_list_for_model_data_list_iterable(
model_data_list_list
)
)
def get_labeled_model_data_list(
model_data_list: Sequence[LayoutModelData],
model: Model
) -> Sequence[LabeledLayoutModelData]:
return get_labeled_model_data_list_list(
[model_data_list],
model=model
)[0]
def get_labeled_model_data_list_for_layout_document(
layout_document: LayoutDocument,
model: Model,
document_features_context: DocumentFeaturesContext
) -> Sequence[LabeledLayoutModelData]:
data_generator = model.get_data_generator(
document_features_context=document_features_context
)
model_data_list: Sequence[LayoutModelData] = list(
data_generator.iter_model_data_for_layout_document(layout_document)
)
return get_labeled_model_data_list(
model_data_list,
model=model
)
def get_layout_model_label_for_labeled_model_data(
labeled_model_data: LabeledLayoutModelData
) -> LayoutModelLabel:
return LayoutModelLabel(
label=labeled_model_data.label or '',
label_token_text=labeled_model_data.label_token_text,
layout_line=labeled_model_data.layout_line,
layout_token=labeled_model_data.layout_token
)
def iter_layout_model_label_for_labeled_model_data_list(
labeled_model_data_iterable: Iterable[LabeledLayoutModelData],
) -> Iterable[LayoutModelLabel]:
return (
get_layout_model_label_for_labeled_model_data(labeled_model_data)
for labeled_model_data in labeled_model_data_iterable
)
def get_layout_document_label_result_for_labeled_model_data_list(
labeled_model_data_iterable: Iterable[LabeledLayoutModelData],
layout_document: LayoutDocument
) -> LayoutDocumentLabelResult:
return LayoutDocumentLabelResult(
layout_document=layout_document,
layout_model_label_iterable=iter_layout_model_label_for_labeled_model_data_list(
labeled_model_data_iterable
)
)
class TrainingDataDocumentContext(NamedTuple):
output_path: str
source_filename: str
document_features_context: DocumentFeaturesContext
fulltext_models: FullTextModels
use_model: bool
use_directory_structure: bool
model_result_cache: ModelResultCache
gzip_enabled: bool
@property
def source_name(self) -> str:
source_basename = os.path.basename(self.source_filename)
return os.path.splitext(source_basename)[0]
def iter_unlabeled_model_data_list_for_model_and_layout_documents(
model: Model,
model_layout_documents: Sequence[LayoutDocument],
document_context: TrainingDataDocumentContext
) -> Iterable[Sequence[LayoutModelData]]:
if not model_layout_documents:
return []
data_generator = model.get_data_generator(
document_features_context=document_context.document_features_context
)
return [
list(
data_generator.iter_model_data_for_layout_document(model_layout_document)
)
for model_layout_document in model_layout_documents
]
def iter_labeled_model_data_list_for_model_and_layout_documents(
model: Model,
model_layout_documents: Sequence[LayoutDocument],
document_context: TrainingDataDocumentContext
) -> Iterable[Sequence[LabeledLayoutModelData]]:
if not model_layout_documents:
return []
cache_key = f'{type(model).__name__}_{id(model)}'
LOGGER.debug('cache_key: %r', cache_key)
model_data_lists = document_context.model_result_cache.model_data_lists_by_key_map.get(
cache_key
)
if model_data_lists is not None:
return model_data_lists
unlabeled_model_data_lists = list(
iter_unlabeled_model_data_list_for_model_and_layout_documents(
model=model,
model_layout_documents=model_layout_documents,
document_context=document_context
)
)
model_data_lists = get_labeled_model_data_list_list(
unlabeled_model_data_lists,
model=model
)
document_context.model_result_cache.model_data_lists_by_key_map[cache_key] = (
model_data_lists
)
return model_data_lists
def iter_model_data_list_for_model_and_layout_documents(
model: Model,
model_layout_documents: Sequence[LayoutDocument],
document_context: TrainingDataDocumentContext
) -> Iterable[Sequence[LayoutModelData]]:
if not document_context.use_model:
return iter_unlabeled_model_data_list_for_model_and_layout_documents(
model=model,
model_layout_documents=model_layout_documents,
document_context=document_context
)
return iter_labeled_model_data_list_for_model_and_layout_documents(
model=model,
model_layout_documents=model_layout_documents,
document_context=document_context
)
def get_labeled_layout_tokens_list_for_model_and_layout_documents(
model: Model,
layout_documents: Sequence[LayoutDocument],
document_context: TrainingDataDocumentContext
) -> Sequence[Sequence[LabeledLayoutToken]]:
model_data_lists = list(
iter_labeled_model_data_list_for_model_and_layout_documents(
model=model,
model_layout_documents=layout_documents,
document_context=document_context
)
)
assert len(model_data_lists) == len(layout_documents)
return [
list(iter_labeled_layout_token_for_layout_model_label(
iter_layout_model_label_for_labeled_model_data_list(
model_data_list
)
))
for model_data_list in model_data_lists
]
def get_labeled_layout_tokens_for_model_and_layout_document(
model: Model,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Sequence[LabeledLayoutToken]:
labeled_layout_tokens_list = get_labeled_layout_tokens_list_for_model_and_layout_documents(
model,
[layout_document],
document_context
)
assert len(labeled_layout_tokens_list) == 1
return labeled_layout_tokens_list[0]
def get_segmentation_label_result(
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> LayoutDocumentLabelResult:
segmentation_label_model_data_lists = list(
iter_labeled_model_data_list_for_model_and_layout_documents(
model=document_context.fulltext_models.segmentation_model,
model_layout_documents=[layout_document],
document_context=document_context
)
)
assert len(segmentation_label_model_data_lists) == 1
LOGGER.debug('segmentation_label_model_data_lists: %r', segmentation_label_model_data_lists)
return get_layout_document_label_result_for_labeled_model_data_list(
labeled_model_data_iterable=segmentation_label_model_data_lists[0],
layout_document=layout_document
)
class AbstractModelTrainingDataGenerator(ABC):
def get_pre_file_path_suffix(self) -> str:
return ''
def _get_file_path_with_suffix(
self,
suffix: Optional[str],
document_context: TrainingDataDocumentContext,
sub_directory: Optional[str] = None
) -> Optional[str]:
if not suffix:
return None
output_path = document_context.output_path
if sub_directory and document_context.use_directory_structure:
output_path = os.path.join(output_path, sub_directory)
if document_context.gzip_enabled:
suffix += '.gz'
return os.path.join(
output_path,
document_context.source_name + self.get_pre_file_path_suffix() + suffix
)
@abstractmethod
def get_tei_training_data_generator(
self,
document_context: TrainingDataDocumentContext
) -> TeiTrainingDataGenerator:
pass
@abstractmethod
def iter_model_data_list(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[Sequence[LayoutModelData]]:
return []
def get_default_tei_sub_directory(
self,
tei_training_data_generator: TeiTrainingDataGenerator
) -> Optional[str]:
return tei_training_data_generator.get_default_tei_sub_directory()
def generate_data_for_layout_document(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
):
tei_training_data_generator = self.get_tei_training_data_generator(document_context)
tei_file_path = self._get_file_path_with_suffix(
tei_training_data_generator.get_default_tei_filename_suffix(),
document_context=document_context,
sub_directory=self.get_default_tei_sub_directory(tei_training_data_generator)
)
data_file_path = self._get_file_path_with_suffix(
tei_training_data_generator.get_default_data_filename_suffix(),
document_context=document_context,
sub_directory=tei_training_data_generator.get_default_data_sub_directory()
)
assert tei_file_path
model_data_list_list = list(self.iter_model_data_list(
layout_document=layout_document,
document_context=document_context
))
if not model_data_list_list:
LOGGER.info('no entities found, skipping (%r)', tei_file_path)
return
training_tei_root = (
tei_training_data_generator
.get_training_tei_xml_for_multiple_model_data_iterables(
model_data_list_list
)
)
LOGGER.info('writing training tei to: %r', tei_file_path)
write_bytes(
tei_file_path,
etree.tostring(training_tei_root, pretty_print=True)
)
if data_file_path:
LOGGER.info('writing training raw data to: %r', data_file_path)
write_text(
data_file_path,
'\n'.join(
iter_data_lines_for_model_data_iterables(model_data_list_list)
),
encoding='utf-8'
)
class AbstractDocumentModelTrainingDataGenerator(AbstractModelTrainingDataGenerator):
@abstractmethod
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
pass
def get_tei_training_data_generator(
self,
document_context: TrainingDataDocumentContext
) -> TeiTrainingDataGenerator:
return self.get_main_model(document_context).get_tei_training_data_generator()
@abstractmethod
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
pass
def iter_model_data_list(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[Sequence[LayoutModelData]]:
model = self.get_main_model(document_context)
model_layout_documents = list(self.iter_model_layout_documents(
layout_document,
document_context=document_context
))
return iter_model_data_list_for_model_and_layout_documents(
model=model,
model_layout_documents=model_layout_documents,
document_context=document_context
)
class SegmentationModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.segmentation_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
return [layout_document]
class HeaderModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.header_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
LOGGER.debug('segmentation_label_result: %r', segmentation_label_result)
header_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<header>'
).remove_empty_blocks()
LOGGER.debug('header_layout_document: %r', header_layout_document)
if not header_layout_document.pages:
return []
return [header_layout_document]
class AffiliationAddressModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.affiliation_address_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
header_model = document_context.fulltext_models.header_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
header_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<header>'
).remove_empty_blocks()
LOGGER.debug('header_layout_document: %r', header_layout_document)
if not header_layout_document.pages:
return []
header_labeled_layout_tokens = get_labeled_layout_tokens_for_model_and_layout_document(
model=header_model,
layout_document=header_layout_document,
document_context=document_context
)
semantic_raw_aff_address_list = list(
SemanticMixedContentWrapper(list(
header_model.iter_semantic_content_for_labeled_layout_tokens(
header_labeled_layout_tokens
)
)).iter_by_type(SemanticRawAffiliationAddress)
)
LOGGER.info('semantic_raw_aff_address_list count: %d', len(semantic_raw_aff_address_list))
if not semantic_raw_aff_address_list:
return []
return [
LayoutDocument.for_blocks(
list(semantic_raw_aff_address.iter_blocks())
)
for semantic_raw_aff_address in semantic_raw_aff_address_list
]
class NameHeaderModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.name_header_model
def get_default_tei_sub_directory(
self,
tei_training_data_generator: TeiTrainingDataGenerator
) -> str:
return 'name/header/corpus'
def get_pre_file_path_suffix(self) -> str:
return '.header'
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
header_model = document_context.fulltext_models.header_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
header_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<header>'
).remove_empty_blocks()
LOGGER.debug('header_layout_document: %r', header_layout_document)
if not header_layout_document.pages:
return []
header_labeled_layout_tokens = get_labeled_layout_tokens_for_model_and_layout_document(
model=header_model,
layout_document=header_layout_document,
document_context=document_context
)
semantic_raw_author_list = list(
SemanticMixedContentWrapper(list(
header_model.iter_semantic_content_for_labeled_layout_tokens(
header_labeled_layout_tokens
)
)).iter_by_type(SemanticRawAuthors)
)
LOGGER.info('semantic_raw_author_list count: %d', len(semantic_raw_author_list))
if not semantic_raw_author_list:
return []
return [
LayoutDocument.for_blocks([
block
for semantic_raw_author in semantic_raw_author_list
for block in semantic_raw_author.iter_blocks()
])
]
class NameCitationModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.name_citation_model
def get_default_tei_sub_directory(
self,
tei_training_data_generator: TeiTrainingDataGenerator
) -> str:
return 'name/citation/corpus'
def get_pre_file_path_suffix(self) -> str:
return '.citations'
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
reference_segmenter_model = document_context.fulltext_models.reference_segmenter_model
citation_model = document_context.fulltext_models.citation_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
references_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<references>'
).remove_empty_blocks()
reference_segmenter_labeled_layout_tokens = (
get_labeled_layout_tokens_for_model_and_layout_document(
model=reference_segmenter_model,
layout_document=references_layout_document,
document_context=document_context
)
)
raw_reference_text_list = [
raw_reference_text
for raw_reference in SemanticMixedContentWrapper(list(
reference_segmenter_model.iter_semantic_content_for_labeled_layout_tokens(
reference_segmenter_labeled_layout_tokens
)
)).iter_by_type(SemanticRawReference)
for raw_reference_text in raw_reference.iter_by_type(SemanticRawReferenceText)
]
LOGGER.info('raw_reference_text_list count: %d', len(raw_reference_text_list))
if not raw_reference_text_list:
return []
citation_layout_documents = [
LayoutDocument.for_blocks(
list(semantic_raw_reference_text.iter_blocks())
)
for semantic_raw_reference_text in raw_reference_text_list
]
citation_labeled_layout_tokens_list = (
get_labeled_layout_tokens_list_for_model_and_layout_documents(
model=citation_model,
layout_documents=citation_layout_documents,
document_context=document_context
)
)
semantic_raw_author_list = [
raw_author
for citation_labeled_layout_tokens in citation_labeled_layout_tokens_list
for raw_author in SemanticMixedContentWrapper(list(
citation_model.iter_semantic_content_for_labeled_layout_tokens(
citation_labeled_layout_tokens
)
)).iter_by_type_recursively(SemanticRawAuthors)
]
LOGGER.info('semantic_raw_author_list count: %d', len(semantic_raw_author_list))
if not semantic_raw_author_list:
return []
return [
LayoutDocument.for_blocks([
block
for semantic_raw_author in semantic_raw_author_list
for block in semantic_raw_author.iter_blocks()
])
]
class FullTextModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.fulltext_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
body_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<body>'
).remove_empty_blocks()
if not body_layout_document.pages:
return []
return [body_layout_document]
class FigureModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.figure_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
fulltext_model = document_context.fulltext_models.fulltext_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
body_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<body>'
).remove_empty_blocks()
if not body_layout_document.pages:
return []
fulltext_labeled_layout_tokens = get_labeled_layout_tokens_for_model_and_layout_document(
model=fulltext_model,
layout_document=body_layout_document,
document_context=document_context
)
raw_figure_list = list(
SemanticMixedContentWrapper(list(
fulltext_model.iter_semantic_content_for_labeled_layout_tokens(
fulltext_labeled_layout_tokens
)
)).iter_by_type_recursively(SemanticRawFigure)
)
LOGGER.info('raw_figure_list count: %d', len(raw_figure_list))
if not raw_figure_list:
return []
return [
LayoutDocument.for_blocks(list(raw_figure.iter_blocks()))
for raw_figure in raw_figure_list
]
class TableModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.table_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
fulltext_model = document_context.fulltext_models.fulltext_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
body_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<body>'
).remove_empty_blocks()
if not body_layout_document.pages:
return []
fulltext_labeled_layout_tokens = get_labeled_layout_tokens_for_model_and_layout_document(
model=fulltext_model,
layout_document=body_layout_document,
document_context=document_context
)
raw_table_list = list(
SemanticMixedContentWrapper(list(
fulltext_model.iter_semantic_content_for_labeled_layout_tokens(
fulltext_labeled_layout_tokens
)
)).iter_by_type_recursively(SemanticRawTable)
)
LOGGER.info('raw_table_list count: %d', len(raw_table_list))
if not raw_table_list:
return []
return [
LayoutDocument.for_blocks(list(raw_table.iter_blocks()))
for raw_table in raw_table_list
]
class ReferenceSegmenterModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.reference_segmenter_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
ref_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<references>'
).remove_empty_blocks()
if not ref_layout_document.pages:
return []
return [ref_layout_document]
class CitationModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
return document_context.fulltext_models.citation_model
def iter_model_layout_documents(
self,
layout_document: LayoutDocument,
document_context: TrainingDataDocumentContext
) -> Iterable[LayoutDocument]:
reference_segmenter_model = document_context.fulltext_models.reference_segmenter_model
segmentation_label_result = get_segmentation_label_result(
layout_document,
document_context=document_context
)
references_layout_document = segmentation_label_result.get_filtered_document_by_label(
'<references>'
).remove_empty_blocks()
reference_segmenter_labeled_layout_tokens = (
get_labeled_layout_tokens_for_model_and_layout_document(
model=reference_segmenter_model,
layout_document=references_layout_document,
document_context=document_context
)
)
raw_reference_text_list = [
raw_reference_text
for raw_reference in SemanticMixedContentWrapper(list(
reference_segmenter_model.iter_semantic_content_for_labeled_layout_tokens(
reference_segmenter_labeled_layout_tokens
)
)).iter_by_type(SemanticRawReference)
for raw_reference_text in raw_reference.iter_by_type(SemanticRawReferenceText)
]
LOGGER.info('raw_reference_text_list count: %d', len(raw_reference_text_list))
if not raw_reference_text_list:
return []
return [
LayoutDocument.for_blocks(
list(semantic_raw_reference_text.iter_blocks())
)
for semantic_raw_reference_text in raw_reference_text_list
]
def generate_training_data_for_layout_document(
layout_document: LayoutDocument,
output_path: str,
source_filename: str,
document_features_context: DocumentFeaturesContext,
fulltext_models: FullTextModels,
use_model: bool,
use_directory_structure: bool,
gzip_enabled: bool = False
):
model_result_cache = ModelResultCache()
document_context = TrainingDataDocumentContext(
output_path=output_path,
source_filename=source_filename,
document_features_context=document_features_context,
fulltext_models=fulltext_models,
use_model=use_model,
use_directory_structure=use_directory_structure,
model_result_cache=model_result_cache,
gzip_enabled=gzip_enabled
)
training_data_generators = [
SegmentationModelTrainingDataGenerator(),
HeaderModelTrainingDataGenerator(),
AffiliationAddressModelTrainingDataGenerator(),
NameHeaderModelTrainingDataGenerator(),
FullTextModelTrainingDataGenerator(),
FigureModelTrainingDataGenerator(),
TableModelTrainingDataGenerator(),
ReferenceSegmenterModelTrainingDataGenerator(),
CitationModelTrainingDataGenerator(),
NameCitationModelTrainingDataGenerator()
]
for training_data_generator in training_data_generators:
training_data_generator.generate_data_for_layout_document(
layout_document=layout_document,
document_context=document_context
)
def get_layout_document_for_source_filename(
source_filename: str,
sciencebeam_parser: ScienceBeamParser,
) -> LayoutDocument:
with sciencebeam_parser.get_new_session() as session:
with auto_download_input_file(
source_filename,
auto_decompress=True
) as local_source_filename:
source = session.get_source(local_source_filename, MediaTypes.PDF)
layout_document = source.get_layout_document()
return layout_document
def generate_training_data_for_source_filename(
source_filename: str,
output_path: str,
sciencebeam_parser: ScienceBeamParser,
use_model: bool,
use_directory_structure: bool,
gzip_enabled: bool
):
LOGGER.debug('use_model: %r', use_model)
layout_document = get_layout_document_for_source_filename(
source_filename,
sciencebeam_parser=sciencebeam_parser
)
generate_training_data_for_layout_document(
layout_document=layout_document,
output_path=output_path,
source_filename=source_filename,
document_features_context=DocumentFeaturesContext(
sciencebeam_parser.app_features_context
),
fulltext_models=sciencebeam_parser.fulltext_models,
use_model=use_model,
use_directory_structure=use_directory_structure,
gzip_enabled=gzip_enabled
)
def get_source_file_list_or_fail(
source_path_pattern: str
) -> Sequence[str]:
source_file_list = list(glob(source_path_pattern))
if not source_file_list:
raise FileNotFoundError('no files found for file pattern: %r' % source_path_pattern)
return source_file_list
def run(args: argparse.Namespace):
LOGGER.info('args: %r', args)
source_file_list = get_source_file_list_or_fail(args.source_path)
if args.limit:
source_file_list = source_file_list[:args.limit]
LOGGER.info('source files: %d', len(source_file_list))
output_path = args.output_path
config = AppConfig.load_yaml(
DEFAULT_CONFIG_FILE
)
sciencebeam_parser = ScienceBeamParser.from_config(config)
LOGGER.info('output_path: %r', output_path)
# Note: creating the directory may not be necessary, but provides early feedback
makedirs(output_path, exist_ok=True)
for source_filename in source_file_list:
generate_training_data_for_source_filename(
source_filename,
output_path=output_path,
sciencebeam_parser=sciencebeam_parser,
use_model=args.use_model,
use_directory_structure=args.use_directory_structure,
gzip_enabled=args.gzip
)
def main(argv: Optional[List[str]] = None):
LOGGER.debug('argv: %r', argv)
args = parse_args(argv)
if args.debug:
for name in [__name__, 'sciencebeam_parser', 'sciencebeam_trainer_delft']:
logging.getLogger(name).setLevel('DEBUG')
run(args)
if __name__ == '__main__':
logging.basicConfig(level='INFO')
main()
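# Minimal programmatic usage sketch, added for illustration (not part of the
# original module). It reuses the helpers defined above; the file paths are
# placeholders / assumptions.
def _example_generate_training_data() -> None:
    config = AppConfig.load_yaml(DEFAULT_CONFIG_FILE)
    sciencebeam_parser = ScienceBeamParser.from_config(config)
    output_path = '/tmp/training-data'  # placeholder output directory
    makedirs(output_path, exist_ok=True)
    generate_training_data_for_source_filename(
        '/tmp/example.pdf',  # placeholder source PDF
        output_path=output_path,
        sciencebeam_parser=sciencebeam_parser,
        use_model=True,  # also run the existing models to pre-label the data
        use_directory_structure=True,  # write into per-model sub-directories
        gzip_enabled=False
    )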
| 0.804866 | 0.120232 |
import logging
from typing import Any, Mapping, Optional, Union
from lxml import etree
LOGGER = logging.getLogger(__name__)
T_XSLT_Input = Union[etree.ElementBase, etree.ElementTree]
class XsltTransformerWrapper:
def __init__(
self,
xslt_template: str,
xslt_template_parameters: Optional[Mapping[str, Any]] = None
):
self.xslt_template = xslt_template
if xslt_template_parameters is None:
xslt_template_parameters = {}
self.xslt_template_parameters = xslt_template_parameters
self.__transformer: Optional[etree.XSLT] = None
# validate the XSLT stylesheet
etree.fromstring(self.xslt_template)
@staticmethod
def from_template_string(xslt_template: str, **kwargs) -> 'XsltTransformerWrapper':
return XsltTransformerWrapper(xslt_template, **kwargs)
@staticmethod
def from_template_file(xslt_template_file: str, **kwargs) -> 'XsltTransformerWrapper':
return XsltTransformerWrapper.from_template_string(
etree.tostring(etree.parse(xslt_template_file)),
**kwargs
)
def _get_transformer(self) -> etree.XSLT:
if self.__transformer is None:
# The transform function cannot be pickled and needs to be loaded lazily
transform = etree.XSLT(
etree.fromstring(self.xslt_template)
)
self.__transformer = transform
return self.__transformer
def __call__(
self,
xslt_input: T_XSLT_Input,
xslt_template_parameters: Optional[Mapping[str, Any]] = None
):
xslt_template_parameters = {
**self.xslt_template_parameters,
**(xslt_template_parameters or {})
}
LOGGER.debug(
'xslt_input: %r (xslt_template_parameters=%r)',
xslt_input, xslt_template_parameters
)
_xslt_transformer = self._get_transformer()
return _xslt_transformer(
xslt_input,
**{
key: etree.XSLT.strparam(value)
for key, value in xslt_template_parameters.items()
}
)
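# Minimal usage sketch, added for illustration (not part of the original
# module): a tiny stylesheet that copies the root element's text and injects
# an XSLT parameter as an attribute. The template and parameter names are
# made up for this example.
_EXAMPLE_XSLT_TEMPLATE = '''
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
  <xsl:param name="source"/>
  <xsl:template match="/*">
    <result source="{$source}"><xsl:value-of select="."/></result>
  </xsl:template>
</xsl:stylesheet>
'''
def _example_xslt_usage() -> None:
    transformer = XsltTransformerWrapper.from_template_string(
        _EXAMPLE_XSLT_TEMPLATE,
        xslt_template_parameters={'source': 'default'}
    )
    result = transformer(
        etree.fromstring('<doc>Hello</doc>'),
        xslt_template_parameters={'source': 'override'}  # overrides the default
    )
    # expected output: <result source="override">Hello</result>
    print(etree.tostring(result))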
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/transformers/xslt.py
|
xslt.py
|
| 0.848659 | 0.244386 |
import logging
import os
import socket
import time
from contextlib import closing
from threading import Lock, current_thread
from typing import Optional, Sequence
from sciencebeam_parser.utils.background_process import (
BackgroundProcess,
ChildProcessReturnCodeError,
CommandRestartableBackgroundProcess,
exec_with_logging
)
from .office_scripts import get_office_script_directory
from .office_scripts.office_utils import find_pyuno_office, get_start_listener_command
LOGGER = logging.getLogger(__name__)
def change_ext(path: str, old_ext: Optional[str], new_ext: str) -> str:
if old_ext is None:
old_ext = os.path.splitext(path)[1]
if old_ext == '.gz':
path = path[:-len(old_ext)]
old_ext = os.path.splitext(path)[1]
if old_ext and path.endswith(old_ext):
return path[:-len(old_ext)] + new_ext
return path + new_ext
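# Behaviour sketch for change_ext, added for illustration (not part of the
# original module): the current extension (and a leading '.gz') is stripped
# before appending new_ext.
def _example_change_ext() -> None:
    assert change_ext('paper.docx', None, '-output.pdf') == 'paper-output.pdf'
    assert change_ext('paper.docx.gz', None, '-output.pdf') == 'paper-output.pdf'
    assert change_ext('paper', None, '.pdf') == 'paper.pdf'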
class UnoConnectionError(ConnectionError):
pass
def _exec_pyuno_script(
script_filename: str,
args: Sequence[str],
process_timeout: Optional[float] = None,
daemon: bool = False
) -> BackgroundProcess:
if not os.path.exists(script_filename):
from glob import glob # pylint: disable=import-outside-toplevel
LOGGER.info(
'%s does not exist, found: %s',
script_filename,
list(glob('%s/**/*' % os.path.dirname(script_filename)))
)
raise RuntimeError('%s does not exist' % script_filename)
office = find_pyuno_office()
command = [
office.uno_python_path,
script_filename
] + list(args)
env = {'PYTHONPATH': office.uno_path}
LOGGER.info('executing: %s (env: %s)', command, env)
try:
p = exec_with_logging(
command,
env=env,
logging_prefix='converter',
process_timeout=process_timeout,
daemon=daemon
)
except ChildProcessReturnCodeError as e:
if e.returncode == 9:
raise UnoConnectionError('failed to connect to uno server: %s' % e.returncode) from e
raise type(e)(
'failed to run converter: %s' % e.returncode,
returncode=e.returncode
) from e
return p
def _exec_doc_converter(
args: Sequence[str],
enable_debug: bool = False,
process_timeout: Optional[float] = None,
daemon: bool = False
) -> BackgroundProcess:
office_scripts_directory = get_office_script_directory()
doc_converter_script_filename = os.path.abspath(os.path.join(
office_scripts_directory,
'doc_converter.py'
))
if enable_debug:
args = ['--debug'] + list(args)
return _exec_pyuno_script(
doc_converter_script_filename,
args,
process_timeout=process_timeout,
daemon=daemon
)
class ListenerProcess(CommandRestartableBackgroundProcess):
def __init__(self, port: int, host: str = '127.0.0.1', connect_timeout: int = 10):
super().__init__(
command=get_start_listener_command(port=port),
name='listener on port %s' % port,
logging_prefix='listener[port:%s]' % port,
stop_at_exit=True
)
self.port = port
self.host = host
self.connect_timeout = connect_timeout
def __repr__(self) -> str:
return (
'{type_name}('
'port={self.port}'
', host={self.host}'
', connect_timeout={self.connect_timeout}'
', command={command}'
', process={self.process}'
')'
).format(
type_name=type(self).__name__,
self=self,
command=repr(self.command)
)
def is_alive(self) -> bool:
if not self.is_running():
return False
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(self.connect_timeout) # pylint: disable=no-member
if sock.connect_ex((self.host, self.port)) == 0: # pylint: disable=no-member
return True
return False
def wait_for_is_alive(self, timeout: float) -> bool:
start_time = time.monotonic()
while not self.is_alive():
if not self.is_running():
return False
if time.monotonic() - start_time >= timeout:
return False
time.sleep(0.5)
return True
def start_and_check_alive(self, timeout=10, **kwargs) -> None:
super().start(**kwargs)
if self.wait_for_is_alive(timeout=timeout):
return
self.stop_if_running()
assert self.process is not None
if self.process.returncode == 81:
# see https://bugs.documentfoundation.org/show_bug.cgi?id=107912
# "headless firstrun crashes (exit code 81)"
LOGGER.info('detected first-run error code 81, re-trying..')
self.start_and_check_alive(timeout=timeout, **kwargs)
return
raise ConnectionError('failed to start listener (unable to connect)')
    def start_listener_if_not_running(self, max_uptime: Optional[float] = None, **kwargs) -> None:
if self.is_alive():
uptime = self.get_uptime()
if not max_uptime or uptime <= max_uptime:
return
LOGGER.info('stopping listener, exceeded max uptime: %.3f > %.3f', uptime, max_uptime)
self.stop()
self.start_and_check_alive(**kwargs)
class DocConverterWrapper: # pylint: disable=too-many-instance-attributes
def __init__(
self,
port: int = 2003,
enable_debug: bool = False,
no_launch: bool = True,
keep_listener_running: bool = True,
process_timeout: Optional[float] = None,
max_uptime: float = 10,
stop_listener_on_error: bool = True
):
self.port = port
self.enable_debug = enable_debug
self.no_launch = no_launch
self.keep_listener_running = keep_listener_running
self.process_timeout = process_timeout
self.max_uptime = max_uptime
self.stop_listener_on_error = stop_listener_on_error
self._listener_process = ListenerProcess(port=port)
self._lock = Lock()
self._concurrent_count = 0
def __repr__(self) -> str:
return (
'{type_name}('
'port={self.port}'
', keep_listener_running={self.keep_listener_running}'
', _listener_process={self._listener_process}'
')'
).format(
type_name=type(self).__name__,
self=self
)
def start_listener_if_not_running(self) -> None:
self._listener_process.start_listener_if_not_running(max_uptime=self.max_uptime)
def stop_listener_if_running(self) -> None:
self._listener_process.stop_if_running()
def _do_convert(
self,
temp_source_filename: str,
output_type: str = 'pdf',
remove_line_no: bool = True,
remove_header_footer: bool = True,
remove_redline: bool = True
) -> str:
if self.no_launch:
self.start_listener_if_not_running()
temp_target_filename = change_ext(
temp_source_filename, None, '-output.%s' % output_type
)
args = []
args.extend([
'convert',
'--format', output_type
])
if remove_line_no:
args.append('--remove-line-no')
if remove_header_footer:
args.append('--remove-header-footer')
if remove_redline:
args.append('--remove-redline')
args.extend([
'--port', str(self.port),
'--output-file', str(temp_target_filename),
temp_source_filename
])
if self.no_launch:
args.append('--no-launch')
if self.keep_listener_running:
args.append('--keep-listener-running')
try:
_exec_doc_converter(
args,
enable_debug=self.enable_debug,
process_timeout=self.process_timeout
)
except UnoConnectionError:
self.stop_listener_if_running()
raise
except Exception:
if self.stop_listener_on_error:
self.stop_listener_if_running()
raise
if not os.path.exists(temp_target_filename):
raise RuntimeError('temp target file missing: %s' % temp_target_filename)
return temp_target_filename
def convert(self, *args, **kwargs) -> str:
thread_id = current_thread().ident
try:
self._concurrent_count += 1
LOGGER.debug(
                'attempting to acquire lock, thread id: %s, concurrent count: %s',
thread_id, self._concurrent_count
)
with self._lock:
LOGGER.debug(
                    'acquired lock, thread id: %s, concurrent count: %s',
thread_id, self._concurrent_count
)
return self._do_convert(*args, **kwargs)
finally:
self._concurrent_count -= 1
LOGGER.debug(
                'exiting convert (released lock if it was acquired),'
' thread id: %s, concurrent count: %s',
thread_id, self._concurrent_count
)
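# Minimal usage sketch, added for illustration (not part of the original
# module): converting a Word document to PDF. This assumes a working
# LibreOffice / pyuno installation and is therefore only a sketch; the input
# path is a placeholder.
def _example_convert_docx_to_pdf() -> None:
    converter = DocConverterWrapper(port=2003, process_timeout=600)
    output_filename = converter.convert(
        '/tmp/example.docx',
        output_type='pdf',
        remove_line_no=True,
        remove_header_footer=True,
        remove_redline=True
    )
    LOGGER.info('converted file written to: %s', output_filename)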
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/transformers/doc_converter_wrapper.py
|
doc_converter_wrapper.py
|
| 0.642881 | 0.085862 |
import os
from typing import NamedTuple, Sequence
import subprocess
class UnoOfficePaths(NamedTuple):
uno_path: str
uno_python_path: str
class EnvironmentVariables:
UNO_PATH = 'UNO_PATH'
UNO_PYTHON_PATH = 'UNO_PYTHON_PATH'
UNO_OFFICE_BINARY_PATH = 'UNO_OFFICE_BINARY_PATH'
class DefaultValues:
UNO_PATH = '/usr/lib/python3/dist-packages'
UNO_PYTHON_PATH = 'python3'
UNO_OFFICE_BINARY_PATH = '/usr/lib/libreoffice/program/soffice.bin'
def get_uno_path() -> str:
return os.environ.get('UNO_PATH') or DefaultValues.UNO_PATH
def get_uno_python_path() -> str:
return os.environ.get('UNO_PYTHON_PATH') or DefaultValues.UNO_PYTHON_PATH
def get_uno_office_binary_path() -> str:
return os.environ.get('UNO_OFFICE_BINARY_PATH') or DefaultValues.UNO_OFFICE_BINARY_PATH
def find_offices() -> Sequence[UnoOfficePaths]:
return [UnoOfficePaths(
uno_path=get_uno_path(),
uno_python_path=get_uno_python_path()
)]
def find_pyuno_office() -> UnoOfficePaths:
offices = find_offices()
if not offices:
raise RuntimeError('no suitable office installation found')
for office in offices:
try:
subprocess.check_output(
[office.uno_python_path, '-c', 'import uno, unohelper'],
env={'PYTHONPATH': office.uno_path}
)
return office
except subprocess.CalledProcessError:
pass
except OSError:
pass
raise RuntimeError(
'none of the potential office installations seem to function, tried: %s' % offices
)
def get_start_listener_command(port: int) -> Sequence[str]:
return [
get_uno_office_binary_path(),
'--headless',
'--invisible',
'--nocrashreport',
'--nodefault',
'--nofirststartwizard',
'--nologo',
'--norestore',
'--accept=socket,host=localhost,port={port};urp;StarOffice.ServiceManager'.format(
port=port
)
]
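# Minimal usage sketch, added for illustration (not part of the original
# module): resolving a working pyuno-enabled office installation and building
# the headless listener command. Requires LibreOffice to be installed locally.
def _example_office_utils() -> None:
    office = find_pyuno_office()
    print('uno python path:', office.uno_python_path)
    print('uno path:', office.uno_path)
    print('listener command:', ' '.join(get_start_listener_command(port=2003)))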
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/transformers/office_scripts/office_utils.py
|
office_utils.py
|
| 0.492676 | 0.06767 |
# This script will run using the default Python 3 environment
# where LibreOffice's scripts are installed to (at least in Ubuntu).
# This converter is very similar to unoconv but has an option to remove
# line numbers; it is also simpler because it is tailored to this use case.
# https://github.com/dagwieers/unoconv
from __future__ import absolute_import, print_function
import argparse
import os
import sys
import logging
import subprocess
import atexit
from time import sleep
from contextlib import contextmanager
from typing import Optional, Sequence
# pylint: disable=import-error
import uno # type: ignore
from com.sun.star.beans import PropertyValue # type: ignore
from com.sun.star.connection import NoConnectException # type: ignore
from com.sun.star.document import RedlineDisplayType # type: ignore
# pylint: enable=import-error
LOGGER = logging.getLogger(__name__)
FILTER_NAME_BY_EXT = {
'doc': 'MS Word 97',
'docx': 'Office Open XML Text',
'dotx': 'Office Open XML Text',
'rtf': 'Rich Text Format',
'pdf': 'writer_web_pdf_Export'
}
VALID_OUTPUT_FORMATS = sorted(FILTER_NAME_BY_EXT.keys())
def parse_args(argv: Optional[Sequence[str]] = None):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
convert_parser = subparsers.add_parser('convert')
convert_parser.add_argument(
'-f', '--format', type=str, required=True,
choices=VALID_OUTPUT_FORMATS,
help='Output format (ext)'
)
convert_parser.add_argument(
'-p', '--port', type=int, default=2002,
help='Port to the uno listener'
)
convert_parser.add_argument(
'--output-file', type=str,
help='Output file (if specified, only one input file should be used)'
)
convert_parser.add_argument(
'input_file', type=str, nargs='+',
help='Input files (does not support pdf)'
)
convert_parser.add_argument(
'--remove-line-no', action='store_true', default=False,
help='remove line number'
)
convert_parser.add_argument(
'--remove-header-footer', action='store_true', default=False,
help='remove header and footer (including page number)'
)
convert_parser.add_argument(
'--remove-redline', action='store_true', default=False,
help='remove redlines (track changes, by accepting all changes)'
)
convert_parser.add_argument(
'--keep-listener-running', action='store_true', default=False,
help='keep listener running in the background'
)
convert_parser.add_argument(
'-n', '--no-launch', action='store_true', default=False,
help='fail if no listener is found (default: launch one)'
)
start_listener_parser = subparsers.add_parser('start-listener')
start_listener_parser.add_argument(
'-p', '--port', type=int, default=2002,
help='Port to the uno listener'
)
parser.add_argument(
'--debug', action='store_true', default=False,
help='enable debug output'
)
args = parser.parse_args(argv)
if args.debug:
logging.getLogger().setLevel('DEBUG')
LOGGER.debug('args: %s', args)
return args
def get_start_listener_command(port: int) -> Sequence[str]:
return [
'soffice',
'--headless',
'--invisible',
'--nocrashreport',
'--nodefault',
'--nofirststartwizard',
'--nologo',
'--norestore',
'--accept=socket,host=localhost,port={port};urp;StarOffice.ServiceManager'.format(
port=port
)
]
def get_resolver():
local_context = uno.getComponentContext()
resolver = local_context.ServiceManager.createInstanceWithContext(
"com.sun.star.bridge.UnoUrlResolver", local_context
)
return resolver
def connect(resolver, port: int):
return resolver.resolve(
"uno:socket,host=localhost,port={port};urp;StarOffice.ComponentContext".format(
port=port
)
)
def connect_with_timeout(resolver, port: int, timeout: float):
delay = 0.5
elapsed = 0.0
while True:
try:
connect_result = connect(resolver, port)
LOGGER.debug('connected to port %s', port)
return connect_result
except NoConnectException as e:
if elapsed >= timeout:
LOGGER.debug(
'connection failed, timeout exceeded (%.1f >= %s)',
elapsed, timeout
)
raise e
LOGGER.debug('connection failed, try again in %.1f (%.1f)', delay, elapsed)
sleep(delay)
elapsed += delay
def start_listener(port: int) -> subprocess.Popen:
LOGGER.debug('starting listener on port %d', port)
return subprocess.Popen(
get_start_listener_command(port)
)
def stop_listener(listener_process: subprocess.Popen):
LOGGER.debug('stopping listener process with pid: %s', listener_process.pid)
return listener_process.terminate()
@contextmanager
def managed_connection(resolver, port: int, no_launch: bool, keep_listener_running: bool):
timeout = 10
try:
yield connect_with_timeout(resolver, port, timeout)
except NoConnectException as e:
if no_launch:
raise e
LOGGER.debug('failed to connect, try to start listener')
listener_process = start_listener(port)
try:
yield connect_with_timeout(resolver, port, timeout)
finally:
if not keep_listener_running:
stop_listener(listener_process)
@contextmanager
def managed_desktop(connection, keep_listener_running: bool):
LOGGER.debug('starting desktop session')
desktop = connection.ServiceManager.createInstanceWithContext(
"com.sun.star.frame.Desktop", connection
)
try:
yield desktop
finally:
try:
if not keep_listener_running:
LOGGER.debug('terminate desktop session')
desktop.terminate()
except Exception as e: # pylint: disable=broad-except
LOGGER.warning('caught exception while terminating desktop: %s', e)
def create_property_value(name, value):
property_value = PropertyValue()
property_value.Name = name
property_value.Value = value
return property_value
def dict_to_property_values(d):
return tuple((
create_property_value(key, value)
for key, value in d.items()
))
def property_set_to_dict(property_set):
return {
prop.Name: property_set.getPropertyValue(prop.Name)
for prop in property_set.getPropertySetInfo().getProperties()
}
def disable_document_header_footer(document):
    styleFamilies = document.getStyleFamilies()
    # check that the 'PageStyles' family exists before looking it up
    if not styleFamilies.hasByName('PageStyles'):
        return
    pageStyles = styleFamilies.getByName('PageStyles')
for styleName in pageStyles.getElementNames():
pageStyle = pageStyles.getByName(styleName)
pageStyle.setPropertyValue('HeaderIsOn', False)
pageStyle.setPropertyValue('FooterIsOn', False)
def convert_document_file(
desktop,
input_file: str,
output_file: str,
output_ext: str,
remove_line_no: bool = False,
remove_redline: bool = False,
remove_header_footer: bool = False
):
output_filter_name = FILTER_NAME_BY_EXT[output_ext]
input_file_url = uno.systemPathToFileUrl(os.path.realpath(input_file))
document = desktop.loadComponentFromURL(
input_file_url,
"_blank", 0,
dict_to_property_values({'Hidden': True, 'ReadOnly': True})
)
if not document:
raise RuntimeError('failed to load document: %s' % input_file_url)
try:
if remove_line_no:
document.getLineNumberingProperties().IsOn = False
if remove_header_footer:
disable_document_header_footer(document)
if remove_redline:
document.setPropertyValue('RedlineDisplayType', RedlineDisplayType.NONE)
output_url = "file://" + os.path.abspath(output_file)
LOGGER.debug("output_url: %s", output_url)
document.storeToURL(
output_url,
dict_to_property_values({'FilterName': output_filter_name})
)
finally:
# close, parameter: DeliverOwnership
# "true: delegates the ownership of ths closing object to any one
# which throw the CloseVetoException.
# This new owner has to close the closing object again
# if his still running processes will be finished."
document.close(True)
def convert(desktop, args: argparse.Namespace):
if args.output_file and len(args.input_file) > 1:
raise RuntimeError(
''.join([
                'only one input file should be specified together with --output-file.'
' (input files: %s)'
]) % args.input_file
)
for input_filename in args.input_file:
LOGGER.info(
'processing: %s (%s)',
input_filename,
'{:,d}'.format(os.path.getsize(input_filename))
)
name, input_ext = os.path.splitext(input_filename)
if input_ext.startswith('.'):
input_ext = input_ext[1:]
if not args.output_file and input_ext == args.format:
raise RuntimeError(
''.join([
'input and output format should not be the same',
' (unless --output-file was specified): %s -> %s'
]) % (
input_ext, args.format
)
)
if args.output_file:
output_filename = args.output_file
else:
output_filename = name + '.' + args.format
convert_document_file(
desktop,
input_filename,
output_filename,
args.format,
remove_line_no=args.remove_line_no,
remove_header_footer=args.remove_header_footer,
remove_redline=args.remove_redline
)
def run(args: argparse.Namespace):
if args.command == 'convert':
resolver = get_resolver()
with managed_connection(
resolver, args.port,
no_launch=args.no_launch,
keep_listener_running=args.keep_listener_running) as connection:
with managed_desktop(connection, args.keep_listener_running) as desktop:
convert(desktop, args)
elif args.command == 'start-listener':
p = start_listener(args.port)
atexit.register(
lambda: stop_listener(p)
)
p.wait()
else:
raise RuntimeError('invalid command: %s' % args.command)
class ExitCodes:
UNO_CONNECTION_ERROR = 9
def main(argv: Optional[Sequence] = None):
args = parse_args(argv)
try:
run(args)
except NoConnectException as e:
LOGGER.error('failed to connect to uno service: %s', e, exc_info=e)
sys.exit(ExitCodes.UNO_CONNECTION_ERROR)
except Exception as e:
        LOGGER.error('failed to run: %s (%s)', e, type(e), exc_info=e)
raise
if __name__ == '__main__':
logging.basicConfig(level='INFO')
main()
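# Usage sketch, added for illustration (not part of the original script).
# The flag names below are taken from parse_args() above; the file paths are
# placeholders.
#
#   # start a long-running headless listener on port 2002:
#   python3 doc_converter.py start-listener --port 2002
#
#   # convert a DOCX file to PDF via that listener, stripping line numbers,
#   # headers/footers and tracked changes:
#   python3 doc_converter.py convert \
#       --format pdf \
#       --port 2002 \
#       --no-launch \
#       --keep-listener-running \
#       --remove-line-no \
#       --remove-header-footer \
#       --remove-redline \
#       --output-file /tmp/example.pdf \
#       /tmp/example.docx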
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/transformers/office_scripts/doc_converter.py
|
doc_converter.py
|
# This script will run using the default Python 3 environment
# where LibreOffice's scripts are installed to (at least in Ubuntu).
# This converter is very similar to unoconv but has an option to remove
# line numbers, it is also simpler by being more tailored to the use-case.
# https://github.com/dagwieers/unoconv
from __future__ import absolute_import, print_function
import argparse
import os
import sys
import logging
import subprocess
import atexit
from time import sleep
from contextlib import contextmanager
from typing import Optional, Sequence
# pylint: disable=import-error
import uno # type: ignore
from com.sun.star.beans import PropertyValue # type: ignore
from com.sun.star.connection import NoConnectException # type: ignore
from com.sun.star.document import RedlineDisplayType # type: ignore
# pylint: enable=import-error
LOGGER = logging.getLogger(__name__)
FILTER_NAME_BY_EXT = {
'doc': 'MS Word 97',
'docx': 'Office Open XML Text',
'dotx': 'Office Open XML Text',
'rtf': 'Rich Text Format',
'pdf': 'writer_web_pdf_Export'
}
VALID_OUTPUT_FORMATS = sorted(FILTER_NAME_BY_EXT.keys())
def parse_args(argv: Optional[Sequence[str]] = None):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
convert_parser = subparsers.add_parser('convert')
convert_parser.add_argument(
'-f', '--format', type=str, required=True,
choices=VALID_OUTPUT_FORMATS,
help='Output format (ext)'
)
convert_parser.add_argument(
'-p', '--port', type=int, default=2002,
help='Port to the uno listener'
)
convert_parser.add_argument(
'--output-file', type=str,
help='Output file (if specified, only one input file should be used)'
)
convert_parser.add_argument(
'input_file', type=str, nargs='+',
help='Input files (does not support pdf)'
)
convert_parser.add_argument(
'--remove-line-no', action='store_true', default=False,
help='remove line number'
)
convert_parser.add_argument(
'--remove-header-footer', action='store_true', default=False,
help='remove header and footer (including page number)'
)
convert_parser.add_argument(
'--remove-redline', action='store_true', default=False,
help='remove redlines (track changes, by accepting all changes)'
)
convert_parser.add_argument(
'--keep-listener-running', action='store_true', default=False,
help='keep listener running in the background'
)
convert_parser.add_argument(
'-n', '--no-launch', action='store_true', default=False,
help='fail if no listener is found (default: launch one)'
)
start_listener_parser = subparsers.add_parser('start-listener')
start_listener_parser.add_argument(
'-p', '--port', type=int, default=2002,
help='Port to the uno listener'
)
parser.add_argument(
'--debug', action='store_true', default=False,
help='enable debug output'
)
args = parser.parse_args(argv)
if args.debug:
logging.getLogger().setLevel('DEBUG')
LOGGER.debug('args: %s', args)
return args
def get_start_listener_command(port: int) -> Sequence[str]:
return [
'soffice',
'--headless',
'--invisible',
'--nocrashreport',
'--nodefault',
'--nofirststartwizard',
'--nologo',
'--norestore',
'--accept=socket,host=localhost,port={port};urp;StarOffice.ServiceManager'.format(
port=port
)
]
def get_resolver():
local_context = uno.getComponentContext()
resolver = local_context.ServiceManager.createInstanceWithContext(
"com.sun.star.bridge.UnoUrlResolver", local_context
)
return resolver
def connect(resolver, port: int):
return resolver.resolve(
"uno:socket,host=localhost,port={port};urp;StarOffice.ComponentContext".format(
port=port
)
)
def connect_with_timeout(resolver, port: int, timeout: float):
delay = 0.5
elapsed = 0.0
while True:
try:
connect_result = connect(resolver, port)
LOGGER.debug('connected to port %s', port)
return connect_result
except NoConnectException as e:
if elapsed >= timeout:
LOGGER.debug(
'connection failed, timeout exceeded (%.1f >= %s)',
elapsed, timeout
)
raise e
LOGGER.debug('connection failed, try again in %.1f (%.1f)', delay, elapsed)
sleep(delay)
elapsed += delay
def start_listener(port: int) -> subprocess.Popen:
LOGGER.debug('starting listener on port %d', port)
return subprocess.Popen(
get_start_listener_command(port)
)
def stop_listener(listener_process: subprocess.Popen):
LOGGER.debug('stopping listener process with pid: %s', listener_process.pid)
return listener_process.terminate()
@contextmanager
def managed_connection(resolver, port: int, no_launch: bool, keep_listener_running: bool):
timeout = 10
try:
yield connect_with_timeout(resolver, port, timeout)
except NoConnectException as e:
if no_launch:
raise e
LOGGER.debug('failed to connect, try to start listener')
listener_process = start_listener(port)
try:
yield connect_with_timeout(resolver, port, timeout)
finally:
if not keep_listener_running:
stop_listener(listener_process)
@contextmanager
def managed_desktop(connection, keep_listener_running: bool):
LOGGER.debug('starting desktop session')
desktop = connection.ServiceManager.createInstanceWithContext(
"com.sun.star.frame.Desktop", connection
)
try:
yield desktop
finally:
try:
if not keep_listener_running:
LOGGER.debug('terminate desktop session')
desktop.terminate()
except Exception as e: # pylint: disable=broad-except
LOGGER.warning('caught exception while terminating desktop: %s', e)
def create_property_value(name, value):
property_value = PropertyValue()
property_value.Name = name
property_value.Value = value
return property_value
def dict_to_property_values(d):
return tuple((
create_property_value(key, value)
for key, value in d.items()
))
def property_set_to_dict(property_set):
return {
prop.Name: property_set.getPropertyValue(prop.Name)
for prop in property_set.getPropertySetInfo().getProperties()
}
def disable_document_header_footer(document):
styleFamilies = document.getStyleFamilies()
pageStyles = styleFamilies.getByName('PageStyles')
if not styleFamilies.hasByName('PageStyles'):
return
for styleName in pageStyles.getElementNames():
pageStyle = pageStyles.getByName(styleName)
pageStyle.setPropertyValue('HeaderIsOn', False)
pageStyle.setPropertyValue('FooterIsOn', False)
def convert_document_file(
desktop,
input_file: str,
output_file: str,
output_ext: str,
remove_line_no: bool = False,
remove_redline: bool = False,
remove_header_footer: bool = False
):
output_filter_name = FILTER_NAME_BY_EXT[output_ext]
input_file_url = uno.systemPathToFileUrl(os.path.realpath(input_file))
document = desktop.loadComponentFromURL(
input_file_url,
"_blank", 0,
dict_to_property_values({'Hidden': True, 'ReadOnly': True})
)
if not document:
raise RuntimeError('failed to load document: %s' % input_file_url)
try:
if remove_line_no:
document.getLineNumberingProperties().IsOn = False
if remove_header_footer:
disable_document_header_footer(document)
if remove_redline:
document.setPropertyValue('RedlineDisplayType', RedlineDisplayType.NONE)
output_url = "file://" + os.path.abspath(output_file)
LOGGER.debug("output_url: %s", output_url)
document.storeToURL(
output_url,
dict_to_property_values({'FilterName': output_filter_name})
)
finally:
        # close, parameter: DeliverOwnership
        # True delegates ownership of the closing object to anyone who throws
        # a CloseVetoException; that new owner has to close the object again
        # once its still-running processes have finished.
document.close(True)
def convert(desktop, args: argparse.Namespace):
if args.output_file and len(args.input_file) > 1:
raise RuntimeError(
''.join([
                'only one input file should be specified together with --output-file.'
' (input files: %s)'
]) % args.input_file
)
for input_filename in args.input_file:
LOGGER.info(
'processing: %s (%s)',
input_filename,
'{:,d}'.format(os.path.getsize(input_filename))
)
name, input_ext = os.path.splitext(input_filename)
if input_ext.startswith('.'):
input_ext = input_ext[1:]
if not args.output_file and input_ext == args.format:
raise RuntimeError(
''.join([
'input and output format should not be the same',
' (unless --output-file was specified): %s -> %s'
]) % (
input_ext, args.format
)
)
if args.output_file:
output_filename = args.output_file
else:
output_filename = name + '.' + args.format
convert_document_file(
desktop,
input_filename,
output_filename,
args.format,
remove_line_no=args.remove_line_no,
remove_header_footer=args.remove_header_footer,
remove_redline=args.remove_redline
)
def run(args: argparse.Namespace):
if args.command == 'convert':
resolver = get_resolver()
with managed_connection(
resolver, args.port,
no_launch=args.no_launch,
keep_listener_running=args.keep_listener_running) as connection:
with managed_desktop(connection, args.keep_listener_running) as desktop:
convert(desktop, args)
elif args.command == 'start-listener':
p = start_listener(args.port)
atexit.register(
lambda: stop_listener(p)
)
p.wait()
else:
raise RuntimeError('invalid command: %s' % args.command)
class ExitCodes:
UNO_CONNECTION_ERROR = 9
def main(argv: Optional[Sequence] = None):
args = parse_args(argv)
try:
run(args)
except NoConnectException as e:
LOGGER.error('failed to connect to uno service: %s', e, exc_info=e)
sys.exit(ExitCodes.UNO_CONNECTION_ERROR)
except Exception as e:
        LOGGER.error('failed to run: %s (%s)', e, type(e), exc_info=e)
raise
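# Illustrative sketch (not part of the original module): how the pieces above
# compose when used programmatically instead of via run()/convert(). The port
# (2002), the file names, and the assumption that 'pdf' is a key of
# FILTER_NAME_BY_EXT are examples only; the call chain (get_resolver ->
# managed_connection -> managed_desktop -> convert_document_file) mirrors
# what run() does.
# A roughly equivalent hypothetical CLI call would be:
#   python -m <this_module> convert --format pdf input.docx
def _example_convert_docx_to_pdf(
        input_file: str = 'input.docx',
        output_file: str = 'output.pdf',
        port: int = 2002):
    resolver = get_resolver()
    with managed_connection(
            resolver, port,
            no_launch=False,
            keep_listener_running=False) as connection:
        with managed_desktop(connection, keep_listener_running=False) as desktop:
            convert_document_file(
                desktop,
                input_file,
                output_file,
                output_ext='pdf',
                remove_header_footer=True
            )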
if __name__ == '__main__':
logging.basicConfig(level='INFO')
main()
| 0.600071 | 0.130285 |
from typing import Dict, List
from lxml import etree
from sciencebeam_parser.document.layout_document import (
LayoutGraphic,
LayoutLineMeta,
LayoutPageCoordinates,
LayoutFont,
LayoutPageMeta,
LayoutToken,
LayoutLine,
LayoutBlock,
LayoutPage,
LayoutDocument,
EMPTY_FONT
)
ALTO_NS = 'http://www.loc.gov/standards/alto/ns-v3#'
ALTO_NS_MAP = {
'alto': ALTO_NS
}
def alto_xpath(parent: etree.ElementBase, xpath: str) -> List[etree.ElementBase]:
return parent.xpath(xpath, namespaces=ALTO_NS_MAP)
class AltoParser:
def __init__(self):
self.font_by_id_map: Dict[str, LayoutFont] = {}
def parse_page_coordinates(
self,
node: etree.ElementBase,
page_number: int
) -> LayoutPageCoordinates:
return LayoutPageCoordinates(
x=float(node.attrib.get('HPOS', 0)),
y=float(node.attrib.get('VPOS', 0)),
width=float(node.attrib.get('WIDTH', 0)),
height=float(node.attrib.get('HEIGHT', 0)),
page_number=page_number
)
def parse_token(
self,
token_node: etree.ElementBase,
page_number: int,
layout_line_meta: LayoutLineMeta
) -> LayoutToken:
return LayoutToken(
text=token_node.attrib.get('CONTENT') or '',
font=self.font_by_id_map.get(
token_node.attrib.get('STYLEREFS'),
EMPTY_FONT
),
coordinates=self.parse_page_coordinates(token_node, page_number=page_number),
line_meta=layout_line_meta
)
def parse_line(
self,
line_node: etree.ElementBase,
page_number: int,
page_meta: LayoutPageMeta
) -> LayoutLine:
return LayoutLine(tokens=[
self.parse_token(
token_node,
page_number=page_number,
layout_line_meta=LayoutLineMeta(
line_id=id(line_node),
page_meta=page_meta
)
)
for token_node in alto_xpath(line_node, './/alto:String')
])
def parse_block(
self,
block_node: etree.ElementBase,
page_number: int,
page_meta: LayoutPageMeta
) -> LayoutBlock:
return LayoutBlock(lines=[
self.parse_line(line_node, page_number=page_number, page_meta=page_meta)
for line_node in alto_xpath(block_node, './/alto:TextLine[alto:String]')
])
def parse_graphic(
self,
graphic_node: etree.ElementBase,
page_number: int,
page_meta: LayoutPageMeta
) -> LayoutGraphic:
attrib = graphic_node.attrib
return LayoutGraphic(
local_file_path=attrib.get('FILEID'),
coordinates=self.parse_page_coordinates(graphic_node, page_number=page_number),
graphic_type=attrib.get('TYPE'),
page_meta=page_meta
)
def parse_page(
self,
page_node: etree.ElementBase,
page_index: int
) -> LayoutPage:
page_number_str = page_node.attrib.get('PHYSICAL_IMG_NR')
page_number = int(page_number_str) if page_number_str else 1 + page_index
width_str = page_node.attrib.get('WIDTH')
height_str = page_node.attrib.get('HEIGHT')
coordinates = (
LayoutPageCoordinates(
x=0,
y=0,
width=float(width_str),
height=float(height_str),
page_number=page_number
)
if width_str and height_str
else None
)
page_meta = LayoutPageMeta(
page_number=page_number,
coordinates=coordinates
)
return LayoutPage(
meta=page_meta,
blocks=[
self.parse_block(block_node, page_number=page_number, page_meta=page_meta)
for block_node in alto_xpath(page_node, './/alto:TextBlock')
],
graphics=[
self.parse_graphic(graphic_node, page_number=page_number, page_meta=page_meta)
for graphic_node in alto_xpath(page_node, './/alto:Illustration')
]
)
def parse_font(self, font_node: etree.ElementBase) -> LayoutFont:
font_styles = (font_node.attrib.get('FONTSTYLE') or '').split(' ')
return LayoutFont(
font_id=font_node.attrib.get('ID'),
font_family=font_node.attrib.get('FONTFAMILY'),
font_size=float(font_node.attrib.get('FONTSIZE')),
is_bold='bold' in font_styles,
is_italics='italics' in font_styles,
is_subscript='subscript' in font_styles,
is_superscript='superscript' in font_styles
)
def parse_font_by_id_map(self, root: etree.ElementBase) -> Dict[str, LayoutFont]:
fonts = [
self.parse_font(font_node)
for font_node in alto_xpath(root, './alto:Styles/alto:TextStyle')
]
return {
font.font_id: font
for font in fonts
}
def parse_root(self, root: etree.ElementBase) -> LayoutDocument:
self.font_by_id_map = self.parse_font_by_id_map(root)
return LayoutDocument(pages=[
self.parse_page(page_node, page_index=page_index)
for page_index, page_node in enumerate(alto_xpath(root, './/alto:Page'))
])
def parse_alto_root(root: etree.ElementBase) -> LayoutDocument:
return AltoParser().parse_root(root)
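# Illustrative sketch (not part of the original module): parsing a minimal
# ALTO XML document with parse_alto_root. The XML snippet is a made-up
# example containing only the elements AltoParser actually reads
# (Page, TextBlock, TextLine, String); there is no Styles section, so all
# tokens fall back to EMPTY_FONT.
def _example_parse_minimal_alto() -> LayoutDocument:
    minimal_alto_xml = (
        '<alto xmlns="http://www.loc.gov/standards/alto/ns-v3#">'
        '<Layout>'
        '<Page PHYSICAL_IMG_NR="1" WIDTH="595" HEIGHT="842">'
        '<PrintSpace>'
        '<TextBlock>'
        '<TextLine>'
        '<String CONTENT="Hello" HPOS="10" VPOS="20" WIDTH="50" HEIGHT="12"/>'
        '</TextLine>'
        '</TextBlock>'
        '</PrintSpace>'
        '</Page>'
        '</Layout>'
        '</alto>'
    )
    root = etree.fromstring(minimal_alto_xml)
    return parse_alto_root(root)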
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/external/pdfalto/parser.py
|
parser.py
|
from typing import Dict, List
from lxml import etree
from sciencebeam_parser.document.layout_document import (
LayoutGraphic,
LayoutLineMeta,
LayoutPageCoordinates,
LayoutFont,
LayoutPageMeta,
LayoutToken,
LayoutLine,
LayoutBlock,
LayoutPage,
LayoutDocument,
EMPTY_FONT
)
ALTO_NS = 'http://www.loc.gov/standards/alto/ns-v3#'
ALTO_NS_MAP = {
'alto': ALTO_NS
}
def alto_xpath(parent: etree.ElementBase, xpath: str) -> List[etree.ElementBase]:
return parent.xpath(xpath, namespaces=ALTO_NS_MAP)
class AltoParser:
def __init__(self):
self.font_by_id_map: Dict[str, LayoutFont] = {}
def parse_page_coordinates(
self,
node: etree.ElementBase,
page_number: int
) -> LayoutPageCoordinates:
return LayoutPageCoordinates(
x=float(node.attrib.get('HPOS', 0)),
y=float(node.attrib.get('VPOS', 0)),
width=float(node.attrib.get('WIDTH', 0)),
height=float(node.attrib.get('HEIGHT', 0)),
page_number=page_number
)
def parse_token(
self,
token_node: etree.ElementBase,
page_number: int,
layout_line_meta: LayoutLineMeta
) -> LayoutToken:
return LayoutToken(
text=token_node.attrib.get('CONTENT') or '',
font=self.font_by_id_map.get(
token_node.attrib.get('STYLEREFS'),
EMPTY_FONT
),
coordinates=self.parse_page_coordinates(token_node, page_number=page_number),
line_meta=layout_line_meta
)
def parse_line(
self,
line_node: etree.ElementBase,
page_number: int,
page_meta: LayoutPageMeta
) -> LayoutLine:
return LayoutLine(tokens=[
self.parse_token(
token_node,
page_number=page_number,
layout_line_meta=LayoutLineMeta(
line_id=id(line_node),
page_meta=page_meta
)
)
for token_node in alto_xpath(line_node, './/alto:String')
])
def parse_block(
self,
block_node: etree.ElementBase,
page_number: int,
page_meta: LayoutPageMeta
) -> LayoutBlock:
return LayoutBlock(lines=[
self.parse_line(line_node, page_number=page_number, page_meta=page_meta)
for line_node in alto_xpath(block_node, './/alto:TextLine[alto:String]')
])
def parse_graphic(
self,
graphic_node: etree.ElementBase,
page_number: int,
page_meta: LayoutPageMeta
) -> LayoutGraphic:
attrib = graphic_node.attrib
return LayoutGraphic(
local_file_path=attrib.get('FILEID'),
coordinates=self.parse_page_coordinates(graphic_node, page_number=page_number),
graphic_type=attrib.get('TYPE'),
page_meta=page_meta
)
def parse_page(
self,
page_node: etree.ElementBase,
page_index: int
) -> LayoutPage:
page_number_str = page_node.attrib.get('PHYSICAL_IMG_NR')
page_number = int(page_number_str) if page_number_str else 1 + page_index
width_str = page_node.attrib.get('WIDTH')
height_str = page_node.attrib.get('HEIGHT')
coordinates = (
LayoutPageCoordinates(
x=0,
y=0,
width=float(width_str),
height=float(height_str),
page_number=page_number
)
if width_str and height_str
else None
)
page_meta = LayoutPageMeta(
page_number=page_number,
coordinates=coordinates
)
return LayoutPage(
meta=page_meta,
blocks=[
self.parse_block(block_node, page_number=page_number, page_meta=page_meta)
for block_node in alto_xpath(page_node, './/alto:TextBlock')
],
graphics=[
self.parse_graphic(graphic_node, page_number=page_number, page_meta=page_meta)
for graphic_node in alto_xpath(page_node, './/alto:Illustration')
]
)
def parse_font(self, font_node: etree.ElementBase) -> LayoutFont:
font_styles = (font_node.attrib.get('FONTSTYLE') or '').split(' ')
return LayoutFont(
font_id=font_node.attrib.get('ID'),
font_family=font_node.attrib.get('FONTFAMILY'),
font_size=float(font_node.attrib.get('FONTSIZE')),
is_bold='bold' in font_styles,
is_italics='italics' in font_styles,
is_subscript='subscript' in font_styles,
is_superscript='superscript' in font_styles
)
def parse_font_by_id_map(self, root: etree.ElementBase) -> Dict[str, LayoutFont]:
fonts = [
self.parse_font(font_node)
for font_node in alto_xpath(root, './alto:Styles/alto:TextStyle')
]
return {
font.font_id: font
for font in fonts
}
def parse_root(self, root: etree.ElementBase) -> LayoutDocument:
self.font_by_id_map = self.parse_font_by_id_map(root)
return LayoutDocument(pages=[
self.parse_page(page_node, page_index=page_index)
for page_index, page_node in enumerate(alto_xpath(root, './/alto:Page'))
])
def parse_alto_root(root: etree.ElementBase) -> LayoutDocument:
return AltoParser().parse_root(root)
| 0.809577 | 0.132346 |
import argparse
import logging
import os
from logging.config import dictConfig
from flask import Flask
from sciencebeam_parser.app.parser import ScienceBeamParser
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.service.blueprints.index import IndexBlueprint
from sciencebeam_parser.service.blueprints.api import ApiBlueprint
from sciencebeam_parser.resources.default_config import DEFAULT_CONFIG_FILE
LOGGER = logging.getLogger(__name__)
def create_app_for_parser(
sciencebeam_parser: ScienceBeamParser
):
app = Flask(__name__)
index = IndexBlueprint()
app.register_blueprint(index, url_prefix='/')
api = ApiBlueprint(sciencebeam_parser)
app.register_blueprint(api, url_prefix='/api')
return app
def create_app_for_config(config: AppConfig):
return create_app_for_parser(
ScienceBeamParser.from_config(config)
)
def create_app(config: AppConfig):
return create_app_for_config(config)
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument(
'--host', required=False,
help='Host to bind server to.'
)
parser.add_argument(
'--port', type=int, default=8080,
help='The port to listen to.'
)
parsed_args = parser.parse_args(argv)
return parsed_args
def main(argv=None):
args = parse_args(argv)
config = AppConfig.load_yaml(DEFAULT_CONFIG_FILE).apply_environment_variables()
logging_config = config.get('logging')
if logging_config:
for handler_config in logging_config.get('handlers', {}).values():
filename = handler_config.get('filename')
if not filename:
continue
dirname = os.path.dirname(filename)
if dirname:
os.makedirs(dirname, exist_ok=True)
try:
dictConfig(logging_config)
except ValueError:
LOGGER.info('logging_config: %r', logging_config)
raise
LOGGER.info('app config: %s', config)
app = create_app_for_config(config)
app.run(port=args.port, host=args.host, threaded=True)
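# Illustrative sketch (not part of the original module): building the Flask
# app without calling main(), e.g. to serve it through an external WSGI
# server. Loading DEFAULT_CONFIG_FILE and applying environment variables
# mirrors what main() does above; serving via gunicorn (as hinted in the
# comment below) is an assumed deployment choice, not something this module
# prescribes.
#   gunicorn 'wsgi:app'   # assuming a wsgi.py module exposing the app below
def _example_create_wsgi_app() -> Flask:
    config = AppConfig.load_yaml(DEFAULT_CONFIG_FILE).apply_environment_variables()
    return create_app(config)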
if __name__ == "__main__":
logging.basicConfig(level='INFO')
main()
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/service/server.py
|
server.py
|
import argparse
import logging
import os
from logging.config import dictConfig
from flask import Flask
from sciencebeam_parser.app.parser import ScienceBeamParser
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.service.blueprints.index import IndexBlueprint
from sciencebeam_parser.service.blueprints.api import ApiBlueprint
from sciencebeam_parser.resources.default_config import DEFAULT_CONFIG_FILE
LOGGER = logging.getLogger(__name__)
def create_app_for_parser(
sciencebeam_parser: ScienceBeamParser
):
app = Flask(__name__)
index = IndexBlueprint()
app.register_blueprint(index, url_prefix='/')
api = ApiBlueprint(sciencebeam_parser)
app.register_blueprint(api, url_prefix='/api')
return app
def create_app_for_config(config: AppConfig):
return create_app_for_parser(
ScienceBeamParser.from_config(config)
)
def create_app(config: AppConfig):
return create_app_for_config(config)
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument(
'--host', required=False,
help='Host to bind server to.'
)
parser.add_argument(
'--port', type=int, default=8080,
help='The port to listen to.'
)
parsed_args = parser.parse_args(argv)
return parsed_args
def main(argv=None):
args = parse_args(argv)
config = AppConfig.load_yaml(DEFAULT_CONFIG_FILE).apply_environment_variables()
logging_config = config.get('logging')
if logging_config:
for handler_config in logging_config.get('handlers', {}).values():
filename = handler_config.get('filename')
if not filename:
continue
dirname = os.path.dirname(filename)
if dirname:
os.makedirs(dirname, exist_ok=True)
try:
dictConfig(logging_config)
except ValueError:
LOGGER.info('logging_config: %r', logging_config)
raise
LOGGER.info('app config: %s', config)
app = create_app_for_config(config)
app.run(port=args.port, host=args.host, threaded=True)
if __name__ == "__main__":
logging.basicConfig(level='INFO')
main()
| 0.290981 | 0.063366 |
import logging
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from pathlib import Path
from typing import Iterable, Iterator, List, Type, TypeVar
from flask import Blueprint, jsonify, request, Response, url_for
from flask.helpers import send_file
from lxml import etree
from sciencebeam_trainer_delft.sequence_labelling.reader import load_data_crf_lines
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
iter_format_tag_result
)
from werkzeug.exceptions import BadRequest
from sciencebeam_parser.app.parser import (
BadRequestScienceBeamParserError,
ScienceBeamParser,
ScienceBeamParserSession,
ScienceBeamParserSessionSource
)
from sciencebeam_parser.external.pdfalto.wrapper import PdfAltoWrapper
from sciencebeam_parser.external.pdfalto.parser import parse_alto_root
from sciencebeam_parser.models.data import AppFeaturesContext, DocumentFeaturesContext
from sciencebeam_parser.models.model import Model
from sciencebeam_parser.document.layout_document import LayoutDocument
from sciencebeam_parser.document.semantic_document import (
SemanticMixedContentWrapper,
SemanticRawAffiliationAddress,
SemanticRawAuthors,
SemanticRawFigure,
SemanticRawReference,
SemanticRawReferenceText,
SemanticRawTable,
SemanticReference,
T_SemanticContentWrapper,
iter_by_semantic_type_recursively
)
from sciencebeam_parser.processors.fulltext.config import RequestFieldNames
from sciencebeam_parser.utils.data_wrapper import (
get_data_wrapper_with_improved_media_type_or_filename
)
from sciencebeam_parser.utils.flask import (
assert_and_get_first_accept_matching_media_type,
get_bool_request_arg,
get_int_request_arg,
get_required_post_data,
get_required_post_data_wrapper,
get_str_request_arg
)
from sciencebeam_parser.utils.media_types import (
MediaTypes
)
from sciencebeam_parser.utils.text import normalize_text, parse_comma_separated_value
from sciencebeam_parser.utils.tokenizer import get_tokenized_tokens
from sciencebeam_parser.processors.fulltext.api import (
FullTextProcessorConfig
)
LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
class RequestArgs:
FIRST_PAGE = 'first_page'
LAST_PAGE = 'last_page'
OUTPUT_FORMAT = 'output_format'
NO_USE_SEGMENTATION = 'no_use_segmentation'
INCLUDES = 'includes'
class ModelOutputFormats:
RAW_DATA = 'raw_data'
DEFAULT_MODEL_OUTPUT_FORMAT = TagOutputFormats.JSON
VALID_MODEL_OUTPUT_FORMATS = {
ModelOutputFormats.RAW_DATA,
TagOutputFormats.JSON,
TagOutputFormats.DATA,
TagOutputFormats.XML
}
def _get_file_upload_form(title: str):
return (
'''
<!doctype html>
<title>{title}</title>
<h1>{title}</h1>
<form target=_blank method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=Upload>
</form>
'''
).format(title=title)
def normalize_and_tokenize_text(text: str) -> List[str]:
return get_tokenized_tokens(
normalize_text(text),
keep_whitespace=True
)
def normalize_layout_document(
layout_document: LayoutDocument,
**kwargs
) -> LayoutDocument:
return (
layout_document
.retokenize(tokenize_fn=normalize_and_tokenize_text)
.remove_empty_blocks(**kwargs)
)
class ModelNestedBluePrint:
def __init__(
self,
name: str,
model: Model,
pdfalto_wrapper: PdfAltoWrapper,
app_features_context: AppFeaturesContext,
model_name: str = 'dummy'
):
self.name = name
self.model = model
self.pdfalto_wrapper = pdfalto_wrapper
self.app_features_context = app_features_context
self.model_name = model_name
def add_routes(self, parent_blueprint: Blueprint, url_prefix: str):
parent_blueprint.route(
url_prefix, methods=['GET'], endpoint=f'{url_prefix}_get'
)(self.handle_get)
parent_blueprint.route(
url_prefix, methods=['POST'], endpoint=f'{url_prefix}_post'
)(self.handle_post)
def handle_get(self):
return _get_file_upload_form(f'{self.name} Model: convert PDF to data')
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
return [layout_document]
def handle_post(self): # pylint: disable=too-many-locals
data = get_required_post_data()
with TemporaryDirectory(suffix='-request') as temp_dir:
temp_path = Path(temp_dir)
pdf_path = temp_path / 'test.pdf'
output_path = temp_path / 'test.lxml'
first_page = get_int_request_arg(RequestArgs.FIRST_PAGE)
last_page = get_int_request_arg(RequestArgs.LAST_PAGE)
output_format = (
request.args.get(RequestArgs.OUTPUT_FORMAT) or DEFAULT_MODEL_OUTPUT_FORMAT
)
assert output_format in VALID_MODEL_OUTPUT_FORMATS, \
f'{output_format} not in {VALID_MODEL_OUTPUT_FORMATS}'
pdf_path.write_bytes(data)
self.pdfalto_wrapper.convert_pdf_to_pdfalto_xml(
str(pdf_path),
str(output_path),
first_page=first_page,
last_page=last_page
)
xml_content = output_path.read_bytes()
root = etree.fromstring(xml_content)
layout_document_iterable = self.iter_filter_layout_document(
normalize_layout_document(
parse_alto_root(root)
)
)
data_generator = self.model.get_data_generator(
DocumentFeaturesContext(
app_features_context=self.app_features_context
)
)
data_lines = data_generator.iter_data_lines_for_layout_documents(
layout_document_iterable
)
response_type = 'text/plain'
if output_format == ModelOutputFormats.RAW_DATA:
response_content = '\n'.join(data_lines) + '\n'
else:
texts, features = load_data_crf_lines(data_lines)
LOGGER.info('texts length: %d', len(texts))
if not len(texts): # pylint: disable=len-as-condition
tag_result = []
else:
texts = texts.tolist()
tag_result = self.model.predict_labels(
texts=texts, features=features, output_format=None
)
LOGGER.debug('tag_result: %s', tag_result)
formatted_tag_result_iterable = iter_format_tag_result(
tag_result,
output_format=output_format,
expected_tag_result=None,
texts=texts,
features=features,
model_name=self.model_name
)
response_content = ''.join(formatted_tag_result_iterable)
if output_format == TagOutputFormats.JSON:
response_type = 'application/json'
LOGGER.debug('response_content: %r', response_content)
headers = None
return Response(response_content, headers=headers, mimetype=response_type)
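# Illustrative sketch (not part of the original module): calling one of the
# per-model debug routes that ModelNestedBluePrint.add_routes registers, here
# assumed to be mounted under /api/models/segmentation as done further below
# in ApiBlueprint and served on localhost:8080. The requests dependency, the
# host/port and the sample file name are assumptions; the multipart 'file'
# field matches the upload form above, and the query arguments come from
# RequestArgs / VALID_MODEL_OUTPUT_FORMATS.
def _example_post_pdf_to_model_endpoint(pdf_path: str = 'example.pdf') -> str:
    import requests  # assumed to be available in the calling environment
    with open(pdf_path, 'rb') as pdf_file:
        response = requests.post(
            'http://localhost:8080/api/models/segmentation',
            params={
                'output_format': TagOutputFormats.DATA,
                'first_page': 1,
                'last_page': 3
            },
            files={'file': pdf_file}
        )
    response.raise_for_status()
    return response.text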
class SegmentedModelNestedBluePrint(ModelNestedBluePrint):
def __init__(
self,
*args,
segmentation_model: Model,
segmentation_labels: List[str],
**kwargs
):
super().__init__(*args, **kwargs)
self.segmentation_model = segmentation_model
self.segmentation_labels = segmentation_labels
def iter_filter_layout_document_by_segmentation_labels(
self,
layout_document: LayoutDocument,
segmentation_labels: List[str]
) -> Iterable[LayoutDocument]:
assert self.segmentation_model is not None
segmentation_label_result = (
self.segmentation_model.get_label_layout_document_result(
layout_document,
app_features_context=self.app_features_context
)
)
for segmentation_label in segmentation_labels:
layout_document = segmentation_label_result.get_filtered_document_by_label(
segmentation_label
).remove_empty_blocks()
if not layout_document:
LOGGER.info(
'empty document for segmentation label %r, available labels: %r',
segmentation_label,
segmentation_label_result.get_available_labels()
)
continue
yield layout_document
def filter_layout_document_by_segmentation_label(
self,
layout_document: LayoutDocument,
segmentation_label: str
) -> LayoutDocument:
for filtered_layout_document in self.iter_filter_layout_document_by_segmentation_labels(
layout_document,
segmentation_labels=[segmentation_label]
):
return filtered_layout_document
return LayoutDocument(pages=[])
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
if get_bool_request_arg(RequestArgs.NO_USE_SEGMENTATION, default_value=False):
return [layout_document]
return self.iter_filter_layout_document_by_segmentation_labels(
layout_document, segmentation_labels=self.segmentation_labels
)
class NameHeaderModelNestedBluePrint(SegmentedModelNestedBluePrint):
def __init__(
self,
*args,
header_model: Model,
merge_raw_authors: bool,
**kwargs
):
super().__init__(*args, **kwargs)
self.header_model = header_model
self.merge_raw_authors = merge_raw_authors
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
header_layout_document = self.filter_layout_document_by_segmentation_label(
layout_document, '<header>'
)
labeled_layout_tokens = self.header_model.predict_labels_for_layout_document(
header_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
semantic_raw_authors_list = list(
SemanticMixedContentWrapper(list(
self.header_model.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)).iter_by_type(SemanticRawAuthors)
)
LOGGER.info('semantic_raw_authors_list count: %d', len(semantic_raw_authors_list))
LOGGER.info('merge_raw_authors: %s', self.merge_raw_authors)
if self.merge_raw_authors:
return [
LayoutDocument.for_blocks([
block
for semantic_raw_authors in semantic_raw_authors_list
for block in semantic_raw_authors.iter_blocks()
]).remove_empty_blocks()
]
return [
LayoutDocument.for_blocks(
list(semantic_raw_authors.iter_blocks())
).remove_empty_blocks()
for semantic_raw_authors in semantic_raw_authors_list
]
class AffiliationAddressModelNestedBluePrint(SegmentedModelNestedBluePrint):
def __init__(self, *args, header_model: Model, **kwargs):
super().__init__(*args, **kwargs)
self.header_model = header_model
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
header_layout_document = self.filter_layout_document_by_segmentation_label(
layout_document, '<header>'
)
labeled_layout_tokens = self.header_model.predict_labels_for_layout_document(
header_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
semantic_raw_aff_address_list = list(
SemanticMixedContentWrapper(list(
self.header_model.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)).iter_by_type(SemanticRawAffiliationAddress)
)
LOGGER.info('semantic_raw_aff_address_list count: %d', len(semantic_raw_aff_address_list))
return [
LayoutDocument.for_blocks(
list(semantic_raw_aff_address.iter_blocks())
).remove_empty_blocks()
for semantic_raw_aff_address in semantic_raw_aff_address_list
]
class CitationModelNestedBluePrint(SegmentedModelNestedBluePrint):
def __init__(self, *args, reference_segmenter_model: Model, **kwargs):
super().__init__(*args, **kwargs)
self.reference_segmenter_model = reference_segmenter_model
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
references_layout_document = self.filter_layout_document_by_segmentation_label(
layout_document, '<references>'
)
labeled_layout_tokens = self.reference_segmenter_model.predict_labels_for_layout_document(
references_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
semantic_raw_references = list(
SemanticMixedContentWrapper(list(
self.reference_segmenter_model.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)).iter_by_type(SemanticRawReference)
)
LOGGER.info('semantic_raw_references count: %d', len(semantic_raw_references))
return [
LayoutDocument.for_blocks(
[semantic_raw_reference.view_by_type(SemanticRawReferenceText).merged_block]
).remove_empty_blocks()
for semantic_raw_reference in semantic_raw_references
]
class NameCitationModelNestedBluePrint(SegmentedModelNestedBluePrint):
def __init__(
self,
*args,
reference_segmenter_model: Model,
citation_model: Model,
**kwargs
):
super().__init__(*args, **kwargs)
self.reference_segmenter_model = reference_segmenter_model
self.citation_model = citation_model
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
references_layout_document = self.filter_layout_document_by_segmentation_label(
layout_document, '<references>'
)
labeled_layout_tokens = self.reference_segmenter_model.predict_labels_for_layout_document(
references_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
semantic_raw_references = list(
SemanticMixedContentWrapper(list(
self.reference_segmenter_model.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)).iter_by_type(SemanticRawReference)
)
LOGGER.info('semantic_raw_references count: %d', len(semantic_raw_references))
raw_reference_documents = [
LayoutDocument.for_blocks(
[semantic_raw_reference.view_by_type(SemanticRawReferenceText).merged_block]
).remove_empty_blocks()
for semantic_raw_reference in semantic_raw_references
]
citation_labeled_layout_tokens_list = (
self.citation_model.predict_labels_for_layout_documents(
raw_reference_documents,
app_features_context=self.app_features_context
)
)
raw_authors = [
raw_author
for citation_labeled_layout_tokens in citation_labeled_layout_tokens_list
for ref in (
self.citation_model.iter_semantic_content_for_labeled_layout_tokens(
citation_labeled_layout_tokens
)
)
if isinstance(ref, SemanticReference)
for raw_author in ref.iter_by_type(SemanticRawAuthors)
]
return [
LayoutDocument.for_blocks([raw_author.merged_block]).remove_empty_blocks()
for raw_author in raw_authors
]
class FullTextChildModelNestedBluePrint(SegmentedModelNestedBluePrint):
def __init__(
self,
*args,
fulltext_model: Model,
semantic_type: Type[T_SemanticContentWrapper],
**kwargs
):
super().__init__(*args, **kwargs)
self.fulltext_model = fulltext_model
self.semantic_type = semantic_type
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
fulltext_layout_documents = list(self.iter_filter_layout_document_by_segmentation_labels(
layout_document, self.segmentation_labels
))
fulltext_labeled_layout_tokens_list = (
self.fulltext_model.predict_labels_for_layout_documents(
fulltext_layout_documents,
app_features_context=self.app_features_context
)
)
LOGGER.debug('fulltext_labeled_layout_tokens_list: %r', fulltext_labeled_layout_tokens_list)
        semantic_content_list = [
            semantic_content
            for fulltext_labeled_layout_tokens in fulltext_labeled_layout_tokens_list
            for semantic_content in iter_by_semantic_type_recursively(
                self.fulltext_model.iter_semantic_content_for_labeled_layout_tokens(
                    fulltext_labeled_layout_tokens
                ),
                self.semantic_type
            )
        ]
        LOGGER.debug('semantic_content_list: %s', semantic_content_list)
        return [
            LayoutDocument.for_blocks([semantic_content.merged_block]).remove_empty_blocks()
            for semantic_content in semantic_content_list
        ]
class FigureModelNestedBluePrint(FullTextChildModelNestedBluePrint):
def __init__(self, *args, **kwargs):
super().__init__(*args, semantic_type=SemanticRawFigure, **kwargs)
class TableModelNestedBluePrint(FullTextChildModelNestedBluePrint):
def __init__(self, *args, **kwargs):
super().__init__(*args, semantic_type=SemanticRawTable, **kwargs)
class ApiBlueprint(Blueprint):
def __init__(
self,
sciencebeam_parser: ScienceBeamParser
):
super().__init__('api', __name__)
LOGGER.debug('sciencebeam_parser: %r', sciencebeam_parser)
self.sciencebeam_parser = sciencebeam_parser
self.route('/')(self.api_root)
self.route("/pdfalto", methods=['GET'])(self.pdfalto_form)
self.route("/pdfalto", methods=['POST'])(self.pdfalto)
self.route("/processHeaderDocument", methods=['GET'])(
self.process_header_document_api_form
)
self.route("/processHeaderDocument", methods=['POST'])(
self.process_header_document_api
)
self.route("/processFulltextDocument", methods=['GET'])(
self.process_fulltext_document_api_form
)
self.route("/processFulltextDocument", methods=['POST'])(
self.process_fulltext_document_api
)
self.route("/processReferences", methods=['GET'])(
self.process_references_api_form
)
self.route("/processReferences", methods=['POST'])(
self.process_references_api
)
self.route("/processFulltextAssetDocument", methods=['GET'])(
self.process_pdf_to_tei_assets_zip_form
)
self.route("/processFulltextAssetDocument", methods=['POST'])(
self.process_pdf_to_tei_assets_zip
)
self.route("/convert", methods=['GET'])(self.process_convert_api_form)
self.route("/convert", methods=['POST'])(self.process_convert_api)
self.pdfalto_wrapper = sciencebeam_parser.pdfalto_wrapper
self.app_context = sciencebeam_parser.app_context
self.fulltext_processor_config = sciencebeam_parser.fulltext_processor_config
self.fulltext_models = sciencebeam_parser.fulltext_models
self.app_features_context = sciencebeam_parser.app_features_context
fulltext_models = self.fulltext_models
app_features_context = self.app_features_context
ModelNestedBluePrint(
'Segmentation',
model=fulltext_models.segmentation_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context
).add_routes(self, '/models/segmentation')
SegmentedModelNestedBluePrint(
'Header',
model=fulltext_models.header_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<header>']
).add_routes(self, '/models/header')
NameHeaderModelNestedBluePrint(
'Name Header',
model=fulltext_models.name_header_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<header>'],
header_model=fulltext_models.header_model,
merge_raw_authors=self.fulltext_processor_config.merge_raw_authors
).add_routes(self, '/models/name-header')
AffiliationAddressModelNestedBluePrint(
'Affiliation Address',
model=fulltext_models.affiliation_address_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<header>'],
header_model=fulltext_models.header_model
).add_routes(self, '/models/affiliation-address')
fulltext_segmentation_labels = ['<body>', '<acknowledgement>', '<annex>']
SegmentedModelNestedBluePrint(
'FullText',
model=fulltext_models.fulltext_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=fulltext_segmentation_labels
).add_routes(self, '/models/fulltext')
FigureModelNestedBluePrint(
'Figure',
model=fulltext_models.figure_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=fulltext_segmentation_labels,
fulltext_model=fulltext_models.fulltext_model
).add_routes(self, '/models/figure')
TableModelNestedBluePrint(
'Table',
model=fulltext_models.table_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=fulltext_segmentation_labels,
fulltext_model=fulltext_models.fulltext_model
).add_routes(self, '/models/table')
SegmentedModelNestedBluePrint(
'Reference Segmenter',
model=fulltext_models.reference_segmenter_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<references>']
).add_routes(self, '/models/reference-segmenter')
CitationModelNestedBluePrint(
'Citation (Reference)',
model=fulltext_models.citation_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<references>'],
reference_segmenter_model=fulltext_models.reference_segmenter_model
).add_routes(self, '/models/citation')
NameCitationModelNestedBluePrint(
            'Name Citation',
model=fulltext_models.name_citation_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<references>'],
reference_segmenter_model=fulltext_models.reference_segmenter_model,
citation_model=fulltext_models.citation_model
).add_routes(self, '/models/name-citation')
def api_root(self):
return jsonify({
'links': {
'pdfalto': url_for('.pdfalto')
}
})
def pdfalto_form(self):
return _get_file_upload_form('PdfAlto convert PDF to LXML')
@contextmanager
def _get_new_sciencebeam_parser_session(
self, **kwargs
) -> Iterator[ScienceBeamParserSession]:
with self.sciencebeam_parser.get_new_session(**kwargs) as session:
first_page = get_int_request_arg(RequestArgs.FIRST_PAGE)
last_page = get_int_request_arg(RequestArgs.LAST_PAGE)
session.document_request_parameters.first_page = first_page
session.document_request_parameters.last_page = last_page
yield session
@contextmanager
def _get_new_sciencebeam_parser_source(
self, **kwargs
) -> Iterator[ScienceBeamParserSessionSource]:
data_wrapper = get_data_wrapper_with_improved_media_type_or_filename(
get_required_post_data_wrapper()
)
with self._get_new_sciencebeam_parser_session(**kwargs) as session:
source_path = session.temp_path / 'source.file'
source_path.write_bytes(data_wrapper.data)
yield session.get_source(
source_path=str(source_path),
source_media_type=data_wrapper.media_type
)
def pdfalto(self):
response_type = 'text/xml'
with self._get_new_sciencebeam_parser_source() as source:
return send_file(
source.get_local_file_for_response_media_type(
MediaTypes.ALTO_XML
),
mimetype=response_type
)
def _process_pdf_to_response_media_type(
self,
response_media_type: str,
fulltext_processor_config: FullTextProcessorConfig
):
try:
LOGGER.debug('creating session source')
with self._get_new_sciencebeam_parser_source(
fulltext_processor_config=fulltext_processor_config
) as source:
LOGGER.debug('created session source: %r', source)
actual_response_media_type = response_media_type
if response_media_type in {
MediaTypes.TEI_XML, MediaTypes.JATS_XML
}:
actual_response_media_type = MediaTypes.XML
elif response_media_type in {
MediaTypes.TEI_ZIP, MediaTypes.JATS_ZIP
}:
actual_response_media_type = MediaTypes.ZIP
result_file = source.get_local_file_for_response_media_type(
response_media_type
)
LOGGER.debug('result_file: %r', result_file)
assert isinstance(result_file, str)
return send_file(
result_file,
mimetype=actual_response_media_type
)
except BadRequestScienceBeamParserError as exc:
LOGGER.warning('bad request: %r', exc)
return BadRequest(str(exc))
def process_header_document_api_form(self):
return _get_file_upload_form('Convert PDF to TEI (Header)')
def process_header_document_api(self):
response_media_type = assert_and_get_first_accept_matching_media_type(
[MediaTypes.TEI_XML, MediaTypes.JATS_XML]
)
return self._process_pdf_to_response_media_type(
response_media_type,
fulltext_processor_config=self.fulltext_processor_config.get_for_header_document()
)
def process_fulltext_document_api_form(self):
return _get_file_upload_form('Convert PDF to TEI (Full Text)')
def process_fulltext_document_api(self):
response_media_type = assert_and_get_first_accept_matching_media_type(
[MediaTypes.TEI_XML, MediaTypes.JATS_XML]
)
return self._process_pdf_to_response_media_type(
response_media_type,
fulltext_processor_config=self.fulltext_processor_config
)
def process_references_api_form(self):
return _get_file_upload_form('Convert PDF to TEI (References)')
def process_references_api(self):
response_media_type = assert_and_get_first_accept_matching_media_type(
[MediaTypes.TEI_XML, MediaTypes.JATS_XML]
)
return self._process_pdf_to_response_media_type(
response_media_type,
fulltext_processor_config=self.fulltext_processor_config.get_for_requested_field_names({
RequestFieldNames.REFERENCES
})
)
def process_pdf_to_tei_assets_zip_form(self):
return _get_file_upload_form('Convert PDF to TEI Assets ZIP')
def process_pdf_to_tei_assets_zip(self):
response_media_type = assert_and_get_first_accept_matching_media_type(
[MediaTypes.TEI_ZIP, MediaTypes.JATS_ZIP]
)
return self._process_pdf_to_response_media_type(
response_media_type,
fulltext_processor_config=self.fulltext_processor_config
)
def process_convert_api_form(self):
return _get_file_upload_form('Convert API')
def process_convert_api(self):
response_media_type = assert_and_get_first_accept_matching_media_type(
[
MediaTypes.JATS_XML, MediaTypes.TEI_XML,
MediaTypes.JATS_ZIP, MediaTypes.TEI_ZIP,
MediaTypes.PDF
]
)
includes_list = parse_comma_separated_value(
get_str_request_arg(RequestArgs.INCLUDES) or ''
)
LOGGER.info('includes_list: %r', includes_list)
fulltext_processor_config = self.fulltext_processor_config.get_for_requested_field_names(
set(includes_list)
)
LOGGER.info('fulltext_processor_config: %r', fulltext_processor_config)
return self._process_pdf_to_response_media_type(
response_media_type,
fulltext_processor_config=fulltext_processor_config
)
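# Illustrative sketch (not part of the original module): exercising the
# document-level /convert route registered by ApiBlueprint above from a
# client, assuming the service is listening on localhost:8080 and that the
# blueprint is mounted under /api (as in the service server module). The
# requests dependency and the sample file name are assumptions; the endpoint
# path, the 'includes' argument and the media type constant come from this
# module.
def _example_convert_pdf_via_api(pdf_path: str = 'example.pdf') -> str:
    import requests  # assumed client-side dependency
    with open(pdf_path, 'rb') as pdf_file:
        response = requests.post(
            'http://localhost:8080/api/convert',
            params={'includes': RequestFieldNames.REFERENCES},
            files={'file': pdf_file},
            headers={'Accept': MediaTypes.TEI_XML}
        )
    response.raise_for_status()
    return response.text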
|
sciencebeam-parser
|
/sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/service/blueprints/api.py
|
api.py
|
import logging
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from pathlib import Path
from typing import Iterable, Iterator, List, Type, TypeVar
from flask import Blueprint, jsonify, request, Response, url_for
from flask.helpers import send_file
from lxml import etree
from sciencebeam_trainer_delft.sequence_labelling.reader import load_data_crf_lines
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
iter_format_tag_result
)
from werkzeug.exceptions import BadRequest
from sciencebeam_parser.app.parser import (
BadRequestScienceBeamParserError,
ScienceBeamParser,
ScienceBeamParserSession,
ScienceBeamParserSessionSource
)
from sciencebeam_parser.external.pdfalto.wrapper import PdfAltoWrapper
from sciencebeam_parser.external.pdfalto.parser import parse_alto_root
from sciencebeam_parser.models.data import AppFeaturesContext, DocumentFeaturesContext
from sciencebeam_parser.models.model import Model
from sciencebeam_parser.document.layout_document import LayoutDocument
from sciencebeam_parser.document.semantic_document import (
SemanticMixedContentWrapper,
SemanticRawAffiliationAddress,
SemanticRawAuthors,
SemanticRawFigure,
SemanticRawReference,
SemanticRawReferenceText,
SemanticRawTable,
SemanticReference,
T_SemanticContentWrapper,
iter_by_semantic_type_recursively
)
from sciencebeam_parser.processors.fulltext.config import RequestFieldNames
from sciencebeam_parser.utils.data_wrapper import (
get_data_wrapper_with_improved_media_type_or_filename
)
from sciencebeam_parser.utils.flask import (
assert_and_get_first_accept_matching_media_type,
get_bool_request_arg,
get_int_request_arg,
get_required_post_data,
get_required_post_data_wrapper,
get_str_request_arg
)
from sciencebeam_parser.utils.media_types import (
MediaTypes
)
from sciencebeam_parser.utils.text import normalize_text, parse_comma_separated_value
from sciencebeam_parser.utils.tokenizer import get_tokenized_tokens
from sciencebeam_parser.processors.fulltext.api import (
FullTextProcessorConfig
)
LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
class RequestArgs:
FIRST_PAGE = 'first_page'
LAST_PAGE = 'last_page'
OUTPUT_FORMAT = 'output_format'
NO_USE_SEGMENTATION = 'no_use_segmentation'
INCLUDES = 'includes'
class ModelOutputFormats:
RAW_DATA = 'raw_data'
DEFAULT_MODEL_OUTPUT_FORMAT = TagOutputFormats.JSON
VALID_MODEL_OUTPUT_FORMATS = {
ModelOutputFormats.RAW_DATA,
TagOutputFormats.JSON,
TagOutputFormats.DATA,
TagOutputFormats.XML
}
def _get_file_upload_form(title: str):
return (
'''
<!doctype html>
<title>{title}</title>
<h1>{title}</h1>
<form target=_blank method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=Upload>
</form>
'''
).format(title=title)
def normalize_and_tokenize_text(text: str) -> List[str]:
return get_tokenized_tokens(
normalize_text(text),
keep_whitespace=True
)
def normalize_layout_document(
layout_document: LayoutDocument,
**kwargs
) -> LayoutDocument:
return (
layout_document
.retokenize(tokenize_fn=normalize_and_tokenize_text)
.remove_empty_blocks(**kwargs)
)
class ModelNestedBluePrint:
def __init__(
self,
name: str,
model: Model,
pdfalto_wrapper: PdfAltoWrapper,
app_features_context: AppFeaturesContext,
model_name: str = 'dummy'
):
self.name = name
self.model = model
self.pdfalto_wrapper = pdfalto_wrapper
self.app_features_context = app_features_context
self.model_name = model_name
def add_routes(self, parent_blueprint: Blueprint, url_prefix: str):
parent_blueprint.route(
url_prefix, methods=['GET'], endpoint=f'{url_prefix}_get'
)(self.handle_get)
parent_blueprint.route(
url_prefix, methods=['POST'], endpoint=f'{url_prefix}_post'
)(self.handle_post)
def handle_get(self):
return _get_file_upload_form(f'{self.name} Model: convert PDF to data')
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
return [layout_document]
def handle_post(self): # pylint: disable=too-many-locals
data = get_required_post_data()
with TemporaryDirectory(suffix='-request') as temp_dir:
temp_path = Path(temp_dir)
pdf_path = temp_path / 'test.pdf'
output_path = temp_path / 'test.lxml'
first_page = get_int_request_arg(RequestArgs.FIRST_PAGE)
last_page = get_int_request_arg(RequestArgs.LAST_PAGE)
output_format = (
request.args.get(RequestArgs.OUTPUT_FORMAT) or DEFAULT_MODEL_OUTPUT_FORMAT
)
assert output_format in VALID_MODEL_OUTPUT_FORMATS, \
f'{output_format} not in {VALID_MODEL_OUTPUT_FORMATS}'
pdf_path.write_bytes(data)
self.pdfalto_wrapper.convert_pdf_to_pdfalto_xml(
str(pdf_path),
str(output_path),
first_page=first_page,
last_page=last_page
)
xml_content = output_path.read_bytes()
root = etree.fromstring(xml_content)
layout_document_iterable = self.iter_filter_layout_document(
normalize_layout_document(
parse_alto_root(root)
)
)
data_generator = self.model.get_data_generator(
DocumentFeaturesContext(
app_features_context=self.app_features_context
)
)
data_lines = data_generator.iter_data_lines_for_layout_documents(
layout_document_iterable
)
response_type = 'text/plain'
if output_format == ModelOutputFormats.RAW_DATA:
response_content = '\n'.join(data_lines) + '\n'
else:
texts, features = load_data_crf_lines(data_lines)
LOGGER.info('texts length: %d', len(texts))
if not len(texts): # pylint: disable=len-as-condition
tag_result = []
else:
texts = texts.tolist()
tag_result = self.model.predict_labels(
texts=texts, features=features, output_format=None
)
LOGGER.debug('tag_result: %s', tag_result)
formatted_tag_result_iterable = iter_format_tag_result(
tag_result,
output_format=output_format,
expected_tag_result=None,
texts=texts,
features=features,
model_name=self.model_name
)
response_content = ''.join(formatted_tag_result_iterable)
if output_format == TagOutputFormats.JSON:
response_type = 'application/json'
LOGGER.debug('response_content: %r', response_content)
headers = None
return Response(response_content, headers=headers, mimetype=response_type)
class SegmentedModelNestedBluePrint(ModelNestedBluePrint):
def __init__(
self,
*args,
segmentation_model: Model,
segmentation_labels: List[str],
**kwargs
):
super().__init__(*args, **kwargs)
self.segmentation_model = segmentation_model
self.segmentation_labels = segmentation_labels
def iter_filter_layout_document_by_segmentation_labels(
self,
layout_document: LayoutDocument,
segmentation_labels: List[str]
) -> Iterable[LayoutDocument]:
assert self.segmentation_model is not None
segmentation_label_result = (
self.segmentation_model.get_label_layout_document_result(
layout_document,
app_features_context=self.app_features_context
)
)
for segmentation_label in segmentation_labels:
layout_document = segmentation_label_result.get_filtered_document_by_label(
segmentation_label
).remove_empty_blocks()
if not layout_document:
LOGGER.info(
'empty document for segmentation label %r, available labels: %r',
segmentation_label,
segmentation_label_result.get_available_labels()
)
continue
yield layout_document
def filter_layout_document_by_segmentation_label(
self,
layout_document: LayoutDocument,
segmentation_label: str
) -> LayoutDocument:
for filtered_layout_document in self.iter_filter_layout_document_by_segmentation_labels(
layout_document,
segmentation_labels=[segmentation_label]
):
return filtered_layout_document
return LayoutDocument(pages=[])
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
if get_bool_request_arg(RequestArgs.NO_USE_SEGMENTATION, default_value=False):
return [layout_document]
return self.iter_filter_layout_document_by_segmentation_labels(
layout_document, segmentation_labels=self.segmentation_labels
)
class NameHeaderModelNestedBluePrint(SegmentedModelNestedBluePrint):
def __init__(
self,
*args,
header_model: Model,
merge_raw_authors: bool,
**kwargs
):
super().__init__(*args, **kwargs)
self.header_model = header_model
self.merge_raw_authors = merge_raw_authors
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
header_layout_document = self.filter_layout_document_by_segmentation_label(
layout_document, '<header>'
)
labeled_layout_tokens = self.header_model.predict_labels_for_layout_document(
header_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
semantic_raw_authors_list = list(
SemanticMixedContentWrapper(list(
self.header_model.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)).iter_by_type(SemanticRawAuthors)
)
LOGGER.info('semantic_raw_authors_list count: %d', len(semantic_raw_authors_list))
LOGGER.info('merge_raw_authors: %s', self.merge_raw_authors)
if self.merge_raw_authors:
return [
LayoutDocument.for_blocks([
block
for semantic_raw_authors in semantic_raw_authors_list
for block in semantic_raw_authors.iter_blocks()
]).remove_empty_blocks()
]
return [
LayoutDocument.for_blocks(
list(semantic_raw_authors.iter_blocks())
).remove_empty_blocks()
for semantic_raw_authors in semantic_raw_authors_list
]
class AffiliationAddressModelNestedBluePrint(SegmentedModelNestedBluePrint):
def __init__(self, *args, header_model: Model, **kwargs):
super().__init__(*args, **kwargs)
self.header_model = header_model
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
header_layout_document = self.filter_layout_document_by_segmentation_label(
layout_document, '<header>'
)
labeled_layout_tokens = self.header_model.predict_labels_for_layout_document(
header_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
semantic_raw_aff_address_list = list(
SemanticMixedContentWrapper(list(
self.header_model.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)).iter_by_type(SemanticRawAffiliationAddress)
)
LOGGER.info('semantic_raw_aff_address_list count: %d', len(semantic_raw_aff_address_list))
return [
LayoutDocument.for_blocks(
list(semantic_raw_aff_address.iter_blocks())
).remove_empty_blocks()
for semantic_raw_aff_address in semantic_raw_aff_address_list
]
class CitationModelNestedBluePrint(SegmentedModelNestedBluePrint):
def __init__(self, *args, reference_segmenter_model: Model, **kwargs):
super().__init__(*args, **kwargs)
self.reference_segmenter_model = reference_segmenter_model
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
references_layout_document = self.filter_layout_document_by_segmentation_label(
layout_document, '<references>'
)
labeled_layout_tokens = self.reference_segmenter_model.predict_labels_for_layout_document(
references_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
semantic_raw_references = list(
SemanticMixedContentWrapper(list(
self.reference_segmenter_model.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)).iter_by_type(SemanticRawReference)
)
LOGGER.info('semantic_raw_references count: %d', len(semantic_raw_references))
return [
LayoutDocument.for_blocks(
[semantic_raw_reference.view_by_type(SemanticRawReferenceText).merged_block]
).remove_empty_blocks()
for semantic_raw_reference in semantic_raw_references
]
class NameCitationModelNestedBluePrint(SegmentedModelNestedBluePrint):
def __init__(
self,
*args,
reference_segmenter_model: Model,
citation_model: Model,
**kwargs
):
super().__init__(*args, **kwargs)
self.reference_segmenter_model = reference_segmenter_model
self.citation_model = citation_model
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
references_layout_document = self.filter_layout_document_by_segmentation_label(
layout_document, '<references>'
)
labeled_layout_tokens = self.reference_segmenter_model.predict_labels_for_layout_document(
references_layout_document,
app_features_context=self.app_features_context
)
LOGGER.debug('labeled_layout_tokens: %r', labeled_layout_tokens)
semantic_raw_references = list(
SemanticMixedContentWrapper(list(
self.reference_segmenter_model.iter_semantic_content_for_labeled_layout_tokens(
labeled_layout_tokens
)
)).iter_by_type(SemanticRawReference)
)
LOGGER.info('semantic_raw_references count: %d', len(semantic_raw_references))
raw_reference_documents = [
LayoutDocument.for_blocks(
[semantic_raw_reference.view_by_type(SemanticRawReferenceText).merged_block]
).remove_empty_blocks()
for semantic_raw_reference in semantic_raw_references
]
citation_labeled_layout_tokens_list = (
self.citation_model.predict_labels_for_layout_documents(
raw_reference_documents,
app_features_context=self.app_features_context
)
)
raw_authors = [
raw_author
for citation_labeled_layout_tokens in citation_labeled_layout_tokens_list
for ref in (
self.citation_model.iter_semantic_content_for_labeled_layout_tokens(
citation_labeled_layout_tokens
)
)
if isinstance(ref, SemanticReference)
for raw_author in ref.iter_by_type(SemanticRawAuthors)
]
return [
LayoutDocument.for_blocks([raw_author.merged_block]).remove_empty_blocks()
for raw_author in raw_authors
]
class FullTextChildModelNestedBluePrint(SegmentedModelNestedBluePrint):
def __init__(
self,
*args,
fulltext_model: Model,
semantic_type: Type[T_SemanticContentWrapper],
**kwargs
):
super().__init__(*args, **kwargs)
self.fulltext_model = fulltext_model
self.semantic_type = semantic_type
def iter_filter_layout_document(
self, layout_document: LayoutDocument
) -> Iterable[LayoutDocument]:
fulltext_layout_documents = list(self.iter_filter_layout_document_by_segmentation_labels(
layout_document, self.segmentation_labels
))
fulltext_labeled_layout_tokens_list = (
self.fulltext_model.predict_labels_for_layout_documents(
fulltext_layout_documents,
app_features_context=self.app_features_context
)
)
LOGGER.debug('fulltext_labeled_layout_tokens_list: %r', fulltext_labeled_layout_tokens_list)
        semantic_content_list = [
            semantic_content
            for fulltext_labeled_layout_tokens in fulltext_labeled_layout_tokens_list
            for semantic_content in iter_by_semantic_type_recursively(
                self.fulltext_model.iter_semantic_content_for_labeled_layout_tokens(
                    fulltext_labeled_layout_tokens
                ),
                self.semantic_type
            )
        ]
        LOGGER.debug('semantic_content_list: %s', semantic_content_list)
        return [
            LayoutDocument.for_blocks([semantic_content.merged_block]).remove_empty_blocks()
            for semantic_content in semantic_content_list
        ]
class FigureModelNestedBluePrint(FullTextChildModelNestedBluePrint):
def __init__(self, *args, **kwargs):
super().__init__(*args, semantic_type=SemanticRawFigure, **kwargs)
class TableModelNestedBluePrint(FullTextChildModelNestedBluePrint):
def __init__(self, *args, **kwargs):
super().__init__(*args, semantic_type=SemanticRawTable, **kwargs)
class ApiBlueprint(Blueprint):
def __init__(
self,
sciencebeam_parser: ScienceBeamParser
):
super().__init__('api', __name__)
LOGGER.debug('sciencebeam_parser: %r', sciencebeam_parser)
self.sciencebeam_parser = sciencebeam_parser
self.route('/')(self.api_root)
self.route("/pdfalto", methods=['GET'])(self.pdfalto_form)
self.route("/pdfalto", methods=['POST'])(self.pdfalto)
self.route("/processHeaderDocument", methods=['GET'])(
self.process_header_document_api_form
)
self.route("/processHeaderDocument", methods=['POST'])(
self.process_header_document_api
)
self.route("/processFulltextDocument", methods=['GET'])(
self.process_fulltext_document_api_form
)
self.route("/processFulltextDocument", methods=['POST'])(
self.process_fulltext_document_api
)
self.route("/processReferences", methods=['GET'])(
self.process_references_api_form
)
self.route("/processReferences", methods=['POST'])(
self.process_references_api
)
self.route("/processFulltextAssetDocument", methods=['GET'])(
self.process_pdf_to_tei_assets_zip_form
)
self.route("/processFulltextAssetDocument", methods=['POST'])(
self.process_pdf_to_tei_assets_zip
)
self.route("/convert", methods=['GET'])(self.process_convert_api_form)
self.route("/convert", methods=['POST'])(self.process_convert_api)
self.pdfalto_wrapper = sciencebeam_parser.pdfalto_wrapper
self.app_context = sciencebeam_parser.app_context
self.fulltext_processor_config = sciencebeam_parser.fulltext_processor_config
self.fulltext_models = sciencebeam_parser.fulltext_models
self.app_features_context = sciencebeam_parser.app_features_context
fulltext_models = self.fulltext_models
app_features_context = self.app_features_context
ModelNestedBluePrint(
'Segmentation',
model=fulltext_models.segmentation_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context
).add_routes(self, '/models/segmentation')
SegmentedModelNestedBluePrint(
'Header',
model=fulltext_models.header_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<header>']
).add_routes(self, '/models/header')
NameHeaderModelNestedBluePrint(
'Name Header',
model=fulltext_models.name_header_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<header>'],
header_model=fulltext_models.header_model,
merge_raw_authors=self.fulltext_processor_config.merge_raw_authors
).add_routes(self, '/models/name-header')
AffiliationAddressModelNestedBluePrint(
'Affiliation Address',
model=fulltext_models.affiliation_address_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<header>'],
header_model=fulltext_models.header_model
).add_routes(self, '/models/affiliation-address')
fulltext_segmentation_labels = ['<body>', '<acknowledgement>', '<annex>']
SegmentedModelNestedBluePrint(
'FullText',
model=fulltext_models.fulltext_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=fulltext_segmentation_labels
).add_routes(self, '/models/fulltext')
FigureModelNestedBluePrint(
'Figure',
model=fulltext_models.figure_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=fulltext_segmentation_labels,
fulltext_model=fulltext_models.fulltext_model
).add_routes(self, '/models/figure')
TableModelNestedBluePrint(
'Table',
model=fulltext_models.table_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=fulltext_segmentation_labels,
fulltext_model=fulltext_models.fulltext_model
).add_routes(self, '/models/table')
SegmentedModelNestedBluePrint(
'Reference Segmenter',
model=fulltext_models.reference_segmenter_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<references>']
).add_routes(self, '/models/reference-segmenter')
CitationModelNestedBluePrint(
'Citation (Reference)',
model=fulltext_models.citation_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<references>'],
reference_segmenter_model=fulltext_models.reference_segmenter_model
).add_routes(self, '/models/citation')
NameCitationModelNestedBluePrint(
            'Name Citation',
model=fulltext_models.name_citation_model,
pdfalto_wrapper=self.pdfalto_wrapper,
app_features_context=app_features_context,
segmentation_model=fulltext_models.segmentation_model,
segmentation_labels=['<references>'],
reference_segmenter_model=fulltext_models.reference_segmenter_model,
citation_model=fulltext_models.citation_model
).add_routes(self, '/models/name-citation')
def api_root(self):
return jsonify({
'links': {
'pdfalto': url_for('.pdfalto')
}
})
def pdfalto_form(self):
return _get_file_upload_form('PdfAlto convert PDF to LXML')
@contextmanager
def _get_new_sciencebeam_parser_session(
self, **kwargs
) -> Iterator[ScienceBeamParserSession]:
with self.sciencebeam_parser.get_new_session(**kwargs) as session:
first_page = get_int_request_arg(RequestArgs.FIRST_PAGE)
last_page = get_int_request_arg(RequestArgs.LAST_PAGE)
session.document_request_parameters.first_page = first_page
session.document_request_parameters.last_page = last_page
yield session
@contextmanager
def _get_new_sciencebeam_parser_source(
self, **kwargs
) -> Iterator[ScienceBeamParserSessionSource]:
data_wrapper = get_data_wrapper_with_improved_media_type_or_filename(
get_required_post_data_wrapper()
)
with self._get_new_sciencebeam_parser_session(**kwargs) as session:
source_path = session.temp_path / 'source.file'
source_path.write_bytes(data_wrapper.data)
yield session.get_source(
source_path=str(source_path),
source_media_type=data_wrapper.media_type
)
def pdfalto(self):
response_type = 'text/xml'
with self._get_new_sciencebeam_parser_source() as source:
return send_file(
source.get_local_file_for_response_media_type(
MediaTypes.ALTO_XML
),
mimetype=response_type
)
def _process_pdf_to_response_media_type(
self,
response_media_type: str,
fulltext_processor_config: FullTextProcessorConfig
):
try:
LOGGER.debug('creating session source')
with self._get_new_sciencebeam_parser_source(
fulltext_processor_config=fulltext_processor_config
) as source:
LOGGER.debug('created session source: %r', source)
actual_response_media_type = response_media_type
if response_media_type in {
MediaTypes.TEI_XML, MediaTypes.JATS_XML
}:
actual_response_media_type = MediaTypes.XML
elif response_media_type in {
MediaTypes.TEI_ZIP, MediaTypes.JATS_ZIP
}:
actual_response_media_type = MediaTypes.ZIP
result_file = source.get_local_file_for_response_media_type(
response_media_type
)
LOGGER.debug('result_file: %r', result_file)
assert isinstance(result_file, str)
return send_file(
result_file,
mimetype=actual_response_media_type
)
except BadRequestScienceBeamParserError as exc:
LOGGER.warning('bad request: %r', exc)
return BadRequest(str(exc))
def process_header_document_api_form(self):
return _get_file_upload_form('Convert PDF to TEI (Header)')
def process_header_document_api(self):
response_media_type = assert_and_get_first_accept_matching_media_type(
[MediaTypes.TEI_XML, MediaTypes.JATS_XML]
)
return self._process_pdf_to_response_media_type(
response_media_type,
fulltext_processor_config=self.fulltext_processor_config.get_for_header_document()
)
def process_fulltext_document_api_form(self):
return _get_file_upload_form('Convert PDF to TEI (Full Text)')
def process_fulltext_document_api(self):
response_media_type = assert_and_get_first_accept_matching_media_type(
[MediaTypes.TEI_XML, MediaTypes.JATS_XML]
)
return self._process_pdf_to_response_media_type(
response_media_type,
fulltext_processor_config=self.fulltext_processor_config
)
def process_references_api_form(self):
return _get_file_upload_form('Convert PDF to TEI (References)')
def process_references_api(self):
response_media_type = assert_and_get_first_accept_matching_media_type(
[MediaTypes.TEI_XML, MediaTypes.JATS_XML]
)
return self._process_pdf_to_response_media_type(
response_media_type,
fulltext_processor_config=self.fulltext_processor_config.get_for_requested_field_names({
RequestFieldNames.REFERENCES
})
)
def process_pdf_to_tei_assets_zip_form(self):
return _get_file_upload_form('Convert PDF to TEI Assets ZIP')
def process_pdf_to_tei_assets_zip(self):
response_media_type = assert_and_get_first_accept_matching_media_type(
[MediaTypes.TEI_ZIP, MediaTypes.JATS_ZIP]
)
return self._process_pdf_to_response_media_type(
response_media_type,
fulltext_processor_config=self.fulltext_processor_config
)
def process_convert_api_form(self):
return _get_file_upload_form('Convert API')
def process_convert_api(self):
response_media_type = assert_and_get_first_accept_matching_media_type(
[
MediaTypes.JATS_XML, MediaTypes.TEI_XML,
MediaTypes.JATS_ZIP, MediaTypes.TEI_ZIP,
MediaTypes.PDF
]
)
includes_list = parse_comma_separated_value(
get_str_request_arg(RequestArgs.INCLUDES) or ''
)
LOGGER.info('includes_list: %r', includes_list)
fulltext_processor_config = self.fulltext_processor_config.get_for_requested_field_names(
set(includes_list)
)
LOGGER.info('fulltext_processor_config: %r', fulltext_processor_config)
return self._process_pdf_to_response_media_type(
response_media_type,
fulltext_processor_config=fulltext_processor_config
)
| 0.614972 | 0.146636 |
# ScienceBeam Trainer DeLFT
Work in progress.
A thin(ish) wrapper around [DeLFT](https://github.com/kermitt2/delft) to enable training in the cloud.
Some of the main features:
- resources (model, data etc.) can be loaded from remote sources (see the sketch after this list), currently:
- HTTP (`https://`, `http://`)
- Google Storage (`gs://`)
- resources can be saved to remote buckets, currently:
- Google Storage (`gs://`)
- on-demand embedding download
- Docker container(s)
- Support for Wapiti models
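As a sketch of the remote resource support, the following variant of the `train` example from the GROBID Trainer CLI section further below reads its input from a Google Storage bucket; the `gs://your-bucket/...` path is a placeholder, and output locations (e.g. model or log paths) can point to a bucket in the same way:

```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
    header train \
    --batch-size="10" \
    --embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
    --max-sequence-length="100" \
    --input="gs://your-bucket/datasets/delft-grobid-0.5.6-header.train.gz" \
    --limit="100" \
    --early-stopping-patience="3" \
    --max-epoch="50"
```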
## Prerequisites
- Python 3
When using [pyenv](https://github.com/pyenv/pyenv),
you may need `libsqlite3-dev` and to have Python installed with the `--enable-shared` flag.
For example:
```bash
apt-get install libsqlite3-dev
```
```bash
PYTHON_CONFIGURE_OPTS="--enable-shared" pyenv install --force 3.7.9
```
## Example Notebooks
- [train-header.ipynb](notebooks/train-header.ipynb) ([open in colab](https://colab.research.google.com/github/elifesciences/sciencebeam-trainer-delft/blob/develop/notebooks/train-header.ipynb))
## GROBID Docker Image with DeLFT
The Docker image `elifesciences/sciencebeam-trainer-delft-grobid_unstable`
can be used in place of the main GROBID image.
It includes DeLFT (currently with CPU support only).
There are several ways to change the configuration or override models.
### Override Models using Docker Image
The `OVERRIDE_MODELS` or `OVERRIDE_MODEL_*` environment variables allow models to be overridden; the two forms are equivalent. `OVERRIDE_MODELS` is meant for overriding multiple models via a single environment variable (separated by `|`), whereas `OVERRIDE_MODEL_*` can be used to specify each model separately.
```bash
docker run --rm \
--env "OVERRIDE_MODELS=segmentation=/path/to/segmentation-model|header=/path/to/header-model" \
elifesciences/sciencebeam-trainer-delft-grobid_unstable
```
or:
```bash
docker run --rm \
--env "OVERRIDE_MODEL_1=segmentation=/path/to/segmentation-model" \
--env "OVERRIDE_MODEL_2=header=/path/to/header-model" \
elifesciences/sciencebeam-trainer-delft-grobid_unstable
```
e.g.:
```bash
docker run --rm \
--env "OVERRIDE_MODEL_1=header=https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/delft-grobid-header-biorxiv-no-word-embedding-2020-05-05.tar.gz" \
elifesciences/sciencebeam-trainer-delft-grobid_unstable
```
This functionality is mainly intended for loading models from a compressed file or bucket, such as Google Storage or S3 (you may also need to mount the relevant credentials).
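For example, a hypothetical invocation loading an override model from a Google Storage bucket; the key file path, the bucket path and the use of `GOOGLE_APPLICATION_CREDENTIALS` are assumptions based on standard Google Cloud client conventions rather than something documented here:

```bash
docker run --rm \
    -v "$HOME/keys/service-account.json:/tmp/credentials.json:ro" \
    --env "GOOGLE_APPLICATION_CREDENTIALS=/tmp/credentials.json" \
    --env "OVERRIDE_MODEL_1=header=gs://your-bucket/models/delft-grobid-header.tar.gz" \
    elifesciences/sciencebeam-trainer-delft-grobid_unstable
```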
## GROBID Trainer CLI
The GROBID Trainer CLI is the equivalent of [DeLFT's grobidTagger](https://github.com/kermitt2/delft/blob/master/grobidTagger.py). It is the main interface for interacting with this project.
To get a list of all of the available parameters:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer --help
```
### Using Docker Image
```bash
docker run --rm elifesciences/sciencebeam-trainer-delft_unstable \
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer --help
```
### Train Sub Command
Training a model comes with many parameters. The following is an example of how to run the training; the parameter values shown are not recommendations.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
An example command using more of the configurable parameters:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--char-embedding-size="11" \
--char-lstm-units="12" \
--char-input-mask-zero \
--char-input-dropout="0.3" \
--char-lstm-dropout="0.3" \
--max-char-length="13" \
--word-lstm-units="14" \
--dropout="0.1" \
--recurrent-dropout="0.2" \
--max-epoch="50"
```
### Train Eval Sub Command
The `train_eval` sub command combines the `train` and `eval` commands. It reserves a slice of the input data for evaluation.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
If you would rather provide separate evaluation data:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--eval-input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--eval-limit="100" \
--eval-max-sequence-length="100" \
--eval-input-window-stride="90" \
--early-stopping-patience="3" \
--max-epoch="50"
```
You can also train without using word embeddings:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
### Train with layout features
Layout features are additional features provided with each token, e.g. whether it's the start of the line.
The model needs to support using such features. The following models do:
- `BidLSTM_CRF_FEATURES`
- `CustomBidLSTM_CRF`
- `CustomBidLSTM_CRF_FEATURES`
The features are generally already present in the training data. Some of them are not suitable as input features because they have too many unique values (e.g. variations of the token itself). The features to use should be specified via `--features-indices`. The `input_info` sub command can help identify useful feature index ranges (based on the count of unique values).
Example commands:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--architecture="BidLSTM_CRF_FEATURES" \
--use-features \
--features-indices="9-30" \
--features-embedding-size="5" \
--features-lstm-units="7" \
--early-stopping-patience="10" \
--max-epoch="50"
```
or
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--architecture="CustomBidLSTM_CRF_FEATURES" \
--use-features \
--features-indices="9-30" \
--features-embedding-size="5" \
--features-lstm-units="7" \
--early-stopping-patience="10" \
--max-epoch="50"
```
or
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--architecture="CustomBidLSTM_CRF" \
--use-features \
--features-indices="9-30" \
--features-embedding-size="0" \
--features-lstm-units="0" \
--early-stopping-patience="10" \
--max-epoch="50"
```
By default features are assumed to be categorical.
But features may also be [continuous](https://en.wikipedia.org/wiki/Continuous_or_discrete_variable).
Those values can be specified via the `--continuous-features-indices` parameter.
In that case they will automatically be part of the features and do not need to be specified separately.
Continuous features will get [min-max scaled](https://en.wikipedia.org/wiki/Feature_scaling).
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
segmentation \
train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-segmentation.train.gz \
--limit="100" \
--architecture="CustomBidLSTM_CRF" \
--use-features \
--features-indices="6-11" \
--continuous-features-indices="22,23,26" \
--features-embedding-size="0" \
--features-lstm-units="0" \
--early-stopping-patience="10" \
--max-epoch="50"
```
### Training with additional text features
Layout features may also contain additional token or text features.
For example, the default GROBID *segmentation* model uses one data row for the whole line, with the first token being the main token and the second token of the line being the first feature (index `0`).
Train with additional token features:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
segmentation \
train_eval \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--additional-token-feature-indices="0" \
--max-char-length="60" \
--max-sequence-length="100" \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-segmentation.train.gz" \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
Additionally, a ScienceBeam modification of the GROBID *segmentation* model also contains a text feature with the whole line (further details below).
Train with text features (using three tokens for word embeddings):
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
segmentation \
train_eval \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--text-feature-indices="32" \
--concatenated-embeddings-token-count="3" \
--max-char-length="60" \
--max-sequence-length="100" \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-1927-delft-segmentation-with-text-feature-32.train.gz" \
--eval-input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-961-delft-segmentation-with-text-feature-32.validation.gz" \
--limit="100" \
--eval-limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
In the [referenced training data](https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-1927-delft-segmentation-with-text-feature-32.train.gz), the last feature (`32`) represents the whole line (using non-breaking spaces instead of spaces). To use the model with GROBID, that [feature would need to be enabled](https://github.com/elifesciences/grobid/pull/25).
The same text feature also allows us to explore whether the model would perform better
if each token within the text feature was a separate token (data row).
In that case one would specify `--unroll-text-feature-index` with the token index of the text feature
that should get re-tokenized and "unrolled". The features and labels will get copied.
Another feature will get added with the *line status* (`LINESTART`, `LINEIN`, `LINEEND`) - feature index `33` in the example below.
Where the label has a beginning prefix (`B-`), it will get converted to an inside prefix (`I-`) for the remaining tokens
(see [IOB format](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging))).
At prediction time, the model will receive the "unrolled" data, whereas the original data will get returned,
with the majority label for that line (majority without prefix; a beginning prefix will be used if the label has changed).
Example:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
segmentation \
train_eval \
--batch-size="10" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--unroll-text-feature-index="32" \
--use-features \
--feature-indices="6-11,33" \
--max-char-length="60" \
--max-sequence-length="100" \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-1927-delft-segmentation-with-text-feature-32.train.gz" \
--eval-input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-961-delft-segmentation-with-text-feature-32.validation.gz" \
--limit="100" \
--eval-batch-size="1" \
--eval-limit="10" \
--eval-max-sequence-length="100" \
--early-stopping-patience="10" \
--max-epoch="50"
```
To inspect the unrolled predictions further, it is also possible to use the `tag` sub command with
`--tag-transformed`.
That flag will only make a difference for models already trained using the aforementioned
`--unroll-text-feature-index` parameter.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--tag-transformed \
--batch-size="16" \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/2020-07-30-biorxiv-961-delft-segmentation-with-text-feature-32.validation.gz" \
--model-path="data/models/sequenceLabelling/grobid-segmentation" \
--limit="2" \
--tag-output-format="data_unidiff" \
--tag-output-path="/tmp/test.diff"
```
### Resume training
Sometimes it can be useful to continue training a model.
For example, if an exception was thrown after epoch 42, you could continue training from the last checkpoint.
Or you may want to fine-tune an existing model by training it on new data.
Note: the model configuration will be loaded from the checkpoint.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train \
--resume-train-model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--initial-epoch="10" \
--batch-size="10" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--resume-train-model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--initial-epoch="10" \
--batch-size="10" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--eval-input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--eval-limit="100" \
--eval-batch-size="5" \
--early-stopping-patience="3" \
--max-epoch="50"
```
### Auto-resume training
As detailed in the previous section "Resume training",
there are situations where resuming training can be useful.
In particular, when the training process itself is automatically restarted,
then it is usually preferable to resume training rather than start it from
the beginning. By adding the `--auto-resume` flag, the training will resume from
the last saved checkpoint. Not surprisingly, saving checkpoints needs to be enabled as well.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train \
--auto-resume \
--batch-size="10" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--checkpoint="./data/checkpoints/header-model" \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
### Transfer learning (experimental)
A limited form of transfer learning is also possible by copying selected layers from a previously trained model. e.g.:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--transfer-source-model-path="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/2020-10-04-delft-grobid-header-biorxiv-no-word-embedding.tar.gz" \
--transfer-copy-layers="char_embeddings=char_embeddings|char_lstm=char_lstm|word_lstm=word_lstm|word_lstm_dense=word_lstm_dense" \
--transfer-copy-preprocessor-fields="vocab_char,feature_preprocessor" \
--transfer-freeze-layers="char_embeddings,char_lstm,word_lstm" \
--batch-size="16" \
--architecture="CustomBidLSTM_CRF" \
--no-embedding \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-header.train.gz" \
--limit="1000" \
--eval-input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-header.test.gz" \
--eval-limit="100" \
--max-sequence-length="1000" \
--eval-batch-size="5" \
--early-stopping-patience="3" \
--word-lstm-units="200" \
--use-features \
--feature-indices="9-25" \
--max-epoch="50"
```
Or transfer character weights from a different GROBID model:
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
affiliation-address \
train_eval \
--transfer-source-model-path="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/2020-10-04-delft-grobid-header-biorxiv-no-word-embedding.tar.gz" \
--transfer-copy-layers="char_embeddings=char_embeddings|char_lstm=char_lstm" \
--transfer-copy-preprocessor-fields="vocab_char" \
--transfer-freeze-layers="char_embeddings,char_lstm" \
--batch-size="32" \
--architecture="CustomBidLSTM_CRF" \
--no-embedding \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-affiliation-address.train.gz" \
--limit="1000" \
--eval-input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-affiliation-address.test.gz" \
--eval-limit="100" \
--max-sequence-length="100" \
--eval-batch-size="5" \
--early-stopping-patience="5" \
--word-lstm-units="20" \
--max-epoch="50"
```
### Training very long sequences
Some training sequences can be very long and may exceed the available memory. This is in particular an issue during training.
Some approaches to deal with the issue are described below.
#### Truncate the sequences to a maximum length
By passing in `--max-sequence-length`, sequences are truncated to that length.
In that case the model will not be trained on any data beyond the maximum sequence length.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
#### Training using [truncated BPTT](https://en.wikipedia.org/wiki/Backpropagation_through_time#Pseudocode) (Backpropagation through time)
This requires the LSTMs to be *stateful* (the state from the previous batch is passed on to the next). The `--stateful` flag should be passed in, and the `--input-window-stride` should be the same as `--max-sequence-length`.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input-window-stride="100" \
--stateful \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
Unfortunately the current implementation is very slow and training time might increase significantly.
#### Training using window slices
The alternative to the above is to not use *stateful* LSTMs but still pass in the input data using sliding windows.
To do that, do not pass `--stateful`, but use `--input-window-stride` with a value equal to or less than `--max-sequence-length`.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input-window-stride="50" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
```
This will not allow the LSTM to capture long-term dependencies beyond the maximum sequence length, but it does allow the model to see all of the data, in chunks. The maximum sequence length should therefore be chosen large enough; how large is enough depends on the model.
### Eval Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
eval \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="10" \
--quiet
```
The evaluation output format can be changed to `json` using the `--eval-output-format` parameter.
The output can also be saved to a file using `--eval-output-path`.
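For example, a sketch of the eval command above writing JSON output to a file (the output path is a placeholder):

```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
    eval \
    --batch-size="16" \
    --input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
    --model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
    --limit="10" \
    --eval-output-format="json" \
    --eval-output-path="/tmp/header-eval.json" \
    --quiet
```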
### Tag Sub Command
The `tag` sub command supports multiple output formats (`--tag-output-format`):
- `json`: more detailed tagging output
- `data`: data output with features, but with the label replaced by the predicted label
- `data_unidiff`: a [unidiff](https://en.wikipedia.org/wiki/Diff#Unified_format) between the expected and the predicted data output (see the example below)
- `text`: not really a tag output as it just outputs the input text
- `xml`: uses predicted labels as XML elements
- `xml_diff`: same as `xml` but showing a diff between the expected and predicted results
The output will be written to the path specified via `--tag-output-path` if present. Otherwise it will be written to *stdout*.
#### XML Output Example
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="1" \
--tag-output-format="xml" \
--quiet
```
With the result:
```xml
<xml>
<p>
<title>Markov Chain Algorithms for Planar Lattice Structures</title>
<author>Michael Luby y Dana Randall z Alistair Sinclair</author>
<abstract>Abstract Consider the following Markov chain , whose states are all domino tilings of a 2n 񮽙 2n chessboard : starting from some arbitrary tiling , pick a 2 񮽙 2 window uniformly at random . If the four squares appearing in this window are covered by two parallel dominoes , rotate the dominoes in place . Repeat many times . This process is used in practice to generate a tiling , and is a tool in the study of the combinatorics of tilings and the behavior of dimer systems in statistical physics . Analogous Markov chains are used to randomly generate other structures on various two - dimensional lattices . This paper presents techniques which prove for the 񮽙rst time that , in many interesting cases , a small number of random moves suuce to obtain a uniform distribution .</abstract>
</p>
</xml>
```
#### XML Diff Output Example
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="2" \
--tag-output-format="xml_diff" \
--quiet
```
With the result (the second document contains differences):
```xml
<xml>
<p>
<title>Markov Chain Algorithms for Planar Lattice Structures</title>
<author>Michael Luby y Dana Randall z Alistair Sinclair</author>
<abstract>Abstract Consider the following Markov chain , whose states are all domino tilings of a 2n 2n chessboard : starting from some arbitrary tiling , pick a 2 2 window uniformly at random . If the four squares appearing in this window are covered by two parallel dominoes , rotate the dominoes in place . Repeat many times . This process is used in practice to generate a tiling , and is a tool in the study of the combinatorics of tilings and the behavior of dimer systems in statistical physics . Analogous Markov chains are used to randomly generate other structures on various two - dimensional lattices . This paper presents techniques which prove for the rst time that , in many interesting cases , a small number of random moves suuce to obtain a uniform distribution .</abstract>
</p>
<p>
<title>Translucent Sums : A Foundation for Higher - Order Module Systems</title>
<author>Mark Lillibridge</author>
<date>May , 1997</date>
- <pubnum>- - 95 -</pubnum>
+ <pubnum>- - 95 - of</pubnum>
? +++
- <affiliation>of</affiliation>
</p>
</xml>
```
#### DATA Output Example
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="1" \
--tag-output-format="data" \
--quiet \
| head -5
```
With the result:
```text
Markov markov M Ma Mar Mark v ov kov rkov BLOCKSTART LINESTART NEWFONT HIGHERFONT 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0 B-<title>
Chain chain C Ch Cha Chai n in ain hain BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
Algorithms algorithms A Al Alg Algo s ms hms thms BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
for for f fo for for r or for for BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
Planar planar P Pl Pla Plan r ar nar anar BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
```
#### DATA Unidiff Output Example
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="2" \
--tag-output-format="data_unidiff" \
--tag-output-path="/tmp/test.diff"
```
The output can be viewed using a specialised tool (such as [Kompare](https://en.wikipedia.org/wiki/Kompare)).
Example [unidiff](https://en.wikipedia.org/wiki/Diff#Unified_format) result:
```diff
--- header_document_000002.expected
+++ header_document_000002.actual
@@ -1,21 +1,21 @@
Translucent translucent T Tr Tra Tran t nt ent cent BLOCKSTART LINESTART NEWFONT HIGHERFONT 1 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 B-<title>
Sums sums S Su Sum Sums s ms ums Sums BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
: : : : : : : : : : BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 0 PUNCT 0 0 I-<title>
A a A A A A A A A A BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 ALLCAP NODIGIT 1 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
Foundation foundation F Fo Fou Foun n on ion tion BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
for for f fo for for r or for for BLOCKIN LINEEND SAMEFONT SAMEFONTSIZE 1 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
...
- - - - - - - - - - BLOCKIN LINEEND SAMEFONT SAMEFONTSIZE 0 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 1 HYPHEN 0 0 I-<pubnum>
- - - - - - - - - - BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 1 HYPHEN 0 0 I-<pubnum>
95 95 9 95 95 95 5 95 95 95 BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<pubnum>
- - - - - - - - - - BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 1 HYPHEN 0 0 I-<pubnum>
-of of o of of of f of of of BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<affiliation>
+of of o of of of f of of of BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<pubnum>
```
#### Text Output Example
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="1" \
--tag-output-format="text" \
--quiet
```
With the result:
```text
Markov Chain Algorithms for Planar Lattice Structures Michael Luby y Dana Randall z Alistair Sinclair Abstract Consider the following Markov chain , whose states are all domino tilings of a 2n 2n chessboard : starting from some arbitrary tiling , pick a 2 2 window uniformly at random . If the four squares appearing in this window are covered by two parallel dominoes , rotate the dominoes in place . Repeat many times . This process is used in practice to generate a tiling , and is a tool in the study of the combinatorics of tilings and the behavior of dimer systems in statistical physics . Analogous Markov chains are used to randomly generate other structures on various two - dimensional lattices . This paper presents techniques which prove for the rst time that , in many interesting cases , a small number of random moves suuce to obtain a uniform distribution .
```
### Wapiti Sub Commands
The Wapiti sub commands allow a similar process to be used for training, evaluating and tagging Wapiti models as the sub commands for the DL model(s) above.
Currently you would need to either install [Wapiti](https://wapiti.limsi.fr/) and make the `wapiti` command available on the path, or use the `--wapiti-install-source` switch to download and install a version from source.
#### Wapiti Train Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header wapiti_train \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--wapiti-template=https://raw.githubusercontent.com/kermitt2/grobid/0.5.6/grobid-trainer/resources/dataset/header/crfpp-templates/header.template \
--wapiti-install-source=https://github.com/kermitt2/Wapiti/archive/5f9a52351fddf21916008daa4becd41d56e7f608.tar.gz \
--output="data/models" \
--limit="100" \
--max-epoch="10"
```
#### Wapiti Train Eval Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header wapiti_train_eval \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--eval-input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--wapiti-template=https://raw.githubusercontent.com/kermitt2/grobid/0.5.6/grobid-trainer/resources/dataset/header/crfpp-templates/header.template \
--output="data/models" \
--limit="100" \
--max-epoch="10"
```
#### Wapiti Eval Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
wapiti_eval \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header" \
--quiet
```
#### Wapiti Tag Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
wapiti_tag \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header" \
--limit="1" \
--tag-output-format="xml_diff" \
--quiet
```
### Input Info Sub Command
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
input_info \
--quiet \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz
```
Result:
```text
number of input sequences: 2538
sequence lengths: {'q.00': 1, 'q.25': 61.0, 'q.50': 178.0, 'q.75': 300.75, 'q1.0': 6606}
token lengths: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 7.0, 'q1.0': 142}
number of features: 31
inconsistent feature length counts: {31: 536893, 30: 12855}
examples with feature length=31:
die D Di Die Die e ie Die Die BLOCKSTART LINESTART NEWFONT HIGHERFONT 0 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0
abscheidung A Ab Abs Absc g ng ung dung BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0
strömender s st str strö r er der nder BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0
examples with feature length=30:
gudina G Gu Gud Gudi a na ina dina BLOCKSTART LINESTART LINEINDENT NEWFONT HIGHERFONT 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 NOPUNCT 0 0
et e et et et t et et et BLOCKIN LINEIN LINEINDENT NEWFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 0 0 0 0 0 0 NOPUNCT 0 0
al a al al al l al al al BLOCKIN LINEIN LINEINDENT SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 1 0 0 0 0 0 0 NOPUNCT 0 0
feature value lengths: {0: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 7.0, 'q1.0': 142}, 1: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 2: {'q.00': 1, 'q.25': 1.0, 'q.50': 2.0, 'q.75': 2.0, 'q1.0': 2}, 3: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 3.0, 'q1.0': 3}, 4: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 4.0, 'q1.0': 4}, 5: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 6: {'q.00': 1, 'q.25': 1.0, 'q.50': 2.0, 'q.75': 2.0, 'q1.0': 2}, 7: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 3.0, 'q1.0': 3}, 8: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 4.0, 'q1.0': 4}, 9: {'q.00': 7, 'q.25': 7.0, 'q.50': 7.0, 'q.75': 7.0, 'q1.0': 10}, 10: {'q.00': 6, 'q.25': 6.0, 'q.50': 6.0, 'q.75': 6.0, 'q1.0': 9}, 11: {'q.00': 7, 'q.25': 8.0, 'q.50': 8.0, 'q.75': 8.0, 'q1.0': 8}, 12: {'q.00': 9, 'q.25': 12.0, 'q.50': 12.0, 'q.75': 12.0, 'q1.0': 12}, 13: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 14: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 15: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 16: {'q.00': 6, 'q.25': 6.0, 'q.50': 6.0, 'q.75': 6.0, 'q1.0': 7}, 17: {'q.00': 7, 'q.25': 7.0, 'q.50': 7.0, 'q.75': 7.0, 'q1.0': 14}, 18: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 19: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 20: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 21: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 22: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 23: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 24: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 25: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 26: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 27: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 28: {'q.00': 3, 'q.25': 7.0, 'q.50': 7.0, 'q.75': 7.0, 'q1.0': 11}, 29: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 30: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}}
feature counts: {0: 1000, 1: 247, 2: 1000, 3: 1000, 4: 1000, 5: 265, 6: 1000, 7: 1000, 8: 1000, 9: 3, 10: 3, 11: 2, 12: 3, 13: 2, 14: 2, 15: 2, 16: 3, 17: 3, 18: 2, 19: 2, 20: 2, 21: 1, 22: 1, 23: 2, 24: 2, 25: 1, 26: 2, 27: 2, 28: 8, 29: 1, 30: 1}
suggested feature indices: 9-30
label counts: {'B-<title>': 2363, 'I-<title>': 24481, 'B-<author>': 2241, 'I-<author>': 25830, 'B-<reference>': 414, 'I-<reference>': 10121, 'B-<submission>': 409, 'I-<submission>': 3729, 'B-<abstract>': 1528, 'I-<abstract>': 269983, 'B-<affiliation>': 2782, 'I-<affiliation>': 23886, 'B-<address>': 2330, 'I-<address>': 13963, 'B-<date>': 658, 'I-<date>': 2204, 'B-<grant>': 105, 'I-<grant>': 4509, 'B-<email>': 891, 'I-<email>': 7796, 'B-<keyword>': 424, 'I-<keyword>': 7804, 'B-<entitle>': 24, 'I-<entitle>': 421, 'B-<pubnum>': 421, 'I-<pubnum>': 3755, 'B-<note>': 1823, 'I-<note>': 26033, 'B-<copyright>': 281, 'I-<copyright>': 5152, 'B-<date-submission>': 29, 'I-<date-submission>': 166, 'B-<intro>': 439, 'I-<intro>': 96944, 'B-<web>': 187, 'I-<web>': 3162, 'B-<phone>': 71, 'I-<phone>': 710, 'B-<dedication>': 22, 'I-<dedication>': 243, 'B-<degree>': 59, 'I-<degree>': 1355}
```
### Other CLI Parameters
#### `--log-file`
Specifying a log file (which can also be gzipped by adding the `.gz` extension) will save the logging output to that file. This is mainly intended for cloud usage. Locally you could also use `tee` for that.
If the specified file is a remote file, then it will be uploaded when the program finishes (no streaming logs).
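A sketch of a training run writing a gzipped log file to a (placeholder) bucket:

```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
    header train \
    --batch-size="10" \
    --no-embedding \
    --input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
    --limit="100" \
    --max-epoch="10" \
    --log-file="gs://your-bucket/logs/header-train.log.gz"
```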
#### `--notification-url`
For a long-running training process (`train` and `train_eval` or `wapiti_train` and `wapiti_train_eval`), it is possible to get notified via a webhook URL
(e.g. [Slack](https://api.slack.com/messaging/webhooks) or [Mattermost](https://docs.mattermost.com/developer/webhooks-incoming.html)).
In that case, a message will be sent when the training completes or in case of an error (although not all errors may be caught).
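A sketch with a placeholder webhook URL:

```bash
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
    header train \
    --batch-size="10" \
    --no-embedding \
    --input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
    --limit="100" \
    --max-epoch="10" \
    --notification-url="https://hooks.example.org/your-webhook-id"
```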
### Environment Variables
Environment variables can be useful when not directly interacting with the CLI, e.g. via GROBID.
The following environment variables can be specified:
| Name | Default | Description
| ---- | ------- | -----------
| `SCIENCEBEAM_DELFT_MAX_SEQUENCE_LENGTH` | *None* | The maximum sequence length to use, e.g. when tagging.
| `SCIENCEBEAM_DELFT_INPUT_WINDOW_STRIDE` | *None* | The window stride to use (if any). If the model is stateless, this could be set to the maximum sequence length. Otherwise this could be set to a value below the maximum sequence length. The difference will be the overlapping window. If no window stride was specified, the sequence will be truncated at the maximum sequence length.
| `SCIENCEBEAM_DELFT_BATCH_SIZE` | `10` | The batch size to use
| `SCIENCEBEAM_DELFT_STATEFUL` | *None* (*False*) | Whether to enable stateful mode. This may only work with a batch size of `1`. Note: the stateful mode is currently very slow.
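A sketch of setting these variables when running the GROBID Docker image described earlier (the values are illustrative only):

```bash
docker run --rm \
    --env "SCIENCEBEAM_DELFT_MAX_SEQUENCE_LENGTH=2000" \
    --env "SCIENCEBEAM_DELFT_INPUT_WINDOW_STRIDE=2000" \
    --env "SCIENCEBEAM_DELFT_BATCH_SIZE=4" \
    elifesciences/sciencebeam-trainer-delft-grobid_unstable
```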
## Training in Google's AI Platform
You can train a model using Google's [AI Platform](https://cloud.google.com/ai-platform/), e.g.:
```bash
gcloud beta ai-platform jobs submit training \
--job-dir "gs://your-job-bucket/path" \
--scale-tier=custom \
--master-machine-type=n1-highmem-8 \
--master-accelerator=count=1,type=NVIDIA_TESLA_K80 \
--region=europe-west1 \
--stream-logs \
--module-name sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
--package-path sciencebeam_trainer_delft \
-- \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="500" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="10000" \
--early-stopping-patience="10" \
--max-epoch="50"
```
Or using the project's wrapper script which provides some default values:
```bash
./gcloud-ai-platform-submit.sh \
--job-prefix "my_job_prefix" \
--job-dir "gs://your-job-bucket/path" \
--scale-tier=custom \
--master-machine-type=n1-highmem-8 \
--master-accelerator=count=1,type=NVIDIA_TESLA_K80 \
--region=europe-west1 \
-- \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="500" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="10000" \
--early-stopping-patience="10" \
--max-epoch="50"
```
(Alternatively you can train for free using Google Colab; see the Example Notebooks above.)
## Text Classification
### Train Text Classification
```bash
python -m sciencebeam_trainer_delft.text_classification \
train \
--model-path="data/models/textClassification/toxic" \
--train-input-limit=100 \
--train-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/train.csv"
```
### Eval Text Classification
```bash
python -m sciencebeam_trainer_delft.text_classification \
eval \
--model-path="data/models/textClassification/toxic" \
--eval-input-limit=100 \
--eval-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test.csv" \
--eval-label-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test_labels.csv"
```
### Predict Text Classification
```bash
python -m sciencebeam_trainer_delft.text_classification \
predict \
--model-path="data/models/textClassification/toxic" \
--predict-input-limit=100 \
--predict-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test.csv" \
--predict-output="./data/toxic_test_predictions.tsv"
```
### Train Eval Text Classification
```bash
python -m sciencebeam_trainer_delft.text_classification \
train_eval \
--model-path="data/models/textClassification/toxic" \
--train-input-limit=100 \
--train-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/train.csv" \
--eval-input-limit=100 \
--eval-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test.csv" \
--eval-label-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test_labels.csv"
```
## Checkpoints CLI
The checkpoints CLI tool gives you a summary of the saved checkpoints. Checkpoints are optionally saved during training; they allow you to resume model training or further evaluate performance at the individual checkpoints. Usually training will stop after the f1 score hasn't improved for a number of epochs, so the last checkpoint may not be the best one.
The checkpoints tool will sort by the f1 score and show the *n* (`limit`) top checkpoints.
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints --help
```
### Checkpoints Text Output
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints \
--checkpoint="/path/to/checkpoints" \
--limit=3 \
--output-format=text
```
```text
best checkpoints:
00039: 0.5877923107411811 (/path/to/checkpoints/epoch-00039) (last)
00036: 0.5899450117831894 (/path/to/checkpoints/epoch-00036)
00034: 0.591387179996031 (/path/to/checkpoints/epoch-00034) (best)
```
### Checkpoints JSON Output
```bash
python -m sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints \
--checkpoint="/path/to/checkpoints" \
--limit=3 \
--output-format=json
```
```json
[
{
"loss": 40.520591011530236,
"f1": 0.5877923107411811,
"optimizer": {
"type": "keras.optimizers.Adam",
"lr": 0.0010000000474974513
},
"epoch": 39,
"path": "/path/to/checkpoints/epoch-00039",
"is_last": true,
"is_best": false
},
{
"loss": 44.48661111276361,
"f1": 0.5899450117831894,
"optimizer": {
"type": "keras.optimizers.Adam",
"lr": 0.0010000000474974513
},
"epoch": 36,
"path": "/path/to/checkpoints/epoch-00036",
"is_last": false,
"is_best": false
},
{
"loss": 47.80826501711393,
"f1": 0.591387179996031,
"optimizer": {
"type": "keras.optimizers.Adam",
"lr": 0.0010000000474974513
},
"epoch": 34,
"path": "/path/to/checkpoints/epoch-00034",
"is_last": false,
"is_best": true
}
]
```
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/README.md
|
README.md
|
--initial-epoch="10" \
--batch-size="10" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--eval-input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--eval-limit="100" \
--eval-batch-size="5" \
--early-stopping-patience="3" \
--max-epoch="50"
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train \
--auto-resume \
--batch-size="10" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--checkpoint="./data/checkpoints/header-model" \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--transfer-source-model-path="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/2020-10-04-delft-grobid-header-biorxiv-no-word-embedding.tar.gz" \
--transfer-copy-layers="char_embeddings=char_embeddings|char_lstm=char_lstm|word_lstm=word_lstm|word_lstm_dense=word_lstm_dense" \
--transfer-copy-preprocessor-fields="vocab_char,feature_preprocessor" \
--transfer-freeze-layers="char_embeddings,char_lstm,word_lstm" \
--batch-size="16" \
--architecture="CustomBidLSTM_CRF" \
--no-embedding \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-header.train.gz" \
--limit="1000" \
--eval-input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-header.test.gz" \
--eval-limit="100" \
--max-sequence-length="1000" \
--eval-batch-size="5" \
--early-stopping-patience="3" \
--word-lstm-units="200" \
--use-features \
--feature-indices="9-25" \
--max-epoch="50"
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
affiliation-address \
train_eval \
--transfer-source-model-path="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/2020-10-04-delft-grobid-header-biorxiv-no-word-embedding.tar.gz" \
--transfer-copy-layers="char_embeddings=char_embeddings|char_lstm=char_lstm" \
--transfer-copy-preprocessor-fields="vocab_char" \
--transfer-freeze-layers="char_embeddings,char_lstm" \
--batch-size="32" \
--architecture="CustomBidLSTM_CRF" \
--no-embedding \
--input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-affiliation-address.train.gz" \
--limit="1000" \
--eval-input="https://github.com/elifesciences/sciencebeam-datasets/releases/download/grobid-0.6.1/delft-grobid-0.6.1-affiliation-address.test.gz" \
--eval-limit="100" \
--max-sequence-length="100" \
--eval-batch-size="5" \
--early-stopping-patience="5" \
--word-lstm-units="20" \
--max-epoch="50"
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input-window-stride="100" \
--stateful \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="100" \
--input-window-stride="50" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--early-stopping-patience="3" \
--max-epoch="50"
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
eval \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="10" \
--quiet
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="1" \
--tag-output-format="xml" \
--quiet
<xml>
<p>
<title>Markov Chain Algorithms for Planar Lattice Structures</title>
<author>Michael Luby y Dana Randall z Alistair Sinclair</author>
<abstract>Abstract Consider the following Markov chain , whose states are all domino tilings of a 2n 2n chessboard : starting from some arbitrary tiling , pick a 2 2 window uniformly at random . If the four squares appearing in this window are covered by two parallel dominoes , rotate the dominoes in place . Repeat many times . This process is used in practice to generate a tiling , and is a tool in the study of the combinatorics of tilings and the behavior of dimer systems in statistical physics . Analogous Markov chains are used to randomly generate other structures on various two - dimensional lattices . This paper presents techniques which prove for the rst time that , in many interesting cases , a small number of random moves suuce to obtain a uniform distribution .</abstract>
</p>
</xml>
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="2" \
--tag-output-format="xml_diff" \
--quiet
<xml>
<p>
<title>Markov Chain Algorithms for Planar Lattice Structures</title>
<author>Michael Luby y Dana Randall z Alistair Sinclair</author>
<abstract>Abstract Consider the following Markov chain , whose states are all domino tilings of a 2n 2n chessboard : starting from some arbitrary tiling , pick a 2 2 window uniformly at random . If the four squares appearing in this window are covered by two parallel dominoes , rotate the dominoes in place . Repeat many times . This process is used in practice to generate a tiling , and is a tool in the study of the combinatorics of tilings and the behavior of dimer systems in statistical physics . Analogous Markov chains are used to randomly generate other structures on various two - dimensional lattices . This paper presents techniques which prove for the rst time that , in many interesting cases , a small number of random moves suuce to obtain a uniform distribution .</abstract>
</p>
<p>
<title>Translucent Sums : A Foundation for Higher - Order Module Systems</title>
<author>Mark Lillibridge</author>
<date>May , 1997</date>
- <pubnum>- - 95 -</pubnum>
+ <pubnum>- - 95 - of</pubnum>
? +++
- <affiliation>of</affiliation>
</p>
</xml>
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="1" \
--tag-output-format="data" \
--quiet \
| head -5
Markov markov M Ma Mar Mark v ov kov rkov BLOCKSTART LINESTART NEWFONT HIGHERFONT 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0 B-<title>
Chain chain C Ch Cha Chai n in ain hain BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
Algorithms algorithms A Al Alg Algo s ms hms thms BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
for for f fo for for r or for for BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
Planar planar P Pl Pla Plan r ar nar anar BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="2" \
--tag-output-format="data_unidiff" \
--tag-output-path="/tmp/test.diff"
--- header_document_000002.expected
+++ header_document_000002.actual
@@ -1,21 +1,21 @@
Translucent translucent T Tr Tra Tran t nt ent cent BLOCKSTART LINESTART NEWFONT HIGHERFONT 1 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 B-<title>
Sums sums S Su Sum Sums s ms ums Sums BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
: : : : : : : : : : BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 0 PUNCT 0 0 I-<title>
A a A A A A A A A A BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 ALLCAP NODIGIT 1 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
Foundation foundation F Fo Fou Foun n on ion tion BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 1 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
for for f fo for for r or for for BLOCKIN LINEEND SAMEFONT SAMEFONTSIZE 1 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<title>
...
- - - - - - - - - - BLOCKIN LINEEND SAMEFONT SAMEFONTSIZE 0 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 1 HYPHEN 0 0 I-<pubnum>
- - - - - - - - - - BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 1 HYPHEN 0 0 I-<pubnum>
95 95 9 95 95 95 5 95 95 95 BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<pubnum>
- - - - - - - - - - BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 1 HYPHEN 0 0 I-<pubnum>
-of of o of of of f of of of BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<affiliation>
+of of o of of of f of of of BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0 I-<pubnum>
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
tag \
--batch-size="16" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header/" \
--limit="1" \
--tag-output-format="text" \
--quiet
Markov Chain Algorithms for Planar Lattice Structures Michael Luby y Dana Randall z Alistair Sinclair Abstract Consider the following Markov chain , whose states are all domino tilings of a 2n 2n chessboard : starting from some arbitrary tiling , pick a 2 2 window uniformly at random . If the four squares appearing in this window are covered by two parallel dominoes , rotate the dominoes in place . Repeat many times . This process is used in practice to generate a tiling , and is a tool in the study of the combinatorics of tilings and the behavior of dimer systems in statistical physics . Analogous Markov chains are used to randomly generate other structures on various two - dimensional lattices . This paper presents techniques which prove for the rst time that , in many interesting cases , a small number of random moves suuce to obtain a uniform distribution .
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header wapiti_train \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--wapiti-template=https://raw.githubusercontent.com/kermitt2/grobid/0.5.6/grobid-trainer/resources/dataset/header/crfpp-templates/header.template \
--wapiti-install-source=https://github.com/kermitt2/Wapiti/archive/5f9a52351fddf21916008daa4becd41d56e7f608.tar.gz \
--output="data/models" \
--limit="100" \
--max-epoch="10"
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header wapiti_train_eval \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--eval-input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--wapiti-template=https://raw.githubusercontent.com/kermitt2/grobid/0.5.6/grobid-trainer/resources/dataset/header/crfpp-templates/header.template \
--output="data/models" \
--limit="100" \
--max-epoch="10"
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
wapiti_eval \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header" \
--quiet
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
wapiti_tag \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.test.gz \
--model-path="https://github.com/kermitt2/grobid/raw/0.5.6/grobid-home/models/header" \
--limit="1" \
--tag-output-format="xml_diff" \
--quiet
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
input_info \
--quiet \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz
number of input sequences: 2538
sequence lengths: {'q.00': 1, 'q.25': 61.0, 'q.50': 178.0, 'q.75': 300.75, 'q1.0': 6606}
token lengths: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 7.0, 'q1.0': 142}
number of features: 31
inconsistent feature length counts: {31: 536893, 30: 12855}
examples with feature length=31:
die D Di Die Die e ie Die Die BLOCKSTART LINESTART NEWFONT HIGHERFONT 0 0 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 0 NOPUNCT 0 0
abscheidung A Ab Abs Absc g ng ung dung BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0
strömender s st str strö r er der nder BLOCKIN LINEIN SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 0 0 0 0 0 0 0 0 NOPUNCT 0 0
examples with feature length=30:
gudina G Gu Gud Gudi a na ina dina BLOCKSTART LINESTART LINEINDENT NEWFONT HIGHERFONT 0 0 0 INITCAP NODIGIT 0 0 0 0 0 0 0 0 NOPUNCT 0 0
et e et et et t et et et BLOCKIN LINEIN LINEINDENT NEWFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 0 0 0 0 0 0 0 NOPUNCT 0 0
al a al al al l al al al BLOCKIN LINEIN LINEINDENT SAMEFONT SAMEFONTSIZE 0 0 0 NOCAPS NODIGIT 0 1 0 0 0 0 0 0 NOPUNCT 0 0
feature value lengths: {0: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 7.0, 'q1.0': 142}, 1: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 2: {'q.00': 1, 'q.25': 1.0, 'q.50': 2.0, 'q.75': 2.0, 'q1.0': 2}, 3: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 3.0, 'q1.0': 3}, 4: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 4.0, 'q1.0': 4}, 5: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 6: {'q.00': 1, 'q.25': 1.0, 'q.50': 2.0, 'q.75': 2.0, 'q1.0': 2}, 7: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 3.0, 'q1.0': 3}, 8: {'q.00': 1, 'q.25': 1.0, 'q.50': 3.0, 'q.75': 4.0, 'q1.0': 4}, 9: {'q.00': 7, 'q.25': 7.0, 'q.50': 7.0, 'q.75': 7.0, 'q1.0': 10}, 10: {'q.00': 6, 'q.25': 6.0, 'q.50': 6.0, 'q.75': 6.0, 'q1.0': 9}, 11: {'q.00': 7, 'q.25': 8.0, 'q.50': 8.0, 'q.75': 8.0, 'q1.0': 8}, 12: {'q.00': 9, 'q.25': 12.0, 'q.50': 12.0, 'q.75': 12.0, 'q1.0': 12}, 13: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 14: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 15: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 16: {'q.00': 6, 'q.25': 6.0, 'q.50': 6.0, 'q.75': 6.0, 'q1.0': 7}, 17: {'q.00': 7, 'q.25': 7.0, 'q.50': 7.0, 'q.75': 7.0, 'q1.0': 14}, 18: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 19: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 20: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 21: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 22: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 23: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 24: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 25: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 26: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 27: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 28: {'q.00': 3, 'q.25': 7.0, 'q.50': 7.0, 'q.75': 7.0, 'q1.0': 11}, 29: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}, 30: {'q.00': 1, 'q.25': 1.0, 'q.50': 1.0, 'q.75': 1.0, 'q1.0': 1}}
feature counts: {0: 1000, 1: 247, 2: 1000, 3: 1000, 4: 1000, 5: 265, 6: 1000, 7: 1000, 8: 1000, 9: 3, 10: 3, 11: 2, 12: 3, 13: 2, 14: 2, 15: 2, 16: 3, 17: 3, 18: 2, 19: 2, 20: 2, 21: 1, 22: 1, 23: 2, 24: 2, 25: 1, 26: 2, 27: 2, 28: 8, 29: 1, 30: 1}
suggested feature indices: 9-30
label counts: {'B-<title>': 2363, 'I-<title>': 24481, 'B-<author>': 2241, 'I-<author>': 25830, 'B-<reference>': 414, 'I-<reference>': 10121, 'B-<submission>': 409, 'I-<submission>': 3729, 'B-<abstract>': 1528, 'I-<abstract>': 269983, 'B-<affiliation>': 2782, 'I-<affiliation>': 23886, 'B-<address>': 2330, 'I-<address>': 13963, 'B-<date>': 658, 'I-<date>': 2204, 'B-<grant>': 105, 'I-<grant>': 4509, 'B-<email>': 891, 'I-<email>': 7796, 'B-<keyword>': 424, 'I-<keyword>': 7804, 'B-<entitle>': 24, 'I-<entitle>': 421, 'B-<pubnum>': 421, 'I-<pubnum>': 3755, 'B-<note>': 1823, 'I-<note>': 26033, 'B-<copyright>': 281, 'I-<copyright>': 5152, 'B-<date-submission>': 29, 'I-<date-submission>': 166, 'B-<intro>': 439, 'I-<intro>': 96944, 'B-<web>': 187, 'I-<web>': 3162, 'B-<phone>': 71, 'I-<phone>': 710, 'B-<dedication>': 22, 'I-<dedication>': 243, 'B-<degree>': 59, 'I-<degree>': 1355}
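The "suggested feature indices" reported above can be fed straight back into training via --use-features and --features-indices. A hedged example, recombining only flags that already appear in the commands above (the dataset URL and limits are illustrative):
python -m sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
header train_eval \
--batch-size="10" \
--no-embedding \
--max-sequence-length="100" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="100" \
--architecture="CustomBidLSTM_CRF" \
--use-features \
--features-indices="9-30" \
--early-stopping-patience="10" \
--max-epoch="50"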
gcloud beta ai-platform jobs submit training \
--job-dir "gs://your-job-bucket/path" \
--scale-tier=custom \
--master-machine-type=n1-highmem-8 \
--master-accelerator=count=1,type=NVIDIA_TESLA_K80 \
--region=europe-west1 \
--stream-logs \
--module-name sciencebeam_trainer_delft.sequence_labelling.grobid_trainer \
--package-path sciencebeam_trainer_delft \
-- \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="500" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="10000" \
--early-stopping-patience="10" \
--max-epoch="50"
./gcloud-ai-platform-submit.sh \
--job-prefix "my_job_prefix" \
--job-dir "gs://your-job-bucket/path" \
--scale-tier=custom \
--master-machine-type=n1-highmem-8 \
--master-accelerator=count=1,type=NVIDIA_TESLA_K80 \
--region=europe-west1 \
-- \
header train_eval \
--batch-size="16" \
--embedding="https://github.com/elifesciences/sciencebeam-models/releases/download/v0.0.1/glove.6B.50d.txt.xz" \
--max-sequence-length="500" \
--input=https://github.com/elifesciences/sciencebeam-datasets/releases/download/v0.0.1/delft-grobid-0.5.6-header.train.gz \
--limit="10000" \
--early-stopping-patience="10" \
--max-epoch="50"
python -m sciencebeam_trainer_delft.text_classification \
train \
--model-path="data/models/textClassification/toxic" \
--train-input-limit=100 \
--train-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/train.csv"
python -m sciencebeam_trainer_delft.text_classification \
eval \
--model-path="data/models/textClassification/toxic" \
--eval-input-limit=100 \
--eval-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test.csv" \
--eval-label-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test_labels.csv"
python -m sciencebeam_trainer_delft.text_classification \
predict \
--model-path="data/models/textClassification/toxic" \
--predict-input-limit=100 \
--predict-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test.csv" \
--predict-output="./data/toxic_test_predictions.tsv"
python -m sciencebeam_trainer_delft.text_classification \
train_eval \
--model-path="data/models/textClassification/toxic" \
--train-input-limit=100 \
--train-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/train.csv" \
--eval-input-limit=100 \
--eval-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test.csv" \
--eval-label-input="https://github.com/kermitt2/delft/raw/v0.2.3/data/textClassification/toxic/test_labels.csv"
python -m sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints --help
python -m sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints \
--checkpoint="/path/to/checkpoints" \
--limit=3 \
--output-format=text
best checkpoints:
00039: 0.5877923107411811 (/path/to/checkpoints/epoch-00039) (last)
00036: 0.5899450117831894 (/path/to/checkpoints/epoch-00036)
00034: 0.591387179996031 (/path/to/checkpoints/epoch-00034) (best)
python -m sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints \
--checkpoint="/path/to/checkpoints" \
--limit=3 \
--output-format=json
[
{
"loss": 40.520591011530236,
"f1": 0.5877923107411811,
"optimizer": {
"type": "keras.optimizers.Adam",
"lr": 0.0010000000474974513
},
"epoch": 39,
"path": "/path/to/checkpoints/epoch-00039",
"is_last": true,
"is_best": false
},
{
"loss": 44.48661111276361,
"f1": 0.5899450117831894,
"optimizer": {
"type": "keras.optimizers.Adam",
"lr": 0.0010000000474974513
},
"epoch": 36,
"path": "/path/to/checkpoints/epoch-00036",
"is_last": false,
"is_best": false
},
{
"loss": 47.80826501711393,
"f1": 0.591387179996031,
"optimizer": {
"type": "keras.optimizers.Adam",
"lr": 0.0010000000474974513
},
"epoch": 34,
"path": "/path/to/checkpoints/epoch-00034",
"is_last": false,
"is_best": true
}
]
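A hedged sketch (not part of the checkpoints tool) showing how the JSON output above might be consumed to pick the best checkpoint; the checkpoints.json filename is hypothetical and assumes the command output was redirected to that file:
import json

# read the JSON emitted by the checkpoints tool above
with open('checkpoints.json', encoding='utf-8') as fp:
    checkpoints = json.load(fp)

# prefer the entry flagged as best, otherwise fall back to the highest f1
best = next((c for c in checkpoints if c.get('is_best')), None)
if best is None:
    best = max(checkpoints, key=lambda c: c['f1'])
print(best['path'], best['f1'])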
| 0.455199 | 0.902266 |
import os
import logging
import time
from contextlib import contextmanager
from pathlib import Path
from typing import IO, Iterator, Optional
import numpy as np
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
format_tag_result
)
LOGGER = logging.getLogger(__name__)
SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT = "SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT"
@contextmanager
def exclusive_prefixed_file(prefix: str, suffix: str = '') -> Iterator[IO]:
for index in range(1, 10000):
filename = '%s-%d%s' % (prefix, index, suffix)
try:
with open(filename, mode='x', encoding='utf-8') as fileobj:
yield fileobj
return
except FileExistsError:
continue
raise FileExistsError('could not create any prefixed file: %s, suffix: %s' % (prefix, suffix))
class TagDebugReporter:
def __init__(self, output_directory: str):
self.output_directory = output_directory
def get_base_output_name(self, model_name: str) -> str:
return os.path.join(self.output_directory, 'sciencebeam-delft-%s-%s' % (
round(time.time()),
model_name
))
def report_tag_results(
self,
texts: np.array,
features: np.array,
annotations,
model_name: str):
base_filename_prefix = self.get_base_output_name(model_name=model_name)
with exclusive_prefixed_file(base_filename_prefix, '.json') as json_fp:
output_file = json_fp.name
filename_prefix = os.path.splitext(output_file)[0]
LOGGER.info('tagger, output_file: %s', output_file)
format_tag_result_kwargs = dict(
tag_result=annotations,
texts=texts,
features=features,
model_name=model_name
)
formatted_text = format_tag_result(
output_format=TagOutputFormats.TEXT,
**format_tag_result_kwargs
)
Path(filename_prefix + '.txt').write_text(formatted_text, encoding='utf-8')
formatted_json = format_tag_result(
output_format=TagOutputFormats.JSON,
**format_tag_result_kwargs
)
json_fp.write(formatted_json)
formatted_xml = format_tag_result(
output_format=TagOutputFormats.XML,
**format_tag_result_kwargs
)
Path(filename_prefix + '.xml').write_text(formatted_xml, encoding='utf-8')
if features is not None:
formatted_data = format_tag_result(
output_format=TagOutputFormats.DATA,
**format_tag_result_kwargs
)
Path(filename_prefix + '.data').write_text(formatted_data, encoding='utf-8')
def get_tag_debug_reporter_if_enabled() -> Optional[TagDebugReporter]:
output_directory = os.environ.get(SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT)
if not output_directory:
return None
return TagDebugReporter(output_directory)
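A minimal usage sketch (not part of the module above): the reporter is only created when the SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT environment variable points at a writable directory; the sequence-labelling wrapper then calls report_tag_results() with the texts, features and annotations it has just tagged. The directory path is hypothetical, and get_tag_debug_reporter_if_enabled is assumed to be in scope or importable from the module above.
import os

os.environ['SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT'] = '/tmp/delft-debug'  # hypothetical output directory

reporter = get_tag_debug_reporter_if_enabled()
print(reporter)  # a TagDebugReporter instance when the variable is set, otherwise None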
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/debug.py
|
debug.py
|
| 0.647575 | 0.074433 |
from collections import Counter, defaultdict, OrderedDict
from typing import Dict, Iterable, List
import numpy as np
def iter_flat_batch_tokens(batch_tokens: List[List[str]]):
return (
token
for doc_tokens in batch_tokens
for token in doc_tokens
)
def iter_flat_features(features: np.ndarray):
return (
features_vector
for features_doc in features
for features_vector in features_doc
)
def get_quantiles(values: Iterable[float]) -> Dict[str, float]:
arr = np.array(list(values))
return OrderedDict([
('q.00', np.quantile(arr, 0)),
('q.25', np.quantile(arr, 0.25)),
('q.50', np.quantile(arr, 0.50)),
('q.75', np.quantile(arr, 0.75)),
('q1.0', np.quantile(arr, 1))
])
def get_quantiles_feature_value_length_by_index(features: np.ndarray):
return dict(enumerate(map(
lambda feature_values: get_quantiles(map(len, feature_values)),
zip(*iter_flat_features(features))
)))
def get_feature_value_counts_by_index(features: np.ndarray, max_feature_values: int = 1000):
feature_value_counts_by_index: Dict[int, Counter] = defaultdict(Counter)
for feature_vector in iter_flat_features(features):
for index, value in enumerate(feature_vector):
feature_value_counts = feature_value_counts_by_index[index]
if (
len(feature_value_counts) >= max_feature_values
and value not in feature_value_counts):
continue
feature_value_counts[value] += 1
return feature_value_counts_by_index
def get_feature_counts(features: np.ndarray):
feature_value_counts_by_index = get_feature_value_counts_by_index(features)
return OrderedDict([
(index, len(feature_value_counts_by_index[index]))
for index in sorted(feature_value_counts_by_index.keys())
])
def get_suggested_feature_indices(feature_counts: Dict[int, int], threshold: int = 12):
return [
index
for index in sorted(feature_counts.keys())
if feature_counts[index] <= threshold
]
def format_dict(d: dict) -> str:
return '{' + ', '.join([
'%s: %s' % (
repr(key),
format_dict(value) if isinstance(value, dict) else repr(value)
)
for key, value in d.items()
]) + '}'
def iter_index_groups(indices: List[int]) -> Iterable[List[int]]:
group: List[int] = []
for index in indices:
if not group or group[-1] + 1 == index:
group.append(index)
continue
yield group
group = [index]  # start the next group with the current index
if group:
yield group
def iter_formatted_index_groups(indices: List[int]) -> Iterable[str]:
for group in iter_index_groups(indices):
if len(group) == 1:
yield str(group[0])
continue
yield '%s-%s' % (group[0], group[-1])
def format_indices(indices: List[int]) -> str:
return ','.join(list(iter_formatted_index_groups(indices)))
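A hedged sketch (not part of the module above) that reproduces the "feature counts" and "suggested feature indices" lines of the input_info output using the helpers defined above, applied to a tiny hypothetical feature array (documents x tokens x feature values); it assumes the helpers are in scope or importable.
import numpy as np

# hypothetical feature vectors for two documents with two tokens each
features = np.asarray([
    [
        ['BLOCKSTART', 'LINESTART', 'NEWFONT', '0'],
        ['BLOCKIN', 'LINEIN', 'SAMEFONT', '1'],
    ],
    [
        ['BLOCKSTART', 'LINESTART', 'SAMEFONT', '0'],
        ['BLOCKIN', 'LINEEND', 'SAMEFONT', '1'],
    ],
], dtype=object)

feature_counts = get_feature_counts(features)
print('feature counts:', format_dict(dict(feature_counts)))
print('suggested feature indices:', format_indices(
    get_suggested_feature_indices(feature_counts)
))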
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/input_info.py
|
input_info.py
|
| 0.76908 | 0.616676 |
import logging
import os
import time
from functools import partial
from typing import Callable, Iterable, List, Optional, Tuple, Union, cast
import numpy as np
from delft.sequenceLabelling.models import BaseModel
from delft.sequenceLabelling.preprocess import WordPreprocessor, FeaturesPreprocessor
from delft.sequenceLabelling.wrapper import Sequence as _Sequence
from delft.sequenceLabelling.config import TrainingConfig as DelftTrainingConfig
from sciencebeam_trainer_delft.utils.typing import T
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.numpy import concatenate_or_none
from sciencebeam_trainer_delft.utils.misc import str_to_bool
from sciencebeam_trainer_delft.sequence_labelling.tools.install_models import (
copy_directory_with_source_meta
)
from sciencebeam_trainer_delft.embedding import Embeddings, EmbeddingManager
from sciencebeam_trainer_delft.sequence_labelling.config import ModelConfig, TrainingConfig
from sciencebeam_trainer_delft.sequence_labelling.data_generator import (
DataGenerator,
iter_batch_text_list,
get_concatenated_embeddings_token_count
)
from sciencebeam_trainer_delft.sequence_labelling.trainer import (
Scorer,
Trainer
)
from sciencebeam_trainer_delft.sequence_labelling.models import (
is_model_stateful,
get_model,
updated_implicit_model_config_props
)
from sciencebeam_trainer_delft.sequence_labelling.preprocess import (
T_FeaturesPreprocessor,
FeaturesPreprocessor as ScienceBeamFeaturesPreprocessor,
faster_preprocessor_fit
)
from sciencebeam_trainer_delft.sequence_labelling.saving import ModelSaver, ModelLoader
from sciencebeam_trainer_delft.sequence_labelling.tagger import Tagger
from sciencebeam_trainer_delft.sequence_labelling.evaluation import ClassificationResult
from sciencebeam_trainer_delft.sequence_labelling.debug import get_tag_debug_reporter_if_enabled
from sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints import (
get_checkpoints_json,
get_last_checkpoint_url
)
from sciencebeam_trainer_delft.sequence_labelling.transfer_learning import (
TransferLearningConfig,
TransferLearningSource,
freeze_model_layers
)
from sciencebeam_trainer_delft.sequence_labelling.dataset_transform import (
DummyDatasetTransformer
)
from sciencebeam_trainer_delft.sequence_labelling.dataset_transform.unroll_transform import (
UnrollingTextFeatureDatasetTransformer
)
LOGGER = logging.getLogger(__name__)
DEFAUT_MODEL_PATH = 'data/models/sequenceLabelling/'
DEFAULT_EMBEDDINGS_PATH = './embedding-registry.json'
DEFAUT_BATCH_SIZE = 10
class EnvironmentVariables:
# environment variables are mainly intended for GROBID, as we can't pass in arguments
MAX_SEQUENCE_LENGTH = 'SCIENCEBEAM_DELFT_MAX_SEQUENCE_LENGTH'
INPUT_WINDOW_STRIDE = 'SCIENCEBEAM_DELFT_INPUT_WINDOW_STRIDE'
BATCH_SIZE = 'SCIENCEBEAM_DELFT_BATCH_SIZE'
STATEFUL = 'SCIENCEBEAM_DELFT_STATEFUL'
def get_typed_env(
key: str,
type_fn: Callable[[str], T],
default_value: Optional[T] = None
) -> Optional[T]:
max_sequence_length_str = os.getenv(key)
if not max_sequence_length_str:
return default_value
return type_fn(max_sequence_length_str)
def get_default_max_sequence_length() -> Optional[int]:
return get_typed_env(EnvironmentVariables.MAX_SEQUENCE_LENGTH, int, default_value=None)
def get_default_input_window_stride() -> Optional[int]:
return get_typed_env(EnvironmentVariables.INPUT_WINDOW_STRIDE, int, default_value=None)
def get_default_batch_size() -> Optional[int]:
return get_typed_env(EnvironmentVariables.BATCH_SIZE, int, default_value=DEFAUT_BATCH_SIZE)
def get_default_stateful() -> Optional[bool]:
return get_typed_env(
EnvironmentVariables.STATEFUL,
str_to_bool,
default_value=None
)
def get_features_preprocessor(
model_config: ModelConfig,
features: np.array = None) -> T_FeaturesPreprocessor:
if not model_config.use_features:
LOGGER.info('features not enabled')
return None
if features is None:
LOGGER.info('no features available')
return None
if model_config.use_features_indices_input:
LOGGER.info(
'using feature indices as input, features_indices=%s, features_vocab_size=%s',
model_config.features_indices, model_config.features_vocabulary_size
)
return FeaturesPreprocessor(
features_indices=model_config.features_indices,
features_vocabulary_size=model_config.features_vocabulary_size
)
LOGGER.info(
'using feature indices=%s', model_config.features_indices
)
return ScienceBeamFeaturesPreprocessor(
features_indices=model_config.features_indices,
continuous_features_indices=model_config.continuous_features_indices
)
def get_preprocessor(
model_config: ModelConfig,
features: np.array = None) -> WordPreprocessor:
feature_preprocessor = get_features_preprocessor(model_config, features=features)
return WordPreprocessor(
max_char_length=model_config.max_char_length,
feature_preprocessor=feature_preprocessor
)
def prepare_preprocessor(
X, y,
model_config: ModelConfig,
features: Optional[List[List[List[str]]]] = None
):
preprocessor = get_preprocessor(model_config, features=features)
batch_text_list_iterable = iter_batch_text_list(
X, features,
additional_token_feature_indices=model_config.additional_token_feature_indices,
text_feature_indices=model_config.text_feature_indices
)
if isinstance(preprocessor, WordPreprocessor):
LOGGER.info('fitting preprocessor (faster)')
faster_preprocessor_fit(preprocessor, batch_text_list_iterable, y)
else:
LOGGER.info('fitting preprocessor (default)')
preprocessor.fit(batch_text_list_iterable, y)
if model_config.use_features and features is not None:
LOGGER.info('fitting features preprocessor')
preprocessor.fit_features(features)
if model_config.features_indices != preprocessor.feature_preprocessor.features_indices:
LOGGER.info('revised features_indices: %s', model_config.features_indices)
model_config.features_indices = preprocessor.feature_preprocessor.features_indices
model_config.features_map_to_index = preprocessor.feature_preprocessor.features_map_to_index
LOGGER.info('done fitting preprocessor')
return preprocessor
def get_model_directory(model_name: str, dir_path: str = None):
return os.path.join(dir_path or DEFAUT_MODEL_PATH, model_name)
class Sequence(_Sequence):
def __init__(
self, *args,
use_features: bool = False,
features_indices: List[int] = None,
features_embedding_size: int = None,
multiprocessing: bool = False,
embedding_registry_path: str = None,
embedding_manager: EmbeddingManager = None,
config_props: dict = None,
training_props: dict = None,
max_sequence_length: int = None,
input_window_stride: int = None,
eval_max_sequence_length: int = None,
eval_input_window_stride: int = None,
batch_size: int = None,
eval_batch_size: int = None,
stateful: bool = None,
transfer_learning_config: TransferLearningConfig = None,
tag_transformed: bool = False,
**kwargs):
# initialise logging if not already initialised
logging.basicConfig(level='INFO')
LOGGER.debug('Sequence, args=%s, kwargs=%s', args, kwargs)
self.embedding_registry_path = embedding_registry_path or DEFAULT_EMBEDDINGS_PATH
if embedding_manager is None:
embedding_manager = EmbeddingManager(
path=self.embedding_registry_path,
download_manager=DownloadManager()
)
self.download_manager = embedding_manager.download_manager
self.embedding_manager = embedding_manager
self.embeddings: Optional[Embeddings] = None
if not batch_size:
batch_size = get_default_batch_size()
if not max_sequence_length:
max_sequence_length = get_default_max_sequence_length()
self.max_sequence_length = max_sequence_length
if not input_window_stride:
input_window_stride = get_default_input_window_stride()
self.input_window_stride = input_window_stride
self.eval_max_sequence_length = eval_max_sequence_length
self.eval_input_window_stride = eval_input_window_stride
self.eval_batch_size = eval_batch_size
self.model_path: Optional[str] = None
if stateful is None:
# use a stateful model, if supported
stateful = get_default_stateful()
self.stateful = stateful
self.transfer_learning_config = transfer_learning_config
self.dataset_transformer_factory = DummyDatasetTransformer
self.tag_transformed = tag_transformed
super().__init__(
*args,
max_sequence_length=max_sequence_length,
batch_size=batch_size,
**kwargs
)
LOGGER.debug('use_features=%s', use_features)
self.model_config: ModelConfig = ModelConfig(
**{ # type: ignore
**vars(self.model_config),
**(config_props or {}),
'features_indices': features_indices,
'features_embedding_size': features_embedding_size
},
use_features=use_features
)
self.update_model_config_word_embedding_size()
updated_implicit_model_config_props(self.model_config)
self.update_dataset_transformer_factor()
self.training_config: TrainingConfig = TrainingConfig(
**vars(cast(DelftTrainingConfig, self.training_config)),
**(training_props or {})
)
LOGGER.info('training_config: %s', vars(self.training_config))
self.multiprocessing = multiprocessing
self.tag_debug_reporter = get_tag_debug_reporter_if_enabled()
self._load_exception = None
self.p: Optional[WordPreprocessor] = None
self.model: Optional[BaseModel] = None
self.models: List[BaseModel] = []
def update_model_config_word_embedding_size(self):
if self.embeddings:
token_count = get_concatenated_embeddings_token_count(
concatenated_embeddings_token_count=(
self.model_config.concatenated_embeddings_token_count
),
additional_token_feature_indices=(
self.model_config.additional_token_feature_indices
)
)
self.model_config.word_embedding_size = (
self.embeddings.embed_size * token_count
)
def update_dataset_transformer_factor(self):
self.dataset_transformer_factory = DummyDatasetTransformer
if self.model_config.unroll_text_feature_index is not None:
LOGGER.info(
'using unrolling text feature dataset transformer, index=%s',
self.model_config.unroll_text_feature_index
)
self.dataset_transformer_factory = partial(
UnrollingTextFeatureDatasetTransformer,
self.model_config.unroll_text_feature_index,
used_features_indices=self.model_config.features_indices
)
def clear_embedding_cache(self):
if not self.embeddings:
return
if self.embeddings.use_ELMo:
self.embeddings.clean_ELMo_cache()
if self.embeddings.use_BERT:
self.embeddings.clean_BERT_cache()
def train( # pylint: disable=arguments-differ
self, x_train, y_train, x_valid=None, y_valid=None,
features_train: np.array = None,
features_valid: np.array = None):
# TBD if valid is None, segment train to get one
dataset_transformer = self.dataset_transformer_factory()
x_train, y_train, features_train = dataset_transformer.fit_transform(
x_train, y_train, features_train
)
if x_valid is not None:
x_valid, y_valid, features_valid = dataset_transformer.fit_transform(
x_valid, y_valid, features_valid
)
x_all = np.concatenate((x_train, x_valid), axis=0)
y_all = np.concatenate((y_train, y_valid), axis=0)
features_all = concatenate_or_none((features_train, features_valid), axis=0)
transfer_learning_source: Optional[TransferLearningSource] = None
if self.p is None or self.model is None:
transfer_learning_source = TransferLearningSource.from_config(
self.transfer_learning_config,
download_manager=self.download_manager
)
if self.p is None:
if transfer_learning_source:
self.p = transfer_learning_source.copy_preprocessor_if_enabled()
if self.p is None:
self.p = prepare_preprocessor(
x_all, y_all,
features=features_all,
model_config=self.model_config
)
if transfer_learning_source:
transfer_learning_source.apply_preprocessor(target_preprocessor=self.p)
self.model_config.char_vocab_size = len(self.p.vocab_char)
self.model_config.case_vocab_size = len(self.p.vocab_case)
if self.model_config.use_features and features_train is not None:
LOGGER.info('x_train.shape: %s', x_train.shape)
LOGGER.info('features_train.shape: %s', features_train.shape)
sample_transformed_features = self.p.transform_features(features_train[:1])
try:
if isinstance(sample_transformed_features, tuple):
sample_transformed_features = sample_transformed_features[0]
LOGGER.info(
'sample_transformed_features.shape: %s',
sample_transformed_features.shape
)
self.model_config.max_feature_size = sample_transformed_features.shape[-1]
LOGGER.info('max_feature_size: %s', self.model_config.max_feature_size)
except Exception: # pylint: disable=broad-except
LOGGER.info('features do not implement shape, set max_feature_size manually')
if self.model is None:
self.model = get_model(self.model_config, self.p, len(self.p.vocab_tag))
if transfer_learning_source:
transfer_learning_source.apply_weights(target_model=self.model)
if self.transfer_learning_config:
freeze_model_layers(self.model, self.transfer_learning_config.freeze_layers)
trainer = Trainer(
self.model,
self.models,
self.embeddings,
self.model_config,
training_config=self.training_config,
model_saver=self.get_model_saver(),
multiprocessing=self.multiprocessing,
checkpoint_path=self.log_dir,
preprocessor=self.p
)
trainer.train(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid
)
self.clear_embedding_cache()
def get_model_saver(self):
return ModelSaver(
preprocessor=self.p,
model_config=self.model_config
)
def train_nfold( # pylint: disable=arguments-differ
self, x_train, y_train, x_valid=None, y_valid=None, fold_number=10,
features_train: np.array = None,
features_valid: np.array = None):
if x_valid is not None and y_valid is not None:
x_all = np.concatenate((x_train, x_valid), axis=0)
y_all = np.concatenate((y_train, y_valid), axis=0)
features_all = concatenate_or_none((features_train, features_valid), axis=0)
self.p = prepare_preprocessor(
x_all, y_all,
features=features_all,
model_config=self.model_config
)
else:
self.p = prepare_preprocessor(
x_train, y_train,
features=features_train,
model_config=self.model_config
)
self.model_config.char_vocab_size = len(self.p.vocab_char)
self.model_config.case_vocab_size = len(self.p.vocab_case)
self.p.return_lengths = True
self.models = []
for _ in range(0, fold_number):
model = get_model(self.model_config, self.p, len(self.p.vocab_tag))
self.models.append(model)
trainer = Trainer(
self.model,
self.models,
self.embeddings,
self.model_config,
training_config=self.training_config,
model_saver=self.get_model_saver(),
checkpoint_path=self.log_dir,
preprocessor=self.p
)
trainer.train_nfold(
x_train, y_train,
x_valid, y_valid,
features_train=features_train,
features_valid=features_valid
)
if self.embeddings:
if self.embeddings.use_ELMo:
self.embeddings.clean_ELMo_cache()
if self.embeddings.use_BERT:
self.embeddings.clean_BERT_cache()
def eval( # pylint: disable=arguments-differ
self, x_test, y_test, features: np.array = None):
should_eval_nfold = (
self.model_config.fold_number > 1
and self.models
and len(self.models) == self.model_config.fold_number
)
if should_eval_nfold:
self.eval_nfold(x_test, y_test, features=features)
else:
self.eval_single(x_test, y_test, features=features)
def create_eval_data_generator(self, *args, **kwargs) -> DataGenerator:
return DataGenerator( # type: ignore
*args,
batch_size=(
self.eval_batch_size
or self.training_config.batch_size
),
preprocessor=self.p,
additional_token_feature_indices=self.model_config.additional_token_feature_indices,
text_feature_indices=self.model_config.text_feature_indices,
concatenated_embeddings_token_count=(
self.model_config.concatenated_embeddings_token_count
),
char_embed_size=self.model_config.char_embedding_size,
is_deprecated_padded_batch_text_list_enabled=(
self.model_config.is_deprecated_padded_batch_text_list_enabled
),
max_sequence_length=self.eval_max_sequence_length,
embeddings=self.embeddings,
**kwargs
)
def get_evaluation_result(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None) -> ClassificationResult:
self._require_model()
if self.model_config.use_features and features is None:
raise ValueError('features required')
tagger = Tagger(
self.model, self.model_config, self.embeddings,
dataset_transformer_factory=self.dataset_transformer_factory,
max_sequence_length=self.eval_max_sequence_length,
input_window_stride=self.eval_input_window_stride,
preprocessor=self.p
)
tag_result = tagger.tag(
list(x_test),
output_format=None,
features=features
)
y_pred = [
[token_tag for _, token_tag in doc_pred]
for doc_pred in tag_result
]
# convert to list, get_entities is type checking for list but not ndarray
y_true = [list(true_doc) for true_doc in y_test]
return ClassificationResult(y_pred=y_pred, y_true=y_true)
def eval_single( # pylint: disable=arguments-differ
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None):
classification_result = self.get_evaluation_result(
x_test=x_test,
y_test=y_test,
features=features
)
print(classification_result.get_formatted_report(digits=4))
def eval_nfold( # pylint: disable=arguments-differ
self, x_test, y_test, features: np.array = None):
if self.models is not None:
total_f1 = 0
best_f1 = 0
best_index = 0
worst_f1 = 1
worst_index = 0
reports = []
total_precision = 0
total_recall = 0
for i in range(0, self.model_config.fold_number):
print(
'\n------------------------ fold %s --------------------------------------'
% i
)
# Prepare test data(steps, generator)
test_generator = self.create_eval_data_generator(
x_test, y_test,
features=features,
shuffle=False
)
# Build the evaluator and evaluate the model
scorer = Scorer(test_generator, self.p, evaluation=True)
scorer.model = self.models[i]
scorer.on_epoch_end(epoch=-1)
f1 = scorer.f1
precision = scorer.precision
recall = scorer.recall
reports.append(scorer.report)
if best_f1 < f1:
best_f1 = f1
best_index = i
if worst_f1 > f1:
worst_f1 = f1
worst_index = i
total_f1 += f1
total_precision += precision
total_recall += recall
macro_f1 = total_f1 / self.model_config.fold_number
macro_precision = total_precision / self.model_config.fold_number
macro_recall = total_recall / self.model_config.fold_number
print("\naverage over", self.model_config.fold_number, "folds")
print("\tmacro f1 =", macro_f1)
print("\tmacro precision =", macro_precision)
print("\tmacro recall =", macro_recall, "\n")
print("\n** Worst ** model scores - \n")
print(reports[worst_index])
self.model = self.models[best_index]
print("\n** Best ** model scores - \n")
print(reports[best_index])
def iter_tag(
self, texts, output_format, features=None
) -> Union[dict, Iterable[List[Tuple[str, str]]]]:
# annotate a list of sentences, return the list of annotations in the
# specified output_format
self._require_model()
if self.model_config.use_features and features is None:
raise ValueError('features required')
tagger = Tagger(
self.model, self.model_config, self.embeddings,
dataset_transformer_factory=self.dataset_transformer_factory,
max_sequence_length=self.max_sequence_length,
input_window_stride=self.input_window_stride,
preprocessor=self.p
)
LOGGER.debug('tag_transformed: %s', self.tag_transformed)
annotations: Union[dict, Iterable[List[Tuple[str, str]]]]
if output_format == 'json':
start_time = time.time()
annotations = tagger.tag(
list(texts), output_format,
features=features,
tag_transformed=self.tag_transformed
)
runtime = round(time.time() - start_time, 3)
assert isinstance(annotations, dict)
annotations["runtime"] = runtime
else:
annotations = tagger.iter_tag(
list(texts), output_format,
features=features,
tag_transformed=self.tag_transformed
)
if self.tag_debug_reporter:
if not isinstance(annotations, dict):
# the tag debug reporter only supports lists
# additionally should not consume the iterable
annotations = list(annotations)
self.tag_debug_reporter.report_tag_results(
texts=texts,
features=features,
annotations=annotations,
model_name=self._get_model_name()
)
return annotations
def tag(self, *args, **kwargs) -> Union[dict, List[List[Tuple[str, str]]]]:
iterable_or_dict = self.iter_tag(*args, **kwargs)
if isinstance(iterable_or_dict, dict):
return iterable_or_dict
return list(iterable_or_dict)
def _require_model(self):
if not self.model:
try:
raise OSError('Model not loaded: %s (previous load exception: %r)' % (
self._get_model_name(), self._load_exception
)) from self._load_exception
except Exception as exc:
LOGGER.exception('Model required but not loaded: %r', exc, exc_info=exc)
raise
def _get_model_name(self):
return self.model_config.model_name
@property
def last_checkpoint_path(self) -> Optional[str]:
if not self.log_dir:
return None
return get_last_checkpoint_url(get_checkpoints_json(self.log_dir))
@property
def model_summary_props(self) -> dict:
return {
'model_type': 'delft',
'architecture': self.model_config.model_type,
'model_config': vars(self.model_config)
}
def get_model_output_path(self, dir_path: str = None) -> str:
return get_model_directory(model_name=self.model_config.model_name, dir_path=dir_path)
def _get_model_directory(self, dir_path: str = None) -> str:
return self.get_model_output_path(dir_path=dir_path)
def get_embedding_for_model_config(self, model_config: ModelConfig):
embedding_name = model_config.embeddings_name
if not model_config.use_word_embeddings or not embedding_name:
return None
embedding_name = self.embedding_manager.ensure_available(embedding_name)
LOGGER.info('embedding_name: %s', embedding_name)
embeddings = Embeddings(
embedding_name,
path=self.embedding_registry_path,
use_ELMo=model_config.use_ELMo,
use_BERT=model_config.use_BERT
)
if not embeddings.embed_size > 0:
raise AssertionError(
'invalid embedding size, embeddings not loaded? %s' % embedding_name
)
return embeddings
def get_meta(self):
return {
'training_config': vars(self.training_config)
}
def save(self, dir_path=None):
        # create a subfolder for the model if it does not already exist
directory = self._get_model_directory(dir_path)
os.makedirs(directory, exist_ok=True)
self.get_model_saver().save_to(directory, model=self.model, meta=self.get_meta())
def load(self, dir_path=None):
directory = None
try:
directory = self._get_model_directory(dir_path)
self.load_from(directory)
except Exception as exc:
self._load_exception = exc
LOGGER.exception('failed to load model from %r', directory, exc_info=exc)
raise
def download_model(self, dir_path: str) -> str:
if not dir_path.endswith('.tar.gz'):
return dir_path
local_dir_path = str(self.download_manager.get_local_file(
dir_path, auto_uncompress=False
)).replace('.tar.gz', '')
copy_directory_with_source_meta(dir_path, local_dir_path)
return local_dir_path
def load_from(self, directory: str):
model_loader = ModelLoader(download_manager=self.download_manager)
directory = self.download_model(directory)
self.model_path = directory
self.p = model_loader.load_preprocessor_from_directory(directory)
self.model_config = model_loader.load_model_config_from_directory(directory)
self.model_config.batch_size = self.training_config.batch_size
if self.stateful is not None:
self.model_config.stateful = self.stateful
# load embeddings
LOGGER.info('loading embeddings: %s', self.model_config.embeddings_name)
self.embeddings = self.get_embedding_for_model_config(self.model_config)
self.update_model_config_word_embedding_size()
self.model = get_model(self.model_config, self.p, ntags=len(self.p.vocab_tag))
# update stateful flag depending on whether the model is actually stateful
# (and supports that)
self.model_config.stateful = is_model_stateful(self.model)
# load weights
model_loader.load_model_from_directory(directory, model=self.model)
self.update_dataset_transformer_factor()
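A minimal usage sketch of the Sequence wrapper above, assuming the first positional constructor argument is the model name (as in upstream DeLFT); the model name and model directory below are hypothetical:
from sciencebeam_trainer_delft.sequence_labelling.wrapper import Sequence
# hypothetical model name and directory of a previously saved model
sequence = Sequence('my-header-model')
sequence.load(dir_path='data/models/sequenceLabelling/')
result = sequence.tag(
    [['This', 'is', 'a', 'title', '.']],
    output_format='json'
)
# for the 'json' output format, iter_tag adds the tagging runtime in seconds
print(result['runtime'])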
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/wrapper.py
|
wrapper.py
|
| 0.826187 | 0.161783 |
import json
from typing import List, Optional
from delft.sequenceLabelling.config import (
ModelConfig as _ModelConfig,
TrainingConfig as _TrainingConfig
)
FIRST_MODEL_VERSION = 1
MODEL_VERSION = 2
DEFAULT_CHAR_INPUT_DROPOUT = 0.0
DEFAULT_CHAR_LSTM_DROPOUT = 0.0
NOT_SET = -1
class ModelConfig(_ModelConfig):
def __init__(
self,
*args,
use_word_embeddings: bool = True,
use_features: bool = False,
continuous_features_indices: List[int] = None,
max_feature_size: int = 50,
additional_token_feature_indices: List[int] = None,
text_feature_indices: List[int] = None,
unroll_text_feature_index: Optional[int] = None,
concatenated_embeddings_token_count: int = None,
use_features_indices_input: bool = False,
char_input_mask_zero: bool = False,
char_input_dropout: float = DEFAULT_CHAR_INPUT_DROPOUT,
char_lstm_dropout: float = DEFAULT_CHAR_LSTM_DROPOUT,
stateful: bool = False,
model_version: int = MODEL_VERSION,
# deprecated
feature_indices: List[int] = None,
feature_embedding_size: int = NOT_SET,
**kwargs):
if feature_indices:
kwargs['features_indices'] = feature_indices
if feature_embedding_size != NOT_SET:
kwargs['features_embedding_size'] = feature_embedding_size
super().__init__(*args)
self.use_word_embeddings = use_word_embeddings
self.additional_token_feature_indices = additional_token_feature_indices
self.text_feature_indices = text_feature_indices
self.unroll_text_feature_index = unroll_text_feature_index
self.concatenated_embeddings_token_count = concatenated_embeddings_token_count
self.use_features = use_features
self.continuous_features_indices = continuous_features_indices
self.max_feature_size = max_feature_size
self.use_features_indices_input = use_features_indices_input
self.char_input_mask_zero = char_input_mask_zero
self.char_input_dropout = char_input_dropout
self.char_lstm_dropout = char_lstm_dropout
self.stateful = stateful
self.model_version = model_version
for key, val in kwargs.items():
setattr(self, key, val)
@property
def is_deprecated_padded_batch_text_list_enabled(self):
return bool(
self.model_version < 2
and self.text_feature_indices
)
def save(self, file):
try:
super().save(file)
except TypeError:
json.dump(vars(self), file, sort_keys=False, indent=4)
@classmethod
def load(cls, file):
variables = json.load(file)
self = cls()
        # model version is assumed to be the first version if not saved
self.model_version = FIRST_MODEL_VERSION
for key, val in variables.items():
setattr(self, key, val)
return self
# alias due to properties having been renamed in upstream implementation
@property
def feature_indices(self) -> Optional[List[int]]:
features_indices = self.features_indices
if not features_indices:
features_indices = self.__dict__.get('feature_indices', [])
return features_indices
@feature_indices.setter
def feature_indices(self, feature_indices: List[int]):
self.features_indices = feature_indices
@property
def feature_embedding_size(self) -> Optional[int]:
return (
self.features_embedding_size
or self.__dict__.get('feature_embedding_size')
)
@feature_embedding_size.setter
def feature_embedding_size(self, feature_embedding_size: int):
self.features_embedding_size = feature_embedding_size
class TrainingConfig(_TrainingConfig):
def __init__(
self,
*args,
initial_epoch: int = None,
input_window_stride: int = None,
checkpoint_epoch_interval: int = 1,
initial_meta: Optional[dict] = None,
**kwargs):
super().__init__(*args, **kwargs)
self.initial_epoch = initial_epoch
self.input_window_stride = input_window_stride
self.checkpoint_epoch_interval = checkpoint_epoch_interval
self.initial_meta = initial_meta
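A small worked sketch of how the deprecated feature_indices / feature_embedding_size keyword arguments in the ModelConfig constructor above are mapped onto the new attribute names (the values are hypothetical):
# the deprecated keywords are translated to the new names before being set on
# the instance; the alias properties read the new attributes back
config = ModelConfig(feature_indices=[9, 10], feature_embedding_size=4)
assert config.features_indices == [9, 10]
assert config.feature_indices == [9, 10]
assert config.features_embedding_size == 4
assert config.feature_embedding_size == 4
assert config.model_version == MODEL_VERSION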
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/config.py
|
config.py
|
| 0.864568 | 0.089335 |
import json
import difflib
import logging
from xml.sax.saxutils import escape as xml_escape
from typing import Optional, Union, Iterable, List, Tuple
import numpy as np
from delft.sequenceLabelling.evaluation import get_entities
LOGGER = logging.getLogger(__name__)
class TagOutputFormats:
JSON = 'json'
DATA = 'data'
DATA_UNIDIFF = 'data_unidiff'
TEXT = 'text'
XML = 'xml'
XML_DIFF = 'xml_diff'
TAG_OUTPUT_FORMATS = [
TagOutputFormats.JSON,
TagOutputFormats.DATA,
TagOutputFormats.DATA_UNIDIFF,
TagOutputFormats.TEXT,
TagOutputFormats.XML,
TagOutputFormats.XML_DIFF,
]
class CustomJsonEncoder(json.JSONEncoder):
def default(self, obj): # pylint: disable=arguments-differ, method-hidden
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def get_tag_result(texts: List[List[str]], labels: List[List[str]]):
return [
list(zip(doc_texts, doc_labels))
for doc_texts, doc_labels in zip(texts, labels)
]
def format_json_tag_result_as_json(tag_result: dict) -> str:
return json.dumps(tag_result, indent=2, cls=CustomJsonEncoder)
def format_list_tag_result_as_json(
tag_result: Iterable[List[Tuple[str, str]]],
texts: np.array = None,
features: np.array = None,
model_name: str = None) -> str:
output_props = {
'model': model_name,
'texts': np.array(texts).tolist(),
'features': np.array(features).tolist() if features is not None else None,
'annotations': list(tag_result)
}
return json.dumps(output_props, indent=2, cls=CustomJsonEncoder)
def iter_to_data_lines(
features: np.array,
annotations: Iterable[List[Tuple[str, str]]]
) -> Iterable[str]:
    for document_index, (line_annotations, line_features) in enumerate(
        zip(annotations, features.tolist())
    ):
        if document_index > 0:
            yield ''  # blank line separator
        yield from (
            ' '.join([token_annotation[0]] + list(token_features) + [token_annotation[1]])
            for token_annotation, token_features in zip(line_annotations, line_features)
)
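# Worked example (hypothetical values): each emitted line is the token text,
# its raw feature values and the predicted label, separated by single spaces:
#
#   list(iter_to_data_lines(
#       np.asarray([[['f0', 'f1'], ['f0', 'f1']]], dtype=object),
#       [[('Deep', 'B-title'), ('Learning', 'I-title')]]
#   ))
#   == ['Deep f0 f1 B-title', 'Learning f0 f1 I-title']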
def to_data_lines(*args, **kwargs) -> List[str]:
return list(iter_to_data_lines(*args, **kwargs))
def iter_format_list_tag_result_as_data(
tag_result: Iterable[List[Tuple[str, str]]],
texts: np.array = None, # pylint: disable=unused-argument
features: np.array = None,
model_name: str = None # pylint: disable=unused-argument
) -> Iterable[str]:
assert features is not None
data_text_iterable = iter_to_data_lines(
features=features,
annotations=tag_result
)
for line_index, data_text in enumerate(data_text_iterable):
if line_index > 0:
yield '\n'
yield data_text
def format_list_tag_result_as_data(*args, **kwargs) -> str:
return ''.join(iter_format_list_tag_result_as_data(*args, **kwargs))
def iter_simple_unidiff(
a, b, fromfile='', tofile='', lineterm='\n',
force_output: bool = False
) -> Iterable[str]:
if len(a) > len(b):
        # truncate the expected sequence, as the predicted sequence may have been truncated
a = a[:len(b)]
assert len(a) == len(b)
line_count = len(a)
is_diff_list = [
value_1 != value_2
for value_1, value_2 in zip(a, b)
]
LOGGER.debug('is_diff_list: %s', is_diff_list)
diff_count = sum(is_diff_list)
if not diff_count and not force_output:
return
if fromfile:
yield f'--- {fromfile}{lineterm}'
if tofile:
yield f'+++ {tofile}{lineterm}'
removed_with_prefix = f'-{diff_count}' if diff_count else '-0'
added_with_prefix = f'+{diff_count}' if diff_count else '+0'
yield f'@@ {removed_with_prefix},{line_count} {added_with_prefix},{line_count} @@{lineterm}'
for is_diff, value_1, value_2 in zip(is_diff_list, a, b):
if is_diff:
yield f'-{value_1}'
yield f'+{value_2}'
else:
yield f' {value_1}'
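# Worked example (hypothetical values): for two label sequences differing in a
# single position, iter_simple_unidiff yields a minimal unified-diff style view:
#
#   list(iter_simple_unidiff(
#       ['B-title', 'O'], ['B-title', 'B-author'],
#       fromfile='doc.expected', tofile='doc.actual'
#   ))
#   == ['--- doc.expected\n', '+++ doc.actual\n', '@@ -1,2 +1,2 @@\n',
#       ' B-title', '-O', '+B-author']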
def split_lines_with_line_feed(text: str, line_feed: str = '\n') -> List[str]:
# Note: similar to .splitlines(keepends=True), but always adds the line feed
return [
line + line_feed
for line in text.splitlines()
]
def iter_format_document_tag_result_as_data_unidiff(
document_tag_result: List[Tuple[str, str]],
document_expected_tag_result: List[Tuple[str, str]],
document_features: List[List[str]],
document_name: str
) -> Iterable[str]:
actual_data = format_list_tag_result_as_data(
[document_tag_result],
features=np.expand_dims(document_features, axis=0)
)
expected_data = format_list_tag_result_as_data(
[document_expected_tag_result],
features=np.expand_dims(document_features, axis=0)
)
LOGGER.debug('actual_data: %r', actual_data)
LOGGER.debug('expected_data: %r', expected_data)
yield from iter_simple_unidiff(
split_lines_with_line_feed(expected_data),
split_lines_with_line_feed(actual_data),
fromfile=f'{document_name}.expected',
tofile=f'{document_name}.actual'
)
def iter_format_document_list_tag_result_as_data_unidiff(
tag_result: Iterable[List[Tuple[str, str]]],
expected_tag_result: List[List[Tuple[str, str]]],
features: np.ndarray,
document_name_prefix: str
) -> Iterable[str]:
for document_index, document_tag_result in enumerate(tag_result):
yield from iter_format_document_tag_result_as_data_unidiff(
document_tag_result=document_tag_result,
document_expected_tag_result=expected_tag_result[document_index],
document_features=features[document_index],
document_name='%s%06d' % (document_name_prefix, 1 + document_index)
)
def iter_format_list_tag_result_as_data_unidiff(
tag_result: Iterable[List[Tuple[str, str]]],
expected_tag_result: List[List[Tuple[str, str]]],
texts: np.ndarray = None, # pylint: disable=unused-argument
features: np.ndarray = None,
model_name: str = None # pylint: disable=unused-argument
) -> Iterable[str]:
assert expected_tag_result
document_name_prefix = 'document_'
if model_name:
document_name_prefix = model_name + '_' + document_name_prefix
yield from iter_format_document_list_tag_result_as_data_unidiff(
tag_result=tag_result,
expected_tag_result=expected_tag_result,
features=features,
document_name_prefix=document_name_prefix
)
def iter_to_flat_text(texts: np.array) -> Iterable[str]:
for document_index, line_tokens in enumerate(texts):
if document_index > 0:
yield '\n'
yield ' '.join(line_tokens)
def iter_format_list_tag_result_as_text(
tag_result: Iterable[List[Tuple[str, str]]], # pylint: disable=unused-argument
texts: np.array = None,
features: np.array = None, # pylint: disable=unused-argument
model_name: str = None # pylint: disable=unused-argument
) -> Iterable[str]:
assert texts is not None
yield from iter_to_flat_text(texts=texts)
def get_xml_tag_for_annotation_label(annotation_label: str) -> str:
return annotation_label.replace('<', '').replace('>', '').split('-', maxsplit=1)[-1]
def iter_add_untagged_token_spans(
entity_chunks: Iterable[Tuple[str, int, int]],
token_count: int,
untagged_chunk_type: str = None
) -> Iterable[Tuple[Optional[str], int, int]]:
prev_chunk_end_excl = 0
for chunk_type, chunk_start, chunk_end in entity_chunks:
if chunk_start > prev_chunk_end_excl:
yield untagged_chunk_type, prev_chunk_end_excl, (chunk_start - 1)
yield chunk_type, chunk_start, chunk_end
prev_chunk_end_excl = chunk_end + 1
if token_count > prev_chunk_end_excl:
yield untagged_chunk_type, prev_chunk_end_excl, (token_count - 1)
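# Worked example (hypothetical values): gaps between entity chunks are filled
# with untagged_chunk_type (None by default), using inclusive token offsets:
#
#   list(iter_add_untagged_token_spans([('title', 1, 2)], token_count=5))
#   == [(None, 0, 0), ('title', 1, 2), (None, 3, 4)]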
def iter_doc_annotations_xml_text(
doc_annotations: List[Tuple[str, str]]) -> Iterable[str]:
LOGGER.debug('doc_annotations: %s', doc_annotations)
text_tokens = [token_text for token_text, _ in doc_annotations]
token_labels = [token_label for _, token_label in doc_annotations]
entity_chunks = list(iter_add_untagged_token_spans(
get_entities(token_labels),
len(token_labels)
))
LOGGER.debug('text_tokens: %s', text_tokens)
LOGGER.debug('token_labels: %s', token_labels)
LOGGER.debug('entity_chunks: %s', entity_chunks)
return '\n'.join((
(
' <{tag}>{text}</{tag}>'.format(
tag=get_xml_tag_for_annotation_label(chunk_type),
text=xml_escape(' '.join(text_tokens[chunk_start:chunk_end + 1]))
)
if chunk_type
else
' {text}'.format(
text=xml_escape(' '.join(text_tokens[chunk_start:chunk_end + 1]))
)
)
for chunk_type, chunk_start, chunk_end in entity_chunks
)) + '\n'
def iter_annotations_xml_text(
annotations: Iterable[List[Tuple[str, str]]]
) -> Iterable[str]:
for doc_index, doc_annotations in enumerate(annotations):
if doc_index > 0:
yield '\n\n'
yield ' <p>\n'
yield from iter_doc_annotations_xml_text(doc_annotations)
yield ' </p>\n'
def iter_format_list_tag_result_as_xml(
tag_result: Iterable[List[Tuple[str, str]]],
texts: np.array = None, # pylint: disable=unused-argument
features: np.array = None, # pylint: disable=unused-argument
model_name: str = None # pylint: disable=unused-argument
) -> Iterable[str]:
yield '<xml>\n'
yield from iter_annotations_xml_text(
annotations=tag_result
)
yield '</xml>'
def format_list_tag_result_as_xml(*args, **kwargs) -> str:
return ''.join(iter_format_list_tag_result_as_xml(*args, **kwargs))
def iter_format_list_tag_result_as_xml_diff(
tag_result: Iterable[List[Tuple[str, str]]],
expected_tag_result: List[List[Tuple[str, str]]],
texts: np.array = None, # pylint: disable=unused-argument
features: np.array = None, # pylint: disable=unused-argument
model_name: str = None # pylint: disable=unused-argument
) -> Iterable[str]:
assert expected_tag_result
actual_xml = format_list_tag_result_as_xml(tag_result)
expected_xml = format_list_tag_result_as_xml(expected_tag_result)
yield from difflib.ndiff(
expected_xml.splitlines(keepends=True),
actual_xml.splitlines(keepends=True)
)
def iter_format_list_tag_result(
*args,
output_format: str,
expected_tag_result: Optional[List[List[Tuple[str, str]]]] = None,
**kwargs) -> Iterable[str]:
if output_format == TagOutputFormats.JSON:
yield format_list_tag_result_as_json(*args, **kwargs)
return
if output_format == TagOutputFormats.DATA:
yield from iter_format_list_tag_result_as_data(*args, **kwargs)
return
if output_format == TagOutputFormats.DATA_UNIDIFF:
assert expected_tag_result
yield from iter_format_list_tag_result_as_data_unidiff( # type: ignore
*args,
expected_tag_result=expected_tag_result,
**kwargs
)
return
if output_format == TagOutputFormats.TEXT:
yield from iter_format_list_tag_result_as_text(*args, **kwargs)
return
if output_format == TagOutputFormats.XML:
yield from iter_format_list_tag_result_as_xml(*args, **kwargs)
return
if output_format == TagOutputFormats.XML_DIFF:
assert expected_tag_result
yield from iter_format_list_tag_result_as_xml_diff( # type: ignore
*args,
expected_tag_result=expected_tag_result,
**kwargs
)
return
raise ValueError('unrecognised output format: %s' % output_format)
def iter_format_tag_result(
tag_result: Union[dict, list, Iterable],
output_format: str,
expected_tag_result: Optional[List[List[Tuple[str, str]]]] = None,
texts: np.array = None,
features: np.array = None,
model_name: str = None) -> Iterable[str]:
if isinstance(tag_result, dict):
assert output_format == TagOutputFormats.JSON
yield format_json_tag_result_as_json(tag_result)
return
yield from iter_format_list_tag_result(
tag_result,
output_format=output_format,
expected_tag_result=expected_tag_result,
texts=texts,
features=features,
model_name=model_name
)
def format_tag_result(*args, **kwargs) -> str:
return ''.join(iter_format_tag_result(*args, **kwargs))
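A short usage sketch of the formatter entry point above; the tagged tokens are hypothetical and the whitespace of the XML output is only indicative:
# a single tagged document rendered as XML: get_entities groups the B-/I- labels
# into one 'title' chunk, which becomes a <title> element inside <p>
tag_result = [[('Deep', 'B-title'), ('Learning', 'I-title')]]
xml_output = format_tag_result(tag_result, output_format=TagOutputFormats.XML)
print(xml_output)
# <xml>
#  <p>
#   <title>Deep Learning</title>
#  </p>
# </xml>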
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/tag_formatter.py
|
tag_formatter.py
|
| 0.717309 | 0.271206 |
import logging
import json
import os
from datetime import datetime
from abc import ABC
from typing import Callable, Dict
import joblib
from delft.sequenceLabelling.models import Model
from delft.sequenceLabelling.preprocess import (
FeaturesPreprocessor as DelftFeaturesPreprocessor,
WordPreprocessor as DelftWordPreprocessor
)
from sciencebeam_trainer_delft.utils.typing import T, U, V
from sciencebeam_trainer_delft.utils.cloud_support import auto_upload_from_local_file
from sciencebeam_trainer_delft.utils.io import open_file, write_text, read_text
from sciencebeam_trainer_delft.utils.json import to_json, from_json
from sciencebeam_trainer_delft.sequence_labelling.config import ModelConfig
from sciencebeam_trainer_delft.sequence_labelling.preprocess import (
T_FeaturesPreprocessor
)
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.sequence_labelling.tools.install_models import (
copy_directory_with_source_meta
)
LOGGER = logging.getLogger(__name__)
class _BaseModelSaverLoader(ABC):
config_file = 'config.json'
weight_file = 'model_weights.hdf5'
preprocessor_pickle_file = 'preprocessor.pkl'
preprocessor_json_file = 'preprocessor.json'
meta_file = 'meta.json'
def _convert_keys(
d: Dict[T, V],
convert_fn: Callable[[T], U]
) -> Dict[U, V]:
return {
convert_fn(key): value
for key, value in d.items()
}
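# Worked example (hypothetical values): JSON object keys are always strings, so
# integer feature indices are converted back to int after deserialisation:
#
#   _convert_keys({'1': 0, '7': 2}, int) == {1: 0, 7: 2}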
def get_feature_preprocessor_json(
feature_preprocessor: T_FeaturesPreprocessor) -> dict:
if not isinstance(feature_preprocessor, DelftFeaturesPreprocessor):
return feature_preprocessor.__getstate__()
feature_preprocessor_dict = vars(feature_preprocessor).copy()
feature_preprocessor_dict['features_map_to_index'] = _convert_keys(
feature_preprocessor_dict['features_map_to_index'],
str
)
return feature_preprocessor_dict
def get_preprocessor_json(preprocessor: DelftWordPreprocessor) -> dict:
if type(preprocessor) != DelftWordPreprocessor: # pylint: disable=unidiomatic-typecheck
return to_json(preprocessor)
preprocessor_dict = vars(preprocessor).copy()
feature_preprocessor = preprocessor_dict.get('feature_preprocessor')
if feature_preprocessor:
if type(feature_preprocessor) != DelftFeaturesPreprocessor: # noqa pylint: disable=unidiomatic-typecheck
return to_json(preprocessor)
preprocessor_dict['feature_preprocessor'] = get_feature_preprocessor_json(
feature_preprocessor
)
return to_json(preprocessor_dict, plain_json=True)
def get_feature_preprocessor_for_json(feature_preprocessor_json: dict) -> T_FeaturesPreprocessor:
if not feature_preprocessor_json:
return None
LOGGER.debug('feature_preprocessor_json: %s', feature_preprocessor_json)
feature_preprocessor = from_json(feature_preprocessor_json, DelftFeaturesPreprocessor)
if isinstance(feature_preprocessor, DelftFeaturesPreprocessor):
if isinstance(feature_preprocessor.features_map_to_index, dict):
# features_map_to_index is initialized as a list (but then used as a dict)
feature_preprocessor.features_map_to_index = _convert_keys(
feature_preprocessor.features_map_to_index,
int
)
return feature_preprocessor
def get_preprocessor_for_json(preprocessor_json: dict) -> DelftWordPreprocessor:
preprocessor = from_json(preprocessor_json, DelftWordPreprocessor)
LOGGER.debug('preprocessor type: %s', type(preprocessor))
if isinstance(preprocessor, str):
LOGGER.debug('preprocessor: %r', preprocessor)
if isinstance(preprocessor.feature_preprocessor, dict):
preprocessor.feature_preprocessor = get_feature_preprocessor_for_json(
preprocessor.feature_preprocessor
)
return preprocessor
class ModelSaver(_BaseModelSaverLoader):
def __init__(
self,
preprocessor: DelftWordPreprocessor,
model_config: ModelConfig):
self.preprocessor = preprocessor
self.model_config = model_config
def _save_preprocessor_json(self, preprocessor: DelftWordPreprocessor, filepath: str):
write_text(
filepath,
json.dumps(get_preprocessor_json(preprocessor), sort_keys=False, indent=4)
)
LOGGER.info('preprocessor json saved to %s', filepath)
def _save_preprocessor_pickle(self, preprocessor: DelftWordPreprocessor, filepath: str):
with open_file(filepath, 'wb') as fp:
joblib.dump(preprocessor, fp)
LOGGER.info('preprocessor pickle saved to %s', filepath)
def _save_model_config(self, model_config: ModelConfig, filepath: str):
LOGGER.debug('model_config: %s', model_config)
with open_file(filepath, 'w') as fp:
model_config.save(fp)
LOGGER.info('model config file saved to %s', filepath)
def _save_model(self, model: Model, filepath: str):
with auto_upload_from_local_file(filepath) as local_filepath:
model.save(local_filepath)
LOGGER.info('model saved to %s', filepath)
def _save_meta(self, meta: dict, filepath: str):
with open_file(filepath, 'w') as fp:
json.dump(meta, fp, sort_keys=False, indent=4)
LOGGER.info('model meta saved to %s', filepath)
def _update_checkpoints_meta_file(self, filepath: str, checkpoint_directory: str, epoch: int):
try:
with open_file(filepath, 'r') as fp:
meta = json.load(fp)
except FileNotFoundError:
meta = {}
checkpoint_meta = {
'epoch': (1 + epoch),
'path': checkpoint_directory,
'timestamp': datetime.utcnow().isoformat()
}
meta['checkpoints'] = meta.get('checkpoints', [])
meta['checkpoints'].append(checkpoint_meta)
meta['last_checkpoint'] = checkpoint_meta
with open_file(filepath, 'w') as fp:
json.dump(meta, fp, sort_keys=False, indent=4)
LOGGER.info('updated checkpoints meta: %s', filepath)
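    # Added illustration; not in the original source. After two saved epochs,
    # the checkpoints.json maintained by _update_checkpoints_meta_file above
    # would look roughly like this (paths and timestamps are made up):
    # {
    #     "checkpoints": [
    #         {"epoch": 1, "path": ".../epoch-00001", "timestamp": "2021-01-01T00:00:00"},
    #         {"epoch": 2, "path": ".../epoch-00002", "timestamp": "2021-01-01T01:00:00"}
    #     ],
    #     "last_checkpoint": {"epoch": 2, "path": ".../epoch-00002", "timestamp": "2021-01-01T01:00:00"}
    # }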
def save_to(self, directory: str, model: Model, meta: dict = None):
os.makedirs(directory, exist_ok=True)
self._save_preprocessor_json(
self.preprocessor, os.path.join(directory, self.preprocessor_json_file)
)
self._save_preprocessor_pickle(
self.preprocessor, os.path.join(directory, self.preprocessor_pickle_file)
)
self._save_model_config(self.model_config, os.path.join(directory, self.config_file))
self._save_model(model, os.path.join(directory, self.weight_file))
if meta:
self._save_meta(meta, os.path.join(directory, self.meta_file))
def add_checkpoint_meta(self, checkpoint_directory: str, epoch: int):
self._update_checkpoints_meta_file(
os.path.join(os.path.dirname(checkpoint_directory), 'checkpoints.json'),
checkpoint_directory=checkpoint_directory,
epoch=epoch
)
class ModelLoader(_BaseModelSaverLoader):
def __init__(
self,
download_manager: DownloadManager = None):
if download_manager is None:
download_manager = DownloadManager()
self.download_manager = download_manager
def download_model(self, dir_path: str) -> str:
if not dir_path.endswith('.tar.gz'):
return dir_path
local_dir_path = str(self.download_manager.get_local_file(
dir_path, auto_uncompress=False
)).replace('.tar.gz', '')
copy_directory_with_source_meta(dir_path, local_dir_path)
return local_dir_path
def load_preprocessor_from_directory(self, directory: str):
try:
return self.load_preprocessor_from_json_file(
os.path.join(directory, self.preprocessor_json_file)
)
except FileNotFoundError:
LOGGER.info('preprocessor json not found, falling back to pickle')
return self.load_preprocessor_from_pickle_file(
os.path.join(directory, self.preprocessor_pickle_file)
)
def load_preprocessor_from_pickle_file(self, filepath: str):
LOGGER.info('loading preprocessor pickle from %s', filepath)
with open_file(filepath, 'rb') as fp:
return joblib.load(fp)
def load_preprocessor_from_json_file(self, filepath: str):
LOGGER.info('loading preprocessor json from %s', filepath)
return get_preprocessor_for_json(json.loads(
read_text(filepath)
))
def load_model_config_from_directory(self, directory: str):
return self.load_model_config_from_file(os.path.join(directory, self.config_file))
def load_model_config_from_file(self, filepath: str):
LOGGER.info('loading model config from %s', filepath)
with open_file(filepath, 'r') as fp:
return ModelConfig.load(fp)
def load_model_from_directory(self, directory: str, model: Model):
return self.load_model_from_file(
os.path.join(directory, self.weight_file),
model=model
)
def load_model_from_file(self, filepath: str, model: Model):
LOGGER.info('loading model from %s', filepath)
# we need a seekable file, ensure we download the file first
local_filepath = self.download_manager.download_if_url(filepath)
# using load_weights to avoid print statement in load method
model.model.load_weights(local_filepath)
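# --- Added illustration; not part of the original module ---
# A hedged sketch of loading a saved model back, mirroring how
# TransferLearningSource.from_config uses this loader elsewhere in the
# package. The directory path is hypothetical; get_model would come from
# sciencebeam_trainer_delft.sequence_labelling.models.
#
#     loader = ModelLoader()
#     directory = loader.download_model('/path/to/saved-model')  # or a .tar.gz URL
#     model_config = loader.load_model_config_from_directory(directory)
#     preprocessor = loader.load_preprocessor_from_directory(directory)
#     model = get_model(model_config, preprocessor, ntags=len(preprocessor.vocab_tag))
#     loader.load_model_from_directory(directory, model)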
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/saving.py
|
saving.py
|
| 0.613468 | 0.077832 |
import logging
import itertools
from functools import partial
from typing import Any, Dict, List, Iterable, Set, Tuple, Union
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import FunctionTransformer
from delft.sequenceLabelling.preprocess import (
FeaturesPreprocessor as DelftFeaturesPreprocessor,
WordPreprocessor as DelftWordPreprocessor,
PAD,
UNK
)
from sciencebeam_trainer_delft.utils.typing import T
from sciencebeam_trainer_delft.utils.progress_logger import logging_tqdm
import sciencebeam_trainer_delft.utils.compat.sklearn # noqa pylint: disable=unused-import
LOGGER = logging.getLogger(__name__)
def to_dict(
value_list_batch: List[list],
feature_indices: Set[int] = None,
exclude_features_indices: Set[int] = None
) -> Iterable[dict]:
# Note: keeping `feature_indices` name for pickle compatibility
# (also matches upstream for `to_dict`)
return (
{
index: value
for index, value in enumerate(value_list)
if (
(not feature_indices or index in feature_indices)
and (not exclude_features_indices or index not in exclude_features_indices)
)
}
for value_list in value_list_batch
)
def to_float_features(
value_list_batch: List[list],
features_indices: Set[int]
) -> Iterable[List[float]]:
return (
[
float(value)
for index, value in enumerate(value_list)
if index in features_indices
]
for value_list in value_list_batch
)
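# Added illustration; not in the original source. For a single token whose raw
# feature list is ['Word', 'INITCAP', '0.25']:
#   to_dict([['Word', 'INITCAP', '0.25']], exclude_features_indices={2})
#       yields {0: 'Word', 1: 'INITCAP'}
#   to_float_features([['Word', 'INITCAP', '0.25']], features_indices={2})
#       yields [0.25]
# i.e. discrete features feed the DictVectorizer branch and continuous
# features feed the min/max-scaled branch of the pipeline built below.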
def faster_preprocessor_fit(self: DelftWordPreprocessor, X, y):
chars = {PAD: 0, UNK: 1}
tags = {PAD: 0}
if self.use_char_feature:
temp_chars = {
c
for w in set(itertools.chain(*X))
for c in w
}
sorted_chars = sorted(temp_chars)
sorted_chars_dict = {
c: idx + 2
for idx, c in enumerate(sorted_chars)
}
chars = {**chars, **sorted_chars_dict}
temp_tags = set(itertools.chain(*y))
sorted_tags = sorted(temp_tags)
sorted_tags_dict = {
tag: idx + 1
for idx, tag in enumerate(sorted_tags)
}
tags = {**tags, **sorted_tags_dict}
self.vocab_char = chars
self.vocab_tag = tags
class WordPreprocessor(DelftWordPreprocessor):
# keeping class for pickle compatibility
pass
def iter_batch(iterable: Iterable[T], n: int = 1) -> Iterable[List[T]]:
batch = []
for item in iterable:
batch.append(item)
if len(batch) >= n:
yield batch
batch = []
if batch:
yield batch
class IterableMinMaxScaler(MinMaxScaler):
    def fit(self, X, y=None):
        batch_size = 1000
        for batch in iter_batch(X, batch_size):
            self.partial_fit(batch)
        # follow the scikit-learn convention of returning the fitted estimator
        return self
def transform(self, X):
return super().transform(list(X))
STATE_ATTRIBUTE_NAMES_BY_TYPE = {
DictVectorizer: ['feature_names_', 'vocabulary_'],
StandardScaler: ['scale_', 'mean_', 'var_', 'n_samples_seen_'],
MinMaxScaler: ['min_', 'scale_', 'data_min_', 'data_max_', 'data_range_', 'n_samples_seen_']
}
STATE_ATTRIBUTE_NAMES_BY_TYPE[IterableMinMaxScaler] = STATE_ATTRIBUTE_NAMES_BY_TYPE[MinMaxScaler]
def _iter_nested_pipeline_steps(steps: List[Tuple[str, Any]]) -> Iterable[Tuple[str, Any]]:
for step_name, step_value in steps:
yield step_name, step_value
if isinstance(step_value, Pipeline):
yield from _iter_nested_pipeline_steps(step_value.steps)
if isinstance(step_value, FeatureUnion):
yield from _iter_nested_pipeline_steps(step_value.transformer_list)
continue
def _find_step_by_name(steps: List[Tuple[str, Any]], name: str):
for step_name, step_value in _iter_nested_pipeline_steps(steps):
if step_name == name:
return step_value
raise ValueError(f'step with name {repr(name)} not found')
def _get_dict_vectorizer_state(vectorizer: DictVectorizer) -> dict:
return {
'vectorizer.feature_names': vectorizer.feature_names_,
'vectorizer.vocabulary': vectorizer.vocabulary_
}
def _get_attributes_state(obj, attribute_names: List[str]) -> dict:
result = {}
for attribute_name in attribute_names:
value = getattr(obj, attribute_name)
if isinstance(value, np.ndarray):
result[attribute_name] = value.tolist()
result[attribute_name + '.is_numpy'] = True
else:
result[attribute_name] = value
return result
def _restore_attributes_state(obj, state: Dict[str, Any]):
for attribute_name, value in state.items():
if '.' in attribute_name:
continue
if state.get(attribute_name + '.is_numpy'):
value = np.asarray(value)
setattr(obj, attribute_name, value)
def _get_pipeline_steps_state(steps: List[Tuple[str, Any]]) -> dict:
result = {}
for step_name, step_value in _iter_nested_pipeline_steps(steps):
state_attribute_names = STATE_ATTRIBUTE_NAMES_BY_TYPE.get(type(step_value))
if not state_attribute_names:
continue
result[step_name] = _get_attributes_state(step_value, state_attribute_names)
return result
def _restore_pipeline_steps_state(steps: List[Tuple[str, Any]], state: dict):
for step_name, step_value in _iter_nested_pipeline_steps(steps):
step_state = state.get(step_name)
if not step_state:
continue
_restore_attributes_state(step_value, step_state)
def _fit_transformer_with_progress_logging(
transformer: TransformerMixin,
X,
logger: logging.Logger,
message_prefix: str,
unit: str,
message_suffx: str = ': '
):
if isinstance(transformer, Pipeline):
steps = transformer.steps
if len(steps) == 1 and isinstance(steps[0][1], FeatureUnion):
feature_union = steps[0][1]
for name, union_transformer in feature_union.transformer_list:
X = logging_tqdm(
iterable=X,
logger=logger,
desc=f'{message_prefix}.{name}{message_suffx}',
unit=unit
)
union_transformer.fit(X)
return
X = logging_tqdm(iterable=X, logger=logger, desc=message_prefix + message_suffx, unit=unit)
transformer.fit(X)
class FeaturesPreprocessor(BaseEstimator, TransformerMixin):
def __init__(
self,
features_indices: Iterable[int] = None,
continuous_features_indices: Iterable[int] = None
):
self.features_indices = features_indices
self.continuous_features_indices = continuous_features_indices
self.features_map_to_index = None
self.pipeline = FeaturesPreprocessor._create_pipeline(
features_indices=features_indices,
continuous_features_indices=continuous_features_indices
)
@staticmethod
def _create_pipeline(
features_indices: Iterable[int] = None,
continuous_features_indices: Iterable[int] = None
):
features_indices_set = None
if features_indices:
features_indices_set = set(features_indices)
continuous_features_indices_set = set(
continuous_features_indices or []
)
to_dict_fn = partial(
to_dict,
feature_indices=features_indices_set,
exclude_features_indices=continuous_features_indices_set
)
pipeline = Pipeline(steps=[
('to_dict', FunctionTransformer(to_dict_fn, validate=False)),
('vectorize', DictVectorizer(sparse=False))
])
if continuous_features_indices_set:
to_float_features_fn = partial(
to_float_features,
features_indices=continuous_features_indices_set
)
continuous_features_pipeline = Pipeline(steps=[
('to_float_features', FunctionTransformer(to_float_features_fn, validate=False)),
('min_max_scalar', IterableMinMaxScaler()),
])
pipeline = Pipeline(steps=[
('union', FeatureUnion([
('continuous', continuous_features_pipeline),
('discreet', pipeline)
]))
])
LOGGER.info('pipeline=%s', pipeline)
return pipeline
@property
def vectorizer(self) -> DictVectorizer:
return _find_step_by_name(self.pipeline.steps, 'vectorize')
@property
def standard_scalar(self) -> StandardScaler:
return _find_step_by_name(self.pipeline.steps, 'standard_scalar')
def __getstate__(self):
return {
**_get_pipeline_steps_state(self.pipeline.steps),
'features_indices': self.features_indices,
'continuous_features_indices': self.continuous_features_indices
}
def __setstate__(self, state):
try:
if 'pipeline' in state:
# original pickle
return super().__setstate__(state)
self.features_indices = state['features_indices']
self.continuous_features_indices = state.get('continuous_features_indices')
self.pipeline = FeaturesPreprocessor._create_pipeline(
features_indices=self.features_indices,
continuous_features_indices=self.continuous_features_indices
)
_restore_pipeline_steps_state(self.pipeline.steps, state)
vectorizer_feature_names = state.get('vectorizer.feature_names')
vectorizer_vocabulary = state.get('vectorizer.vocabulary')
if vectorizer_feature_names is not None:
# restore deprecated state
vectorizer = self.vectorizer
vectorizer.feature_names_ = vectorizer_feature_names
vectorizer.vocabulary_ = vectorizer_vocabulary
except KeyError as exc:
raise KeyError('%r: found %s' % (exc, state.keys())) from exc
return self
def fit(self, X):
flattened_features = [
word_features
for sentence_features in X
for word_features in sentence_features
]
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('flattened_features: %s', flattened_features)
_fit_transformer_with_progress_logging(
self.pipeline,
flattened_features,
logger=LOGGER,
message_prefix='FeaturesPreprocessor.fit',
unit='token-features'
)
# self.pipeline.fit(flattened_features)
vectorizer = self.vectorizer
LOGGER.info('vectorizer.feature_names: %r', vectorizer.feature_names_)
LOGGER.info('vectorizer.vocabulary size: %r', len(vectorizer.vocabulary_))
return self
def transform(self, X, **_):
LOGGER.debug('transform, X: %s', X)
return np.asarray([
self.pipeline.transform(sentence_features)
for sentence_features in X
])
T_FeaturesPreprocessor = Union[FeaturesPreprocessor, DelftFeaturesPreprocessor]
class Preprocessor(WordPreprocessor):
# keeping class for pickle compatibility
pass
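if __name__ == '__main__':
    # Added demo; not part of the original module. A minimal, hedged sketch of
    # FeaturesPreprocessor usage: feature index 1 is treated as a discrete
    # feature and index 2 as a continuous one; the token features are made up.
    example_sentences = [
        [['Word', 'INITCAP', '0.1'], ['two', 'NOCAPS', '0.9']],
        [['Three', 'INITCAP', '0.5'], ['four', 'NOCAPS', '0.7']]
    ]
    example_preprocessor = FeaturesPreprocessor(
        features_indices=[1],
        continuous_features_indices=[2]
    )
    example_preprocessor.fit(example_sentences)
    # each sentence becomes a (num_tokens, num_feature_columns) array
    print(example_preprocessor.transform(example_sentences))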
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/preprocess.py
|
preprocess.py
|
| 0.742795 | 0.309245 |
import argparse
import logging
from typing import Dict, List, Optional, NamedTuple
import keras
import numpy as np
from delft.sequenceLabelling.preprocess import WordPreprocessor
from delft.sequenceLabelling.models import BaseModel
from sciencebeam_trainer_delft.utils.misc import (
parse_comma_separated_str,
parse_dict
)
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.sequence_labelling.saving import ModelLoader
from sciencebeam_trainer_delft.sequence_labelling.models import (
get_model
)
LOGGER = logging.getLogger(__name__)
class TransferLearningConfig(NamedTuple):
source_model_path: Optional[str] = None
copy_layers: Optional[Dict[str, str]] = None
copy_preprocessor: bool = False
copy_preprocessor_fields: Optional[List[str]] = None
freeze_layers: Optional[List[str]] = None
class TransferModelWrapper:
def __init__(self, model: BaseModel):
self.model = model
self.keras_model: keras.Model = model.model
self.keras_layers_by_name: Dict[str, keras.layers.Layer] = {
layer.name: layer
for layer in self.keras_model.layers
}
self.layer_names = set(self.keras_layers_by_name.keys())
def get_layer_weights(self, layer_name: str) -> List[np.ndarray]:
return self.keras_layers_by_name[layer_name].get_weights()
def set_layer_weights(self, layer_name: str, weights: List[np.ndarray]):
LOGGER.info('setting weights of layer: %r', layer_name)
LOGGER.debug('setting weights of layer %r to:\n%s', layer_name, weights)
self.keras_layers_by_name[layer_name].set_weights(weights)
def freeze_layer(self, layer_name: str):
LOGGER.info('freezing layer: %r', layer_name)
self.keras_layers_by_name[layer_name].trainable = False
class TransferLearningSource:
def __init__(
self,
transfer_learning_config: TransferLearningConfig,
source_model: BaseModel,
source_preprocessor: WordPreprocessor
):
self.transfer_learning_config = transfer_learning_config
self.source_model = source_model
self.source_preprocessor = source_preprocessor
@staticmethod
def from_config(
transfer_learning_config: Optional[TransferLearningConfig],
download_manager: DownloadManager = None
) -> Optional['TransferLearningSource']:
if not transfer_learning_config:
LOGGER.info('no transfer learning config specified')
return None
if not transfer_learning_config.source_model_path:
LOGGER.info('no transfer learning source model specified')
return None
LOGGER.info('transfer learning config: %s', transfer_learning_config)
model_loader = ModelLoader(download_manager=download_manager)
directory = model_loader.download_model(transfer_learning_config.source_model_path)
source_model_config = model_loader.load_model_config_from_directory(directory)
source_preprocessor = model_loader.load_preprocessor_from_directory(directory)
source_model: BaseModel = get_model(
source_model_config,
source_preprocessor,
ntags=len(source_preprocessor.vocab_tag)
)
model_loader.load_model_from_directory(directory, source_model)
return TransferLearningSource(
transfer_learning_config=transfer_learning_config,
source_model=source_model,
source_preprocessor=source_preprocessor
)
def copy_preprocessor_if_enabled(self) -> Optional[WordPreprocessor]:
if self.transfer_learning_config.copy_preprocessor:
LOGGER.info('copying preprocessor')
return self.source_preprocessor
return None
def apply_preprocessor(self, target_preprocessor: WordPreprocessor):
if not self.transfer_learning_config.copy_preprocessor_fields:
LOGGER.info('no transfer learning preprocessor fields specified')
return
for field_name in self.transfer_learning_config.copy_preprocessor_fields:
LOGGER.info('copying preprocessor field: %r', field_name)
value = getattr(self.source_preprocessor, field_name)
setattr(target_preprocessor, field_name, value)
def apply_weights(self, target_model: BaseModel):
if not self.transfer_learning_config.copy_layers:
LOGGER.info('no transfer learning source layers specified')
return
wrapped_source_model = TransferModelWrapper(self.source_model)
wrapped_target_model = TransferModelWrapper(target_model)
copy_layers_map = self.transfer_learning_config.copy_layers
requested_target_layers = copy_layers_map.keys()
requested_source_layers = copy_layers_map.values()
missing_source_layers = (
set(requested_source_layers) - set(wrapped_source_model.layer_names)
)
if missing_source_layers:
raise ValueError('missing source layers for transfer learning: %s (available: %s)' % (
missing_source_layers, wrapped_source_model.layer_names
))
missing_target_layers = (
set(requested_target_layers) - set(wrapped_target_model.layer_names)
)
if missing_target_layers:
raise ValueError('missing target layers for transfer learning: %s (available: %s)' % (
missing_target_layers, wrapped_target_model.layer_names
))
for target_layer_name, source_layer_name in copy_layers_map.items():
LOGGER.info('copying layer weights: %r -> %r', source_layer_name, target_layer_name)
try:
wrapped_target_model.set_layer_weights(
target_layer_name,
wrapped_source_model.get_layer_weights(source_layer_name)
)
except Exception as exc:
raise RuntimeError(
'failed to copy layer weights (%r -> %r) due to %r' % (
source_layer_name, target_layer_name, exc
)
) from exc
def freeze_model_layers(target_model: BaseModel, layers: Optional[List[str]]):
if not layers:
return
wrapped_target_model = TransferModelWrapper(target_model)
for layer_name in layers:
wrapped_target_model.freeze_layer(layer_name)
def add_transfer_learning_arguments(parser: argparse.ArgumentParser):
parser.add_argument(
'--transfer-source-model-path',
type=str,
        help='path to the model that learned layers or parameters should be transferred from'
)
parser.add_argument(
'--transfer-copy-layers',
type=parse_dict,
help='the layers to transfer (mapping from target to source)'
)
parser.add_argument(
'--transfer-copy-preprocessor',
action='store_true',
default=False,
help='copy the whole preprocessor'
)
parser.add_argument(
'--transfer-copy-preprocessor-fields',
type=parse_comma_separated_str,
help='the preprocessor fields to transfer (e.g. "vocab_char")'
)
parser.add_argument(
'--transfer-freeze-layers',
type=parse_comma_separated_str,
help='the layers to freeze'
)
def get_transfer_learning_config_for_parsed_args(
args: argparse.Namespace
) -> TransferLearningConfig:
return TransferLearningConfig(
source_model_path=args.transfer_source_model_path,
copy_layers=args.transfer_copy_layers,
copy_preprocessor=args.transfer_copy_preprocessor,
copy_preprocessor_fields=args.transfer_copy_preprocessor_fields,
freeze_layers=args.transfer_freeze_layers
)
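# --- Added illustration; not part of the original module ---
# A hedged sketch of wiring these arguments into a parser. The flag values
# are invented (including the layer name), and the exact key/value syntax
# accepted by parse_dict for --transfer-copy-layers is defined in
# utils.misc and is not shown here:
#
#     parser = argparse.ArgumentParser()
#     add_transfer_learning_arguments(parser)
#     args = parser.parse_args([
#         '--transfer-source-model-path=/path/to/source-model',
#         '--transfer-copy-preprocessor-fields=vocab_char',
#         '--transfer-freeze-layers=char_embeddings'
#     ])
#     transfer_learning_config = get_transfer_learning_config_for_parsed_args(args)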
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/transfer_learning.py
|
transfer_learning.py
|
| 0.870721 | 0.190122 |
import logging
import re
from itertools import islice
from typing import Iterable, List, Tuple
import numpy as np
from delft.sequenceLabelling.reader import _translate_tags_grobid_to_IOB
LOGGER = logging.getLogger(__name__)
# partially copied from delft/sequenceLabelling/reader.py
def iter_load_data_and_labels_crf_lines(
lines: Iterable[str]
) -> Iterable[Tuple[List[str], List[str], List[List[str]]]]:
tokens: List[str] = []
tags: List[str] = []
features: List[List[str]] = []
for line in lines:
line = line.strip()
LOGGER.debug('line: %s', line)
if not line:
if tokens:
yield tokens, tags, features
tokens, tags, features = [], [], []
else:
pieces = re.split(' |\t', line)
token = pieces[0]
tag = pieces[len(pieces)-1]
localFeatures = pieces[1:len(pieces)-1]
tokens.append(token)
tags.append(_translate_tags_grobid_to_IOB(tag))
features.append(localFeatures)
if tokens:
yield tokens, tags, features
def iter_load_data_crf_lines(
lines: Iterable[str]
) -> Iterable[Tuple[List[str], List[List[str]]]]:
tokens: List[str] = []
features: List[List[str]] = []
for line in lines:
line = line.strip()
LOGGER.debug('line: %s', line)
if not line:
if tokens:
yield tokens, features
tokens, features = [], []
else:
pieces = re.split(' |\t', line)
token = pieces[0]
localFeatures = pieces[1:]
tokens.append(token)
features.append(localFeatures)
if tokens:
yield tokens, features
def load_data_and_labels_crf_lines(
lines: Iterable[str],
limit: int = None) -> Tuple[np.array, np.array, np.array]:
"""
    Load data, features and labels from CRF matrix lines.
    The format is as follows:
token_0 f0_0 f0_1 ... f0_n label_0
token_1 f1_0 f1_1 ... f1_n label_1
...
token_m fm_0 fm_1 ... fm_n label_m
field separator can be either space or tab
Returns:
tuple(numpy array, numpy array, numpy array): tokens, labels, features
"""
sents = []
labels = []
featureSets = []
documents = iter_load_data_and_labels_crf_lines(lines)
if limit:
LOGGER.info('limiting training data to: %s', limit)
documents = islice(documents, limit)
for tokens, tags, features in documents:
sents.append(tokens)
labels.append(tags)
featureSets.append(features)
# specifying dtype object can significantly reduce the memory consumption
# e.g. for features it could be 20 MB instead of 1 GB
return (
np.asarray(sents, dtype='object'),
np.asarray(labels, dtype='object'),
np.asarray(featureSets, dtype='object')
)
def load_data_crf_lines(
lines: Iterable[str],
limit: int = None) -> Tuple[np.array, np.array]:
"""
    Load data and features (no labels) from CRF matrix lines.
    The format is as follows:
token_0 f0_0 f0_1 ... f0_n
token_1 f1_0 f1_1 ... f1_n
...
token_m fm_0 fm_1 ... fm_n
field separator can be either space or tab
Returns:
tuple(numpy array, numpy array): tokens, features
"""
sents = []
featureSets = []
documents = iter_load_data_crf_lines(lines)
if limit:
LOGGER.info('limiting training data to: %s', limit)
documents = islice(documents, limit)
for tokens, features in documents:
sents.append(tokens)
featureSets.append(features)
# specifying dtype object can significantly reduce the memory consumption
# e.g. for features it could be 20 MB instead of 1 GB
return (
np.asarray(sents, dtype='object'),
np.asarray(featureSets, dtype='object')
)
def load_data_and_labels_crf_file(
filepath: str,
limit: int = None) -> Tuple[np.array, np.array, np.array]:
try:
with open(filepath, 'r', encoding='utf-8') as fp:
return load_data_and_labels_crf_lines(fp, limit=limit)
except Exception as exc:
raise RuntimeError('failed to read file %r' % filepath) from exc
def load_data_crf_string(
crf_string: str,
limit: int = None) -> Tuple[np.array, np.array]:
return load_data_crf_lines(crf_string.splitlines(), limit=limit)
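if __name__ == '__main__':
    # Added demo; not part of the original module. Parses a tiny, made-up CRF
    # matrix string in the space-separated format described above (no labels).
    example_crf_string = '\n'.join([
        'Deep LINESTART INITCAP',
        'learning LINEIN NOCAPS'
    ])
    example_tokens, example_features = load_data_crf_string(example_crf_string)
    print(example_tokens)    # tokens grouped per document
    print(example_features)  # feature lists per token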
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/reader.py
|
reader.py
|
| 0.598077 | 0.345436 |
import logging
import os
from typing import NamedTuple, Optional
import numpy as np
from delft.sequenceLabelling.evaluation import (
f1_score,
accuracy_score,
precision_score,
recall_score
)
from delft.sequenceLabelling.trainer import Trainer as _Trainer
from delft.sequenceLabelling.trainer import Scorer as _Scorer
from delft.sequenceLabelling.models import BaseModel
from sciencebeam_trainer_delft.sequence_labelling.utils.types import (
T_Batch_Tokens,
T_Batch_Features,
T_Batch_Labels
)
from sciencebeam_trainer_delft.utils.keras.callbacks import ResumableEarlyStopping
from sciencebeam_trainer_delft.sequence_labelling.evaluation import classification_report
from sciencebeam_trainer_delft.sequence_labelling.config import TrainingConfig
from sciencebeam_trainer_delft.sequence_labelling.data_generator import DataGenerator
from sciencebeam_trainer_delft.sequence_labelling.callbacks import ModelWithMetadataCheckpoint
from sciencebeam_trainer_delft.sequence_labelling.saving import ModelSaver
LOGGER = logging.getLogger(__name__)
def get_callbacks(
model_saver: ModelSaver,
log_dir: str = None,
log_period: int = 1,
valid: tuple = (),
early_stopping: bool = True,
early_stopping_patience: int = 5,
initial_meta: Optional[dict] = None,
meta: dict = None
):
"""
Get callbacks.
Args:
log_dir (str): the destination to save logs
valid (tuple): data for validation.
early_stopping (bool): whether to use early stopping.
Returns:
list: list of callbacks
"""
callbacks = []
if valid:
callbacks.append(Scorer(*valid)) # pylint: disable=no-value-for-parameter
if early_stopping:
# Note: ensure we are not restoring weights
# as that would affect saving the model.
        # The saving checkpoint needs to be last,
        # in order to save the state metadata of this checkpoint.
callbacks.append(ResumableEarlyStopping(
initial_meta=initial_meta,
monitor='f1',
patience=early_stopping_patience,
mode='max',
restore_best_weights=False
))
if log_dir:
epoch_dirname = 'epoch-{epoch:05d}'
assert model_saver
save_callback = ModelWithMetadataCheckpoint(
os.path.join(log_dir, epoch_dirname),
period=log_period,
model_saver=model_saver,
monitor='f1',
meta=meta
)
callbacks.append(save_callback)
return callbacks
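# --- Added illustration; not part of the original module ---
# A hedged sketch of a typical get_callbacks call during training. The values
# are invented; `valid` is passed straight through to the Scorer below, so its
# exact shape is whatever delft's Scorer constructor expects.
#
#     callbacks = get_callbacks(
#         model_saver=model_saver,
#         log_dir='data/checkpoints/my-model',
#         log_period=1,
#         valid=valid,
#         early_stopping=True,
#         early_stopping_patience=5,
#         meta={'model_path': 'data/models/my-model'}
#     )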
class PredictedResults(NamedTuple):
y_pred: T_Batch_Labels
y_true: T_Batch_Labels
def get_model_results(model, valid_batches: list, preprocessor=None) -> PredictedResults:
valid_steps = len(valid_batches)
for i, (data, label) in enumerate(valid_batches):
if i == valid_steps:
break
y_true_batch = label
y_true_batch = np.argmax(y_true_batch, -1)
sequence_lengths = data[-1] # shape of (batch_size, 1)
sequence_lengths = np.reshape(sequence_lengths, (-1,))
y_pred_batch = model.predict_on_batch(data)
y_pred_batch = np.argmax(y_pred_batch, -1)
y_pred_batch = [
preprocessor.inverse_transform(y[:l]) for y, l in zip(y_pred_batch, sequence_lengths)
]
y_true_batch = [
preprocessor.inverse_transform(y[:l]) for y, l in zip(y_true_batch, sequence_lengths)
]
if i == 0:
y_pred = y_pred_batch
y_true = y_true_batch
else:
y_pred = y_pred + y_pred_batch
y_true = y_true + y_true_batch
return PredictedResults(y_pred=y_pred, y_true=y_true)
class Scorer(_Scorer):
def on_epoch_end(self, epoch: int, logs: dict = None):
prediction_results = get_model_results(
self.model, self.valid_batches, preprocessor=self.p
)
y_pred = prediction_results.y_pred
y_true = prediction_results.y_true
f1 = f1_score(y_true, y_pred)
print("\tf1 (micro): {:04.2f}".format(f1 * 100))
if self.evaluation:
self.accuracy = accuracy_score(y_true, y_pred)
self.precision = precision_score(y_true, y_pred)
self.recall = recall_score(y_true, y_pred)
self.report = classification_report(y_true, y_pred, digits=4)
print(self.report)
# save eval
if logs:
logs['f1'] = f1
self.f1 = f1
class Trainer(_Trainer):
def __init__(
self,
*args,
model_saver: ModelSaver,
training_config: TrainingConfig,
multiprocessing: bool = True,
**kwargs):
self.model_saver = model_saver
self.multiprocessing = multiprocessing
self.model: Optional[BaseModel] = None
super().__init__(*args, training_config=training_config, **kwargs)
def train( # pylint: disable=arguments-differ
self, x_train, y_train, x_valid, y_valid,
features_train: np.array = None,
features_valid: np.array = None):
assert self.model is not None
self.model.summary()
if self.model_config.use_crf:
self.model.compile(
loss=self.model.crf.loss,
optimizer='adam'
)
else:
self.model.compile(
loss='categorical_crossentropy',
optimizer='adam'
)
self.model = self.train_model(
self.model, x_train, y_train, x_valid, y_valid,
self.training_config.max_epoch,
features_train=features_train, features_valid=features_valid
)
def get_meta(self):
training_config_meta = vars(self.training_config).copy()
try:
training_config_meta.pop('initial_meta')
except KeyError:
pass
return {
'training_config': training_config_meta
}
def create_data_generator(self, *args, name_suffix: str, **kwargs) -> DataGenerator:
return DataGenerator( # type: ignore
*args,
batch_size=self.training_config.batch_size,
input_window_stride=self.training_config.input_window_stride,
stateful=self.model_config.stateful,
preprocessor=self.preprocessor,
additional_token_feature_indices=self.model_config.additional_token_feature_indices,
text_feature_indices=self.model_config.text_feature_indices,
concatenated_embeddings_token_count=(
self.model_config.concatenated_embeddings_token_count
),
char_embed_size=self.model_config.char_embedding_size,
is_deprecated_padded_batch_text_list_enabled=(
self.model_config.is_deprecated_padded_batch_text_list_enabled
),
max_sequence_length=self.model_config.max_sequence_length,
embeddings=self.embeddings,
name='%s.%s' % (self.model_config.model_name, name_suffix),
**kwargs
)
def train_model( # pylint: disable=arguments-differ
self, local_model,
x_train, y_train,
x_valid=None, y_valid=None,
max_epoch: int = 50,
features_train: np.array = None,
features_valid: np.array = None):
""" parameter model local_model must be compiled before calling this method
this model will be returned with trained weights """
# todo: if valid set if None, create it as random segment of the shuffled train set
if self.preprocessor.return_features and features_train is None:
raise ValueError('features required')
if self.training_config.early_stop:
training_generator = self.create_data_generator(
x_train, y_train,
shuffle=True,
features=features_train,
name_suffix='training_generator'
)
validation_generator = self.create_data_generator(
x_valid, y_valid,
shuffle=False,
features=features_valid,
name_suffix='validation_generator'
)
callbacks = get_callbacks(
model_saver=self.model_saver,
log_dir=self.checkpoint_path,
log_period=self.training_config.checkpoint_epoch_interval,
early_stopping=True,
early_stopping_patience=self.training_config.patience,
initial_meta=self.training_config.initial_meta,
valid=(validation_generator, self.preprocessor),
meta=self.get_meta()
)
else:
x_train = np.concatenate((x_train, x_valid), axis=0)
y_train = np.concatenate((y_train, y_valid), axis=0)
features_all = None
if features_train is not None:
features_all = np.concatenate((features_train, features_valid), axis=0)
training_generator = self.create_data_generator(
x_train, y_train,
shuffle=True,
features=features_all,
name_suffix='training_generator'
)
callbacks = get_callbacks(
model_saver=self.model_saver,
log_dir=self.checkpoint_path,
early_stopping=False,
meta=self.get_meta()
)
nb_workers = 6
multiprocessing = self.multiprocessing
# multiple workers will not work with ELMo due to GPU memory limit (with GTX 1080Ti 11GB)
if self.embeddings and (self.embeddings.use_ELMo or self.embeddings.use_BERT):
# workers=0 means the training will be executed in the main thread
nb_workers = 0
multiprocessing = False
# dump token context independent data for train set, done once for the training
local_model.fit_generator(
generator=training_generator,
initial_epoch=self.training_config.initial_epoch or 0,
epochs=max_epoch,
use_multiprocessing=multiprocessing,
workers=nb_workers,
callbacks=callbacks
)
return local_model
def train_nfold( # pylint: disable=arguments-differ
self,
x_train: T_Batch_Tokens,
y_train: T_Batch_Labels,
x_valid: Optional[T_Batch_Tokens] = None,
y_valid: Optional[T_Batch_Labels] = None,
features_train: Optional[T_Batch_Features] = None,
features_valid: Optional[T_Batch_Features] = None
):
""" n-fold training for the instance model
the n models are stored in self.models, and self.model left unset at this stage """
fold_count = len(self.models)
fold_size = len(x_train) // fold_count
for fold_id in range(0, fold_count):
print(
'\n------------------------ fold %s--------------------------------------'
% fold_id
)
if x_valid is None:
# segment train and valid
fold_start = fold_size * fold_id
fold_end = fold_start + fold_size
if fold_id == fold_count - 1:  # the last fold extends to the end of the training data
fold_end = len(x_train)
train_x = np.concatenate([x_train[:fold_start], x_train[fold_end:]])
train_y = np.concatenate([y_train[:fold_start], y_train[fold_end:]])
val_x = x_train[fold_start:fold_end]
val_y = y_train[fold_start:fold_end]
if features_train is not None:
train_features = np.concatenate(
[features_train[:fold_start], features_train[fold_end:]]
)
val_features = features_train[fold_start:fold_end]
else:
train_features = None
val_features = None
else:
# reuse given segmentation
train_x = x_train
train_y = y_train
train_features = features_train
val_x = x_valid
val_y = y_valid
val_features = features_valid
foldModel = self.models[fold_id]
foldModel.summary()
if self.model_config.use_crf:
foldModel.compile(
loss=foldModel.crf.loss,
optimizer='adam'
)
else:
foldModel.compile(
loss='categorical_crossentropy',
optimizer='adam'
)
foldModel = self.train_model(
foldModel,
train_x,
train_y,
val_x,
val_y,
features_train=train_features,
features_valid=val_features,
max_epoch=self.training_config.max_epoch
)
self.models[fold_id] = foldModel
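# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# get_callbacks() above can be used on its own: with no validation data and no log_dir,
# only the ResumableEarlyStopping callback (monitoring 'f1' with mode='max') is returned.
example_callbacks = get_callbacks(
model_saver=None,  # only used when log_dir is set (checkpoint saving)
early_stopping=True,
early_stopping_patience=3
)
# expected: a single-element list containing the ResumableEarlyStopping callback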
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/trainer.py
|
trainer.py
|
| 0.873728 | 0.280898 |
import logging
import json
from typing import List, Type, Union
from keras.models import Model
from keras.layers.merge import Concatenate
from keras.layers import (
Dense, LSTM, Bidirectional, Embedding, Input, Dropout,
TimeDistributed
)
import delft.sequenceLabelling.wrapper
from delft.utilities.layers import ChainCRF
from delft.sequenceLabelling.models import BaseModel
from delft.sequenceLabelling.models import get_model as _get_model, BidLSTM_CRF_FEATURES
from sciencebeam_trainer_delft.sequence_labelling.config import ModelConfig
LOGGER = logging.getLogger(__name__)
class CustomModel(BaseModel):
def __init__(
self, config, ntags,
require_casing: bool = False,
use_crf: bool = False,
supports_features: bool = False,
require_features_indices_input: bool = False,
stateful: bool = False):
super().__init__(config, ntags)
self.require_casing = require_casing
self.use_crf = use_crf
self.supports_features = supports_features
self.require_features_indices_input = require_features_indices_input
self.stateful = stateful
def _concatenate_inputs(inputs: list, **kwargs):
if len(inputs) == 1:
return inputs[0]
return Concatenate(**kwargs)(inputs)
# renamed copy of BidLSTM_CRF to demonstrate a custom model
class CustomBidLSTM_CRF(CustomModel):
"""
A Keras implementation of BidLSTM-CRF for sequence labelling.
References
--
Guillaume Lample, Miguel Ballesteros, Sandeep Subramanian, Kazuya Kawakami, Chris Dyer.
"Neural Architectures for Named Entity Recognition". Proceedings of NAACL 2016.
https://arxiv.org/abs/1603.01360
"""
def __init__(self, config: ModelConfig, ntags=None):
super().__init__(
config, ntags,
require_casing=False, use_crf=True, supports_features=True,
stateful=config.stateful
)
stateful = self.stateful
# stateful RNNs require the batch size to be passed in
input_batch_size = config.batch_size if stateful else None
model_inputs = []
lstm_inputs = []
# build the word input, directly fed with word embeddings by the data generator
word_input = Input(
shape=(None, config.word_embedding_size),
batch_shape=(input_batch_size, None, config.word_embedding_size),
name='word_input'
)
model_inputs.append(word_input)
lstm_inputs.append(word_input)
# build character based embedding
char_input = Input(
shape=(None, config.max_char_length),
batch_shape=(input_batch_size, None, config.max_char_length),
dtype='int32',
name='char_input'
)
model_inputs.append(char_input)
if config.char_embedding_size:
assert config.char_vocab_size, 'config.char_vocab_size required'
char_embeddings = TimeDistributed(Embedding(
input_dim=config.char_vocab_size,
output_dim=config.char_embedding_size,
mask_zero=config.char_input_mask_zero,
name='char_embeddings_embedding'
), name='char_embeddings')(char_input)
chars = TimeDistributed(
Bidirectional(LSTM(
config.num_char_lstm_units,
dropout=config.char_input_dropout,
recurrent_dropout=config.char_lstm_dropout,
return_sequences=False
)),
name='char_lstm'
)(char_embeddings)
lstm_inputs.append(chars)
# length of sequence not used for the moment (but passed through for f1 computation)
length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')
# combine characters and word embeddings
LOGGER.debug('model, config.use_features: %s', config.use_features)
if config.use_features:
LOGGER.info('model using features')
assert config.max_feature_size > 0
features_input = Input(
batch_shape=(input_batch_size, None, config.max_feature_size),
name='features_input'
)
model_inputs.append(features_input)
features = features_input
if config.features_embedding_size:
features = TimeDistributed(Dense(
config.features_embedding_size,
name='features_embeddings_dense'
), name='features_embeddings')(features)
LOGGER.info(
'word_input=%s, chars=%s, features=%s',
word_input, chars, features
)
lstm_inputs.append(features)
x = _concatenate_inputs(lstm_inputs, name='word_lstm_input')
x = Dropout(config.dropout, name='word_lstm_input_dropout')(x)
x = Bidirectional(LSTM(
units=config.num_word_lstm_units,
return_sequences=True,
recurrent_dropout=config.recurrent_dropout,
stateful=stateful,
), name='word_lstm')(x)
x = Dropout(config.dropout, name='word_lstm_output_dropout')(x)
x = Dense(
config.num_word_lstm_units, name='word_lstm_dense', activation='tanh'
)(x)
x = Dense(ntags, name='dense_ntags')(x)
self.crf = ChainCRF(name='crf')
pred = self.crf(x)
model_inputs.append(length_input)
self.model = Model(inputs=model_inputs, outputs=[pred])
self.config = config
# copied from
# https://github.com/kermitt2/delft/blob/d2f8390ac01779cab959f57aa6e1a8f1d2723505/
# delft/sequenceLabelling/models.py
class CustomBidLSTM_CRF_FEATURES(CustomModel):
"""
A Keras implementation of BidLSTM-CRF for sequence labelling which creates features
from additional orthogonal information generated by GROBID.
References
--
Guillaume Lample, Miguel Ballesteros, Sandeep Subramanian, Kazuya Kawakami, Chris Dyer.
"Neural Architectures for Named Entity Recognition". Proceedings of NAACL 2016.
https://arxiv.org/abs/1603.01360
"""
name = 'CustomBidLSTM_CRF_FEATURES'
def __init__(self, config, ntags=None):
super().__init__(
config, ntags,
require_casing=False, use_crf=True, supports_features=True,
require_features_indices_input=True
)
# build the word input, directly fed with word embeddings by the data generator
word_input = Input(shape=(None, config.word_embedding_size), name='word_input')
# build character based embedding
char_input = Input(shape=(None, config.max_char_length), dtype='int32', name='char_input')
char_embeddings = TimeDistributed(Embedding(
input_dim=config.char_vocab_size,
output_dim=config.char_embedding_size,
mask_zero=True,
name='char_embeddings'
))(char_input)
chars = TimeDistributed(Bidirectional(LSTM(
config.num_char_lstm_units,
return_sequences=False
)))(char_embeddings)
# layout features input and embeddings
features_input = Input(
shape=(None, len(config.features_indices)),
dtype='float32',
name='features_input'
)
assert config.features_vocabulary_size, "config.features_vocabulary_size required"
assert config.features_embedding_size, "config.features_embedding_size required"
# features_vocabulary_size (default 12) * number_of_features + 1
# (the zero is reserved for masking / padding)
features_embedding = TimeDistributed(
Embedding(
input_dim=config.features_vocabulary_size * len(config.features_indices) + 1,
output_dim=config.features_embedding_size,
mask_zero=True,
trainable=True,
name='features_embedding'),
name="features_embedding_td_1"
)(features_input)
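# Editor note (illustration, not part of the original code): with the default
# features_vocabulary_size of 12 and, say, 3 selected feature columns,
# input_dim above evaluates to 12 * 3 + 1 = 37 (index 0 being reserved for masking / padding).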
assert config.features_lstm_units, "config.features_lstm_units required"
features_embedding_bd = TimeDistributed(
Bidirectional(LSTM(config.features_lstm_units, return_sequences=False)),
name="features_embedding_td_2"
)(features_embedding)
features_embedding_out = Dropout(config.dropout)(features_embedding_bd)
# length of sequence not used for the moment (but passed through for f1 computation)
length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')
# combine characters and word embeddings
x = Concatenate()([word_input, chars, features_embedding_out])
x = Dropout(config.dropout)(x)
x = Bidirectional(LSTM(
units=config.num_word_lstm_units,
return_sequences=True,
recurrent_dropout=config.recurrent_dropout
))(x)
x = Dropout(config.dropout)(x)
x = Dense(config.num_word_lstm_units, activation='tanh')(x)
x = Dense(ntags)(x)
self.crf = ChainCRF()
pred = self.crf(x)
self.model = Model(
inputs=[word_input, char_input, features_input, length_input],
outputs=[pred]
)
self.config = config
DEFAULT_MODEL_NAMES = [
'BidLSTM_CRF', 'BidLSTM_CNN', 'BidLSTM_CNN_CRF', 'BidGRU_CRF', 'BidLSTM_CRF_CASING',
BidLSTM_CRF_FEATURES.name
]
MODEL_MAP = {
'CustomBidLSTM_CRF': CustomBidLSTM_CRF,
CustomBidLSTM_CRF_FEATURES.name: CustomBidLSTM_CRF_FEATURES
}
IMPLICIT_MODEL_CONFIG_PROPS_MAP = {
BidLSTM_CRF_FEATURES.name: dict(
use_features=True,
use_features_indices_input=True
),
CustomBidLSTM_CRF_FEATURES.name: dict(
use_features=True,
use_features_indices_input=True
)
}
def register_model(name: str, model_class: Type[CustomModel]):
MODEL_MAP[name] = model_class
def updated_implicit_model_config_props(model_config: ModelConfig):
implicit_model_config_props = IMPLICIT_MODEL_CONFIG_PROPS_MAP.get(model_config.model_type)
if not implicit_model_config_props:
return
for key, value in implicit_model_config_props.items():
setattr(model_config, key, value)
def _create_model(
model_class: Type[CustomModel],
config: ModelConfig,
ntags=None) -> CustomModel:
return model_class(config, ntags=ntags)
def is_model_stateful(model: Union[BaseModel, CustomModel]) -> bool:
try:
return model.stateful
except AttributeError:
return False
def get_model(config, preprocessor, ntags=None):
LOGGER.info(
'get_model, config: %s, ntags=%s',
json.dumps(vars(config), indent=4),
ntags
)
model_class = MODEL_MAP.get(config.model_type)
if not model_class:
return _get_model(config, preprocessor, ntags=ntags)
model = _create_model(model_class, config, ntags=ntags)
config.use_crf = model.use_crf
preprocessor.return_casing = model.require_casing
if config.use_features and not model.supports_features:
LOGGER.warning('features enabled but not supported by model (disabling)')
config.use_features = False
preprocessor.return_features = config.use_features
return model
def get_model_names() -> List[str]:
return sorted(set(DEFAULT_MODEL_NAMES) | set(MODEL_MAP.keys()))
def patch_get_model():
delft.sequenceLabelling.wrapper.get_model = get_model
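# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Registering an additional model class makes it resolvable by get_model() via
# config.model_type and visible in get_model_names(). The name 'MyBidLSTM_CRF' below
# is hypothetical and only used for illustration.
register_model('MyBidLSTM_CRF', CustomBidLSTM_CRF)
assert 'MyBidLSTM_CRF' in get_model_names()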
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/models.py
|
models.py
|
| 0.912782 | 0.230432 |
import logging
from collections import Counter
from itertools import zip_longest
from typing import List, Optional
import numpy as np
from delft.utilities.Tokenizer import tokenizeAndFilterSimple
from sciencebeam_trainer_delft.sequence_labelling.dataset_transform import (
DatasetTransformer
)
from sciencebeam_trainer_delft.sequence_labelling.typing import (
T_Batch_Tokens,
T_Batch_Features,
T_Batch_Labels
)
LOGGER = logging.getLogger(__name__)
NBSP = '\u00A0'
class LineStatus:
# replicate line status used in GROBID
LINESTART = 'LINESTART'
LINEIN = 'LINEIN'
LINEEND = 'LINEEND'
def strip_tag_prefix(tag: Optional[str]) -> str:
if tag and (tag.startswith('B-') or tag.startswith('I-')):
return tag[2:]
return tag or ''
def get_next_transform_token_y(token_y: str) -> str:
if token_y and token_y.startswith('B-'):
return 'I-' + token_y[2:]
return token_y
def inverse_transform_token_y(unrolled_token_y: List[str], previous_tag: Optional[str]) -> str:
tags_with_stripped_prefix = [strip_tag_prefix(tag) for tag in unrolled_token_y]
tag_counts = Counter(tags_with_stripped_prefix)
top_tag = tag_counts.most_common(1)[0][0]
LOGGER.debug('tag_counts: %s, top_tag=%r', tag_counts, top_tag)
if f'B-{top_tag}' in unrolled_token_y or f'I-{top_tag}' in unrolled_token_y:
if top_tag != strip_tag_prefix(previous_tag):
return f'B-{top_tag}'
return f'I-{top_tag}'
return top_tag
def get_line_status(
token_index: int,
line_length: int
):
if token_index == 0:
return LineStatus.LINESTART
if token_index == line_length - 1:
return LineStatus.LINEEND
return LineStatus.LINEIN
def get_transformed_features(
token_features: List[str],
unrolled_token_index: int,
unrolled_tokens_length: int,
line_status_enabled: bool = True
):
if not line_status_enabled:
return token_features
return list(token_features) + [get_line_status(unrolled_token_index, unrolled_tokens_length)]
class UnrollingTextFeatureDatasetTransformer(DatasetTransformer):
def __init__(
self,
unroll_text_feature_index: int,
used_features_indices: Optional[List[int]] = None
):
# Note: used_features_indices is used to determine whether to add the line status
# (i.e. no need to add it if it is not used)
self.unroll_text_feature_index = unroll_text_feature_index
self.used_features_indices = used_features_indices
self._saved_x: Optional[T_Batch_Tokens] = None
self._saved_features: Optional[T_Batch_Features] = None
self._unrolled_token_lengths: Optional[List[List[int]]] = None
def tokenize(self, text: str) -> List[str]:
return tokenizeAndFilterSimple(text.replace(NBSP, ' '))
def fit_transform(
self,
x: T_Batch_Tokens,
y: Optional[T_Batch_Labels],
features: Optional[T_Batch_Features]
):
assert features is not None
x_transformed = []
_y_transformed = []
features_transformed = []
line_status_enabled: Optional[bool] = None
unrolled_token_lengths = []
for y_doc, features_doc in zip_longest(
y if y is not None else [],
features,
fillvalue=[]
):
x_doc_transformed = []
y_doc_transformed = []
features_doc_transformed = []
unrolled_token_lengths_doc = []
for features_row, y_row in zip_longest(features_doc, y_doc, fillvalue=None):
text = features_row[self.unroll_text_feature_index]
if line_status_enabled is None:
line_status_enabled = (
self.used_features_indices is not None
and len(features_row) in self.used_features_indices
)
tokens = self.tokenize(text)
assert tokens
assert y is None or y_row is not None
tokens_length = len(tokens)
for unrolled_token_index, token in enumerate(tokens):
x_doc_transformed.append(token)
y_doc_transformed.append(y_row)
features_doc_transformed.append(
get_transformed_features(
features_row,
unrolled_token_index=unrolled_token_index,
unrolled_tokens_length=tokens_length,
line_status_enabled=line_status_enabled
)
)
y_row = get_next_transform_token_y(y_row)
unrolled_token_lengths_doc.append(tokens_length)
x_transformed.append(x_doc_transformed)
_y_transformed.append(y_doc_transformed)
features_transformed.append(features_doc_transformed)
unrolled_token_lengths.append(unrolled_token_lengths_doc)
LOGGER.debug('x_transformed: %s', x_transformed)
LOGGER.debug('y_transformed: %s', _y_transformed)
LOGGER.debug('features_transformed: %s', features_transformed)
y_transformed = _y_transformed if y is not None else None
self._saved_x = x
self._saved_features = features
self._unrolled_token_lengths = unrolled_token_lengths
if isinstance(x, np.ndarray):
x_transformed = np.asarray(x_transformed, dtype='object')
if isinstance(y, np.ndarray):
y_transformed = np.asarray(y_transformed, dtype='object')
if isinstance(features, np.ndarray):
features_transformed = np.asarray(features_transformed, dtype='object')
return x_transformed, y_transformed, features_transformed
def inverse_transform(
self,
x: Optional[T_Batch_Tokens],
y: Optional[T_Batch_Labels],
features: Optional[T_Batch_Features]
):
if x is not None:
x = self._saved_x
if features is not None:
features = self._saved_features
inverse_transformed_y = None
if y is not None:
inverse_transformed_y = []
assert self._saved_x is not None
assert self._saved_features is not None
assert self._unrolled_token_lengths is not None
for x_doc, features_doc, y_doc, unrolled_token_lengths_doc in zip(
self._saved_x, self._saved_features, y, self._unrolled_token_lengths
):
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('unrolled_token_lengths_doc: %s', unrolled_token_lengths_doc)
LOGGER.debug('y_doc: %s', y_doc)
LOGGER.debug('xy_doc: %s', list(zip(x_doc, y_doc)))
index = 0
inverse_transformed_y_doc = []
previous_tag = None
for x_token, features_token, unrolled_token_length in zip(
x_doc, features_doc, unrolled_token_lengths_doc
):
if index >= len(y_doc):
# y_doc may be truncated using max sequence length
break
y_tokens = y_doc[index:index + unrolled_token_length]
if LOGGER.isEnabledFor(logging.DEBUG):
tokens = self.tokenize(features_token[self.unroll_text_feature_index])
LOGGER.debug(
'inverse transforming: indices=[%d:%d], x=%r, f=%r, tokens_y=%r',
index, index + unrolled_token_length,
x_token, features_token, list(zip_longest(tokens, y_tokens))
)
y_token = inverse_transform_token_y(y_tokens, previous_tag=previous_tag)
previous_tag = y_token
inverse_transformed_y_doc.append(y_token)
index += unrolled_token_length
inverse_transformed_y.append(inverse_transformed_y_doc)
if isinstance(y, np.ndarray):
inverse_transformed_y = np.asarray(inverse_transformed_y, dtype='object')
return x, inverse_transformed_y, features
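# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Unrolls the text feature at index 1 into individual tokens, repeating the remaining
# features, adding a LINESTART / LINEIN / LINEEND status and converting 'B-' labels to
# 'I-' for continuation tokens. The tiny dataset below is made up and assumes that
# tokenizeAndFilterSimple splits 'A B' into ['A', 'B'].
example_transformer = UnrollingTextFeatureDatasetTransformer(
unroll_text_feature_index=1,
used_features_indices=[0, 1, 2]  # includes 2 == len(features_row), so the line status is added
)
example_x = [['line1', 'line2']]
example_y = [['B-title', 'I-title']]
example_features = [[['f0', 'A B'], ['f0', 'C']]]
x_t, y_t, f_t = example_transformer.fit_transform(example_x, example_y, example_features)
# expected: x_t == [['A', 'B', 'C']] and y_t == [['B-title', 'I-title', 'I-title']]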
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/dataset_transform/unroll_transform.py
|
unroll_transform.py
|
| 0.821868 | 0.254257 |
import logging
import tempfile
import os
from pathlib import Path
from typing import Iterable, IO, List, Optional, Tuple
import numpy as np
from delft.sequenceLabelling.reader import (
_translate_tags_grobid_to_IOB as translate_tags_grobid_to_IOB
)
from sciencebeam_trainer_delft.sequence_labelling.evaluation import ClassificationResult
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.io import copy_file
from sciencebeam_trainer_delft.sequence_labelling.config import TrainingConfig
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti import (
WapitiModel,
WapitiWrapper,
format_feature_line
)
LOGGER = logging.getLogger(__name__)
def translate_tags_IOB_to_grobid(tag: str) -> str:
"""
Convert labels from IOB2 to the ones used by GROBID (expected by the wapiti model)
"""
if tag == 'O':
# outside
return '<other>'
elif tag.startswith('B-'):
# begin
return 'I-' + tag[2:]
elif tag.startswith('I-'):
# inside
return '' + tag[2:]
else:
return tag
def iter_doc_formatted_input_data(
x_doc: np.array, features_doc: np.array) -> Iterable[str]:
for x_token, f_token in zip(x_doc, features_doc):
try:
yield format_feature_line([x_token] + list(f_token))
except TypeError as error:
raise RuntimeError(
'failed to concatenate: x=<%s>, f=<%s>' % (x_token, f_token)
) from error
# blank lines to mark the end of the document
yield ''
yield ''
def iter_formatted_input_data(
x: np.array, features: np.array) -> Iterable[str]:
return (
line + '\n'
for x_doc, f_doc in zip(x, features)
for line in iter_doc_formatted_input_data(x_doc, f_doc)
)
def write_wapiti_input_data(fp: IO, x: np.array, features: np.array):
fp.writelines(iter_formatted_input_data(
x, features
))
def iter_read_tagged_result(fp: IO) -> Iterable[List[Tuple[str, str]]]:
token_and_label_pairs: List[Tuple[str, str]] = []
for line in fp:
LOGGER.debug('line: %r', line)
line = line.rstrip()
if not line:
if token_and_label_pairs:
yield token_and_label_pairs
token_and_label_pairs = []
continue
values = line.replace('\t', ' ').split(' ')
if len(values) < 2:
raise ValueError('should have multiple values, but got: [%s]' % line)
token_and_label_pairs.append((
values[0],
translate_tags_grobid_to_IOB(values[-1])
))
if token_and_label_pairs:
yield token_and_label_pairs
def convert_wapiti_model_result_to_document_tagged_result(
x_doc: List[str],
wapiti_model_result: List[List[str]]) -> List[Tuple[str, str]]:
return [
(
x_token,
translate_tags_grobid_to_IOB(result_token[-1])
)
for x_token, result_token in zip(x_doc, wapiti_model_result)
]
class WapitiModelAdapter:
def __init__(self, wapiti_wrapper: WapitiWrapper, model_file_path: str, model_path: str = None):
self.wapiti_wrapper = wapiti_wrapper
self.model_file_path = model_file_path
self.model_path = model_path
self._wapiti_model: Optional[WapitiModel] = None
@property
def wapiti_model(self) -> WapitiModel:
if self._wapiti_model is not None:
return self._wapiti_model
wapiti_model = self.wapiti_wrapper.load_model(self.model_file_path)
self._wapiti_model = wapiti_model
return wapiti_model
@staticmethod
def load_from(
model_path: str,
download_manager: DownloadManager,
wapiti_binary_path: str = None) -> 'WapitiModelAdapter':
model_file_path = os.path.join(model_path, 'model.wapiti.gz')
local_model_file_path = None
try:
local_model_file_path = download_manager.download_if_url(model_file_path)
except FileNotFoundError:
pass
if not local_model_file_path or not os.path.isfile(str(local_model_file_path)):
model_file_path = os.path.splitext(model_file_path)[0]
local_model_file_path = download_manager.download_if_url(model_file_path)
LOGGER.debug('local_model_file_path: %s', local_model_file_path)
if local_model_file_path.endswith('.gz'):
local_uncompressed_file_path = os.path.splitext(local_model_file_path)[0]
copy_file(local_model_file_path, local_uncompressed_file_path, overwrite=False)
local_model_file_path = local_uncompressed_file_path
return WapitiModelAdapter(
WapitiWrapper(
wapiti_binary_path=wapiti_binary_path
),
model_file_path=local_model_file_path,
model_path=model_path
)
def _get_model_name(self) -> str:
return os.path.basename(os.path.dirname(self.model_file_path))
def iter_tag_using_model(
self,
x: np.array,
features: np.array,
output_format: str = None) -> Iterable[List[Tuple[str, str]]]:
# Note: this method doesn't currently seem to work reliably and needs to be investigated
# The evaluation always shows zero.
assert not output_format, 'output_format not supported'
for x_doc, f_doc in zip(x, features):
LOGGER.debug('x_doc=%s, f_doc=%s', x_doc, f_doc)
result = self.wapiti_model.label_features([
[x_token] + list(f_token)
for x_token, f_token in zip(x_doc, f_doc)
])
yield convert_wapiti_model_result_to_document_tagged_result(
x_doc,
result
)
def iter_tag_using_wrapper(
self,
x: np.array,
features: np.array,
output_format: str = None) -> Iterable[List[Tuple[str, str]]]:
assert not output_format, 'output_format not supported'
with tempfile.TemporaryDirectory(suffix='wapiti') as temp_dir:
data_path = Path(temp_dir).joinpath('input.data')
output_data_path = Path(temp_dir).joinpath('output.data')
with data_path.open(mode='w') as fp:
write_wapiti_input_data(
fp, x=x, features=features
)
self.wapiti_wrapper.label(
model_path=self.model_file_path,
data_path=str(data_path),
output_data_path=str(output_data_path),
output_only_labels=False
)
with output_data_path.open(mode='r') as output_data_fp:
yield from iter_read_tagged_result(output_data_fp)
def iter_tag(
self,
x: np.array,
features: np.array,
output_format: str = None) -> Iterable[List[Tuple[str, str]]]:
return self.iter_tag_using_wrapper(x, features, output_format)
def tag(
self,
x: np.array,
features: np.array,
output_format: str = None) -> List[List[Tuple[str, str]]]:
assert not output_format, 'output_format not supported'
return list(self.iter_tag(x, features))
def eval(self, x_test, y_test, features: np.array = None):
self.eval_single(x_test, y_test, features=features)
@property
def model_summary_props(self) -> dict:
return {
'model_type': 'wapiti'
}
def get_evaluation_result(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None) -> ClassificationResult:
tag_result = self.tag(x_test, features)
y_true = [
y_token
for y_doc in y_test
for y_token in y_doc
]
y_pred = [
tag_result_token[-1]
for tag_result_doc in tag_result
for tag_result_token in tag_result_doc
]
return ClassificationResult(
y_pred=y_pred,
y_true=y_true
)
def eval_single(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None):
classification_result = self.get_evaluation_result(
x_test=x_test,
y_test=y_test,
features=features
)
print(classification_result.get_formatted_report(digits=4))
def iter_doc_formatted_training_data(
x_doc: np.array, y_doc: np.array, features_doc: np.array) -> Iterable[str]:
for x_token, y_token, f_token in zip(x_doc, y_doc, features_doc):
yield format_feature_line([x_token] + f_token + [translate_tags_IOB_to_grobid(y_token)])
# blank lines to mark the end of the document
yield ''
yield ''
def iter_formatted_training_data(
x: np.array, y: np.array, features: np.array) -> Iterable[str]:
return (
line + '\n'
for x_doc, y_doc, f_doc in zip(x, y, features)
for line in iter_doc_formatted_training_data(x_doc, y_doc, f_doc)
)
def write_wapiti_train_data(fp: IO, x: np.array, y: np.array, features: np.array):
fp.writelines(iter_formatted_training_data(
x, y, features
))
class WapitiModelTrainAdapter:
def __init__(
self,
model_name: str,
template_path: str,
temp_model_path: str,
max_epoch: int,
download_manager: DownloadManager,
gzip_enabled: bool = False,
wapiti_binary_path: str = None,
wapiti_train_args: dict = None):
self.model_name = model_name
self.template_path = template_path
self.temp_model_path = temp_model_path
self.max_epoch = max_epoch
self.download_manager = download_manager
self.gzip_enabled = gzip_enabled
self.wapiti_binary_path = wapiti_binary_path
self.wapiti_train_args = wapiti_train_args
self._model_adapter: Optional[WapitiModelAdapter] = None
# additional properties to keep "compatibility" with wrapper.Sequence
self.log_dir = None
self.model_path = None
self.training_config = TrainingConfig(initial_epoch=0)
def train(
self,
x_train: np.array,
y_train: np.array,
x_valid: np.array = None,
y_valid: np.array = None,
features_train: np.array = None,
features_valid: np.array = None):
local_template_path = self.download_manager.download_if_url(self.template_path)
LOGGER.info('local_template_path: %s', local_template_path)
if not self.temp_model_path:
self.temp_model_path = '/tmp/model.wapiti'
with tempfile.TemporaryDirectory(suffix='wapiti') as temp_dir:
data_path = Path(temp_dir).joinpath('train.data')
with data_path.open(mode='w') as fp:
write_wapiti_train_data(
fp, x=x_train, y=y_train, features=features_train
)
if x_valid is not None:
write_wapiti_train_data(
fp, x=x_valid, y=y_valid, features=features_valid
)
WapitiWrapper(wapiti_binary_path=self.wapiti_binary_path).train(
data_path=str(data_path),
output_model_path=self.temp_model_path,
template_path=local_template_path,
max_iter=self.max_epoch,
**(self.wapiti_train_args or {})
)
LOGGER.info('wapiti model trained: %s', self.temp_model_path)
def get_model_adapter(self) -> WapitiModelAdapter:
if self._model_adapter is not None:
return self._model_adapter
assert self.temp_model_path, "temp_model_path required"
model_adapter = WapitiModelAdapter.load_from(
os.path.dirname(self.temp_model_path),
download_manager=self.download_manager,
wapiti_binary_path=self.wapiti_binary_path
)
self._model_adapter = model_adapter
return model_adapter
@property
def last_checkpoint_path(self) -> Optional[str]:
return None
@property
def model_summary_props(self) -> dict:
return self.get_model_adapter().model_summary_props
def get_evaluation_result(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None) -> ClassificationResult:
return self.get_model_adapter().get_evaluation_result(
x_test, y_test, features=features
)
def eval(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None):
self.get_model_adapter().eval(
x_test, y_test, features=features
)
def get_model_output_path(self, output_path: str = None) -> str:
assert output_path, "output_path required"
return os.path.join(output_path, self.model_name)
def save(self, output_path: str = None):
model_output_path = self.get_model_output_path(output_path)
assert self.temp_model_path, "temp_model_path required"
if not Path(self.temp_model_path).exists():
raise FileNotFoundError("temp_model_path does not exist: %s" % self.temp_model_path)
model_file_path = os.path.join(model_output_path, 'model.wapiti')
if self.gzip_enabled:
model_file_path += '.gz'
LOGGER.info('saving to %s', model_file_path)
copy_file(self.temp_model_path, model_file_path)
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/engines/wapiti_adapters.py
|
wapiti_adapters.py
|
import logging
import tempfile
import os
from pathlib import Path
from typing import Iterable, IO, List, Optional, Tuple
import numpy as np
from delft.sequenceLabelling.reader import (
_translate_tags_grobid_to_IOB as translate_tags_grobid_to_IOB
)
from sciencebeam_trainer_delft.sequence_labelling.evaluation import ClassificationResult
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.io import copy_file
from sciencebeam_trainer_delft.sequence_labelling.config import TrainingConfig
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti import (
WapitiModel,
WapitiWrapper,
format_feature_line
)
LOGGER = logging.getLogger(__name__)
def translate_tags_IOB_to_grobid(tag: str) -> str:
"""
Convert labels from IOB2 to the ones used by GROBID (expected by the wapiti model)
"""
if tag == 'O':
# outside
return '<other>'
elif tag.startswith('B-'):
# begin
return 'I-' + tag[2:]
elif tag.startswith('I-'):
# inside
return '' + tag[2:]
else:
return tag
def iter_doc_formatted_input_data(
x_doc: np.array, features_doc: np.array) -> Iterable[str]:
for x_token, f_token in zip(x_doc, features_doc):
try:
yield format_feature_line([x_token] + list(f_token))
except TypeError as error:
raise RuntimeError(
'failed to concatenate: x=<%s>, f=<%s>' % (x_token, f_token)
) from error
# blank lines to mark the end of the document
yield ''
yield ''
def iter_formatted_input_data(
x: np.array, features: np.array) -> Iterable[str]:
return (
line + '\n'
for x_doc, f_doc in zip(x, features)
for line in iter_doc_formatted_input_data(x_doc, f_doc)
)
def write_wapiti_input_data(fp: IO, x: np.array, features: np.array):
fp.writelines(iter_formatted_input_data(
x, features
))
def iter_read_tagged_result(fp: IO) -> Iterable[List[Tuple[str, str]]]:
token_and_label_pairs: List[Tuple[str, str]] = []
for line in fp:
LOGGER.debug('line: %r', line)
line = line.rstrip()
if not line:
if token_and_label_pairs:
yield token_and_label_pairs
token_and_label_pairs = []
continue
values = line.replace('\t', ' ').split(' ')
if len(values) < 2:
raise ValueError('should have multiple values, but got: [%s]' % line)
token_and_label_pairs.append((
values[0],
translate_tags_grobid_to_IOB(values[-1])
))
if token_and_label_pairs:
yield token_and_label_pairs
def convert_wapiti_model_result_to_document_tagged_result(
x_doc: List[str],
wapiti_model_result: List[List[str]]) -> List[Tuple[str, str]]:
return [
(
x_token,
translate_tags_grobid_to_IOB(result_token[-1])
)
for x_token, result_token in zip(x_doc, wapiti_model_result)
]
class WapitiModelAdapter:
def __init__(self, wapiti_wrapper: WapitiWrapper, model_file_path: str, model_path: str = None):
self.wapiti_wrapper = wapiti_wrapper
self.model_file_path = model_file_path
self.model_path = model_path
self._wapiti_model: Optional[WapitiModel] = None
@property
def wapiti_model(self) -> WapitiModel:
if self._wapiti_model is not None:
return self._wapiti_model
wapiti_model = self.wapiti_wrapper.load_model(self.model_file_path)
self._wapiti_model = wapiti_model
return wapiti_model
@staticmethod
def load_from(
model_path: str,
download_manager: DownloadManager,
wapiti_binary_path: str = None) -> 'WapitiModelAdapter':
model_file_path = os.path.join(model_path, 'model.wapiti.gz')
local_model_file_path = None
try:
local_model_file_path = download_manager.download_if_url(model_file_path)
except FileNotFoundError:
pass
if not local_model_file_path or not os.path.isfile(str(local_model_file_path)):
model_file_path = os.path.splitext(model_file_path)[0]
local_model_file_path = download_manager.download_if_url(model_file_path)
LOGGER.debug('local_model_file_path: %s', local_model_file_path)
if local_model_file_path.endswith('.gz'):
local_uncompressed_file_path = os.path.splitext(local_model_file_path)[0]
copy_file(local_model_file_path, local_uncompressed_file_path, overwrite=False)
local_model_file_path = local_uncompressed_file_path
return WapitiModelAdapter(
WapitiWrapper(
wapiti_binary_path=wapiti_binary_path
),
model_file_path=local_model_file_path,
model_path=model_path
)
def _get_model_name(self) -> str:
return os.path.basename(os.path.dirname(self.model_file_path))
def iter_tag_using_model(
self,
x: np.array,
features: np.array,
output_format: str = None) -> Iterable[List[Tuple[str, str]]]:
# Note: this method doesn't currently seem to work reliably and needs to be investigated
# The evaluation always shows zero.
assert not output_format, 'output_format not supported'
for x_doc, f_doc in zip(x, features):
LOGGER.debug('x_doc=%s, f_doc=%s', x_doc, f_doc)
result = self.wapiti_model.label_features([
[x_token] + list(f_token)
for x_token, f_token in zip(x_doc, f_doc)
])
yield convert_wapiti_model_result_to_document_tagged_result(
x_doc,
result
)
def iter_tag_using_wrapper(
self,
x: np.array,
features: np.array,
output_format: str = None) -> Iterable[List[Tuple[str, str]]]:
assert not output_format, 'output_format not supported'
with tempfile.TemporaryDirectory(suffix='wapiti') as temp_dir:
data_path = Path(temp_dir).joinpath('input.data')
output_data_path = Path(temp_dir).joinpath('output.data')
with data_path.open(mode='w') as fp:
write_wapiti_input_data(
fp, x=x, features=features
)
self.wapiti_wrapper.label(
model_path=self.model_file_path,
data_path=str(data_path),
output_data_path=str(output_data_path),
output_only_labels=False
)
with output_data_path.open(mode='r') as output_data_fp:
yield from iter_read_tagged_result(output_data_fp)
def iter_tag(
self,
x: np.array,
features: np.array,
output_format: str = None) -> Iterable[List[Tuple[str, str]]]:
return self.iter_tag_using_wrapper(x, features, output_format)
def tag(
self,
x: np.array,
features: np.array,
output_format: str = None) -> List[List[Tuple[str, str]]]:
assert not output_format, 'output_format not supported'
return list(self.iter_tag(x, features))
def eval(self, x_test, y_test, features: np.array = None):
self.eval_single(x_test, y_test, features=features)
@property
def model_summary_props(self) -> dict:
return {
'model_type': 'wapiti'
}
def get_evaluation_result(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None) -> ClassificationResult:
tag_result = self.tag(x_test, features)
y_true = [
y_token
for y_doc in y_test
for y_token in y_doc
]
y_pred = [
tag_result_token[-1]
for tag_result_doc in tag_result
for tag_result_token in tag_result_doc
]
return ClassificationResult(
y_pred=y_pred,
y_true=y_true
)
def eval_single(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None):
classification_result = self.get_evaluation_result(
x_test=x_test,
y_test=y_test,
features=features
)
print(classification_result.get_formatted_report(digits=4))
def iter_doc_formatted_training_data(
x_doc: np.array, y_doc: np.array, features_doc: np.array) -> Iterable[str]:
for x_token, y_token, f_token in zip(x_doc, y_doc, features_doc):
yield format_feature_line([x_token] + f_token + [translate_tags_IOB_to_grobid(y_token)])
# blank lines to mark the end of the document
yield ''
yield ''
def iter_formatted_training_data(
x: np.array, y: np.array, features: np.array) -> Iterable[str]:
return (
line + '\n'
for x_doc, y_doc, f_doc in zip(x, y, features)
for line in iter_doc_formatted_training_data(x_doc, y_doc, f_doc)
)
def write_wapiti_train_data(fp: IO, x: np.array, y: np.array, features: np.array):
fp.writelines(iter_formatted_training_data(
x, y, features
))
class WapitiModelTrainAdapter:
def __init__(
self,
model_name: str,
template_path: str,
temp_model_path: str,
max_epoch: int,
download_manager: DownloadManager,
gzip_enabled: bool = False,
wapiti_binary_path: str = None,
wapiti_train_args: dict = None):
self.model_name = model_name
self.template_path = template_path
self.temp_model_path = temp_model_path
self.max_epoch = max_epoch
self.download_manager = download_manager
self.gzip_enabled = gzip_enabled
self.wapiti_binary_path = wapiti_binary_path
self.wapiti_train_args = wapiti_train_args
self._model_adapter: Optional[WapitiModelAdapter] = None
# additional properties to keep "compatibility" with wrapper.Sequence
self.log_dir = None
self.model_path = None
self.training_config = TrainingConfig(initial_epoch=0)
def train(
self,
x_train: np.array,
y_train: np.array,
x_valid: np.array = None,
y_valid: np.array = None,
features_train: np.array = None,
features_valid: np.array = None):
local_template_path = self.download_manager.download_if_url(self.template_path)
LOGGER.info('local_template_path: %s', local_template_path)
if not self.temp_model_path:
self.temp_model_path = '/tmp/model.wapiti'
with tempfile.TemporaryDirectory(suffix='wapiti') as temp_dir:
data_path = Path(temp_dir).joinpath('train.data')
with data_path.open(mode='w') as fp:
write_wapiti_train_data(
fp, x=x_train, y=y_train, features=features_train
)
if x_valid is not None:
write_wapiti_train_data(
fp, x=x_valid, y=y_valid, features=features_valid
)
WapitiWrapper(wapiti_binary_path=self.wapiti_binary_path).train(
data_path=str(data_path),
output_model_path=self.temp_model_path,
template_path=local_template_path,
max_iter=self.max_epoch,
**(self.wapiti_train_args or {})
)
LOGGER.info('wapiti model trained: %s', self.temp_model_path)
def get_model_adapter(self) -> WapitiModelAdapter:
if self._model_adapter is not None:
return self._model_adapter
assert self.temp_model_path, "temp_model_path required"
model_adapter = WapitiModelAdapter.load_from(
os.path.dirname(self.temp_model_path),
download_manager=self.download_manager,
wapiti_binary_path=self.wapiti_binary_path
)
self._model_adapter = model_adapter
return model_adapter
@property
def last_checkpoint_path(self) -> Optional[str]:
return None
@property
def model_summary_props(self) -> dict:
return self.get_model_adapter().model_summary_props
def get_evaluation_result(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None) -> ClassificationResult:
return self.get_model_adapter().get_evaluation_result(
x_test, y_test, features=features
)
def eval(
self,
x_test: List[List[str]],
y_test: List[List[str]],
features: List[List[List[str]]] = None):
self.get_model_adapter().eval(
x_test, y_test, features=features
)
def get_model_output_path(self, output_path: str = None) -> str:
assert output_path, "output_path required"
return os.path.join(output_path, self.model_name)
def save(self, output_path: str = None):
model_output_path = self.get_model_output_path(output_path)
assert self.temp_model_path, "temp_model_path required"
if not Path(self.temp_model_path).exists():
raise FileNotFoundError("temp_model_path does not exist: %s" % self.temp_model_path)
model_file_path = os.path.join(model_output_path, 'model.wapiti')
if self.gzip_enabled:
model_file_path += '.gz'
LOGGER.info('saving to %s', model_file_path)
copy_file(self.temp_model_path, model_file_path)
| 0.690768 | 0.297062 |
import logging
import threading
import os
import sys
from collections import Counter
from itertools import islice
from multiprocessing import cpu_count
from typing import IO, List, Iterable, Optional, cast
import subprocess
LOGGER = logging.getLogger(__name__)
DEFAULT_STOP_EPSILON_VALUE = '0.00001'
DEFAULT_STOP_WINDOW_SIZE = 20
DEFAULT_INVALID_CHARACTER_PLACEHOLDER = '?'
INVAID_CHARACTER_START_ORD = 0x6EE80
def format_feature_line(feature_line: List[str]) -> str:
return '\t'.join(feature_line)
def replace_invalid_characters(text: str, placeholder: str = DEFAULT_INVALID_CHARACTER_PLACEHOLDER):
return ''.join((
c if ord(c) < INVAID_CHARACTER_START_ORD else placeholder
for c in text
))
def lines_to_log(logger: logging.Logger, level: int, message: str, lines: Iterable[str]):
LOGGER.debug('lines: %s', lines)
for line in lines:
if isinstance(line, bytes):
line = line.decode('utf-8')
line = line.rstrip()
logger.log(level, message, line)
class WapitiModel:
def __init__(self, process: subprocess.Popen):
self.process = process
@property
def process_stdin(self) -> IO:
stdin = self.process.stdin
assert stdin
return stdin
@property
def process_stdout(self) -> IO:
stdout = self.process.stdout
assert stdout
return stdout
def iter_read_lines(self) -> Iterable[str]:
while self.process.poll() is None:
line = self.process_stdout.readline().decode('utf-8').rstrip()
LOGGER.debug('read line: %s', line)
yield line
def iter_label(self, data: str) -> Iterable[str]:
self.process_stdin.write((data + '\n\n\n').encode('utf-8'))
self.process_stdin.flush()
yield from self.iter_read_lines()
def label_lines(self, lines: List[str], clean_input: bool = False) -> List[str]:
LOGGER.debug('lines: %s', lines)
for line in lines + ['', '']:
if clean_input:
cleaned_line = replace_invalid_characters(line, placeholder='?')
else:
cleaned_line = line
try:
LOGGER.debug('writing line: %s', cleaned_line)
LOGGER.debug('line counts: %s', Counter(cleaned_line))
self.process_stdin.write(
(cleaned_line + '\n').encode('utf-8')
)
self.process_stdin.flush()
except BrokenPipeError:
LOGGER.error('failed to write line: %s', [(c, hex(ord(c))) for c in cleaned_line])
raise
self.process_stdin.flush()
labelled_lines = list(islice(self.iter_read_lines(), len(lines) + 1))
LOGGER.debug('labelled_lines: %s', labelled_lines)
return labelled_lines[:-1]
def label_raw_text(self, data: str) -> str:
return '\n'.join(self.label_lines(data.splitlines()))
def label_features(self, features: List[List[str]]) -> List[List[str]]:
lines = [
format_feature_line(feature_line)
for feature_line in features
]
return [
[
token_features[0],
labelled_line.rsplit('\t', maxsplit=1)[-1]
]
for labelled_line, token_features in zip(self.label_lines(lines), features)
]
class WapitiWrapper:
def __init__(self, wapiti_binary_path: str = None):
self.wapiti_binary_path = wapiti_binary_path or 'wapiti'
def check_available(self):
self.run_wapiti(['--version'])
def load_model(
self,
model_path: str,
output_only_labels: bool = True,
stderr_to_log_enabled: bool = True) -> WapitiModel:
if not os.path.isfile(str(model_path)):
raise FileNotFoundError('wapiti model not found: %s' % model_path)
args = [
'label',
'--model',
str(model_path)
]
if output_only_labels:
args.append('--label')
command = [self.wapiti_binary_path] + args
LOGGER.debug('running wapiti: %s', command)
process = subprocess.Popen( # pylint: disable=consider-using-with
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=sys.stderr if not stderr_to_log_enabled else subprocess.PIPE
)
process.poll()
if stderr_to_log_enabled:
t = threading.Thread(target=lambda: lines_to_log(
LOGGER, logging.INFO, 'wapiti, stderr: %s',
cast(Iterable[str], process.stderr)
))
t.daemon = True
t.start()
return WapitiModel(process=process)
def run_wapiti(self, args: List[str]):
command = [self.wapiti_binary_path] + args
LOGGER.info('calling wapiti: %s', command)
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
) as process:
assert process.stdout
with process.stdout:
lines_to_log(
LOGGER,
logging.INFO,
'wapiti: %s',
cast(Iterable[str], process.stdout)
)
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(
process.returncode,
command
)
LOGGER.debug('wapiti call succeeded')
def label(
self,
model_path: str,
data_path: str,
output_data_path: str,
output_only_labels: bool = True):
if not os.path.isfile(str(model_path)):
raise FileNotFoundError('model file not found: %s' % model_path)
if not os.path.isfile(str(data_path)):
raise FileNotFoundError('data file not found: %s' % data_path)
args = [
'label',
'--model',
str(model_path)
]
if output_only_labels:
args.append('--label')
args.append(str(data_path))
args.append(str(output_data_path))
self.run_wapiti(args)
def train(
self,
data_path: str,
output_model_path: str,
template_path: Optional[str] = None,
max_iter: Optional[int] = None,
num_threads: Optional[int] = None,
stop_epsilon_value: Optional[str] = None,
stop_window_size: Optional[int] = None
):
if not os.path.isfile(str(data_path)):
raise FileNotFoundError('data file not found: %s' % data_path)
if not num_threads:
num_threads = cpu_count()
if not stop_epsilon_value:
stop_epsilon_value = DEFAULT_STOP_EPSILON_VALUE
if not stop_window_size:
stop_window_size = DEFAULT_STOP_WINDOW_SIZE
args = ['train']
if template_path:
if not os.path.isfile(str(template_path)):
raise FileNotFoundError('template file not found: %s' % template_path)
args.append('--pattern')
args.append(str(template_path))
if max_iter:
args.append('--maxiter')
args.append(str(max_iter))
args.append('--nthread')
args.append(str(num_threads))
args.append('--stopeps')
args.append(str(stop_epsilon_value))
args.append('--stopwin')
args.append(str(stop_window_size))
args.append(str(data_path))
args.append(str(output_model_path))
self.run_wapiti(args)
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/engines/wapiti.py
|
wapiti.py
|
import logging
import threading
import os
import sys
from collections import Counter
from itertools import islice
from multiprocessing import cpu_count
from typing import IO, List, Iterable, Optional, cast
import subprocess
LOGGER = logging.getLogger(__name__)
DEFAULT_STOP_EPSILON_VALUE = '0.00001'
DEFAULT_STOP_WINDOW_SIZE = 20
DEFAULT_INVALID_CHARACTER_PLACEHOLDER = '?'
INVAID_CHARACTER_START_ORD = 0x6EE80
def format_feature_line(feature_line: List[str]) -> str:
return '\t'.join(feature_line)
def replace_invalid_characters(text: str, placeholder: str = DEFAULT_INVALID_CHARACTER_PLACEHOLDER):
return ''.join((
c if ord(c) < INVAID_CHARACTER_START_ORD else placeholder
for c in text
))
def lines_to_log(logger: logging.Logger, level: int, message: str, lines: Iterable[str]):
LOGGER.debug('lines: %s', lines)
for line in lines:
if isinstance(line, bytes):
line = line.decode('utf-8')
line = line.rstrip()
logger.log(level, message, line)
class WapitiModel:
def __init__(self, process: subprocess.Popen):
self.process = process
@property
def process_stdin(self) -> IO:
stdin = self.process.stdin
assert stdin
return stdin
@property
def process_stdout(self) -> IO:
stdout = self.process.stdout
assert stdout
return stdout
def iter_read_lines(self) -> Iterable[str]:
while self.process.poll() is None:
line = self.process_stdout.readline().decode('utf-8').rstrip()
LOGGER.debug('read line: %s', line)
yield line
def iter_label(self, data: str) -> Iterable[str]:
self.process_stdin.write((data + '\n\n\n').encode('utf-8'))
self.process_stdin.flush()
yield from self.iter_read_lines()
def label_lines(self, lines: List[str], clean_input: bool = False) -> List[str]:
LOGGER.debug('lines: %s', lines)
for line in lines + ['', '']:
if clean_input:
cleaned_line = replace_invalid_characters(line, placeholder='?')
else:
cleaned_line = line
try:
LOGGER.debug('writing line: %s', cleaned_line)
LOGGER.debug('line counts: %s', Counter(cleaned_line))
self.process_stdin.write(
(cleaned_line + '\n').encode('utf-8')
)
self.process_stdin.flush()
except BrokenPipeError:
LOGGER.error('failed to write line: %s', [(c, hex(ord(c))) for c in cleaned_line])
raise
self.process_stdin.flush()
labelled_lines = list(islice(self.iter_read_lines(), len(lines) + 1))
LOGGER.debug('labelled_lines: %s', labelled_lines)
return labelled_lines[:-1]
def label_raw_text(self, data: str) -> str:
return '\n'.join(self.label_lines(data.splitlines()))
def label_features(self, features: List[List[str]]) -> List[List[str]]:
lines = [
format_feature_line(feature_line)
for feature_line in features
]
return [
[
token_features[0],
labelled_line.rsplit('\t', maxsplit=1)[-1]
]
for labelled_line, token_features in zip(self.label_lines(lines), features)
]
class WapitiWrapper:
def __init__(self, wapiti_binary_path: str = None):
self.wapiti_binary_path = wapiti_binary_path or 'wapiti'
def check_available(self):
self.run_wapiti(['--version'])
def load_model(
self,
model_path: str,
output_only_labels: bool = True,
stderr_to_log_enabled: bool = True) -> WapitiModel:
if not os.path.isfile(str(model_path)):
raise FileNotFoundError('wapiti model not found: %s' % model_path)
args = [
'label',
'--model',
str(model_path)
]
if output_only_labels:
args.append('--label')
command = [self.wapiti_binary_path] + args
LOGGER.debug('running wapiti: %s', command)
process = subprocess.Popen( # pylint: disable=consider-using-with
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=sys.stderr if not stderr_to_log_enabled else subprocess.PIPE
)
process.poll()
if stderr_to_log_enabled:
t = threading.Thread(target=lambda: lines_to_log(
LOGGER, logging.INFO, 'wapiti, stderr: %s',
cast(Iterable[str], process.stderr)
))
t.daemon = True
t.start()
return WapitiModel(process=process)
def run_wapiti(self, args: List[str]):
command = [self.wapiti_binary_path] + args
LOGGER.info('calling wapiti: %s', command)
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
) as process:
assert process.stdout
with process.stdout:
lines_to_log(
LOGGER,
logging.INFO,
'wapiti: %s',
cast(Iterable[str], process.stdout)
)
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(
process.returncode,
command
)
LOGGER.debug('wapiti call succeeded')
def label(
self,
model_path: str,
data_path: str,
output_data_path: str,
output_only_labels: bool = True):
if not os.path.isfile(str(model_path)):
raise FileNotFoundError('model file not found: %s' % model_path)
if not os.path.isfile(str(data_path)):
raise FileNotFoundError('data file not found: %s' % data_path)
args = [
'label',
'--model',
str(model_path)
]
if output_only_labels:
args.append('--label')
args.append(str(data_path))
args.append(str(output_data_path))
self.run_wapiti(args)
def train(
self,
data_path: str,
output_model_path: str,
template_path: Optional[str] = None,
max_iter: Optional[int] = None,
num_threads: Optional[int] = None,
stop_epsilon_value: Optional[str] = None,
stop_window_size: Optional[int] = None
):
if not os.path.isfile(str(data_path)):
raise FileNotFoundError('data file not found: %s' % data_path)
if not num_threads:
num_threads = cpu_count()
if not stop_epsilon_value:
stop_epsilon_value = DEFAULT_STOP_EPSILON_VALUE
if not stop_window_size:
stop_window_size = DEFAULT_STOP_WINDOW_SIZE
args = ['train']
if template_path:
if not os.path.isfile(str(template_path)):
raise FileNotFoundError('template file not found: %s' % template_path)
args.append('--pattern')
args.append(str(template_path))
if max_iter:
args.append('--maxiter')
args.append(str(max_iter))
args.append('--nthread')
args.append(str(num_threads))
args.append('--stopeps')
args.append(str(stop_epsilon_value))
args.append('--stopwin')
args.append(str(stop_window_size))
args.append(str(data_path))
args.append(str(output_model_path))
self.run_wapiti(args)
| 0.557845 | 0.169612 |
import argparse
import logging
from typing import Optional
import requests
from sciencebeam_trainer_delft.sequence_labelling.evaluation import (
ClassificationResult
)
LOGGER = logging.getLogger(__name__)
DEFAULT_TRAIN_START_MESSAGE_FORMAT = '\n'.join([
'Model training started',
'model_path: `{model_path}`',
'checkpoints_path: `{checkpoints_path}`',
'resume_train_model_path: `{resume_train_model_path}`',
'initial_epoch: `{initial_epoch}`'
])
DEFAULT_TRAIN_SUCCESS_MESSAGE_FORMAT = '\n'.join([
'Model training complete',
'model_path: `{model_path}`',
'last_checkpoint_path: `{last_checkpoint_path}`'
])
DEFAULT_TRAIN_EVAL_SUCCESS_MESSAGE_FORMAT = '\n'.join([
'Model training complete, f1: `{classification_result.f1:.4f}`',
'model_path: `{model_path}`',
'last_checkpoint_path: `{last_checkpoint_path}`',
'```\n{classification_result.text_formatted_report}\n```'
])
DEFAULT_TRAIN_ERROR_MESSAGE_FORMAT = (
'Model training failed due to: `{error}`\nmodel_path: `{model_path}`'
)
def get_rendered_notification_message(message_format: str, **kwargs):
return message_format.format(**kwargs)
def get_fallback_notification_message(message_format: str, conversion_error: str, args: dict):
return 'failed to format %r due to %s (args: %s)' % (message_format, conversion_error, args)
def safe_rendered_notification_message(message_format: str, **kwargs):
try:
return get_rendered_notification_message(message_format, **kwargs)
except Exception as exc: # pylint: disable=broad-except
LOGGER.warning(
'failed to convert message due to: %s', exc, exc_info=exc
)
return get_fallback_notification_message(message_format, str(exc), kwargs)
class TrainNotificationManager:
def __init__(
self,
notification_url: str,
notification_train_start_message: str,
notification_train_success_message: str,
notification_train_eval_success_message: str,
notification_error_message: str):
self.notification_url = notification_url
self.notification_train_start_message = notification_train_start_message
self.notification_train_success_message = notification_train_success_message
self.notification_train_eval_success_message = notification_train_eval_success_message
self.notification_error_message = notification_error_message
def send_notification(self, message_format: str, **kwargs):
message = safe_rendered_notification_message(message_format, **kwargs)
if not message or not self.notification_url:
LOGGER.info('not sending notification: %r (url: %r)', message, self.notification_url)
return
data = {
'text': message
}
LOGGER.info('sending notification: %r (url: %r)', message, self.notification_url)
requests.post(self.notification_url, json=data)
def notify_error(self, model_path: str, error: str):
self.send_notification(
self.notification_error_message,
model_path=model_path,
error=error
)
def notify_start(
self,
model_path: str,
checkpoints_path: Optional[str],
resume_train_model_path: Optional[str],
initial_epoch: int
):
self.send_notification(
self.notification_train_start_message,
model_path=model_path,
checkpoints_path=checkpoints_path,
resume_train_model_path=resume_train_model_path,
initial_epoch=initial_epoch
)
def notify_success(
self,
model_path: str,
last_checkpoint_path: str = None,
classification_result: ClassificationResult = None):
if classification_result is None:
self.send_notification(
self.notification_train_success_message,
model_path=model_path,
last_checkpoint_path=last_checkpoint_path
)
else:
self.send_notification(
self.notification_train_eval_success_message,
model_path=model_path,
last_checkpoint_path=last_checkpoint_path,
classification_result=classification_result
)
def add_train_notification_arguments(parser: argparse.ArgumentParser):
notification_group = parser.add_argument_group('notification')
notification_group.add_argument(
"--notification-url",
help="A URL to post to on success error (e.g. a Slack Webhook URL)"
)
notification_group.add_argument(
"--notification-train-start-message",
default=DEFAULT_TRAIN_START_MESSAGE_FORMAT,
help="Model training start notification message"
)
notification_group.add_argument(
"--notification-train-success-message",
default=DEFAULT_TRAIN_SUCCESS_MESSAGE_FORMAT,
help="Model training success notification message"
)
notification_group.add_argument(
"--notification-train-eval-success-message",
default=DEFAULT_TRAIN_EVAL_SUCCESS_MESSAGE_FORMAT,
help="Model training and evaluation success notification message"
)
notification_group.add_argument(
"--notification-error-message",
default=DEFAULT_TRAIN_ERROR_MESSAGE_FORMAT,
help="Model training failed notification message"
)
def get_train_notification_manager(args: argparse.Namespace) -> TrainNotificationManager:
return TrainNotificationManager(
notification_url=args.notification_url,
notification_train_start_message=args.notification_train_start_message,
notification_train_success_message=args.notification_train_success_message,
notification_train_eval_success_message=args.notification_train_eval_success_message,
notification_error_message=args.notification_error_message
)
def notify_train_start(
train_notification_manager: Optional[TrainNotificationManager] = None,
**kwargs
):
if train_notification_manager is not None:
train_notification_manager.notify_start(**kwargs)
def notify_train_success(
train_notification_manager: TrainNotificationManager = None,
**kwargs):
if train_notification_manager is not None:
train_notification_manager.notify_success(**kwargs)
def notify_train_error(
train_notification_manager: TrainNotificationManager = None,
**kwargs):
if train_notification_manager is not None:
train_notification_manager.notify_error(**kwargs)
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/utils/train_notify.py
|
train_notify.py
|
import argparse
import logging
from typing import Optional
import requests
from sciencebeam_trainer_delft.sequence_labelling.evaluation import (
ClassificationResult
)
LOGGER = logging.getLogger(__name__)
DEFAULT_TRAIN_START_MESSAGE_FORMAT = '\n'.join([
'Model training started',
'model_path: `{model_path}`',
'checkpoints_path: `{checkpoints_path}`',
'resume_train_model_path: `{resume_train_model_path}`',
'initial_epoch: `{initial_epoch}`'
])
DEFAULT_TRAIN_SUCCESS_MESSAGE_FORMAT = '\n'.join([
'Model training complete',
'model_path: `{model_path}`',
'last_checkpoint_path: `{last_checkpoint_path}`'
])
DEFAULT_TRAIN_EVAL_SUCCESS_MESSAGE_FORMAT = '\n'.join([
'Model training complete, f1: `{classification_result.f1:.4f}`',
'model_path: `{model_path}`',
'last_checkpoint_path: `{last_checkpoint_path}`',
'```\n{classification_result.text_formatted_report}\n```'
])
DEFAULT_TRAIN_ERROR_MESSAGE_FORMAT = (
'Model training failed due to: `{error}`\nmodel_path: `{model_path}`'
)
def get_rendered_notification_message(message_format: str, **kwargs):
return message_format.format(**kwargs)
def get_fallback_notification_message(message_format: str, conversion_error: str, args: dict):
return 'failed to format %r due to %s (args: %s)' % (message_format, conversion_error, args)
def safe_rendered_notification_message(message_format: str, **kwargs):
try:
return get_rendered_notification_message(message_format, **kwargs)
except Exception as exc: # pylint: disable=broad-except
LOGGER.warning(
'failed to convert message due to: %s', exc, exc_info=exc
)
return get_fallback_notification_message(message_format, str(exc), kwargs)
class TrainNotificationManager:
def __init__(
self,
notification_url: str,
notification_train_start_message: str,
notification_train_success_message: str,
notification_train_eval_success_message: str,
notification_error_message: str):
self.notification_url = notification_url
self.notification_train_start_message = notification_train_start_message
self.notification_train_success_message = notification_train_success_message
self.notification_train_eval_success_message = notification_train_eval_success_message
self.notification_error_message = notification_error_message
def send_notification(self, message_format: str, **kwargs):
message = safe_rendered_notification_message(message_format, **kwargs)
if not message or not self.notification_url:
LOGGER.info('not sending notification: %r (url: %r)', message, self.notification_url)
return
data = {
'text': message
}
LOGGER.info('sending notification: %r (url: %r)', message, self.notification_url)
requests.post(self.notification_url, json=data)
def notify_error(self, model_path: str, error: str):
self.send_notification(
self.notification_error_message,
model_path=model_path,
error=error
)
def notify_start(
self,
model_path: str,
checkpoints_path: Optional[str],
resume_train_model_path: Optional[str],
initial_epoch: int
):
self.send_notification(
self.notification_train_start_message,
model_path=model_path,
checkpoints_path=checkpoints_path,
resume_train_model_path=resume_train_model_path,
initial_epoch=initial_epoch
)
def notify_success(
self,
model_path: str,
last_checkpoint_path: str = None,
classification_result: ClassificationResult = None):
if classification_result is None:
self.send_notification(
self.notification_train_success_message,
model_path=model_path,
last_checkpoint_path=last_checkpoint_path
)
else:
self.send_notification(
self.notification_train_eval_success_message,
model_path=model_path,
last_checkpoint_path=last_checkpoint_path,
classification_result=classification_result
)
def add_train_notification_arguments(parser: argparse.ArgumentParser):
notification_group = parser.add_argument_group('notification')
notification_group.add_argument(
"--notification-url",
help="A URL to post to on success error (e.g. a Slack Webhook URL)"
)
notification_group.add_argument(
"--notification-train-start-message",
default=DEFAULT_TRAIN_START_MESSAGE_FORMAT,
help="Model training start notification message"
)
notification_group.add_argument(
"--notification-train-success-message",
default=DEFAULT_TRAIN_SUCCESS_MESSAGE_FORMAT,
help="Model training success notification message"
)
notification_group.add_argument(
"--notification-train-eval-success-message",
default=DEFAULT_TRAIN_EVAL_SUCCESS_MESSAGE_FORMAT,
help="Model training and evaluation success notification message"
)
notification_group.add_argument(
"--notification-error-message",
default=DEFAULT_TRAIN_ERROR_MESSAGE_FORMAT,
help="Model training failed notification message"
)
def get_train_notification_manager(args: argparse.Namespace) -> TrainNotificationManager:
return TrainNotificationManager(
notification_url=args.notification_url,
notification_train_start_message=args.notification_train_start_message,
notification_train_success_message=args.notification_train_success_message,
notification_train_eval_success_message=args.notification_train_eval_success_message,
notification_error_message=args.notification_error_message
)
def notify_train_start(
train_notification_manager: Optional[TrainNotificationManager] = None,
**kwargs
):
if train_notification_manager is not None:
train_notification_manager.notify_start(**kwargs)
def notify_train_success(
train_notification_manager: TrainNotificationManager = None,
**kwargs):
if train_notification_manager is not None:
train_notification_manager.notify_success(**kwargs)
def notify_train_error(
train_notification_manager: TrainNotificationManager = None,
**kwargs):
if train_notification_manager is not None:
train_notification_manager.notify_error(**kwargs)
| 0.804021 | 0.310028 |
import logging
from pathlib import Path
from typing import List, Optional, NamedTuple, Union
from sciencebeam_trainer_delft.utils.typing import T
from sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints import (
get_checkpoints_json,
get_checkpoint_meta
)
LOGGER = logging.getLogger(__name__)
class CheckPoint(NamedTuple):
path: str
epoch: int
meta: dict
def get_sorted_checkpoint_json_list(checkpoints_json: dict) -> List[dict]:
return sorted(
checkpoints_json.get('checkpoints', []),
key=lambda checkpoint: checkpoint['path']
)
def get_checkpoint_meta_or_none(path: str) -> Optional[dict]:
try:
return get_checkpoint_meta(path)
except FileNotFoundError:
LOGGER.info('meta not found for: %r', path)
return None
def get_checkpoint_for_json(checkpoint_json: Optional[dict]) -> Optional[CheckPoint]:
if not checkpoint_json:
return None
path = checkpoint_json.get('path')
assert path
epoch = checkpoint_json.get('epoch')
assert epoch
meta = get_checkpoint_meta_or_none(path) or {}
return CheckPoint(path=path, epoch=epoch, meta=meta)
def get_last_or_none(a_list: List[T]) -> Optional[T]:
try:
return a_list[-1]
except IndexError:
return None
class CheckPoints:
def __init__(self, log_dir: Union[str, Path]):
assert log_dir
self.log_dir = str(log_dir)
self._checkpoints_json: Optional[dict] = None
def _load_checkpoints_json(self) -> dict:
try:
return get_checkpoints_json(
self.log_dir
)
except FileNotFoundError:
return {}
@property
def checkpoints_json(self) -> dict:
if self._checkpoints_json is None:
self._checkpoints_json = self._load_checkpoints_json()
return self._checkpoints_json
@property
def latest_checkpoint(self) -> Optional[CheckPoint]:
return get_checkpoint_for_json(
get_last_or_none(
get_sorted_checkpoint_json_list(self.checkpoints_json)
)
)
@property
def latest_checkpoint_url(self) -> Optional[str]:
latest_checkpoint = self.latest_checkpoint
return latest_checkpoint.path if latest_checkpoint else None
class ResumeTrainModelParams(NamedTuple):
model_path: str
initial_epoch: int
initial_meta: dict
def get_resume_train_model_params(
log_dir: Optional[str],
auto_resume: bool = True,
resume_train_model_path: Optional[str] = None,
initial_epoch: Optional[int] = None
) -> Optional[ResumeTrainModelParams]:
if auto_resume and log_dir:
latest_checkpoint = CheckPoints(log_dir=log_dir).latest_checkpoint
if latest_checkpoint:
LOGGER.info('auto resuming using latest checkpoint: %r', latest_checkpoint)
return ResumeTrainModelParams(
model_path=latest_checkpoint.path,
initial_epoch=latest_checkpoint.epoch,
initial_meta=latest_checkpoint.meta
)
if resume_train_model_path:
LOGGER.info('using passed in resume train model path: %r', resume_train_model_path)
return ResumeTrainModelParams(
model_path=resume_train_model_path,
initial_epoch=initial_epoch or 0,
initial_meta={}
)
return None
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/utils/checkpoints.py
|
checkpoints.py
|
import logging
from pathlib import Path
from typing import List, Optional, NamedTuple, Union
from sciencebeam_trainer_delft.utils.typing import T
from sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints import (
get_checkpoints_json,
get_checkpoint_meta
)
LOGGER = logging.getLogger(__name__)
class CheckPoint(NamedTuple):
path: str
epoch: int
meta: dict
def get_sorted_checkpoint_json_list(checkpoints_json: dict) -> List[dict]:
return sorted(
checkpoints_json.get('checkpoints', []),
key=lambda checkpoint: checkpoint['path']
)
def get_checkpoint_meta_or_none(path: str) -> Optional[dict]:
try:
return get_checkpoint_meta(path)
except FileNotFoundError:
LOGGER.info('meta not found for: %r', path)
return None
def get_checkpoint_for_json(checkpoint_json: Optional[dict]) -> Optional[CheckPoint]:
if not checkpoint_json:
return None
path = checkpoint_json.get('path')
assert path
epoch = checkpoint_json.get('epoch')
assert epoch
meta = get_checkpoint_meta_or_none(path) or {}
return CheckPoint(path=path, epoch=epoch, meta=meta)
def get_last_or_none(a_list: List[T]) -> Optional[T]:
try:
return a_list[-1]
except IndexError:
return None
class CheckPoints:
def __init__(self, log_dir: Union[str, Path]):
assert log_dir
self.log_dir = str(log_dir)
self._checkpoints_json: Optional[dict] = None
def _load_checkpoints_json(self) -> dict:
try:
return get_checkpoints_json(
self.log_dir
)
except FileNotFoundError:
return {}
@property
def checkpoints_json(self) -> dict:
if self._checkpoints_json is None:
self._checkpoints_json = self._load_checkpoints_json()
return self._checkpoints_json
@property
def latest_checkpoint(self) -> Optional[CheckPoint]:
return get_checkpoint_for_json(
get_last_or_none(
get_sorted_checkpoint_json_list(self.checkpoints_json)
)
)
@property
def latest_checkpoint_url(self) -> Optional[str]:
latest_checkpoint = self.latest_checkpoint
return latest_checkpoint.path if latest_checkpoint else None
class ResumeTrainModelParams(NamedTuple):
model_path: str
initial_epoch: int
initial_meta: dict
def get_resume_train_model_params(
log_dir: Optional[str],
auto_resume: bool = True,
resume_train_model_path: Optional[str] = None,
initial_epoch: Optional[int] = None
) -> Optional[ResumeTrainModelParams]:
if auto_resume and log_dir:
latest_checkpoint = CheckPoints(log_dir=log_dir).latest_checkpoint
if latest_checkpoint:
LOGGER.info('auto resuming using latest checkpoint: %r', latest_checkpoint)
return ResumeTrainModelParams(
model_path=latest_checkpoint.path,
initial_epoch=latest_checkpoint.epoch,
initial_meta=latest_checkpoint.meta
)
if resume_train_model_path:
LOGGER.info('using passed in resume train model path: %r', resume_train_model_path)
return ResumeTrainModelParams(
model_path=resume_train_model_path,
initial_epoch=initial_epoch or 0,
initial_meta={}
)
return None
| 0.898133 | 0.27282 |
import argparse
import logging
import os
import pickle
from pathlib import Path
from shutil import rmtree
from typing import Dict, List
from sciencebeam_trainer_delft.utils.misc import parse_dict, merge_dicts
from sciencebeam_trainer_delft.utils.io import (
get_compression_wrapper,
FileContainer,
open_file_container
)
from sciencebeam_trainer_delft.utils.cli import (
add_default_arguments,
process_default_args,
initialize_and_call_main
)
LOGGER = logging.getLogger(__name__)
SOURCE_URL_META_FILENAME = '.source-url'
def get_source_url_meta_file_path(target_directory: str) -> Path:
return Path(target_directory, SOURCE_URL_META_FILENAME)
def copy_file_container_with_source_meta(
file_container: FileContainer,
target_directory: str):
files = file_container.list_files()
LOGGER.debug('files: %s', files)
if not files:
raise FileNotFoundError('no files found in %s' % file_container)
if os.path.exists(target_directory):
rmtree(target_directory)
os.makedirs(target_directory, exist_ok=True)
target_filepath_list = []
for file_ref in files:
relative_filename = file_ref.basename
relative_output_filename = get_compression_wrapper(
relative_filename
).strip_compression_filename_ext(relative_filename)
# source_filepath = os.path.join(source_url, relative_filename)
target_filepath = os.path.join(target_directory, relative_output_filename)
target_filepath_list.append(target_filepath)
LOGGER.debug('copying %s to %s', file_ref, target_filepath)
file_ref.copy_to(target_filepath)
return target_filepath_list
def copy_directory_with_source_meta(source_url: str, target_directory: str, force: bool = False):
LOGGER.debug('source_url: %s, target_directory: %s', source_url, target_directory)
source_url_meta_file = get_source_url_meta_file_path(target_directory)
current_source_url = (
source_url_meta_file.read_text().strip()
if source_url_meta_file.exists()
else None
)
if not force and current_source_url == source_url:
LOGGER.debug(
'current source_url of %s already matches (skipping): %s',
target_directory, current_source_url
)
return []
with open_file_container(source_url) as file_container:
result = copy_file_container_with_source_meta(
file_container,
target_directory
)
LOGGER.debug('setting %s to %s', source_url_meta_file, source_url)
source_url_meta_file.write_text(source_url)
return result
def validate_pickle_file(pickle_file: str):
with open(pickle_file, 'rb') as fp:
pickle.load(fp)
LOGGER.info('validated pickle file: %s', pickle_file)
def validate_pickle_files(pickle_files: List[str]):
for pickle_file in pickle_files:
validate_pickle_file(pickle_file)
def is_pickle_file(filename: str) -> bool:
return filename.endswith('.pkl')
def filter_pickle_files(filenames: List[str]) -> List[str]:
return [filename for filename in filenames if is_pickle_file(filename)]
def install_model(
model_base_path: str, model_name: str, model_source_url: str,
force: bool = False, validate_pickles: bool = False):
LOGGER.debug(
'model_base_path: %s, model_name: %s, model_source_url: %s',
model_base_path, model_name, model_source_url
)
target_directory = os.path.join(model_base_path, model_name)
target_files = copy_directory_with_source_meta(
model_source_url, target_directory, force=force
)
if validate_pickles:
validate_pickle_files(filter_pickle_files(target_files))
LOGGER.info('copied model %s to %s (%s)', model_source_url, target_directory, target_files)
def install_models(
model_base_path: str, model_source_dict: Dict[str, str],
force: bool = False, validate_pickles: bool = False):
LOGGER.debug('model_base_path: %s, model_source_dict: %s', model_base_path, model_source_dict)
for model_name, model_source_url in model_source_dict.items():
install_model(
model_base_path, model_name, model_source_url,
force=force, validate_pickles=validate_pickles
)
def parse_model_source_expr(model_source_expr: str) -> Dict[str, str]:
LOGGER.debug('model_source_expr: %s', model_source_expr)
return parse_dict(model_source_expr, delimiter='|')
def parse_args(argv: List[str] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Install model(s)"
)
parser.add_argument(
"--model-base-path",
required=True,
help=(
"The base path for the local models. It will install the models to a"
" sub-directory using the model name."
)
)
parser.add_argument(
"--install",
nargs='+',
required=True,
type=parse_model_source_expr,
help=(
"The models to install, in the form: <model name>=<url>"
"\n (multiple models can be specified by using the pipe ('|') separator"
" or using the --install parameter multiple times"
)
)
parser.add_argument(
"--force",
action="store_true",
help="Force install model even if already installed from the source url"
)
parser.add_argument(
"--validate-pickles",
action="store_true",
help="Validate .pkl files after copying (e.g. package structure may have changed)"
)
add_default_arguments(parser)
return parser.parse_args(argv)
def run(args: argparse.Namespace):
install_models(
model_base_path=args.model_base_path,
model_source_dict=merge_dicts(args.install),
force=args.force,
validate_pickles=args.validate_pickles
)
def main(argv: List[str] = None):
args = parse_args(argv)
process_default_args(args)
run(args)
if __name__ == "__main__":
initialize_and_call_main(main)
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/tools/install_models.py
|
install_models.py
|
import argparse
import logging
import os
import pickle
from pathlib import Path
from shutil import rmtree
from typing import Dict, List
from sciencebeam_trainer_delft.utils.misc import parse_dict, merge_dicts
from sciencebeam_trainer_delft.utils.io import (
get_compression_wrapper,
FileContainer,
open_file_container
)
from sciencebeam_trainer_delft.utils.cli import (
add_default_arguments,
process_default_args,
initialize_and_call_main
)
LOGGER = logging.getLogger(__name__)
SOURCE_URL_META_FILENAME = '.source-url'
def get_source_url_meta_file_path(target_directory: str) -> Path:
return Path(target_directory, SOURCE_URL_META_FILENAME)
def copy_file_container_with_source_meta(
file_container: FileContainer,
target_directory: str):
files = file_container.list_files()
LOGGER.debug('files: %s', files)
if not files:
raise FileNotFoundError('no files found in %s' % file_container)
if os.path.exists(target_directory):
rmtree(target_directory)
os.makedirs(target_directory, exist_ok=True)
target_filepath_list = []
for file_ref in files:
relative_filename = file_ref.basename
relative_output_filename = get_compression_wrapper(
relative_filename
).strip_compression_filename_ext(relative_filename)
# source_filepath = os.path.join(source_url, relative_filename)
target_filepath = os.path.join(target_directory, relative_output_filename)
target_filepath_list.append(target_filepath)
LOGGER.debug('copying %s to %s', file_ref, target_filepath)
file_ref.copy_to(target_filepath)
return target_filepath_list
def copy_directory_with_source_meta(source_url: str, target_directory: str, force: bool = False):
LOGGER.debug('source_url: %s, target_directory: %s', source_url, target_directory)
source_url_meta_file = get_source_url_meta_file_path(target_directory)
current_source_url = (
source_url_meta_file.read_text().strip()
if source_url_meta_file.exists()
else None
)
if not force and current_source_url == source_url:
LOGGER.debug(
'current source_url of %s already matches (skipping): %s',
target_directory, current_source_url
)
return []
with open_file_container(source_url) as file_container:
result = copy_file_container_with_source_meta(
file_container,
target_directory
)
LOGGER.debug('setting %s to %s', source_url_meta_file, source_url)
source_url_meta_file.write_text(source_url)
return result
def validate_pickle_file(pickle_file: str):
with open(pickle_file, 'rb') as fp:
pickle.load(fp)
LOGGER.info('validated pickle file: %s', pickle_file)
def validate_pickle_files(pickle_files: List[str]):
for pickle_file in pickle_files:
validate_pickle_file(pickle_file)
def is_pickle_file(filename: str) -> bool:
return filename.endswith('.pkl')
def filter_pickle_files(filenames: List[str]) -> List[str]:
return [filename for filename in filenames if is_pickle_file(filename)]
def install_model(
model_base_path: str, model_name: str, model_source_url: str,
force: bool = False, validate_pickles: bool = False):
LOGGER.debug(
'model_base_path: %s, model_name: %s, model_source_url: %s',
model_base_path, model_name, model_source_url
)
target_directory = os.path.join(model_base_path, model_name)
target_files = copy_directory_with_source_meta(
model_source_url, target_directory, force=force
)
if validate_pickles:
validate_pickle_files(filter_pickle_files(target_files))
LOGGER.info('copied model %s to %s (%s)', model_source_url, target_directory, target_files)
def install_models(
model_base_path: str, model_source_dict: Dict[str, str],
force: bool = False, validate_pickles: bool = False):
LOGGER.debug('model_base_path: %s, model_source_dict: %s', model_base_path, model_source_dict)
for model_name, model_source_url in model_source_dict.items():
install_model(
model_base_path, model_name, model_source_url,
force=force, validate_pickles=validate_pickles
)
def parse_model_source_expr(model_source_expr: str) -> Dict[str, str]:
LOGGER.debug('model_source_expr: %s', model_source_expr)
return parse_dict(model_source_expr, delimiter='|')
def parse_args(argv: List[str] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Install model(s)"
)
parser.add_argument(
"--model-base-path",
required=True,
help=(
"The base path for the local models. It will install the models to a"
" sub-directory using the model name."
)
)
parser.add_argument(
"--install",
nargs='+',
required=True,
type=parse_model_source_expr,
help=(
"The models to install, in the form: <model name>=<url>"
"\n (multiple models can be specified by using the pipe ('|') separator"
" or using the --install parameter multiple times"
)
)
parser.add_argument(
"--force",
action="store_true",
help="Force install model even if already installed from the source url"
)
parser.add_argument(
"--validate-pickles",
action="store_true",
help="Validate .pkl files after copying (e.g. package structure may have changed)"
)
add_default_arguments(parser)
return parser.parse_args(argv)
def run(args: argparse.Namespace):
install_models(
model_base_path=args.model_base_path,
model_source_dict=merge_dicts(args.install),
force=args.force,
validate_pickles=args.validate_pickles
)
def main(argv: List[str] = None):
args = parse_args(argv)
process_default_args(args)
run(args)
if __name__ == "__main__":
initialize_and_call_main(main)
| 0.560734 | 0.101634 |
import argparse
import concurrent.futures
import logging
import json
import os
from collections import OrderedDict
from typing import Dict, List, Optional
from tqdm.auto import tqdm
from sciencebeam_trainer_delft.utils.io import open_file
from sciencebeam_trainer_delft.utils.cli import (
add_default_arguments,
process_default_args,
initialize_and_call_main
)
LOGGER = logging.getLogger(__name__)
class OutputFormats:
TEXT = 'text'
JSON = 'json'
ALL_OUTPUT_FORMATS = [OutputFormats.TEXT, OutputFormats.JSON]
def read_json(path: str) -> dict:
with open_file(path, mode='r') as fp:
return json.load(fp)
def get_checkpoints_json(checkpoint_path: str) -> dict:
return read_json(os.path.join(checkpoint_path, 'checkpoints.json'))
def get_checkpoint_urls(checkpoints_json: dict) -> List[str]:
return sorted({
checkpoint['path']
for checkpoint in checkpoints_json.get('checkpoints', [])
})
def get_last_checkpoint_url(checkpoints_json: dict) -> Optional[str]:
checkpoint_urls = get_checkpoint_urls(checkpoints_json)
return checkpoint_urls[-1] if checkpoint_urls else None
def get_checkpoint_meta(checkpoint_path: str) -> dict:
return read_json(os.path.join(checkpoint_path, 'meta.json'))
def get_checkpoint_meta_map(
checkpoint_urls: List[str],
max_workers: int) -> Dict[str, dict]:
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(
lambda path: (path, get_checkpoint_meta(path)),
checkpoint_urls
)
return dict(tqdm(results, total=len(checkpoint_urls)))
def get_checkpoint_meta_map_sorted_by_f1(checkpoint_meta_map: Dict[str, dict]):
return OrderedDict(sorted(
checkpoint_meta_map.items(),
key=lambda item: item[1].get('f1') or 0
))
def get_checkpoint_summary_list(
checkpoint_meta_map_sorted_by_f1: Dict[str, dict],
last_checkpoint: dict,
limit: int
) -> List[dict]:
last_checkpoint_path = last_checkpoint.get('path')
best_meta = list(checkpoint_meta_map_sorted_by_f1.values())[-1]
best_f1 = best_meta.get('f1')
return [
{
**meta,
'path': path,
'is_last': path == last_checkpoint_path,
'is_best': meta.get('f1') == best_f1
}
for path, meta in (
list(checkpoint_meta_map_sorted_by_f1.items())[-limit:]
)
]
def format_checkpoint_summary_as_text(
checkpoint_summary_list: List[dict]) -> str:
return 'best checkpoints:\n%s' % '\n\n'.join([
'%05d: %s (%s)%s%s' % (
int(checkpoint_summary.get('epoch', 0)),
checkpoint_summary.get('f1'),
checkpoint_summary.get('path'),
' (last)' if checkpoint_summary.get('is_last') else '',
' (best)' if checkpoint_summary.get('is_best') else ''
)
for checkpoint_summary in checkpoint_summary_list
])
def format_checkpoint_summary(
checkpoint_summary_list: List[dict],
output_format: str) -> str:
if output_format == OutputFormats.TEXT:
return format_checkpoint_summary_as_text(
checkpoint_summary_list
)
if output_format == OutputFormats.JSON:
return json.dumps(checkpoint_summary_list, indent=2)
raise ValueError('unsupported output format: %s' % output_format)
def checkpoint_summary(
checkpoint_path: str,
max_workers: int,
limit: int,
output_format: str):
LOGGER.info('checkpoint_path: %s', checkpoint_path)
checkpoints_json = get_checkpoints_json(checkpoint_path)
LOGGER.debug('checkpoints_json: %s', checkpoints_json)
checkpoint_urls = get_checkpoint_urls(checkpoints_json)
LOGGER.debug('checkpoint_urls: %s', checkpoint_urls)
last_checkpoint = checkpoints_json.get('last_checkpoint')
if last_checkpoint:
LOGGER.info('last checkpoint: %s', last_checkpoint)
if not checkpoint_urls:
raise RuntimeError('no checkpoints found')
checkpoint_meta_map = get_checkpoint_meta_map(
checkpoint_urls,
max_workers=max_workers
)
LOGGER.debug('checkpoint_meta_map: %s', checkpoint_meta_map)
checkpoint_meta_map_sorted_by_f1 = get_checkpoint_meta_map_sorted_by_f1(
checkpoint_meta_map
)
checkpoint_summary_list = get_checkpoint_summary_list(
checkpoint_meta_map_sorted_by_f1=checkpoint_meta_map_sorted_by_f1,
last_checkpoint=last_checkpoint,
limit=limit,
)
formatted_summary = format_checkpoint_summary(
checkpoint_summary_list=checkpoint_summary_list,
output_format=output_format
)
print(formatted_summary)
def parse_args(argv: List[str] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Checkpoints related summary"
)
parser.add_argument(
"--output-format",
choices=ALL_OUTPUT_FORMATS,
default=OutputFormats.TEXT,
help="The desired output format."
)
parser.add_argument(
"--checkpoint",
required=True,
help="The base path of the checkpoints."
)
parser.add_argument(
"--max-workers",
type=int,
default=20,
help="Maximum number of workers for IO requests"
)
parser.add_argument(
"--limit",
type=int,
default=5,
help="Maximum number results to show"
)
add_default_arguments(parser)
return parser.parse_args(argv)
def run(args: argparse.Namespace):
checkpoint_summary(
checkpoint_path=args.checkpoint,
max_workers=args.max_workers,
limit=args.limit,
output_format=args.output_format
)
def main(argv: List[str] = None):
args = parse_args(argv)
process_default_args(args)
run(args)
if __name__ == "__main__":
initialize_and_call_main(main)
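# Illustrative CLI invocation (the checkpoint path is hypothetical; the module path
# matches this file's location in the package):
#   python -m sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints \
#       --checkpoint ./data/checkpoints --limit 3 --output-format json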
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/tools/checkpoints.py
|
checkpoints.py
|
| 0.718496 | 0.162413 |
import logging
import argparse
from argparse import _ActionsContainer as ArgParseActionsContainer
from typing import List
from sciencebeam_trainer_delft.utils.misc import parse_number_ranges
from sciencebeam_trainer_delft.sequence_labelling.utils.train_notify import (
add_train_notification_arguments
)
from sciencebeam_trainer_delft.sequence_labelling.wrapper import (
get_default_batch_size,
get_default_stateful
)
from sciencebeam_trainer_delft.sequence_labelling.config import (
DEFAULT_CHAR_INPUT_DROPOUT,
DEFAULT_CHAR_LSTM_DROPOUT
)
from sciencebeam_trainer_delft.sequence_labelling.models import get_model_names
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti import (
DEFAULT_STOP_EPSILON_VALUE,
DEFAULT_STOP_WINDOW_SIZE
)
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
TAG_OUTPUT_FORMATS
)
from sciencebeam_trainer_delft.sequence_labelling.evaluation import (
EvaluationOutputFormats,
EVALUATION_OUTPUT_FORMATS
)
from sciencebeam_trainer_delft.sequence_labelling.transfer_learning import (
add_transfer_learning_arguments
)
LOGGER = logging.getLogger(__name__)
GROBID_MODEL_NAMES = [
'affiliation-address', 'citation', 'date', 'figure', 'fulltext', 'header',
'name', 'name-citation', 'name-header', 'patent', 'reference-segmenter',
'segmentation', 'software', 'table'
]
DEFAULT_RANDOM_SEED = 42
DEFAULT_TAG_OUTPUT_FORMAT = TagOutputFormats.XML
def add_common_arguments(
parser: argparse.ArgumentParser,
max_sequence_length_default: int = None):
input_group = parser.add_argument_group('input')
input_group.add_argument(
"--input",
nargs='+',
action='append',
help="provided training file"
)
input_group.add_argument(
"--shuffle-input",
action="store_true",
help="Shuffle the input before splitting"
)
input_group.add_argument(
"--limit",
type=int,
help=(
"limit the number of training samples."
" With more than one input file, the limit will be applied to"
" each of the input files individually"
)
)
parser.add_argument(
"--random-seed",
type=int,
default=DEFAULT_RANDOM_SEED,
help="Set the random seed for reproducibility"
)
parser.add_argument(
"--batch-size", type=int, default=get_default_batch_size(),
help="batch size"
)
parser.add_argument(
"--max-sequence-length", type=int,
default=max_sequence_length_default,
help="maximum sequence length"
)
parser.add_argument(
"--no-use-lmdb", action="store_true",
help="Do not use LMDB embedding cache (load embeddings into memory instead)"
)
parser.add_argument("--multiprocessing", action="store_true", help="Use multiprocessing")
parser.add_argument("--quiet", action="store_true", help="Only log errors")
parser.add_argument(
"--save-input-to-and-exit",
help=(
"If set, saves the input to the specified path and exits."
" This can be useful to retrain the model outside GROBID."
)
)
parser.add_argument(
"--log-file",
help=(
"If set, saves the output to the specified log file."
" This may also be a file in a bucket, in which case it will be uploaded at the end."
" Add the .gz extension if you wish to compress the file."
)
)
parser.add_argument("--job-dir", help="job dir (only used when running via ai platform)")
def add_model_path_argument(parser: argparse.ArgumentParser, **kwargs):
parser.add_argument("--model-path", **kwargs)
def add_fold_count_argument(parser: argparse.ArgumentParser, **kwargs):
parser.add_argument("--fold-count", type=int, default=1, **kwargs)
def add_eval_output_format_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--eval-output-format",
choices=EVALUATION_OUTPUT_FORMATS,
default=EvaluationOutputFormats.TEXT
)
def add_eval_first_entity_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--eval-first-entity",
action="store_true",
help=''.join([
            'If set, additionally evaluates the first entity (e.g. first_<author>).'
])
)
def add_eval_output_path_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--eval-output-path",
help='If specified, saves the evaluation to the specified path in the JSON format'
)
def add_eval_output_arguments(parser: argparse.ArgumentParser):
add_eval_output_format_argument(parser)
add_eval_first_entity_argument(parser)
add_eval_output_path_argument(parser)
def add_eval_input_arguments(parser: argparse.ArgumentParser):
parser.add_argument(
"--eval-input",
nargs='+',
action='append',
help=' '.join([
"Evaluation data at the end of training. If not specified,",
"it will use a slice of the training data"
])
)
parser.add_argument(
"--eval-limit",
type=int,
help=' '.join([
"Limit the number of documents to use for evaluation.",
"This is mostly for testing to make evaluation faster."
])
)
def add_dl_eval_model_arguments(parser: argparse.ArgumentParser):
parser.add_argument(
"--eval-max-sequence-length",
type=int,
help=' '.join([
"Maximum sequence length to use for evaluation.",
"If not specified, no limit will be applied."
])
)
parser.add_argument(
"--eval-input-window-stride",
type=int,
help="Should be equal or less than eval max sequence length"
)
parser.add_argument(
"--eval-batch-size",
type=int,
help=' '.join([
"The batch size to be used for evaluation.",
"If not specified, the training batch size is used.",
"This may be useful to evaluate on longer sequences",
"that could require a smaller batch size."
])
)
def add_tag_output_format_argument(parser: argparse.ArgumentParser, **kwargs):
parser.add_argument(
"--tag-output-format",
default=DEFAULT_TAG_OUTPUT_FORMAT,
choices=TAG_OUTPUT_FORMATS,
help="output format for tag results",
**kwargs
)
def add_tag_output_path_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--tag-output-path",
help='If specified, saves the tag result to the specified path'
)
def add_tag_transformed_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--tag-transformed",
action='store_true',
help=(
'If enabled, the output will contain the transformed dataset (if any).'
' More specifically, that will for example contain the "unrolled" data.'
)
)
def add_output_argument(parser: ArgParseActionsContainer, **kwargs):
parser.add_argument("--output", help="directory where to save a trained model", **kwargs)
def add_max_epoch_argument(parser: argparse.ArgumentParser, **kwargs):
parser.add_argument(
"--max-epoch", type=int, default=10,
help="max epoch to train to",
**kwargs
)
def add_stateful_argument(parser: argparse.ArgumentParser, **kwargs):
default_value = get_default_stateful()
parser.add_argument(
"--stateful",
dest="stateful",
default=default_value,
action="store_true",
help="Make RNNs stateful (required for truncated BPTT)",
**kwargs
)
parser.add_argument(
"--no-stateful",
dest="stateful",
default=default_value,
action="store_false",
help="Disable statefulness (default)",
**kwargs
)
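# Note: --stateful and --no-stateful write to the same "stateful" destination, so
# whichever flag appears last on the command line wins; if neither is given, the value
# falls back to get_default_stateful().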
def add_input_window_stride_argument(parser: argparse.ArgumentParser, **kwargs):
parser.add_argument(
"--input-window-stride",
type=int,
help="Should be equal or less than max sequence length",
**kwargs
)
def add_train_arguments(parser: argparse.ArgumentParser):
parser.add_argument(
"--architecture", default='BidLSTM_CRF',
choices=get_model_names(),
help="type of model architecture to be used"
)
parser.add_argument("--use-ELMo", action="store_true", help="Use ELMo contextual embeddings")
parser.add_argument(
"--max-char-length",
type=int,
default=30,
help="The maximum number of chars used by the model"
)
parser.add_argument(
"--additional-token-feature-indices",
type=parse_number_ranges,
help="".join([
"Additional feature values that should be used as tokens.",
" e.g. 0 or 0-3.",
" If blank, no additional token features will be used."
])
)
parser.add_argument(
"--text-feature-indices",
type=parse_number_ranges,
help="".join([
"Feature values that should be treated as text input.",
" e.g. 0 or 0-3.",
" If blank, no additext features will be used.",
" Cannot be used together with --additional-token-feature-indices.",
" Text features will get tokenized."
" If word embeddings are used, then the number of tokens will depend on",
" --concatenated-embeddings-token-count.",
" Tokens from text features replace regular tokens from the training data."
])
)
parser.add_argument(
"--unroll-text-feature-index",
type=int,
help="".join([
"Tokenizes the text at the specified index.",
" Each token will become a separate token.",
" The features will be duplicated for each token.",
" Labels will also be duplicated for each token.",
" Where a label refers to the beginning of a tag,",
" this will only be used for the first token.",
" All other labels will be the intermediate version of the tag."
" The max sequence length will get applied to the unrolled tokens."
" Additionally a new token will be added, with the values:"
" LINESTART, LINEIN, LINEEND"
])
)
parser.add_argument(
"--concatenated-embeddings-token-count",
type=int,
help="".join([
"The number of tokens to concatenate as word embeddings.",
" If not specified, it concatenate the main token with any",
" --additional-token-feature-indices (if any).",
" This option is mainly useful in combination with --text-feature-indices.",
" It has no effect, if no word embeddings are used."
])
)
features_group = parser.add_argument_group('features')
features_group.add_argument("--use-features", action="store_true", help="Use features")
features_group.add_argument(
"--features-indices", "--feature-indices",
type=parse_number_ranges,
help="The feature indices to use. e.g. 7-10. If blank, all of the features will be used."
)
features_group.add_argument(
"--continuous-features-indices",
type=parse_number_ranges,
help=(
"The feature indices to use that are continous. e.g. 7-10."
" If blank, features will be assumed to be categorical."
)
)
features_group.add_argument(
"--features-embedding-size", "--feature-embedding-size",
type=int,
help="size of feature embedding, use 0 to disable embedding"
)
features_group.add_argument(
"--use-features-indices-input", action="store_true",
help="Use features indices values (should be inferred from the model)"
)
features_group.add_argument(
"--features-lstm-units", type=int,
help="Number of LSTM units used by the features"
)
add_stateful_argument(parser)
add_input_window_stride_argument(parser)
output_group = parser.add_argument_group('output')
add_output_argument(output_group)
output_group.add_argument("--checkpoint", help="directory where to save a checkpoint model")
output_group.add_argument(
"--checkpoint-epoch-interval",
type=int,
default=1,
help="save checkpoints every n epochs"
)
parser.add_argument(
"--embedding", default="glove-6B-50d",
help="name of word embedding"
)
parser.add_argument(
"--preload-embedding",
help=" ".join([
"Name or URL to embedding to preload.",
"This can be useful in combination with resuming model training."
])
)
features_group.add_argument(
"--no-embedding",
dest="use_word_embeddings",
default=True,
action="store_false",
help="Disable the use of word embedding"
)
parser.add_argument(
"--char-embedding-size", type=int, default=25,
help="size of char embedding"
)
parser.add_argument(
"--char-lstm-units", type=int, default=25,
help="number of list units for chars"
)
parser.add_argument(
"--char-input-mask-zero", action='store_true',
help="enables masking of zero for the char input"
)
parser.add_argument(
"--char-input-dropout", type=float, default=DEFAULT_CHAR_INPUT_DROPOUT,
help="dropout for char input"
)
parser.add_argument(
"--char-lstm-dropout", type=float, default=DEFAULT_CHAR_LSTM_DROPOUT,
help="dropout for char lstm"
)
parser.add_argument(
"--word-lstm-units", type=int, default=100,
help="number of lstm units for words"
)
parser.add_argument(
"--dropout", type=float, default=0.5,
help="main dropout"
)
parser.add_argument(
"--recurrent-dropout", type=float, default=0.5,
help="recurrent dropout"
)
add_max_epoch_argument(parser)
parser.add_argument(
"--early-stopping-patience", type=int, default=10,
help="how many epochs to continue training after the f1 score hasn't improved"
)
parser.add_argument(
"--resume-train-model-path",
help="path to the model training should be resumed from (e.g. path to checkpoint)"
)
parser.add_argument(
"--initial-epoch",
type=int,
default=0,
help="Sets the initial epoch for model training."
)
parser.add_argument(
"--auto-resume", action='store_true',
help="enables auto-resuming training using checkpoints"
)
add_transfer_learning_arguments(parser)
add_train_notification_arguments(parser)
def add_wapiti_train_arguments(parser: argparse.ArgumentParser):
add_output_argument(parser)
add_max_epoch_argument(parser)
parser.add_argument("--wapiti-template", required=True)
parser.add_argument(
"--wapiti-gzip",
action="store_true",
help="whether to gzip wapiti models before saving"
)
parser.add_argument(
"--wapiti-stop-epsilon-value",
default=DEFAULT_STOP_EPSILON_VALUE
)
parser.add_argument(
"--wapiti-stop-window-size",
type=int,
default=DEFAULT_STOP_WINDOW_SIZE
)
add_train_notification_arguments(parser)
def get_wapiti_train_args(args: argparse.Namespace) -> dict:
return dict(
stop_epsilon_value=args.wapiti_stop_epsilon_value,
stop_window_size=args.wapiti_stop_window_size
)
def add_wapiti_install_arguments(parser: argparse.ArgumentParser):
example_url = "https://github.com/kermitt2/Wapiti/archive/master.tar.gz"
parser.add_argument(
"--wapiti-install-source",
help="source file to install wapiti from, e.g. %s" % example_url
)
def add_all_non_positional_arguments(parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_train_arguments(parser)
def add_model_positional_argument(parser: argparse.ArgumentParser):
parser.add_argument("model", nargs='?', choices=GROBID_MODEL_NAMES)
def _flatten_input_paths(input_paths_list: List[List[str]]) -> List[str]:
if not input_paths_list:
return []
return [input_path for input_paths in input_paths_list for input_path in input_paths]
def process_args(args: argparse.Namespace) -> None:
args.input = _flatten_input_paths(args.input)
try:
args.eval_input = _flatten_input_paths(args.eval_input)
except AttributeError:
pass
def create_argument_parser() -> argparse.ArgumentParser:
return argparse.ArgumentParser(
description="Trainer for GROBID models",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
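# Sketch of how these helpers are typically composed (hedged; the actual wiring lives in
# the grobid_trainer CLI, which is not shown here):
#   parser = create_argument_parser()
#   add_model_positional_argument(parser)
#   add_all_non_positional_arguments(parser)
#   args = parser.parse_args(argv)
#   process_args(args)   # flattens the repeated --input / --eval-input lists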
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/tools/grobid_trainer/cli_args.py
|
cli_args.py
|
| 0.803097 | 0.150496 |
import logging
import time
import tempfile
import os
from collections import Counter
from datetime import datetime, timezone
from itertools import islice
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.numpy import shuffle_arrays
from sciencebeam_trainer_delft.utils.io import (
write_text,
auto_uploading_output_file
)
from sciencebeam_trainer_delft.embedding import EmbeddingManager
from sciencebeam_trainer_delft.sequence_labelling.utils.train_notify import (
TrainNotificationManager,
notify_train_start,
notify_train_success,
notify_train_error
)
from sciencebeam_trainer_delft.sequence_labelling.wrapper import (
Sequence
)
from sciencebeam_trainer_delft.sequence_labelling.reader import load_data_and_labels_crf_file
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti_adapters import (
WapitiModelAdapter,
WapitiModelTrainAdapter
)
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
get_tag_result,
iter_format_tag_result
)
from sciencebeam_trainer_delft.sequence_labelling.evaluation import (
EvaluationOutputFormats,
ClassificationResult
)
from sciencebeam_trainer_delft.sequence_labelling.input_info import (
iter_flat_batch_tokens,
iter_flat_features,
get_quantiles,
get_quantiles_feature_value_length_by_index,
get_feature_counts,
get_suggested_feature_indices,
format_dict,
format_indices
)
from sciencebeam_trainer_delft.sequence_labelling.utils.checkpoints import (
get_resume_train_model_params
)
LOGGER = logging.getLogger(__name__)
DEFAULT_RANDOM_SEED = 42
DEFAULT_TAG_OUTPUT_FORMAT = TagOutputFormats.XML
def set_random_seeds(random_seed: int):
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
def get_default_training_data(model: str) -> str:
return 'data/sequenceLabelling/grobid/' + model + '/' + model + '-060518.train'
def log_data_info(x: np.array, y: np.array, features: np.array):
LOGGER.info('x sample: %s (y: %s)', x[:1][:10], y[:1][:1])
LOGGER.info(
'feature dimensions of first sample, word: %s',
[{index: value for index, value in enumerate(features[0][0])}] # noqa pylint: disable=unnecessary-comprehension
)
def _load_data_and_labels_crf_files(
input_paths: List[str], limit: int = None) -> Tuple[np.array, np.array, np.array]:
if len(input_paths) == 1:
return load_data_and_labels_crf_file(input_paths[0], limit=limit)
x_list = []
y_list = []
features_list = []
for input_path in input_paths:
LOGGER.debug('calling load_data_and_labels_crf_file: %s', input_path)
x, y, f = load_data_and_labels_crf_file(
input_path,
limit=limit
)
x_list.append(x)
y_list.append(y)
features_list.append(f)
return np.concatenate(x_list), np.concatenate(y_list), np.concatenate(features_list)
def get_clean_features_mask(features_all: np.array) -> List[bool]:
feature_lengths = Counter((
len(features_vector)
for features_doc in features_all
for features_vector in features_doc
))
if len(feature_lengths) <= 1:
return [True] * len(features_all)
    expected_feature_length = next(iter(feature_lengths))
LOGGER.info('cleaning features, expected_feature_length=%s', expected_feature_length)
return [
all(len(features_vector) == expected_feature_length for features_vector in features_doc)
for features_doc in features_all
]
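# Example (illustrative): if the first document's tokens each carry 10 feature values but
# a later document contains a token with only 9, that later document is masked out so the
# downstream feature arrays stay rectangular. Note that the "expected" length is simply
# the first length encountered, not necessarily the most frequent one.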
def get_clean_x_y_features(x: np.array, y: np.array, features: np.array):
clean_features_mask = get_clean_features_mask(features)
if sum(clean_features_mask) != len(clean_features_mask):
LOGGER.info(
'ignoring %d documents with inconsistent features',
len(clean_features_mask) - sum(clean_features_mask)
)
return (
x[clean_features_mask],
y[clean_features_mask],
features[clean_features_mask]
)
return x, y, features
def load_data_and_labels(
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
clean_features: bool = True,
random_seed: int = DEFAULT_RANDOM_SEED,
download_manager: DownloadManager = None):
assert download_manager
assert input_paths
LOGGER.info('loading data from: %s', input_paths)
downloaded_input_paths = [
download_manager.download_if_url(input_path)
for input_path in input_paths
]
x_all, y_all, f_all = _load_data_and_labels_crf_files(
downloaded_input_paths,
limit=limit
)
if shuffle_input:
shuffle_arrays([x_all, y_all, f_all], random_seed=random_seed)
log_data_info(x_all, y_all, f_all)
if clean_features:
(x_all, y_all, f_all) = get_clean_x_y_features(
x_all, y_all, f_all
)
return x_all, y_all, f_all
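# Illustrative call (the paths and the DownloadManager construction are hypothetical):
#   x_all, y_all, f_all = load_data_and_labels(
#       input_paths=['data/header-1.train', 'data/header-2.train'],
#       limit=1000, shuffle_input=True,
#       download_manager=DownloadManager()
#   )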
def notify_model_train_start(
model: Union[Sequence, WapitiModelTrainAdapter],
train_notification_manager: Optional[TrainNotificationManager],
output_path: Optional[str]
):
notify_train_start(
train_notification_manager,
model_path=model.get_model_output_path(output_path),
checkpoints_path=model.log_dir,
resume_train_model_path=model.model_path,
initial_epoch=model.training_config.initial_epoch
)
def do_train(
model: Union[Sequence, WapitiModelTrainAdapter],
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
x_train, x_valid, y_train, y_valid, features_train, features_valid = train_test_split(
x_all, y_all, features_all, test_size=0.1, shuffle=False
)
LOGGER.info('%d train sequences', len(x_train))
LOGGER.info('%d validation sequences', len(x_valid))
notify_model_train_start(
model,
train_notification_manager,
output_path=output_path
)
start_time = time.time()
model.train(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid
)
runtime = round(time.time() - start_time, 3)
LOGGER.info("training runtime: %s seconds ", runtime)
# saving the model
if output_path:
LOGGER.info('saving model to: %s', output_path)
model.save(output_path)
else:
model.save()
notify_train_success(
train_notification_manager,
model_path=model.get_model_output_path(output_path),
last_checkpoint_path=model.last_checkpoint_path
)
def do_train_with_error_notification(
model: Union[Sequence, WapitiModelTrainAdapter],
output_path: str = None,
train_notification_manager: TrainNotificationManager = None,
**kwargs):
model_path = model.get_model_output_path(output_path)
try:
do_train(
model=model,
output_path=output_path,
train_notification_manager=train_notification_manager,
**kwargs
)
except BaseException as error: # pylint: disable=broad-except
notify_train_error(
train_notification_manager,
model_path=model_path,
error=repr(error)
)
raise
def process_resume_train_model_params(
model: Sequence,
auto_resume: bool,
resume_train_model_path: Optional[str]
):
resume_train_model_params = get_resume_train_model_params(
log_dir=model.log_dir,
auto_resume=auto_resume,
resume_train_model_path=resume_train_model_path,
initial_epoch=model.training_config.initial_epoch
)
if resume_train_model_params:
model.load_from(resume_train_model_params.model_path)
model.training_config.initial_epoch = resume_train_model_params.initial_epoch
model.training_config.initial_meta = resume_train_model_params.initial_meta
# train a GROBID model with all available data
def train(
model_name: str,
embeddings_name, architecture='BidLSTM_CRF', use_ELMo=False,
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_sequence_length: int = 100,
max_epoch=100,
resume_train_model_path: str = None,
auto_resume: bool = False,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs):
model_name = get_model_name(
model_name, output_path=output_path, use_ELMo=use_ELMo
)
model = Sequence(
model_name,
max_epoch=max_epoch,
embeddings_name=embeddings_name,
embedding_manager=embedding_manager,
max_sequence_length=max_sequence_length,
model_type=architecture,
use_ELMo=use_ELMo,
**kwargs
)
process_resume_train_model_params(
model,
auto_resume=auto_resume,
resume_train_model_path=resume_train_model_path
)
do_train_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def wapiti_train(
model_name: str,
template_path: str,
output_path: str,
download_manager: DownloadManager,
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_epoch: int = 100,
train_notification_manager: TrainNotificationManager = None,
gzip_enabled: bool = False,
wapiti_binary_path: str = None,
wapiti_train_args: dict = None):
with tempfile.TemporaryDirectory(suffix='-wapiti') as temp_dir:
temp_model_path = os.path.join(temp_dir, 'model.wapiti')
model = WapitiModelTrainAdapter(
model_name=model_name,
template_path=template_path,
temp_model_path=temp_model_path,
max_epoch=max_epoch,
download_manager=download_manager,
gzip_enabled=gzip_enabled,
wapiti_binary_path=wapiti_binary_path,
wapiti_train_args=wapiti_train_args
)
do_train_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def output_classification_result(
classification_result: ClassificationResult,
eval_output_args: Optional[dict],
eval_input_paths: List[str] = None,
model_path: str = None,
model_summary_props: dict = None):
eval_output_args = eval_output_args or dict()
assert eval_output_args is not None
    output_format = eval_output_args.get('eval_output_format')
output_path = eval_output_args.get('eval_output_path')
eval_first_entity = eval_output_args.get('eval_first_entity')
if not output_format:
output_format = EvaluationOutputFormats.TEXT
if eval_first_entity:
classification_result = classification_result.with_first_entities()
meta: Dict[str, Any] = {}
meta['eval_timestamp'] = datetime.now(timezone.utc).isoformat()
if eval_input_paths:
meta['eval_input_paths'] = eval_input_paths
if model_path:
meta['model_path'] = model_path
if model_summary_props:
meta.update(model_summary_props)
if output_path:
LOGGER.info('writing evaluation to: %s', output_path)
write_text(output_path, classification_result.get_json_formatted_report(meta=meta))
if output_format == EvaluationOutputFormats.TEXT:
print("\nEvaluation:\n%s" % classification_result.get_text_formatted_report(
digits=4
))
elif output_format == EvaluationOutputFormats.JSON:
print(classification_result.get_json_formatted_report(meta=meta))
else:
print(classification_result.get_formatted_report(
output_format=output_format
))
def do_train_eval(
model: Union[Sequence, WapitiModelTrainAdapter],
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_input_paths: List[str] = None,
eval_limit: int = None,
eval_output_args: dict = None,
fold_count: int = 1,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
if eval_input_paths:
x_eval, y_eval, features_eval = load_data_and_labels(
input_paths=eval_input_paths, limit=eval_limit,
download_manager=download_manager
)
x_train_all, y_train_all, features_train_all = (
x_all, y_all, features_all
)
else:
x_train_all, x_eval, y_train_all, y_eval, features_train_all, features_eval = (
train_test_split(x_all, y_all, features_all, test_size=0.1, shuffle=False)
)
x_train, x_valid, y_train, y_valid, features_train, features_valid = train_test_split(
x_train_all, y_train_all, features_train_all, test_size=0.1, shuffle=False
)
LOGGER.info('%d train sequences', len(x_train))
LOGGER.info('%d validation sequences', len(x_valid))
LOGGER.info('%d evaluation sequences', len(x_eval))
notify_model_train_start(
model,
train_notification_manager,
output_path=output_path
)
start_time = time.time()
if fold_count == 1:
model.train(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid
)
else:
assert isinstance(model, Sequence), \
'nfold evaluation currently only supported for DL models'
model.train_nfold(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid,
fold_number=fold_count
)
runtime = round(time.time() - start_time, 3)
LOGGER.info("training runtime: %s seconds ", runtime)
# evaluation
classification_result = model.get_evaluation_result(
x_eval, y_eval, features=features_eval
)
output_classification_result(
classification_result,
eval_output_args=eval_output_args,
eval_input_paths=eval_input_paths,
model_path=model.get_model_output_path(output_path),
model_summary_props=model.model_summary_props
)
# saving the model
if output_path:
model.save(output_path)
else:
model.save()
notify_train_success(
train_notification_manager,
model_path=model.get_model_output_path(output_path),
last_checkpoint_path=model.last_checkpoint_path,
classification_result=classification_result
)
def do_train_eval_with_error_notification(
model: Union[Sequence, WapitiModelTrainAdapter],
output_path: str = None,
train_notification_manager: TrainNotificationManager = None,
**kwargs):
model_path = model.get_model_output_path(output_path)
try:
do_train_eval(
model=model,
output_path=output_path,
train_notification_manager=train_notification_manager,
**kwargs
)
except BaseException as error: # pylint: disable=broad-except
notify_train_error(
train_notification_manager,
model_path=model_path,
error=repr(error)
)
raise
# split data, train a GROBID model and evaluate it
def train_eval(
model_name: str,
embeddings_name, architecture='BidLSTM_CRF', use_ELMo=False,
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_input_paths: List[str] = None,
eval_limit: int = None,
eval_output_args: dict = None,
max_sequence_length: int = 100,
fold_count=1, max_epoch=100, batch_size=20,
resume_train_model_path: str = None,
auto_resume: bool = False,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs):
model_name = get_model_name(
model_name, output_path=output_path, use_ELMo=use_ELMo
)
model = Sequence(
model_name,
max_epoch=max_epoch,
embeddings_name=embeddings_name,
embedding_manager=embedding_manager,
max_sequence_length=max_sequence_length,
model_type=architecture,
use_ELMo=use_ELMo,
batch_size=batch_size,
fold_number=fold_count,
**kwargs
)
process_resume_train_model_params(
model,
auto_resume=auto_resume,
resume_train_model_path=resume_train_model_path
)
do_train_eval_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
eval_input_paths=eval_input_paths,
eval_limit=eval_limit,
eval_output_args=eval_output_args,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def wapiti_train_eval(
model_name: str,
template_path: str,
download_manager: DownloadManager,
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_input_paths: List[str] = None,
eval_limit: int = None,
eval_output_args: dict = None,
fold_count: int = 1,
max_epoch: int = 100,
train_notification_manager: TrainNotificationManager = None,
gzip_enabled: bool = False,
wapiti_binary_path: str = None,
wapiti_train_args: dict = None):
assert fold_count == 1, 'only fold_count == 1 supported'
with tempfile.TemporaryDirectory(suffix='-wapiti') as temp_dir:
temp_model_path = os.path.join(temp_dir, 'model.wapiti')
model = WapitiModelTrainAdapter(
model_name=model_name,
template_path=template_path,
temp_model_path=temp_model_path,
max_epoch=max_epoch,
download_manager=download_manager,
gzip_enabled=gzip_enabled,
wapiti_binary_path=wapiti_binary_path,
wapiti_train_args=wapiti_train_args
)
do_train_eval_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
eval_input_paths=eval_input_paths,
eval_limit=eval_limit,
eval_output_args=eval_output_args,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def do_eval_model(
model: Union[Sequence, WapitiModelAdapter],
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
split_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_output_args: dict = None,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
if split_input:
_, x_eval, _, y_eval, _, features_eval = train_test_split(
x_all, y_all, features_all, test_size=0.1, shuffle=False
)
else:
x_eval = x_all
y_eval = y_all
features_eval = features_all
LOGGER.info('%d evaluation sequences', len(x_eval))
# evaluation
classification_result = model.get_evaluation_result(
x_eval, y_eval, features=features_eval
)
output_classification_result(
classification_result,
eval_output_args=eval_output_args,
eval_input_paths=input_paths,
model_path=model.model_path,
model_summary_props=model.model_summary_props
)
def get_model_name(
model_name: str,
use_ELMo: bool = False,
output_path: str = None,
model_path: str = None):
    if not output_path and not model_path:
        model_name = 'grobid-' + model_name
if use_ELMo:
model_name += '-with_ELMo'
return model_name
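# Derived naming behaviour (from the branches above): get_model_name('header') returns
# 'grobid-header', and with use_ELMo=True it returns 'grobid-header-with_ELMo'; if an
# output_path or model_path is given, the 'grobid-' prefix is not added but the ELMo
# suffix still applies.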
def load_delft_model(
model_name: str,
use_ELMo: bool = False,
output_path: str = None,
model_path: str = None,
max_sequence_length: Optional[int] = 100,
fold_count: int = 1,
batch_size: int = 20,
embedding_manager: EmbeddingManager = None,
**kwargs):
model = Sequence(
get_model_name(
model_name,
use_ELMo=use_ELMo,
output_path=output_path,
model_path=model_path
),
embeddings_name=None,
embedding_manager=embedding_manager,
max_sequence_length=max_sequence_length,
batch_size=batch_size,
fold_number=fold_count,
**kwargs
)
assert model_path
model.load_from(model_path)
return model
def eval_model(
model_name: str,
use_ELMo: bool = False,
input_paths: List[str] = None,
output_path: str = None,
model_path: str = None,
limit: int = None,
shuffle_input: bool = False,
split_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_sequence_length: int = 100,
fold_count: int = 1,
batch_size: int = 20,
eval_output_args: dict = None,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs):
model = load_delft_model(
model_name=model_name,
use_ELMo=use_ELMo,
output_path=output_path,
model_path=model_path,
max_sequence_length=max_sequence_length,
fold_count=fold_count,
batch_size=batch_size,
embedding_manager=embedding_manager,
**kwargs
)
do_eval_model(
model,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
split_input=split_input,
eval_output_args=eval_output_args,
download_manager=download_manager
)
def wapiti_eval_model(
model_path: str,
download_manager: DownloadManager,
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
split_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
fold_count: int = 1,
eval_output_args: dict = None,
wapiti_binary_path: str = None):
assert fold_count == 1, 'only fold_count == 1 supported'
model = WapitiModelAdapter.load_from(
model_path,
download_manager=download_manager,
wapiti_binary_path=wapiti_binary_path
)
do_eval_model(
model,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
split_input=split_input,
eval_output_args=eval_output_args,
download_manager=download_manager
)
def do_tag_input(
model: Union[Sequence, WapitiModelAdapter],
tag_output_format: str = DEFAULT_TAG_OUTPUT_FORMAT,
tag_output_path: Optional[str] = None,
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
LOGGER.info('%d input sequences', len(x_all))
tag_result = model.iter_tag(
x_all,
output_format=None,
features=features_all
)
if LOGGER.isEnabledFor(logging.DEBUG):
if not isinstance(tag_result, dict):
tag_result = list(tag_result)
LOGGER.debug('actual raw tag_result: %s', tag_result)
if isinstance(model, Sequence) and model.tag_transformed:
dataset_transformer = model.dataset_transformer_factory()
expected_x_all, expected_y_all, expected_features_all = dataset_transformer.fit_transform(
x_all, y_all, features=features_all
)
else:
expected_x_all = x_all
expected_y_all = y_all
expected_features_all = features_all
expected_tag_result = get_tag_result(
texts=expected_x_all,
labels=expected_y_all
)
LOGGER.debug('actual raw expected_tag_result: %s', expected_tag_result)
formatted_tag_result_iterable = iter_format_tag_result(
tag_result,
output_format=tag_output_format,
expected_tag_result=expected_tag_result,
texts=expected_x_all,
features=expected_features_all,
model_name=model._get_model_name() # pylint: disable=protected-access
)
if tag_output_path:
LOGGER.info('writing tag results to: %r', tag_output_path)
with auto_uploading_output_file(tag_output_path) as fp:
for text in formatted_tag_result_iterable:
fp.write(text)
LOGGER.info('tag results written to: %r', tag_output_path)
else:
LOGGER.info('writing tag_result to stdout')
try:
for text in formatted_tag_result_iterable:
print(text, end='')
except BrokenPipeError:
LOGGER.info('received broken pipe error')
def tag_input(
model_name: str,
tag_output_format: str = DEFAULT_TAG_OUTPUT_FORMAT,
tag_output_path: Optional[str] = None,
use_ELMo: bool = False,
input_paths: List[str] = None,
output_path: str = None,
model_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_sequence_length: int = None,
input_window_stride: int = None,
stateful: bool = None,
fold_count: int = 1,
batch_size: int = 20,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs
):
model = load_delft_model(
model_name=model_name,
use_ELMo=use_ELMo,
output_path=output_path,
model_path=model_path,
max_sequence_length=max_sequence_length,
input_window_stride=input_window_stride,
stateful=stateful,
fold_count=fold_count,
batch_size=batch_size,
embedding_manager=embedding_manager,
**kwargs
)
do_tag_input(
model,
tag_output_format=tag_output_format,
tag_output_path=tag_output_path,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
def wapiti_tag_input(
model_path: str,
download_manager: DownloadManager,
tag_output_format: str = DEFAULT_TAG_OUTPUT_FORMAT,
tag_output_path: Optional[str] = None,
input_paths: List[str] = None,
limit: int = None,
random_seed: int = DEFAULT_RANDOM_SEED,
shuffle_input: bool = False,
wapiti_binary_path: str = None
):
model: WapitiModelAdapter = WapitiModelAdapter.load_from(
model_path,
download_manager=download_manager,
wapiti_binary_path=wapiti_binary_path
)
do_tag_input(
model=model,
tag_output_format=tag_output_format,
tag_output_path=tag_output_path,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
def print_input_info(
input_paths: List[str],
limit: int = None,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit,
download_manager=download_manager,
clean_features=False
)
seq_lengths = np.array([len(seq) for seq in x_all])
y_counts = Counter(
y_row
for y_doc in y_all
for y_row in y_doc
)
flat_features = list(iter_flat_features(features_all))
feature_lengths = Counter(map(len, flat_features))
print('number of input sequences: %d' % len(x_all))
print('sequence lengths: %s' % format_dict(get_quantiles(seq_lengths)))
print('token lengths: %s' % format_dict(get_quantiles(
map(len, iter_flat_batch_tokens(x_all))
)))
print('number of features: %d' % len(features_all[0][0]))
if len(feature_lengths) > 1:
print('inconsistent feature length counts: %s' % format_dict(feature_lengths))
for feature_length in feature_lengths:
print('examples with feature length=%d:\n%s' % (
feature_length,
'\n'.join(islice((
' '.join(features_vector)
for features_vector in flat_features
if len(features_vector) == feature_length
), 3))
))
(x_all, y_all, features_all) = get_clean_x_y_features(
x_all, y_all, features_all
)
quantiles_feature_value_lengths = get_quantiles_feature_value_length_by_index(features_all)
feature_counts = get_feature_counts(features_all)
print('feature value lengths: %s' % format_dict(quantiles_feature_value_lengths))
print('feature counts: %s' % format_dict(feature_counts))
print('suggested feature indices: %s' % format_indices(
get_suggested_feature_indices(feature_counts)
))
print('label counts: %s' % format_dict(y_counts))
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/tools/grobid_trainer/utils.py
|
utils.py
|
import logging
import time
import tempfile
import os
from collections import Counter
from datetime import datetime, timezone
from itertools import islice
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.numpy import shuffle_arrays
from sciencebeam_trainer_delft.utils.io import (
write_text,
auto_uploading_output_file
)
from sciencebeam_trainer_delft.embedding import EmbeddingManager
from sciencebeam_trainer_delft.sequence_labelling.utils.train_notify import (
TrainNotificationManager,
notify_train_start,
notify_train_success,
notify_train_error
)
from sciencebeam_trainer_delft.sequence_labelling.wrapper import (
Sequence
)
from sciencebeam_trainer_delft.sequence_labelling.reader import load_data_and_labels_crf_file
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti_adapters import (
WapitiModelAdapter,
WapitiModelTrainAdapter
)
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
get_tag_result,
iter_format_tag_result
)
from sciencebeam_trainer_delft.sequence_labelling.evaluation import (
EvaluationOutputFormats,
ClassificationResult
)
from sciencebeam_trainer_delft.sequence_labelling.input_info import (
iter_flat_batch_tokens,
iter_flat_features,
get_quantiles,
get_quantiles_feature_value_length_by_index,
get_feature_counts,
get_suggested_feature_indices,
format_dict,
format_indices
)
from sciencebeam_trainer_delft.sequence_labelling.utils.checkpoints import (
get_resume_train_model_params
)
LOGGER = logging.getLogger(__name__)
DEFAULT_RANDOM_SEED = 42
DEFAULT_TAG_OUTPUT_FORMAT = TagOutputFormats.XML
def set_random_seeds(random_seed: int):
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
def get_default_training_data(model: str) -> str:
return 'data/sequenceLabelling/grobid/' + model + '/' + model + '-060518.train'
def log_data_info(x: np.array, y: np.array, features: np.array):
LOGGER.info('x sample: %s (y: %s)', x[:1][:10], y[:1][:1])
LOGGER.info(
'feature dimensions of first sample, word: %s',
[{index: value for index, value in enumerate(features[0][0])}] # noqa pylint: disable=unnecessary-comprehension
)
def _load_data_and_labels_crf_files(
input_paths: List[str], limit: int = None) -> Tuple[np.array, np.array, np.array]:
if len(input_paths) == 1:
return load_data_and_labels_crf_file(input_paths[0], limit=limit)
x_list = []
y_list = []
features_list = []
for input_path in input_paths:
LOGGER.debug('calling load_data_and_labels_crf_file: %s', input_path)
x, y, f = load_data_and_labels_crf_file(
input_path,
limit=limit
)
x_list.append(x)
y_list.append(y)
features_list.append(f)
return np.concatenate(x_list), np.concatenate(y_list), np.concatenate(features_list)
def get_clean_features_mask(features_all: np.array) -> List[bool]:
feature_lengths = Counter((
len(features_vector)
for features_doc in features_all
for features_vector in features_doc
))
if len(feature_lengths) <= 1:
return [True] * len(features_all)
    expected_feature_length = next(iter(feature_lengths.keys()))
LOGGER.info('cleaning features, expected_feature_length=%s', expected_feature_length)
return [
all(len(features_vector) == expected_feature_length for features_vector in features_doc)
for features_doc in features_all
]
def get_clean_x_y_features(x: np.array, y: np.array, features: np.array):
clean_features_mask = get_clean_features_mask(features)
if sum(clean_features_mask) != len(clean_features_mask):
LOGGER.info(
'ignoring %d documents with inconsistent features',
len(clean_features_mask) - sum(clean_features_mask)
)
return (
x[clean_features_mask],
y[clean_features_mask],
features[clean_features_mask]
)
return x, y, features
def load_data_and_labels(
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
clean_features: bool = True,
random_seed: int = DEFAULT_RANDOM_SEED,
download_manager: DownloadManager = None):
assert download_manager
assert input_paths
LOGGER.info('loading data from: %s', input_paths)
downloaded_input_paths = [
download_manager.download_if_url(input_path)
for input_path in input_paths
]
x_all, y_all, f_all = _load_data_and_labels_crf_files(
downloaded_input_paths,
limit=limit
)
if shuffle_input:
shuffle_arrays([x_all, y_all, f_all], random_seed=random_seed)
log_data_info(x_all, y_all, f_all)
if clean_features:
(x_all, y_all, f_all) = get_clean_x_y_features(
x_all, y_all, f_all
)
return x_all, y_all, f_all
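# Usage sketch (illustrative, not part of the module): loading a GROBID training
# file programmatically. The input path relies on the default training data
# location returned by get_default_training_data above, and the DownloadManager
# is assumed to be constructible with its defaults, as the CLI layer does.
#
#     from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
#     x_all, y_all, f_all = load_data_and_labels(
#         input_paths=[get_default_training_data('header')],
#         limit=100,
#         download_manager=DownloadManager()
#     )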
def notify_model_train_start(
model: Union[Sequence, WapitiModelTrainAdapter],
train_notification_manager: Optional[TrainNotificationManager],
output_path: Optional[str]
):
notify_train_start(
train_notification_manager,
model_path=model.get_model_output_path(output_path),
checkpoints_path=model.log_dir,
resume_train_model_path=model.model_path,
initial_epoch=model.training_config.initial_epoch
)
def do_train(
model: Union[Sequence, WapitiModelTrainAdapter],
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
x_train, x_valid, y_train, y_valid, features_train, features_valid = train_test_split(
x_all, y_all, features_all, test_size=0.1, shuffle=False
)
LOGGER.info('%d train sequences', len(x_train))
LOGGER.info('%d validation sequences', len(x_valid))
notify_model_train_start(
model,
train_notification_manager,
output_path=output_path
)
start_time = time.time()
model.train(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid
)
runtime = round(time.time() - start_time, 3)
LOGGER.info("training runtime: %s seconds ", runtime)
# saving the model
if output_path:
LOGGER.info('saving model to: %s', output_path)
model.save(output_path)
else:
model.save()
notify_train_success(
train_notification_manager,
model_path=model.get_model_output_path(output_path),
last_checkpoint_path=model.last_checkpoint_path
)
def do_train_with_error_notification(
model: Union[Sequence, WapitiModelTrainAdapter],
output_path: str = None,
train_notification_manager: TrainNotificationManager = None,
**kwargs):
model_path = model.get_model_output_path(output_path)
try:
do_train(
model=model,
output_path=output_path,
train_notification_manager=train_notification_manager,
**kwargs
)
except BaseException as error: # pylint: disable=broad-except
notify_train_error(
train_notification_manager,
model_path=model_path,
error=repr(error)
)
raise
def process_resume_train_model_params(
model: Sequence,
auto_resume: bool,
resume_train_model_path: Optional[str]
):
resume_train_model_params = get_resume_train_model_params(
log_dir=model.log_dir,
auto_resume=auto_resume,
resume_train_model_path=resume_train_model_path,
initial_epoch=model.training_config.initial_epoch
)
if resume_train_model_params:
model.load_from(resume_train_model_params.model_path)
model.training_config.initial_epoch = resume_train_model_params.initial_epoch
model.training_config.initial_meta = resume_train_model_params.initial_meta
# train a GROBID model with all available data
def train(
model_name: str,
embeddings_name, architecture='BidLSTM_CRF', use_ELMo=False,
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_sequence_length: int = 100,
max_epoch=100,
resume_train_model_path: str = None,
auto_resume: bool = False,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs):
model_name = get_model_name(
model_name, output_path=output_path, use_ELMo=use_ELMo
)
model = Sequence(
model_name,
max_epoch=max_epoch,
embeddings_name=embeddings_name,
embedding_manager=embedding_manager,
max_sequence_length=max_sequence_length,
model_type=architecture,
use_ELMo=use_ELMo,
**kwargs
)
process_resume_train_model_params(
model,
auto_resume=auto_resume,
resume_train_model_path=resume_train_model_path
)
do_train_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def wapiti_train(
model_name: str,
template_path: str,
output_path: str,
download_manager: DownloadManager,
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_epoch: int = 100,
train_notification_manager: TrainNotificationManager = None,
gzip_enabled: bool = False,
wapiti_binary_path: str = None,
wapiti_train_args: dict = None):
with tempfile.TemporaryDirectory(suffix='-wapiti') as temp_dir:
temp_model_path = os.path.join(temp_dir, 'model.wapiti')
model = WapitiModelTrainAdapter(
model_name=model_name,
template_path=template_path,
temp_model_path=temp_model_path,
max_epoch=max_epoch,
download_manager=download_manager,
gzip_enabled=gzip_enabled,
wapiti_binary_path=wapiti_binary_path,
wapiti_train_args=wapiti_train_args
)
do_train_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def output_classification_result(
classification_result: ClassificationResult,
eval_output_args: Optional[dict],
eval_input_paths: List[str] = None,
model_path: str = None,
model_summary_props: dict = None):
eval_output_args = eval_output_args or dict()
assert eval_output_args is not None
    output_format = eval_output_args.get('eval_output_format')
output_path = eval_output_args.get('eval_output_path')
eval_first_entity = eval_output_args.get('eval_first_entity')
if not output_format:
output_format = EvaluationOutputFormats.TEXT
if eval_first_entity:
classification_result = classification_result.with_first_entities()
meta: Dict[str, Any] = {}
meta['eval_timestamp'] = datetime.now(timezone.utc).isoformat()
if eval_input_paths:
meta['eval_input_paths'] = eval_input_paths
if model_path:
meta['model_path'] = model_path
if model_summary_props:
meta.update(model_summary_props)
if output_path:
LOGGER.info('writing evaluation to: %s', output_path)
write_text(output_path, classification_result.get_json_formatted_report(meta=meta))
if output_format == EvaluationOutputFormats.TEXT:
print("\nEvaluation:\n%s" % classification_result.get_text_formatted_report(
digits=4
))
elif output_format == EvaluationOutputFormats.JSON:
print(classification_result.get_json_formatted_report(meta=meta))
else:
print(classification_result.get_formatted_report(
output_format=output_format
))
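# Note (illustrative): eval_output_args is expected to be the dict assembled by
# the CLI layer (see get_eval_output_args in cli.py), i.e. roughly of the shape
# sketched below; the concrete values are hypothetical.
#
#     eval_output_args = {
#         'eval_output_format': EvaluationOutputFormats.TEXT,
#         'eval_output_path': 'evaluation-report.json',
#         'eval_first_entity': False
#     }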
def do_train_eval(
model: Union[Sequence, WapitiModelTrainAdapter],
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_input_paths: List[str] = None,
eval_limit: int = None,
eval_output_args: dict = None,
fold_count: int = 1,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
if eval_input_paths:
x_eval, y_eval, features_eval = load_data_and_labels(
input_paths=eval_input_paths, limit=eval_limit,
download_manager=download_manager
)
x_train_all, y_train_all, features_train_all = (
x_all, y_all, features_all
)
else:
x_train_all, x_eval, y_train_all, y_eval, features_train_all, features_eval = (
train_test_split(x_all, y_all, features_all, test_size=0.1, shuffle=False)
)
x_train, x_valid, y_train, y_valid, features_train, features_valid = train_test_split(
x_train_all, y_train_all, features_train_all, test_size=0.1, shuffle=False
)
LOGGER.info('%d train sequences', len(x_train))
LOGGER.info('%d validation sequences', len(x_valid))
LOGGER.info('%d evaluation sequences', len(x_eval))
notify_model_train_start(
model,
train_notification_manager,
output_path=output_path
)
start_time = time.time()
if fold_count == 1:
model.train(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid
)
else:
assert isinstance(model, Sequence), \
'nfold evaluation currently only supported for DL models'
model.train_nfold(
x_train, y_train, x_valid, y_valid,
features_train=features_train, features_valid=features_valid,
fold_number=fold_count
)
runtime = round(time.time() - start_time, 3)
LOGGER.info("training runtime: %s seconds ", runtime)
# evaluation
classification_result = model.get_evaluation_result(
x_eval, y_eval, features=features_eval
)
output_classification_result(
classification_result,
eval_output_args=eval_output_args,
eval_input_paths=eval_input_paths,
model_path=model.get_model_output_path(output_path),
model_summary_props=model.model_summary_props
)
# saving the model
if output_path:
model.save(output_path)
else:
model.save()
notify_train_success(
train_notification_manager,
model_path=model.get_model_output_path(output_path),
last_checkpoint_path=model.last_checkpoint_path,
classification_result=classification_result
)
def do_train_eval_with_error_notification(
model: Union[Sequence, WapitiModelTrainAdapter],
output_path: str = None,
train_notification_manager: TrainNotificationManager = None,
**kwargs):
model_path = model.get_model_output_path(output_path)
try:
do_train_eval(
model=model,
output_path=output_path,
train_notification_manager=train_notification_manager,
**kwargs
)
except BaseException as error: # pylint: disable=broad-except
notify_train_error(
train_notification_manager,
model_path=model_path,
error=repr(error)
)
raise
# split data, train a GROBID model and evaluate it
def train_eval(
model_name: str,
embeddings_name, architecture='BidLSTM_CRF', use_ELMo=False,
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_input_paths: List[str] = None,
eval_limit: int = None,
eval_output_args: dict = None,
max_sequence_length: int = 100,
fold_count=1, max_epoch=100, batch_size=20,
resume_train_model_path: str = None,
auto_resume: bool = False,
train_notification_manager: TrainNotificationManager = None,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs):
model_name = get_model_name(
model_name, output_path=output_path, use_ELMo=use_ELMo
)
model = Sequence(
model_name,
max_epoch=max_epoch,
embeddings_name=embeddings_name,
embedding_manager=embedding_manager,
max_sequence_length=max_sequence_length,
model_type=architecture,
use_ELMo=use_ELMo,
batch_size=batch_size,
fold_number=fold_count,
**kwargs
)
process_resume_train_model_params(
model,
auto_resume=auto_resume,
resume_train_model_path=resume_train_model_path
)
do_train_eval_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
eval_input_paths=eval_input_paths,
eval_limit=eval_limit,
eval_output_args=eval_output_args,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def wapiti_train_eval(
model_name: str,
template_path: str,
download_manager: DownloadManager,
input_paths: List[str] = None,
output_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_input_paths: List[str] = None,
eval_limit: int = None,
eval_output_args: dict = None,
fold_count: int = 1,
max_epoch: int = 100,
train_notification_manager: TrainNotificationManager = None,
gzip_enabled: bool = False,
wapiti_binary_path: str = None,
wapiti_train_args: dict = None):
assert fold_count == 1, 'only fold_count == 1 supported'
with tempfile.TemporaryDirectory(suffix='-wapiti') as temp_dir:
temp_model_path = os.path.join(temp_dir, 'model.wapiti')
model = WapitiModelTrainAdapter(
model_name=model_name,
template_path=template_path,
temp_model_path=temp_model_path,
max_epoch=max_epoch,
download_manager=download_manager,
gzip_enabled=gzip_enabled,
wapiti_binary_path=wapiti_binary_path,
wapiti_train_args=wapiti_train_args
)
do_train_eval_with_error_notification(
model,
input_paths=input_paths,
output_path=output_path,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
eval_input_paths=eval_input_paths,
eval_limit=eval_limit,
eval_output_args=eval_output_args,
train_notification_manager=train_notification_manager,
download_manager=download_manager
)
def do_eval_model(
model: Union[Sequence, WapitiModelAdapter],
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
split_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
eval_output_args: dict = None,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
if split_input:
_, x_eval, _, y_eval, _, features_eval = train_test_split(
x_all, y_all, features_all, test_size=0.1, shuffle=False
)
else:
x_eval = x_all
y_eval = y_all
features_eval = features_all
LOGGER.info('%d evaluation sequences', len(x_eval))
# evaluation
classification_result = model.get_evaluation_result(
x_eval, y_eval, features=features_eval
)
output_classification_result(
classification_result,
eval_output_args=eval_output_args,
eval_input_paths=input_paths,
model_path=model.model_path,
model_summary_props=model.model_summary_props
)
def get_model_name(
model_name: str,
use_ELMo: bool = False,
output_path: str = None,
model_path: str = None):
if output_path or model_path:
pass
else:
model_name = 'grobid-' + model_name
if use_ELMo:
model_name += '-with_ELMo'
return model_name
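# Examples (illustrative): without output_path or model_path,
# get_model_name('header') yields 'grobid-header' and
# get_model_name('header', use_ELMo=True) yields 'grobid-header-with_ELMo';
# when an explicit output_path or model_path is given, the 'grobid-' prefixing
# is skipped.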
def load_delft_model(
model_name: str,
use_ELMo: bool = False,
output_path: str = None,
model_path: str = None,
max_sequence_length: Optional[int] = 100,
fold_count: int = 1,
batch_size: int = 20,
embedding_manager: EmbeddingManager = None,
**kwargs):
model = Sequence(
get_model_name(
model_name,
use_ELMo=use_ELMo,
output_path=output_path,
model_path=model_path
),
embeddings_name=None,
embedding_manager=embedding_manager,
max_sequence_length=max_sequence_length,
batch_size=batch_size,
fold_number=fold_count,
**kwargs
)
assert model_path
model.load_from(model_path)
return model
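# Usage sketch (illustrative): loading a previously trained model and tagging
# sequences with it. The model path is hypothetical; the managers are assumed
# to be constructed with their defaults, as the CLI layer does, and
# x_all / features_all are as returned by load_data_and_labels above.
#
#     from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
#     from sciencebeam_trainer_delft.embedding import EmbeddingManager
#     download_manager = DownloadManager()
#     embedding_manager = EmbeddingManager(download_manager=download_manager)
#     model = load_delft_model(
#         'header',
#         model_path='data/models/sequenceLabelling/grobid-header/',
#         embedding_manager=embedding_manager
#     )
#     tag_result = model.iter_tag(x_all, output_format=None, features=features_all)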
def eval_model(
model_name: str,
use_ELMo: bool = False,
input_paths: List[str] = None,
output_path: str = None,
model_path: str = None,
limit: int = None,
shuffle_input: bool = False,
split_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_sequence_length: int = 100,
fold_count: int = 1,
batch_size: int = 20,
eval_output_args: dict = None,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs):
model = load_delft_model(
model_name=model_name,
use_ELMo=use_ELMo,
output_path=output_path,
model_path=model_path,
max_sequence_length=max_sequence_length,
fold_count=fold_count,
batch_size=batch_size,
embedding_manager=embedding_manager,
**kwargs
)
do_eval_model(
model,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
split_input=split_input,
eval_output_args=eval_output_args,
download_manager=download_manager
)
def wapiti_eval_model(
model_path: str,
download_manager: DownloadManager,
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
split_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
fold_count: int = 1,
eval_output_args: dict = None,
wapiti_binary_path: str = None):
assert fold_count == 1, 'only fold_count == 1 supported'
model = WapitiModelAdapter.load_from(
model_path,
download_manager=download_manager,
wapiti_binary_path=wapiti_binary_path
)
do_eval_model(
model,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
split_input=split_input,
eval_output_args=eval_output_args,
download_manager=download_manager
)
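# Usage sketch (illustrative): evaluating a trained Wapiti model on a labelled
# input file. The paths are hypothetical; wapiti_binary_path is omitted here
# (it defaults to None) and DownloadManager is already imported at the top of
# this module.
#
#     wapiti_eval_model(
#         model_path='data/models/sequenceLabelling/grobid-header-wapiti/',
#         input_paths=[get_default_training_data('header')],
#         download_manager=DownloadManager()
#     )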
def do_tag_input(
model: Union[Sequence, WapitiModelAdapter],
tag_output_format: str = DEFAULT_TAG_OUTPUT_FORMAT,
tag_output_path: Optional[str] = None,
input_paths: List[str] = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit, shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
LOGGER.info('%d input sequences', len(x_all))
tag_result = model.iter_tag(
x_all,
output_format=None,
features=features_all
)
if LOGGER.isEnabledFor(logging.DEBUG):
if not isinstance(tag_result, dict):
tag_result = list(tag_result)
LOGGER.debug('actual raw tag_result: %s', tag_result)
if isinstance(model, Sequence) and model.tag_transformed:
dataset_transformer = model.dataset_transformer_factory()
expected_x_all, expected_y_all, expected_features_all = dataset_transformer.fit_transform(
x_all, y_all, features=features_all
)
else:
expected_x_all = x_all
expected_y_all = y_all
expected_features_all = features_all
expected_tag_result = get_tag_result(
texts=expected_x_all,
labels=expected_y_all
)
LOGGER.debug('actual raw expected_tag_result: %s', expected_tag_result)
formatted_tag_result_iterable = iter_format_tag_result(
tag_result,
output_format=tag_output_format,
expected_tag_result=expected_tag_result,
texts=expected_x_all,
features=expected_features_all,
model_name=model._get_model_name() # pylint: disable=protected-access
)
if tag_output_path:
LOGGER.info('writing tag results to: %r', tag_output_path)
with auto_uploading_output_file(tag_output_path) as fp:
for text in formatted_tag_result_iterable:
fp.write(text)
LOGGER.info('tag results written to: %r', tag_output_path)
else:
LOGGER.info('writing tag_result to stdout')
try:
for text in formatted_tag_result_iterable:
print(text, end='')
except BrokenPipeError:
LOGGER.info('received broken pipe error')
def tag_input(
model_name: str,
tag_output_format: str = DEFAULT_TAG_OUTPUT_FORMAT,
tag_output_path: Optional[str] = None,
use_ELMo: bool = False,
input_paths: List[str] = None,
output_path: str = None,
model_path: str = None,
limit: int = None,
shuffle_input: bool = False,
random_seed: int = DEFAULT_RANDOM_SEED,
max_sequence_length: int = None,
input_window_stride: int = None,
stateful: bool = None,
fold_count: int = 1,
batch_size: int = 20,
download_manager: DownloadManager = None,
embedding_manager: EmbeddingManager = None,
**kwargs
):
model = load_delft_model(
model_name=model_name,
use_ELMo=use_ELMo,
output_path=output_path,
model_path=model_path,
max_sequence_length=max_sequence_length,
input_window_stride=input_window_stride,
stateful=stateful,
fold_count=fold_count,
batch_size=batch_size,
embedding_manager=embedding_manager,
**kwargs
)
do_tag_input(
model,
tag_output_format=tag_output_format,
tag_output_path=tag_output_path,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
def wapiti_tag_input(
model_path: str,
download_manager: DownloadManager,
tag_output_format: str = DEFAULT_TAG_OUTPUT_FORMAT,
tag_output_path: Optional[str] = None,
input_paths: List[str] = None,
limit: int = None,
random_seed: int = DEFAULT_RANDOM_SEED,
shuffle_input: bool = False,
wapiti_binary_path: str = None
):
model: WapitiModelAdapter = WapitiModelAdapter.load_from(
model_path,
download_manager=download_manager,
wapiti_binary_path=wapiti_binary_path
)
do_tag_input(
model=model,
tag_output_format=tag_output_format,
tag_output_path=tag_output_path,
input_paths=input_paths,
limit=limit,
shuffle_input=shuffle_input,
random_seed=random_seed,
download_manager=download_manager
)
def print_input_info(
input_paths: List[str],
limit: int = None,
download_manager: DownloadManager = None):
x_all, y_all, features_all = load_data_and_labels(
input_paths=input_paths, limit=limit,
download_manager=download_manager,
clean_features=False
)
seq_lengths = np.array([len(seq) for seq in x_all])
y_counts = Counter(
y_row
for y_doc in y_all
for y_row in y_doc
)
flat_features = list(iter_flat_features(features_all))
feature_lengths = Counter(map(len, flat_features))
print('number of input sequences: %d' % len(x_all))
print('sequence lengths: %s' % format_dict(get_quantiles(seq_lengths)))
print('token lengths: %s' % format_dict(get_quantiles(
map(len, iter_flat_batch_tokens(x_all))
)))
print('number of features: %d' % len(features_all[0][0]))
if len(feature_lengths) > 1:
print('inconsistent feature length counts: %s' % format_dict(feature_lengths))
for feature_length in feature_lengths:
print('examples with feature length=%d:\n%s' % (
feature_length,
'\n'.join(islice((
' '.join(features_vector)
for features_vector in flat_features
if len(features_vector) == feature_length
), 3))
))
(x_all, y_all, features_all) = get_clean_x_y_features(
x_all, y_all, features_all
)
quantiles_feature_value_lengths = get_quantiles_feature_value_length_by_index(features_all)
feature_counts = get_feature_counts(features_all)
print('feature value lengths: %s' % format_dict(quantiles_feature_value_lengths))
print('feature counts: %s' % format_dict(feature_counts))
print('suggested feature indices: %s' % format_indices(
get_suggested_feature_indices(feature_counts)
))
print('label counts: %s' % format_dict(y_counts))
| 0.646572 | 0.268309 |
import logging
import argparse
from abc import abstractmethod
from typing import List, Optional
import sciencebeam_trainer_delft.utils.no_warn_if_disabled # noqa, pylint: disable=unused-import
import sciencebeam_trainer_delft.utils.no_keras_backend_message # noqa, pylint: disable=unused-import
# pylint: disable=wrong-import-order, ungrouped-imports
import keras.backend as K
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.cloud_support import patch_cloud_support
from sciencebeam_trainer_delft.utils.tf import get_tf_info
from sciencebeam_trainer_delft.utils.io import (
copy_file,
auto_uploading_output_file
)
from sciencebeam_trainer_delft.utils.logging import (
tee_stdout_and_stderr_lines_to,
tee_logging_lines_to
)
from sciencebeam_trainer_delft.embedding import EmbeddingManager
from sciencebeam_trainer_delft.sequence_labelling.utils.train_notify import (
get_train_notification_manager
)
from sciencebeam_trainer_delft.sequence_labelling.models import patch_get_model
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti_install import (
install_wapiti_and_get_path_or_none
)
from sciencebeam_trainer_delft.utils.cli import (
SubCommand,
SubCommandProcessor
)
from sciencebeam_trainer_delft.sequence_labelling.transfer_learning import (
get_transfer_learning_config_for_parsed_args
)
from sciencebeam_trainer_delft.sequence_labelling.tools.grobid_trainer.cli_args import (
add_common_arguments,
add_train_arguments,
add_model_path_argument,
add_wapiti_train_arguments,
add_wapiti_install_arguments,
get_wapiti_train_args,
add_fold_count_argument,
add_eval_input_arguments,
add_eval_output_arguments,
add_dl_eval_model_arguments,
add_stateful_argument,
add_input_window_stride_argument,
add_tag_output_format_argument,
add_tag_output_path_argument,
add_tag_transformed_argument,
add_model_positional_argument,
create_argument_parser,
process_args
)
from sciencebeam_trainer_delft.sequence_labelling.tools.grobid_trainer.utils import (
set_random_seeds,
train,
wapiti_train,
train_eval,
wapiti_train_eval,
eval_model,
wapiti_eval_model,
tag_input,
wapiti_tag_input,
print_input_info
)
LOGGER = logging.getLogger(__name__)
class Tasks:
TRAIN = 'train'
TRAIN_EVAL = 'train_eval'
EVAL = 'eval'
TAG = 'tag'
WAPITI_TRAIN = 'wapiti_train'
WAPITI_TRAIN_EVAL = 'wapiti_train_eval'
WAPITI_EVAL = 'wapiti_eval'
WAPITI_TAG = 'wapiti_tag'
INPUT_INFO = 'input_info'
def save_input_to(input_paths: List[str], output_path: str):
assert len(input_paths) == 1, "exactly one input path expected (got: %s)" % input_paths
input_path = input_paths[0]
LOGGER.info('saving input (%s) to: %s', input_path, output_path)
copy_file(input_path, output_path)
def get_eval_input_args(args: argparse.Namespace) -> dict:
return dict(
eval_input_paths=args.eval_input,
eval_limit=args.eval_limit,
)
def get_eval_output_args(args: argparse.Namespace) -> dict:
return dict(
eval_output_format=args.eval_output_format,
eval_first_entity=args.eval_first_entity,
eval_output_path=args.eval_output_path
)
def get_dl_eval_model_args(args: argparse.Namespace) -> dict:
return dict(
eval_max_sequence_length=args.eval_max_sequence_length,
eval_input_window_stride=args.eval_input_window_stride,
eval_batch_size=args.eval_batch_size
)
class GrobidTrainerSubCommand(SubCommand):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.download_manager = None
self.embedding_manager = None
@abstractmethod
def do_run(self, args: argparse.Namespace):
pass
def preload_and_validate_embedding(
self,
embedding_name: str,
use_word_embeddings: bool = True) -> Optional[str]:
if not use_word_embeddings:
return None
embedding_name = self.embedding_manager.ensure_available(embedding_name)
LOGGER.info('embedding_name: %s', embedding_name)
self.embedding_manager.validate_embedding(embedding_name)
return embedding_name
def get_common_args(self, args: argparse.Namespace) -> dict:
return dict(
model_name=args.model,
input_paths=args.input,
limit=args.limit,
shuffle_input=args.shuffle_input,
random_seed=args.random_seed,
batch_size=args.batch_size,
max_sequence_length=args.max_sequence_length,
multiprocessing=args.multiprocessing,
embedding_manager=self.embedding_manager,
download_manager=self.download_manager
)
def get_train_args(self, args: argparse.Namespace) -> dict:
return dict(
architecture=args.architecture,
use_ELMo=args.use_ELMo,
output_path=args.output,
log_dir=args.checkpoint,
char_emb_size=args.char_embedding_size,
char_lstm_units=args.char_lstm_units,
word_lstm_units=args.word_lstm_units,
dropout=args.dropout,
recurrent_dropout=args.recurrent_dropout,
max_epoch=args.max_epoch,
use_features=args.use_features,
features_indices=args.features_indices,
features_embedding_size=args.features_embedding_size,
patience=args.early_stopping_patience,
config_props=dict(
max_char_length=args.max_char_length,
char_input_mask_zero=args.char_input_mask_zero,
char_input_dropout=args.char_input_dropout,
char_lstm_dropout=args.char_lstm_dropout,
additional_token_feature_indices=args.additional_token_feature_indices,
text_feature_indices=args.text_feature_indices,
unroll_text_feature_index=args.unroll_text_feature_index,
concatenated_embeddings_token_count=args.concatenated_embeddings_token_count,
use_word_embeddings=args.use_word_embeddings,
use_features_indices_input=args.use_features_indices_input,
continuous_features_indices=args.continuous_features_indices,
features_lstm_units=args.features_lstm_units,
stateful=args.stateful
),
training_props=dict(
initial_epoch=args.initial_epoch,
input_window_stride=args.input_window_stride,
checkpoint_epoch_interval=args.checkpoint_epoch_interval
),
resume_train_model_path=args.resume_train_model_path,
auto_resume=args.auto_resume,
transfer_learning_config=get_transfer_learning_config_for_parsed_args(args),
train_notification_manager=get_train_notification_manager(args),
**self.get_common_args(args)
)
def run(self, args: argparse.Namespace):
if args.save_input_to_and_exit:
save_input_to(args.input, args.save_input_to_and_exit)
return
self.download_manager = DownloadManager()
self.embedding_manager = EmbeddingManager(
download_manager=self.download_manager
)
if args.no_use_lmdb:
self.embedding_manager.disable_embedding_lmdb_cache()
set_random_seeds(args.random_seed)
self.do_run(args)
# see https://github.com/tensorflow/tensorflow/issues/3388
K.clear_session()
class TrainSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_train_arguments(parser)
add_model_path_argument(parser, help='directory to the saved model')
def do_run(self, args: argparse.Namespace):
if not args.model:
raise ValueError("model required")
if args.preload_embedding:
self.preload_and_validate_embedding(
args.preload_embedding,
use_word_embeddings=True
)
embedding_name = self.preload_and_validate_embedding(
args.embedding,
use_word_embeddings=args.use_word_embeddings and not args.resume_train_model_path
)
LOGGER.info('get_tf_info: %s', get_tf_info())
train(
embeddings_name=embedding_name,
**self.get_train_args(args)
)
class WapitiTrainSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_wapiti_train_arguments(parser)
add_wapiti_install_arguments(parser)
def do_run(self, args: argparse.Namespace):
if not args.model:
raise ValueError("model required")
wapiti_train(
model_name=args.model,
template_path=args.wapiti_template,
input_paths=args.input,
limit=args.limit,
output_path=args.output,
max_epoch=args.max_epoch,
download_manager=self.download_manager,
gzip_enabled=args.wapiti_gzip,
wapiti_binary_path=install_wapiti_and_get_path_or_none(
args.wapiti_install_source,
download_manager=self.download_manager
),
wapiti_train_args=get_wapiti_train_args(args),
train_notification_manager=get_train_notification_manager(args)
)
class TrainEvalSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_train_arguments(parser)
add_model_path_argument(parser, help='directory to the saved model')
add_fold_count_argument(parser)
add_eval_input_arguments(parser)
add_eval_output_arguments(parser)
add_dl_eval_model_arguments(parser)
def do_run(self, args: argparse.Namespace):
if not args.model:
raise ValueError("model required")
if args.fold_count < 1:
raise ValueError("fold-count should be equal or more than 1")
if args.preload_embedding:
self.preload_and_validate_embedding(
args.preload_embedding,
use_word_embeddings=True
)
embedding_name = self.preload_and_validate_embedding(
args.embedding,
use_word_embeddings=args.use_word_embeddings and not args.resume_train_model_path
)
LOGGER.info('get_tf_info: %s', get_tf_info())
train_eval(
fold_count=args.fold_count,
embeddings_name=embedding_name,
eval_output_args=get_eval_output_args(args),
**get_eval_input_args(args),
**get_dl_eval_model_args(args),
**self.get_train_args(args)
)
class WapitiTrainEvalSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_wapiti_train_arguments(parser)
add_eval_input_arguments(parser)
add_eval_output_arguments(parser)
add_wapiti_install_arguments(parser)
def do_run(self, args: argparse.Namespace):
if not args.model:
raise ValueError("model required")
wapiti_train_eval(
model_name=args.model,
template_path=args.wapiti_template,
input_paths=args.input,
limit=args.limit,
eval_input_paths=args.eval_input,
eval_limit=args.eval_limit,
output_path=args.output,
max_epoch=args.max_epoch,
download_manager=self.download_manager,
gzip_enabled=args.wapiti_gzip,
wapiti_binary_path=install_wapiti_and_get_path_or_none(
args.wapiti_install_source,
download_manager=self.download_manager
),
wapiti_train_args=get_wapiti_train_args(args),
train_notification_manager=get_train_notification_manager(args),
eval_output_args=get_eval_output_args(args)
)
class EvalSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_model_path_argument(parser, required=True, help='directory to load the model from')
parser.add_argument(
"--use-eval-train-test-split",
action="store_true",
help=" ".join([
"If enabled, split the input when running 'eval'",
"(in the same way it is split for 'train_eval')"
])
)
add_eval_output_arguments(parser)
add_stateful_argument(parser)
add_dl_eval_model_arguments(parser)
def do_run(self, args: argparse.Namespace):
eval_model(
model_path=args.model_path,
split_input=args.use_eval_train_test_split,
eval_output_args=get_eval_output_args(args),
stateful=args.stateful,
**get_dl_eval_model_args(args),
**self.get_common_args(args)
)
class WapitiEvalSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_model_path_argument(parser, required=True, help='directory to load the model from')
add_eval_output_arguments(parser)
add_wapiti_install_arguments(parser)
def do_run(self, args: argparse.Namespace):
wapiti_eval_model(
model_path=args.model_path,
input_paths=args.input,
limit=args.limit,
eval_output_args=get_eval_output_args(args),
download_manager=self.download_manager,
wapiti_binary_path=install_wapiti_and_get_path_or_none(
args.wapiti_install_source,
download_manager=self.download_manager
)
)
class TagSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser, max_sequence_length_default=None)
add_stateful_argument(parser)
add_input_window_stride_argument(parser)
add_model_path_argument(parser, required=True, help='directory to load the model from')
add_tag_output_format_argument(parser)
add_tag_output_path_argument(parser)
add_tag_transformed_argument(parser)
def do_run(self, args: argparse.Namespace):
tag_input(
model_path=args.model_path,
tag_output_format=args.tag_output_format,
tag_output_path=args.tag_output_path,
tag_transformed=args.tag_transformed,
stateful=args.stateful,
input_window_stride=args.input_window_stride,
**self.get_common_args(args)
)
class WapitiTagSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser, max_sequence_length_default=None)
add_model_path_argument(parser, required=True, help='directory to load the model from')
add_tag_output_format_argument(parser)
add_tag_output_path_argument(parser)
add_wapiti_install_arguments(parser)
def do_run(self, args: argparse.Namespace):
wapiti_tag_input(
model_path=args.model_path,
tag_output_format=args.tag_output_format,
tag_output_path=args.tag_output_path,
input_paths=args.input,
limit=args.limit,
download_manager=self.download_manager,
wapiti_binary_path=install_wapiti_and_get_path_or_none(
args.wapiti_install_source,
download_manager=self.download_manager
)
)
class InputInfoSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
def do_run(self, args: argparse.Namespace):
print_input_info(
input_paths=args.input,
limit=args.limit,
download_manager=self.download_manager
)
SUB_COMMANDS = [
TrainSubCommand(
Tasks.TRAIN,
'Train the model using the provided input(s)'
),
TrainEvalSubCommand(
Tasks.TRAIN_EVAL,
'Train and reserve a slice of the input data for evaluation'
),
EvalSubCommand(
Tasks.EVAL,
'Evaluate the already trained model on the provided input(s)'
),
TagSubCommand(
Tasks.TAG,
'Tag inputs and show results. Optionally also show a diff to the expected labels'
),
WapitiTrainSubCommand(
Tasks.WAPITI_TRAIN,
'Train the model using the provided input(s)'
),
WapitiTrainEvalSubCommand(
Tasks.WAPITI_TRAIN_EVAL,
'Train and reserve a slice of the input data for evaluation'
),
WapitiEvalSubCommand(
Tasks.WAPITI_EVAL,
'Evaluate the already trained model on the provided input(s)'
),
WapitiTagSubCommand(
Tasks.WAPITI_TAG,
'Tag inputs and show results. Optionally also show a diff to the expected labels'
),
InputInfoSubCommand(
Tasks.INPUT_INFO,
'Display input summary information relating to the passed in input(s)'
)
]
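# Extension sketch (illustrative): a new sub-command follows the same pattern as
# the classes above: subclass GrobidTrainerSubCommand, implement add_arguments
# and do_run, and append an instance to SUB_COMMANDS. The command name 'my_task'
# and its behaviour are hypothetical.
#
#     class MyTaskSubCommand(GrobidTrainerSubCommand):
#         def add_arguments(self, parser: argparse.ArgumentParser):
#             add_common_arguments(parser)
#
#         def do_run(self, args: argparse.Namespace):
#             print_input_info(
#                 input_paths=args.input,
#                 limit=args.limit,
#                 download_manager=self.download_manager
#             )
#
#     SUB_COMMANDS.append(MyTaskSubCommand('my_task', 'Describe my task'))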
def get_subcommand_processor():
return SubCommandProcessor(SUB_COMMANDS, command_dest='action')
def parse_args(argv: List[str] = None, subcommand_processor: SubCommandProcessor = None):
parser = create_argument_parser()
if subcommand_processor is None:
subcommand_processor = SubCommandProcessor(SUB_COMMANDS, command_dest='action')
add_model_positional_argument(parser)
subcommand_processor.add_sub_command_parsers(parser)
args = parser.parse_args(argv)
process_args(args)
return args
def run(args: argparse.Namespace, subcommand_processor: SubCommandProcessor = None):
if subcommand_processor is None:
subcommand_processor = SubCommandProcessor(SUB_COMMANDS, command_dest='action')
try:
subcommand_processor.run(args)
except BaseException as exc:
LOGGER.error('uncaught exception: %s', exc, exc_info=exc)
raise
def main(argv: List[str] = None):
subcommand_processor = get_subcommand_processor()
args = parse_args(argv, subcommand_processor=subcommand_processor)
if args.quiet:
logging.root.setLevel('ERROR')
elif args.debug:
for name in [__name__, 'sciencebeam_trainer_delft', 'delft']:
logging.getLogger(name).setLevel('DEBUG')
if args.log_file:
with auto_uploading_output_file(args.log_file, mode='w') as log_fp:
try:
with tee_stdout_and_stderr_lines_to(log_fp.write, append_line_feed=True):
with tee_logging_lines_to(log_fp.write, append_line_feed=True):
run(args, subcommand_processor=subcommand_processor)
finally:
logging.shutdown()
else:
run(args, subcommand_processor=subcommand_processor)
def main_setup():
logging.root.handlers = []
logging.basicConfig(level='INFO')
patch_cloud_support()
patch_get_model()
if __name__ == "__main__":
main_setup()
main()
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/sequence_labelling/tools/grobid_trainer/cli.py
|
cli.py
|
import logging
import argparse
from abc import abstractmethod
from typing import List, Optional
import sciencebeam_trainer_delft.utils.no_warn_if_disabled # noqa, pylint: disable=unused-import
import sciencebeam_trainer_delft.utils.no_keras_backend_message # noqa, pylint: disable=unused-import
# pylint: disable=wrong-import-order, ungrouped-imports
import keras.backend as K
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.cloud_support import patch_cloud_support
from sciencebeam_trainer_delft.utils.tf import get_tf_info
from sciencebeam_trainer_delft.utils.io import (
copy_file,
auto_uploading_output_file
)
from sciencebeam_trainer_delft.utils.logging import (
tee_stdout_and_stderr_lines_to,
tee_logging_lines_to
)
from sciencebeam_trainer_delft.embedding import EmbeddingManager
from sciencebeam_trainer_delft.sequence_labelling.utils.train_notify import (
get_train_notification_manager
)
from sciencebeam_trainer_delft.sequence_labelling.models import patch_get_model
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti_install import (
install_wapiti_and_get_path_or_none
)
from sciencebeam_trainer_delft.utils.cli import (
SubCommand,
SubCommandProcessor
)
from sciencebeam_trainer_delft.sequence_labelling.transfer_learning import (
get_transfer_learning_config_for_parsed_args
)
from sciencebeam_trainer_delft.sequence_labelling.tools.grobid_trainer.cli_args import (
add_common_arguments,
add_train_arguments,
add_model_path_argument,
add_wapiti_train_arguments,
add_wapiti_install_arguments,
get_wapiti_train_args,
add_fold_count_argument,
add_eval_input_arguments,
add_eval_output_arguments,
add_dl_eval_model_arguments,
add_stateful_argument,
add_input_window_stride_argument,
add_tag_output_format_argument,
add_tag_output_path_argument,
add_tag_transformed_argument,
add_model_positional_argument,
create_argument_parser,
process_args
)
from sciencebeam_trainer_delft.sequence_labelling.tools.grobid_trainer.utils import (
set_random_seeds,
train,
wapiti_train,
train_eval,
wapiti_train_eval,
eval_model,
wapiti_eval_model,
tag_input,
wapiti_tag_input,
print_input_info
)
LOGGER = logging.getLogger(__name__)
class Tasks:
TRAIN = 'train'
TRAIN_EVAL = 'train_eval'
EVAL = 'eval'
TAG = 'tag'
WAPITI_TRAIN = 'wapiti_train'
WAPITI_TRAIN_EVAL = 'wapiti_train_eval'
WAPITI_EVAL = 'wapiti_eval'
WAPITI_TAG = 'wapiti_tag'
INPUT_INFO = 'input_info'
def save_input_to(input_paths: List[str], output_path: str):
assert len(input_paths) == 1, "exactly one input path expected (got: %s)" % input_paths
input_path = input_paths[0]
LOGGER.info('saving input (%s) to: %s', input_path, output_path)
copy_file(input_path, output_path)
def get_eval_input_args(args: argparse.Namespace) -> dict:
return dict(
eval_input_paths=args.eval_input,
eval_limit=args.eval_limit,
)
def get_eval_output_args(args: argparse.Namespace) -> dict:
return dict(
eval_output_format=args.eval_output_format,
eval_first_entity=args.eval_first_entity,
eval_output_path=args.eval_output_path
)
def get_dl_eval_model_args(args: argparse.Namespace) -> dict:
return dict(
eval_max_sequence_length=args.eval_max_sequence_length,
eval_input_window_stride=args.eval_input_window_stride,
eval_batch_size=args.eval_batch_size
)
class GrobidTrainerSubCommand(SubCommand):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.download_manager = None
self.embedding_manager = None
@abstractmethod
def do_run(self, args: argparse.Namespace):
pass
def preload_and_validate_embedding(
self,
embedding_name: str,
use_word_embeddings: bool = True) -> Optional[str]:
if not use_word_embeddings:
return None
embedding_name = self.embedding_manager.ensure_available(embedding_name)
LOGGER.info('embedding_name: %s', embedding_name)
self.embedding_manager.validate_embedding(embedding_name)
return embedding_name
def get_common_args(self, args: argparse.Namespace) -> dict:
return dict(
model_name=args.model,
input_paths=args.input,
limit=args.limit,
shuffle_input=args.shuffle_input,
random_seed=args.random_seed,
batch_size=args.batch_size,
max_sequence_length=args.max_sequence_length,
multiprocessing=args.multiprocessing,
embedding_manager=self.embedding_manager,
download_manager=self.download_manager
)
def get_train_args(self, args: argparse.Namespace) -> dict:
return dict(
architecture=args.architecture,
use_ELMo=args.use_ELMo,
output_path=args.output,
log_dir=args.checkpoint,
char_emb_size=args.char_embedding_size,
char_lstm_units=args.char_lstm_units,
word_lstm_units=args.word_lstm_units,
dropout=args.dropout,
recurrent_dropout=args.recurrent_dropout,
max_epoch=args.max_epoch,
use_features=args.use_features,
features_indices=args.features_indices,
features_embedding_size=args.features_embedding_size,
patience=args.early_stopping_patience,
config_props=dict(
max_char_length=args.max_char_length,
char_input_mask_zero=args.char_input_mask_zero,
char_input_dropout=args.char_input_dropout,
char_lstm_dropout=args.char_lstm_dropout,
additional_token_feature_indices=args.additional_token_feature_indices,
text_feature_indices=args.text_feature_indices,
unroll_text_feature_index=args.unroll_text_feature_index,
concatenated_embeddings_token_count=args.concatenated_embeddings_token_count,
use_word_embeddings=args.use_word_embeddings,
use_features_indices_input=args.use_features_indices_input,
continuous_features_indices=args.continuous_features_indices,
features_lstm_units=args.features_lstm_units,
stateful=args.stateful
),
training_props=dict(
initial_epoch=args.initial_epoch,
input_window_stride=args.input_window_stride,
checkpoint_epoch_interval=args.checkpoint_epoch_interval
),
resume_train_model_path=args.resume_train_model_path,
auto_resume=args.auto_resume,
transfer_learning_config=get_transfer_learning_config_for_parsed_args(args),
train_notification_manager=get_train_notification_manager(args),
**self.get_common_args(args)
)
def run(self, args: argparse.Namespace):
if args.save_input_to_and_exit:
save_input_to(args.input, args.save_input_to_and_exit)
return
self.download_manager = DownloadManager()
self.embedding_manager = EmbeddingManager(
download_manager=self.download_manager
)
if args.no_use_lmdb:
self.embedding_manager.disable_embedding_lmdb_cache()
set_random_seeds(args.random_seed)
self.do_run(args)
# see https://github.com/tensorflow/tensorflow/issues/3388
K.clear_session()
class TrainSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_train_arguments(parser)
add_model_path_argument(parser, help='directory to the saved model')
def do_run(self, args: argparse.Namespace):
if not args.model:
raise ValueError("model required")
if args.preload_embedding:
self.preload_and_validate_embedding(
args.preload_embedding,
use_word_embeddings=True
)
embedding_name = self.preload_and_validate_embedding(
args.embedding,
use_word_embeddings=args.use_word_embeddings and not args.resume_train_model_path
)
LOGGER.info('get_tf_info: %s', get_tf_info())
train(
embeddings_name=embedding_name,
**self.get_train_args(args)
)
class WapitiTrainSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_wapiti_train_arguments(parser)
add_wapiti_install_arguments(parser)
def do_run(self, args: argparse.Namespace):
if not args.model:
raise ValueError("model required")
wapiti_train(
model_name=args.model,
template_path=args.wapiti_template,
input_paths=args.input,
limit=args.limit,
output_path=args.output,
max_epoch=args.max_epoch,
download_manager=self.download_manager,
gzip_enabled=args.wapiti_gzip,
wapiti_binary_path=install_wapiti_and_get_path_or_none(
args.wapiti_install_source,
download_manager=self.download_manager
),
wapiti_train_args=get_wapiti_train_args(args),
train_notification_manager=get_train_notification_manager(args)
)
class TrainEvalSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_train_arguments(parser)
add_model_path_argument(parser, help='directory to the saved model')
add_fold_count_argument(parser)
add_eval_input_arguments(parser)
add_eval_output_arguments(parser)
add_dl_eval_model_arguments(parser)
def do_run(self, args: argparse.Namespace):
if not args.model:
raise ValueError("model required")
if args.fold_count < 1:
raise ValueError("fold-count should be equal or more than 1")
if args.preload_embedding:
self.preload_and_validate_embedding(
args.preload_embedding,
use_word_embeddings=True
)
embedding_name = self.preload_and_validate_embedding(
args.embedding,
use_word_embeddings=args.use_word_embeddings and not args.resume_train_model_path
)
LOGGER.info('get_tf_info: %s', get_tf_info())
train_eval(
fold_count=args.fold_count,
embeddings_name=embedding_name,
eval_output_args=get_eval_output_args(args),
**get_eval_input_args(args),
**get_dl_eval_model_args(args),
**self.get_train_args(args)
)
class WapitiTrainEvalSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_wapiti_train_arguments(parser)
add_eval_input_arguments(parser)
add_eval_output_arguments(parser)
add_wapiti_install_arguments(parser)
def do_run(self, args: argparse.Namespace):
if not args.model:
raise ValueError("model required")
wapiti_train_eval(
model_name=args.model,
template_path=args.wapiti_template,
input_paths=args.input,
limit=args.limit,
eval_input_paths=args.eval_input,
eval_limit=args.eval_limit,
output_path=args.output,
max_epoch=args.max_epoch,
download_manager=self.download_manager,
gzip_enabled=args.wapiti_gzip,
wapiti_binary_path=install_wapiti_and_get_path_or_none(
args.wapiti_install_source,
download_manager=self.download_manager
),
wapiti_train_args=get_wapiti_train_args(args),
train_notification_manager=get_train_notification_manager(args),
eval_output_args=get_eval_output_args(args)
)
class EvalSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_model_path_argument(parser, required=True, help='directory to load the model from')
parser.add_argument(
"--use-eval-train-test-split",
action="store_true",
help=" ".join([
"If enabled, split the input when running 'eval'",
"(in the same way it is split for 'train_eval')"
])
)
add_eval_output_arguments(parser)
add_stateful_argument(parser)
add_dl_eval_model_arguments(parser)
def do_run(self, args: argparse.Namespace):
eval_model(
model_path=args.model_path,
split_input=args.use_eval_train_test_split,
eval_output_args=get_eval_output_args(args),
stateful=args.stateful,
**get_dl_eval_model_args(args),
**self.get_common_args(args)
)
class WapitiEvalSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_model_path_argument(parser, required=True, help='directory to load the model from')
add_eval_output_arguments(parser)
add_wapiti_install_arguments(parser)
def do_run(self, args: argparse.Namespace):
wapiti_eval_model(
model_path=args.model_path,
input_paths=args.input,
limit=args.limit,
eval_output_args=get_eval_output_args(args),
download_manager=self.download_manager,
wapiti_binary_path=install_wapiti_and_get_path_or_none(
args.wapiti_install_source,
download_manager=self.download_manager
)
)
class TagSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser, max_sequence_length_default=None)
add_stateful_argument(parser)
add_input_window_stride_argument(parser)
add_model_path_argument(parser, required=True, help='directory to load the model from')
add_tag_output_format_argument(parser)
add_tag_output_path_argument(parser)
add_tag_transformed_argument(parser)
def do_run(self, args: argparse.Namespace):
tag_input(
model_path=args.model_path,
tag_output_format=args.tag_output_format,
tag_output_path=args.tag_output_path,
tag_transformed=args.tag_transformed,
stateful=args.stateful,
input_window_stride=args.input_window_stride,
**self.get_common_args(args)
)
class WapitiTagSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser, max_sequence_length_default=None)
add_model_path_argument(parser, required=True, help='directory to load the model from')
add_tag_output_format_argument(parser)
add_tag_output_path_argument(parser)
add_wapiti_install_arguments(parser)
def do_run(self, args: argparse.Namespace):
wapiti_tag_input(
model_path=args.model_path,
tag_output_format=args.tag_output_format,
tag_output_path=args.tag_output_path,
input_paths=args.input,
limit=args.limit,
download_manager=self.download_manager,
wapiti_binary_path=install_wapiti_and_get_path_or_none(
args.wapiti_install_source,
download_manager=self.download_manager
)
)
class InputInfoSubCommand(GrobidTrainerSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
def do_run(self, args: argparse.Namespace):
print_input_info(
input_paths=args.input,
limit=args.limit,
download_manager=self.download_manager
)
SUB_COMMANDS = [
TrainSubCommand(
Tasks.TRAIN,
'Train the model using the provided input(s)'
),
TrainEvalSubCommand(
Tasks.TRAIN_EVAL,
'Train and reserve a slice of the input data for evaluation'
),
EvalSubCommand(
Tasks.EVAL,
'Evaluate the already trained model on the provided input(s)'
),
TagSubCommand(
Tasks.TAG,
'Tag inputs and show results. Optionally also show a diff to the expected labels'
),
WapitiTrainSubCommand(
Tasks.WAPITI_TRAIN,
'Train the model using the provided input(s)'
),
WapitiTrainEvalSubCommand(
Tasks.WAPITI_TRAIN_EVAL,
'Train and reserve a slice of the input data for evaluation'
),
WapitiEvalSubCommand(
Tasks.WAPITI_EVAL,
'Evaluate the already trained model on the provided input(s)'
),
WapitiTagSubCommand(
Tasks.WAPITI_TAG,
'Tag inputs and show results. Optionally also show a diff to the expected labels'
),
InputInfoSubCommand(
Tasks.INPUT_INFO,
'Display input summary information relating to the passed in input(s)'
)
]
def get_subcommand_processor():
return SubCommandProcessor(SUB_COMMANDS, command_dest='action')
def parse_args(argv: List[str] = None, subcommand_processor: SubCommandProcessor = None):
parser = create_argument_parser()
if subcommand_processor is None:
subcommand_processor = SubCommandProcessor(SUB_COMMANDS, command_dest='action')
add_model_positional_argument(parser)
subcommand_processor.add_sub_command_parsers(parser)
args = parser.parse_args(argv)
process_args(args)
return args
def run(args: argparse.Namespace, subcommand_processor: SubCommandProcessor = None):
if subcommand_processor is None:
subcommand_processor = SubCommandProcessor(SUB_COMMANDS, command_dest='action')
try:
subcommand_processor.run(args)
except BaseException as exc:
LOGGER.error('uncaught exception: %s', exc, exc_info=exc)
raise
def main(argv: List[str] = None):
subcommand_processor = get_subcommand_processor()
args = parse_args(argv, subcommand_processor=subcommand_processor)
if args.quiet:
logging.root.setLevel('ERROR')
elif args.debug:
for name in [__name__, 'sciencebeam_trainer_delft', 'delft']:
logging.getLogger(name).setLevel('DEBUG')
if args.log_file:
with auto_uploading_output_file(args.log_file, mode='w') as log_fp:
try:
with tee_stdout_and_stderr_lines_to(log_fp.write, append_line_feed=True):
with tee_logging_lines_to(log_fp.write, append_line_feed=True):
run(args, subcommand_processor=subcommand_processor)
finally:
logging.shutdown()
else:
run(args, subcommand_processor=subcommand_processor)
def main_setup():
logging.root.handlers = []
logging.basicConfig(level='INFO')
patch_cloud_support()
patch_get_model()
if __name__ == "__main__":
main_setup()
main()
| 0.730386 | 0.105441 |
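A minimal sketch of invoking the trainer CLI defined above directly from Python via main(). The 'header' model name, the training file name and the --input flag spelling are assumptions for illustration only; the actual flags are registered by the add_*_arguments helpers defined earlier in this package and not repeated here.

# Hypothetical invocation sketch; see the caveats in the paragraph above.
main_setup()
main([
    'header',                              # model name (positional, added by add_model_positional_argument)
    'train',                               # sub-command (Tasks.TRAIN)
    '--input', 'header-train-data.train',  # assumed spelling of the flag backing args.input
])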
import logging
import time
from functools import partial
from typing import List, Tuple
import pandas as pd
import delft.textClassification.models
import delft.textClassification.wrapper
from sciencebeam_trainer_delft.text_classification.wrapper import Classifier
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.utils.models.Attention import Attention
from sciencebeam_trainer_delft.text_classification.models import (
get_callbacks,
train_model
)
from sciencebeam_trainer_delft.text_classification.saving import (
ModelSaver
)
from sciencebeam_trainer_delft.text_classification.evaluation import (
ClassificationResult
)
from sciencebeam_trainer_delft.text_classification.reader import (
load_data_frame,
load_texts_and_classes_pandas,
load_classes_pandas
)
from sciencebeam_trainer_delft.text_classification.config import (
AppConfig,
ModelConfig,
TrainingConfig
)
LOGGER = logging.getLogger(__name__)
def get_downloaded_input_paths(
input_paths: List[str],
download_manager: DownloadManager) -> List[str]:
return [
download_manager.download_if_url(input_path)
for input_path in input_paths
]
def load_input_data_frame(
input_paths: List[str],
download_manager: DownloadManager,
limit: int = None) -> pd.DataFrame:
assert len(input_paths) == 1
LOGGER.info('loading data: %s', input_paths)
downloaded_input_paths = get_downloaded_input_paths(
input_paths,
download_manager=download_manager
)
df = load_data_frame(
downloaded_input_paths[0],
limit=limit
)
LOGGER.info('loaded data: %d rows', len(df))
return df
def load_input_data(
input_paths: List[str],
download_manager: DownloadManager,
limit: int = None) -> Tuple[List[str], List[List[str]], List[str]]:
assert len(input_paths) == 1
LOGGER.info('loading data: %s', input_paths)
downloaded_input_paths = get_downloaded_input_paths(
input_paths,
download_manager=download_manager
)
xtr, y, y_names = load_texts_and_classes_pandas(
downloaded_input_paths[0],
limit=limit
)
LOGGER.info('loaded data: %d rows', len(xtr))
return xtr, y, y_names
def load_label_data(
input_paths: List[str],
download_manager: DownloadManager,
limit: int = None) -> Tuple[List[List[str]], List[str]]:
assert len(input_paths) == 1
LOGGER.info('loading data: %s', input_paths)
downloaded_input_paths = get_downloaded_input_paths(
input_paths,
download_manager=download_manager
)
y, y_names = load_classes_pandas(
downloaded_input_paths[0],
limit=limit
)
LOGGER.info('loaded data: %d rows', len(y))
return y, y_names
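# Monkey-patch delft so that its text classification code uses the local
# Attention layer and the local train_model (which accepts the callbacks wired
# in by train() below) instead of the upstream implementations.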
def _patch_delft():
delft.textClassification.models.Attention = Attention
delft.textClassification.wrapper.train_model = train_model
def train(
app_config: AppConfig,
model_config: ModelConfig,
training_config: TrainingConfig,
train_input_texts: List[str],
train_input_labels: List[List[str]],
model_path: str):
_patch_delft()
model = Classifier(
embeddings_name=model_config.embeddings_name,
download_manager=app_config.download_manager,
embedding_manager=app_config.embedding_manager
)
model.embeddings_name = model_config.embeddings_name
model.model_config = model_config
model.model_config.word_embedding_size = model.embeddings.embed_size
model.training_config = training_config
model_saver = ModelSaver(model_config)
callbacks = get_callbacks(
model_saver=model_saver,
log_dir=training_config.log_dir
)
delft.textClassification.wrapper.train_model = partial(
train_model,
callbacks=callbacks
)
model.train(train_input_texts, train_input_labels)
LOGGER.info('saving model to: %s', model_path)
model.save_to(model_path)
def predict(
app_config: AppConfig,
eval_input_texts: List[str],
model_path: str):
model = Classifier(
download_manager=app_config.download_manager,
embedding_manager=app_config.embedding_manager
)
model.load_from(model_path)
LOGGER.info('number of texts to classify: %s', len(eval_input_texts))
start_time = time.time()
result = model.predict(eval_input_texts, output_format="csv")
LOGGER.info("runtime: %s seconds", round(time.time() - start_time, 3))
return {
'labels': model.model_config.list_classes,
'prediction': result
}
def evaluate(
app_config: AppConfig,
eval_input_texts: List[str],
eval_input_labels: List[List[str]],
model_path: str):
model = Classifier(
download_manager=app_config.download_manager,
embedding_manager=app_config.embedding_manager
)
model.load_from(model_path)
LOGGER.info('number of texts to classify: %s', len(eval_input_texts))
start_time = time.time()
result = model.predict(eval_input_texts, output_format="csv")
LOGGER.info("runtime: %s seconds", round(time.time() - start_time, 3))
return ClassificationResult(
y_true=eval_input_labels,
y_pred=result,
label_names=model.model_config.list_classes
)
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/cli_utils.py
|
cli_utils.py
|
| 0.703549 | 0.267387 |
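The helpers above can also be driven directly from Python instead of through the CLI. Below is a hypothetical sketch: the input file name, class layout and output model path are placeholders, and the config objects are constructed the same way the __main__ module (next entry) constructs them.

# Hypothetical end-to-end training sketch using the functions defined above.
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.embedding import EmbeddingManager
from sciencebeam_trainer_delft.text_classification.config import (
    AppConfig, ModelConfig, TrainingConfig
)
from sciencebeam_trainer_delft.text_classification.cli_utils import (
    load_input_data, train
)

download_manager = DownloadManager()
embedding_manager = EmbeddingManager(download_manager=download_manager)
app_config = AppConfig(
    download_manager=download_manager,
    embedding_manager=embedding_manager
)

# 'train.csv' is a placeholder input file in the expected texts/classes layout
texts, labels, list_classes = load_input_data(
    ['train.csv'], download_manager=download_manager, limit=1000
)
train(
    app_config=app_config,
    model_config=ModelConfig(
        embeddings_name='glove.6B.50d',  # default embedding name used by the CLI
        model_type='bidLstm',            # default architecture used by the CLI
        list_classes=list_classes
    ),
    training_config=TrainingConfig(batch_size=256, max_epoch=50, log_dir=None),
    train_input_texts=texts,
    train_input_labels=labels,
    model_path='data/models/textClassification/demo'
)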
import argparse
import logging
import json
from abc import abstractmethod
from typing import List, Optional
import sciencebeam_trainer_delft.utils.no_warn_if_disabled # noqa, pylint: disable=unused-import
import sciencebeam_trainer_delft.utils.no_keras_backend_message # noqa, pylint: disable=unused-import
# pylint: disable=wrong-import-order, ungrouped-imports
import keras.backend as K
import pandas as pd
from sciencebeam_trainer_delft.utils.cli import (
SubCommand,
SubCommandProcessor
)
from sciencebeam_trainer_delft.utils.io import (
auto_uploading_output_file
)
from sciencebeam_trainer_delft.utils.logging import (
tee_stdout_and_stderr_lines_to,
tee_logging_lines_to
)
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.embedding import EmbeddingManager
from sciencebeam_trainer_delft.text_classification.config import (
AppConfig,
ModelConfig,
TrainingConfig
)
from sciencebeam_trainer_delft.text_classification.reader import (
save_data_frame,
get_texts_and_classes_from_data_frame
)
from sciencebeam_trainer_delft.text_classification.cli_utils import (
load_input_data_frame,
load_input_data,
load_label_data,
train,
predict,
evaluate
)
LOGGER = logging.getLogger(__name__)
DEFAULT_MODEL_PATH = 'data/models/textClassification/toxic'
DEFAULT_EMBEDDINGS_NAME = 'glove.6B.50d'
def add_common_arguments(
parser: argparse.ArgumentParser):
parser.add_argument("--quiet", action="store_true", help="Only log errors")
parser.add_argument(
"--model-path",
required=True
)
parser.add_argument(
"--embeddings",
        default=DEFAULT_EMBEDDINGS_NAME
)
parser.add_argument(
"--embedding",
dest="embeddings",
help="Alias for --embeddings"
)
parser.add_argument(
"--preload-embedding",
help=" ".join([
"Name or URL to embedding to preload.",
"This can be useful in combination with resuming model training."
])
)
parser.add_argument(
"--no-use-lmdb", action="store_true",
help="Do not use LMDB embedding cache (load embeddings into memory instead)"
)
parser.add_argument(
"--log-file",
help=(
"If set, saves the output to the specified log file."
" This may also be a file in a bucket, in which case it will be uploaded at the end."
" Add the .gz extension if you wish to compress the file."
)
)
parser.add_argument(
"--job-dir",
help="job dir (only used when running via ai platform)"
)
def add_train_arguments(
parser: argparse.ArgumentParser):
train_group = parser.add_argument_group('train')
train_group.add_argument(
"--train-input",
nargs='+',
default=[],
action='append',
required=True,
help="provided training file"
)
train_group.add_argument(
"--train-input-limit",
type=int,
help=(
"limit the number of training samples."
" With more than one input file, the limit will be applied to"
" each of the input files individually"
)
)
train_group.add_argument(
"--architecture",
default='bidLstm',
help="The desired architecture"
)
train_group.add_argument(
"--max-epoch",
type=int,
default=100,
help="max epoch to train to"
)
train_group.add_argument(
"--batch-size",
type=int,
default=256,
help="batch size"
)
train_group.add_argument(
"--checkpoint",
help="directory where to save a checkpoint model"
)
def add_predict_arguments(
parser: argparse.ArgumentParser):
predict_group = parser.add_argument_group('predict')
predict_group.add_argument(
"--predict-input",
nargs='+',
required=True,
action='append',
help="provided predict file"
)
predict_group.add_argument(
"--predict-input-limit",
type=int,
help=(
"limit the number of predict samples."
" With more than one input file, the limit will be applied to"
" each of the input files individually"
)
)
predict_group.add_argument(
"--predict-output",
help="save output as csv / tsv to"
)
def add_eval_arguments(
parser: argparse.ArgumentParser):
eval_group = parser.add_argument_group('eval')
eval_group.add_argument(
"--eval-input",
nargs='+',
required=True,
action='append',
help="provided evaluation file"
)
eval_group.add_argument(
"--eval-label-input",
nargs='+',
required=False,
action='append',
help="provided separate evaluation label file"
)
eval_group.add_argument(
"--eval-input-limit",
type=int,
help=(
"limit the number of evaluation samples."
" With more than one input file, the limit will be applied to"
" each of the input files individually"
)
)
def _flatten_input_paths(input_paths_list: List[List[str]]) -> List[str]:
if not input_paths_list:
return []
return [input_path for input_paths in input_paths_list for input_path in input_paths]
class SubCommandNames:
TRAIN = 'train'
EVAL = 'eval'
TRAIN_EVAL = 'train_eval'
PREDICT = 'predict'
class BaseSubCommand(SubCommand):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.download_manager = None
self.embedding_manager = None
self.app_config = None
@abstractmethod
def do_run(self, args: argparse.Namespace):
pass
def preload_and_validate_embedding(
self,
embedding_name: str,
use_word_embeddings: bool = True) -> Optional[str]:
if not use_word_embeddings:
return None
embedding_name = self.embedding_manager.ensure_available(embedding_name)
LOGGER.info('embedding_name: %s', embedding_name)
self.embedding_manager.validate_embedding(embedding_name)
return embedding_name
def run(self, args: argparse.Namespace):
self.download_manager = DownloadManager()
self.embedding_manager = EmbeddingManager(
download_manager=self.download_manager
)
self.app_config = AppConfig(
download_manager=self.download_manager,
embedding_manager=self.embedding_manager
)
if args.no_use_lmdb:
self.embedding_manager.disable_embedding_lmdb_cache()
if args.preload_embedding:
self.preload_and_validate_embedding(
args.preload_embedding,
use_word_embeddings=True
)
self.do_run(args)
# see https://github.com/tensorflow/tensorflow/issues/3388
K.clear_session()
class TrainSubCommand(BaseSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_train_arguments(parser)
def do_run(self, args: argparse.Namespace):
LOGGER.info('train')
download_manager = DownloadManager()
train_input_paths = _flatten_input_paths(args.train_input)
train_input_texts, train_input_labels, list_classes = load_input_data(
train_input_paths,
download_manager=download_manager,
limit=args.train_input_limit
)
LOGGER.info('list_classes: %s', list_classes)
embedding_name = self.preload_and_validate_embedding(
args.embeddings,
use_word_embeddings=True
)
train(
app_config=self.app_config,
model_config=ModelConfig(
embeddings_name=embedding_name,
model_type=args.architecture,
list_classes=list_classes
),
training_config=TrainingConfig(
batch_size=args.batch_size,
max_epoch=args.max_epoch,
log_dir=args.checkpoint
),
train_input_texts=train_input_texts,
train_input_labels=train_input_labels,
model_path=args.model_path
)
class EvalSubCommand(BaseSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_eval_arguments(parser)
def do_run(self, args: argparse.Namespace):
LOGGER.info('eval')
download_manager = DownloadManager()
eval_input_paths = _flatten_input_paths(args.eval_input)
eval_label_input_paths = _flatten_input_paths(args.eval_label_input)
eval_input_texts, eval_input_labels, list_classes = load_input_data(
eval_input_paths,
download_manager=download_manager,
limit=args.eval_input_limit
)
if eval_label_input_paths:
eval_input_labels, _ = load_label_data(
eval_label_input_paths,
download_manager=download_manager,
limit=args.eval_input_limit
)
LOGGER.info('list_classes: %s', list_classes)
result = evaluate(
app_config=self.app_config,
eval_input_texts=eval_input_texts,
eval_input_labels=eval_input_labels,
model_path=args.model_path
)
print(result.text_formatted_report)
class TrainEvalSubCommand(BaseSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_train_arguments(parser)
add_eval_arguments(parser)
def do_run(self, args: argparse.Namespace):
LOGGER.info('train eval')
download_manager = DownloadManager()
train_input_paths = _flatten_input_paths(args.train_input)
train_input_texts, train_input_labels, list_classes = load_input_data(
train_input_paths,
download_manager=download_manager,
limit=args.train_input_limit
)
eval_input_paths = _flatten_input_paths(args.eval_input)
eval_label_input_paths = _flatten_input_paths(args.eval_label_input)
eval_input_texts, eval_input_labels, _ = load_input_data(
eval_input_paths,
download_manager=download_manager,
limit=args.eval_input_limit
)
if eval_label_input_paths:
eval_input_labels, _ = load_label_data(
eval_label_input_paths,
download_manager=download_manager,
limit=args.eval_input_limit
)
LOGGER.info('list_classes: %s', list_classes)
embedding_name = self.preload_and_validate_embedding(
args.embeddings,
use_word_embeddings=True
)
train(
app_config=self.app_config,
model_config=ModelConfig(
embeddings_name=embedding_name,
model_type=args.architecture,
list_classes=list_classes
),
training_config=TrainingConfig(
batch_size=args.batch_size,
max_epoch=args.max_epoch,
log_dir=args.checkpoint
),
train_input_texts=train_input_texts,
train_input_labels=train_input_labels,
model_path=args.model_path
)
result = evaluate(
app_config=self.app_config,
eval_input_texts=eval_input_texts,
eval_input_labels=eval_input_labels,
model_path=args.model_path
)
print(result.text_formatted_report)
class PredictSubCommand(BaseSubCommand):
def add_arguments(self, parser: argparse.ArgumentParser):
add_common_arguments(parser)
add_predict_arguments(parser)
def do_run(self, args: argparse.Namespace):
        LOGGER.info('predict')
download_manager = DownloadManager()
predict_input_paths = _flatten_input_paths(args.predict_input)
predict_df = load_input_data_frame(
predict_input_paths,
download_manager=download_manager,
limit=args.predict_input_limit
)
predict_input_texts, _, _ = get_texts_and_classes_from_data_frame(
predict_df
)
result = predict(
app_config=self.app_config,
eval_input_texts=predict_input_texts,
model_path=args.model_path
)
list_classes = result['labels']
prediction = result['prediction']
LOGGER.info('list_classes: %s', list_classes)
result_df = pd.concat([
predict_df[predict_df.columns[:2]],
pd.DataFrame(
prediction,
columns=list_classes,
index=predict_df.index
)
], axis=1)
if args.predict_output:
LOGGER.info('writing output to: %s', args.predict_output)
save_data_frame(result_df, args.predict_output)
else:
print(json.dumps(
result_df.to_dict(orient='records'),
indent=2
))
SUB_COMMANDS = [
TrainSubCommand(
SubCommandNames.TRAIN,
'Train the model using the provided input(s)'
),
EvalSubCommand(
SubCommandNames.EVAL,
'Evaluate the model using the provided input(s)'
),
TrainEvalSubCommand(
SubCommandNames.TRAIN_EVAL,
'Train and then evaluate the model using the provided input(s)'
),
PredictSubCommand(
SubCommandNames.PREDICT,
        'Run predictions using the provided input(s)'
),
]
def create_parser() -> argparse.ArgumentParser:
return argparse.ArgumentParser(
description="Trainer for GROBID models"
)
def get_subcommand_processor():
return SubCommandProcessor(SUB_COMMANDS, command_dest='command')
def parse_args(argv: List[str] = None, subcommand_processor: SubCommandProcessor = None):
parser = create_parser()
if subcommand_processor is None:
subcommand_processor = SubCommandProcessor(SUB_COMMANDS, command_dest='command')
subcommand_processor.add_sub_command_parsers(parser)
args = parser.parse_args(argv)
return args
def run(args: argparse.Namespace, subcommand_processor: SubCommandProcessor = None):
if subcommand_processor is None:
        subcommand_processor = SubCommandProcessor(SUB_COMMANDS, command_dest='command')
try:
subcommand_processor.run(args)
except BaseException as exc:
LOGGER.error('uncaught exception: %s', exc, exc_info=exc)
raise
def main(argv: List[str] = None):
subcommand_processor = get_subcommand_processor()
args = parse_args(argv, subcommand_processor=subcommand_processor)
if args.quiet:
logging.root.setLevel('ERROR')
elif args.debug:
for name in [__name__, 'sciencebeam_trainer_delft', 'delft']:
logging.getLogger(name).setLevel('DEBUG')
if args.log_file:
with auto_uploading_output_file(args.log_file, mode='w') as log_fp:
try:
with tee_stdout_and_stderr_lines_to(log_fp.write, append_line_feed=True):
with tee_logging_lines_to(log_fp.write, append_line_feed=True):
run(args, subcommand_processor=subcommand_processor)
finally:
logging.shutdown()
else:
run(args, subcommand_processor=subcommand_processor)
if __name__ == '__main__':
logging.basicConfig(level='INFO')
main()
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/__main__.py
|
__main__.py
|
| 0.818047 | 0.097476 |
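The same flow is available through main() with an explicit argv list, which can be convenient in tests or notebooks. A hypothetical sketch follows; the file names are placeholders, and the flags are the ones registered by add_common_arguments, add_train_arguments and add_eval_arguments above.

# Hypothetical invocation; train.csv / eval.csv are placeholder file names.
from sciencebeam_trainer_delft.text_classification.__main__ import main

main([
    'train_eval',
    '--model-path', 'data/models/textClassification/demo',
    '--train-input', 'train.csv',
    '--eval-input', 'eval.csv',
    '--embeddings', 'glove.6B.50d',
    '--batch-size', '256',
    '--max-epoch', '50',
])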
import logging
import os
from delft.textClassification.models import getModel
from delft.textClassification.wrapper import (
Classifier as _Classifier
)
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.embedding.embedding import Embeddings
from sciencebeam_trainer_delft.embedding.manager import EmbeddingManager
from sciencebeam_trainer_delft.text_classification.config import ModelConfig
from sciencebeam_trainer_delft.text_classification.saving import (
ModelSaver,
ModelLoader
)
LOGGER = logging.getLogger(__name__)
DEFAULT_EMBEDDINGS_PATH = './embedding-registry.json'
class Classifier(_Classifier):
def __init__(
self,
download_manager: DownloadManager = None,
embedding_registry_path: str = None,
embedding_manager: EmbeddingManager = None,
**kwargs):
self.embedding_registry_path = embedding_registry_path or DEFAULT_EMBEDDINGS_PATH
if download_manager is None:
download_manager = DownloadManager()
if embedding_manager is None:
embedding_manager = EmbeddingManager(
path=self.embedding_registry_path,
download_manager=download_manager
)
self.download_manager = download_manager
self.embedding_manager = embedding_manager
super().__init__(**kwargs)
def save_to(self, model_path: str):
        # create a subfolder for the model if it does not already exist
saver = ModelSaver(model_config=self.model_config)
saver.save_model_config(
self.model_config,
os.path.join(model_path, self.config_file)
)
if self.model_config.fold_number == 1:
if self.model is not None:
saver.save_model_weights(
self.model,
os.path.join(
model_path,
self.model_config.model_type + "." + self.weight_file
)
)
else:
LOGGER.error('Model has not been built')
else:
if self.models is None:
LOGGER.error('nfolds models have not been built')
else:
for i in range(0, self.model_config.fold_number):
saver.save_model_weights(
self.models[i],
os.path.join(
model_path,
self.model_config.model_type + ".model{0}_weights.hdf5".format(i)
)
)
LOGGER.info('nfolds model saved')
def get_embedding_for_model_config(self, model_config: ModelConfig):
embedding_name = model_config.embeddings_name
embedding_name = self.embedding_manager.ensure_available(embedding_name)
LOGGER.info('embedding_name: %s', embedding_name)
embeddings = Embeddings(
embedding_name,
path=self.embedding_registry_path,
use_ELMo=model_config.use_ELMo,
use_BERT=model_config.use_BERT
)
if not embeddings.embed_size > 0:
raise AssertionError(
'invalid embedding size, embeddings not loaded? %s' % embedding_name
)
return embeddings
def load_from(self, model_path: str):
loader = ModelLoader(download_manager=self.download_manager)
self.model_config = loader.load_model_config_from_file(
os.path.join(model_path, self.config_file)
)
# load embeddings
self.embeddings = self.get_embedding_for_model_config(self.model_config)
self.model_config.word_embedding_size = self.embeddings.embed_size
self.model = getModel(self.model_config, self.training_config)
if self.model_config.fold_number == 1:
loader.load_model_weights_from_file(
os.path.join(
model_path,
self.model_config.model_type + "." + self.weight_file
),
self.model
)
else:
self.models = []
for i in range(0, self.model_config.fold_number):
local_model = getModel(self.model_config, self.training_config)
loader.load_model_weights_from_file(
os.path.join(
model_path,
self.model_config.model_type + ".model{0}_weights.hdf5".format(i)
),
local_model
)
self.models.append(local_model)
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/wrapper.py
|
wrapper.py
|
| 0.577138 | 0.076822 |
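A short, hypothetical sketch of loading a saved model with the Classifier above and classifying a few texts, mirroring what predict() in cli_utils does; the model directory is a placeholder for one produced by save_to().

# Hypothetical usage of the Classifier wrapper defined above.
from sciencebeam_trainer_delft.text_classification.wrapper import Classifier

model = Classifier()
model.load_from('data/models/textClassification/demo')
predictions = model.predict(
    ['first text to classify', 'second text to classify'],
    output_format='csv'
)
print(model.model_config.list_classes)
print(predictions)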
import logging
from datetime import datetime
from abc import ABC
import json
import os
from keras.models import Model
from sciencebeam_trainer_delft.utils.cloud_support import auto_upload_from_local_file
from sciencebeam_trainer_delft.utils.io import open_file, write_text
from sciencebeam_trainer_delft.text_classification.config import ModelConfig
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
LOGGER = logging.getLogger(__name__)
class _BaseModelSaverLoader(ABC):
config_file = 'config.json'
weight_file = 'model_weights.hdf5'
meta_file = 'meta.json'
def get_model_weights_filename(self, model_path: str, model_config: ModelConfig):
return os.path.join(
model_path,
model_config.model_type + "." + self.weight_file
)
class ModelSaver(_BaseModelSaverLoader):
def __init__(self, model_config: ModelConfig):
self.model_config = model_config
def save_model_config(self, model_config: ModelConfig, filepath: str):
LOGGER.debug('model_config: %s', model_config)
with open_file(filepath, 'w') as fp:
model_config.save_fp(fp)
LOGGER.info('model config file saved to %s', filepath)
def save_model_weights(self, model: Model, filepath: str):
with auto_upload_from_local_file(filepath) as local_filepath:
model.save(local_filepath)
LOGGER.info('model saved to %s', filepath)
def save_meta(self, meta: dict, filepath: str):
write_text(
filepath,
json.dumps(meta, sort_keys=False, indent=4)
)
LOGGER.info('model meta saved to %s', filepath)
def update_checkpoints_meta_file(self, filepath: str, checkpoint_directory: str, epoch: int):
try:
with open_file(filepath, 'r') as fp:
meta = json.load(fp)
except FileNotFoundError:
meta = {}
checkpoint_meta = {
'epoch': (1 + epoch),
'path': checkpoint_directory,
'timestamp': datetime.utcnow().isoformat()
}
meta['checkpoints'] = meta.get('checkpoints', [])
meta['checkpoints'].append(checkpoint_meta)
meta['last_checkpoint'] = checkpoint_meta
with open_file(filepath, 'w') as fp:
json.dump(meta, fp, sort_keys=False, indent=4)
LOGGER.info('updated checkpoints meta: %s', filepath)
def save_to(self, directory: str, model: Model, meta: dict = None):
self.save_model_config(self.model_config, os.path.join(directory, self.config_file))
self.save_model_weights(
model,
self.get_model_weights_filename(directory, model_config=self.model_config)
)
if meta:
self.save_meta(meta, os.path.join(directory, self.meta_file))
def add_checkpoint_meta(self, checkpoint_directory: str, epoch: int):
self.update_checkpoints_meta_file(
os.path.join(os.path.dirname(checkpoint_directory), 'checkpoints.json'),
checkpoint_directory=checkpoint_directory,
epoch=epoch
)
class ModelLoader(_BaseModelSaverLoader):
def __init__(
self,
download_manager: DownloadManager = None):
if download_manager is None:
download_manager = DownloadManager()
self.download_manager = download_manager
def load_model_config_from_file(self, filepath: str):
LOGGER.info('loading model config from %s', filepath)
with open_file(filepath, 'r') as fp:
return ModelConfig.load_fp(fp)
def load_model_weights_from_file(self, filepath: str, model: Model):
LOGGER.info('loading model from %s', filepath)
# we need a seekable file, ensure we download the file first
local_filepath = self.download_manager.download_if_url(filepath)
# using load_weights to avoid print statement in load method
model.load_weights(local_filepath)
|
sciencebeam-trainer-delft
|
/sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/saving.py
|
saving.py
|
| 0.55097 | 0.084266 |
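A minimal, hypothetical round-trip with the saver/loader above. The tiny Sequential model stands in for a real text classification model, the output directory is a placeholder, and the ModelConfig keyword arguments follow the way the __main__ module constructs it.

# Hypothetical round-trip; the model and paths are placeholders.
from keras.models import Sequential
from keras.layers import Dense

from sciencebeam_trainer_delft.text_classification.config import ModelConfig
from sciencebeam_trainer_delft.text_classification.saving import ModelSaver, ModelLoader

model_config = ModelConfig(
    embeddings_name='glove.6B.50d',
    model_type='bidLstm',
    list_classes=['class_a', 'class_b']
)
keras_model = Sequential([Dense(2, input_shape=(4,), activation='sigmoid')])

saver = ModelSaver(model_config=model_config)
saver.save_to('data/models/textClassification/demo', model=keras_model)

loader = ModelLoader()
loaded_config = loader.load_model_config_from_file(
    'data/models/textClassification/demo/config.json'
)
loader.load_model_weights_from_file(
    'data/models/textClassification/demo/bidLstm.model_weights.hdf5',
    keras_model
)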
import logging
from collections import OrderedDict
from typing import List
import numpy as np
from sklearn.metrics import (
log_loss,
roc_auc_score,
f1_score,
precision_score,
recall_score
)
LOGGER = logging.getLogger(__name__)
class ClassificationResult:
def __init__(
self,
y_true: List[List[str]],
y_pred: List[List[str]],
label_names: List[str]
):
y_true_array: np.ndarray = np.asarray(y_true)
y_pred_array: np.ndarray = np.asarray(y_pred)
LOGGER.info('y_true: %s', y_true)
LOGGER.info('y_pred: %s', y_pred)
self.scores = OrderedDict()
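        # per-label metrics: scores are thresholded at 0.5 to obtain the binary
        # labels used for precision, recall, F1 and support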
for j, label_name in enumerate(label_names):
labels = [0, 1]
y_true_class = y_true_array[:, j]
y_pred_class = y_pred_array[:, j]
y_true_binary_class = y_true_array[:, j] >= 0.5
y_pred_binary_class = y_pred_array[:, j] >= 0.5
loss = log_loss(y_true_class, y_pred_class, labels=labels)
precision = precision_score(y_true_binary_class, y_pred_binary_class, zero_division=0)
recall = recall_score(y_true_binary_class, y_pred_binary_class, zero_division=0)
f1 = f1_score(y_true_binary_class, y_pred_binary_class, zero_division=0)
try:
roc_auc = roc_auc_score(y_true_class, y_pred_class)
except ValueError as e:
LOGGER.warning('could not calculate roc (index=%d): %s', j, e)
roc_auc = np.nan
self.scores[label_name] = {
'precision': precision,
'recall': recall,
'f1': f1,
'loss': loss,
'roc_auc': roc_auc,
'support': np.sum(y_true_binary_class)
}
self.macro_averages = {
'precision': np.mean([score['precision'] for score in self.scores.values()]),
'recall': np.mean([score['recall'] for score in self.scores.values()]),
'f1': np.mean([score['f1'] for score in self.scores.values()]),
'loss': np.mean([score['loss'] for score in self.scores.values()]),
'roc_auc': np.mean([score['roc_auc'] for score in self.scores.values()]),
'support': np.sum([score['support'] for score in self.scores.values()]),
}
@property
def text_formatted_report(self):
return self.get_text_formatted_report().rstrip()
def get_text_formatted_report(
self,
digits: int = 4,
exclude_no_support: bool = False):
name_width = max(map(len, self.scores.keys()))
last_line_heading = 'all (macro avg. / mean)'
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support", "roc_auc"]
head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
report = head_fmt.format(u'', *headers, width=width)
report += u'\n\n'
row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}' + u' {:>9.{digits}f}\n'
for type_name in sorted(self.scores.keys()):
item_scores = self.scores[type_name]
if exclude_no_support and not item_scores['support']:
continue
report += row_fmt.format(
*[
type_name,
item_scores['precision'],
item_scores['recall'],
item_scores['f1'],
item_scores['support'],
item_scores['roc_auc']
],
width=width,
digits=digits
)
report += u'\n'
report += row_fmt.format(
*[
last_line_heading,
self.macro_averages['precision'],
self.macro_averages['recall'],
self.macro_averages['f1'],
self.macro_averages['support'],
self.macro_averages['roc_auc']
],
width=width,
digits=digits
)
return report
| sciencebeam-trainer-delft | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/evaluation.py | evaluation.py |
| quality_prob: 0.86898 | learning_prob: 0.383237 |
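ClassificationResult above computes per-label precision, recall, F1, log loss and ROC AUC by thresholding both the ground truth and the predictions at 0.5, plus macro averages across labels. A self-contained sketch with toy data (the label names and arrays are illustrative only):

# Illustrative toy data only, not taken from the package's own test suite.
import numpy as np

from sciencebeam_trainer_delft.text_classification.evaluation import ClassificationResult

label_names = ['uses_data', 'shares_code']
y_true = np.array([[1, 0], [0, 1], [1, 1]])              # one column per label
y_pred = np.array([[0.9, 0.2], [0.4, 0.7], [0.8, 0.6]])  # predicted probabilities

result = ClassificationResult(y_true=y_true, y_pred=y_pred, label_names=label_names)
print(result.scores['uses_data'])   # precision/recall/f1/loss/roc_auc/support for one label
print(result.macro_averages['f1'])  # macro averages across all labels

The text_formatted_report property renders the same numbers as a per-label table with a macro-average footer.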
from typing import Tuple, List
import pandas as pd
import numpy as np
from sciencebeam_trainer_delft.utils.io import auto_uploading_output_file
# mostly copied from:
# https://github.com/kermitt2/delft/blob/v0.2.3/delft/textClassification/reader.py
def get_filepath_csv_separator(filepath: str):
if filepath.endswith('.tsv') or filepath.endswith('.tsv.gz'):
return '\t'
return ','
def load_data_frame(
filepath: str,
limit: int = None,
**kwargs) -> pd.DataFrame:
sep = get_filepath_csv_separator(filepath)
return pd.read_csv(filepath, nrows=limit, sep=sep, **kwargs)
def save_data_frame(
df: pd.DataFrame,
filepath: str,
index: bool = False,
**kwargs) -> pd.DataFrame:
sep = get_filepath_csv_separator(filepath)
with auto_uploading_output_file(filepath, mode='w') as fp:
return df.to_csv(fp, sep=sep, index=index, **kwargs)
def get_texts_and_classes_from_data_frame(
df: pd.DataFrame) -> Tuple[List[str], List[List[str]], List[str]]:
"""
Load texts and classes from a file in csv format using pandas dataframe:
id text class_0 ... class_n
id_0 text_0 class_00 ... class_n0
id_1 text_1 class_01 ... class_n1
...
id_m text_m class_0m ... class_nm
It should support any CSV file format.
Returns:
tuple(numpy array, numpy array): texts and classes
"""
df = df.copy()
df.iloc[:, 1].fillna('MISSINGVALUE', inplace=True)
texts_list = []
for j in range(0, df.shape[0]):
texts_list.append(df.iloc[j, 1])
classes = df.iloc[:, 2:]
classes_list = classes.values.tolist()
classes_label_names = list(classes.columns.values)
return np.asarray(texts_list), np.asarray(classes_list), classes_label_names
def load_texts_and_classes_pandas(
filepath: str,
limit: int = None,
**kwargs) -> Tuple[List[str], List[List[str]], List[str]]:
"""
Load texts and classes from a file in csv format using pandas dataframe:
id text class_0 ... class_n
id_0 text_0 class_00 ... class_n0
id_1 text_1 class_01 ... class_n1
...
id_m text_m class_0m ... class_nm
It should support any CSV file format.
Returns:
tuple(numpy array, numpy array): texts and classes
"""
return get_texts_and_classes_from_data_frame(
load_data_frame(filepath, limit=limit, **kwargs)
)
def load_classes_pandas(
filepath: str,
limit: int = None,
**kwargs) -> Tuple[List[List[str]], List[str]]:
"""
Load texts and classes from a file in csv format using pandas dataframe:
id class_0 ... class_n
id_0 class_00 ... class_n0
id_1 class_01 ... class_n1
...
id_m class_0m ... class_nm
It should support any CSV file format.
Returns:
tuple(numpy array, numpy array): texts and classes
"""
df = load_data_frame(filepath, limit=limit, **kwargs)
classes = df.iloc[:, 1:]
classes_list = classes.values.tolist()
classes_label_names = list(classes.columns.values)
return np.asarray(classes_list), classes_label_names
| sciencebeam-trainer-delft | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/reader.py | reader.py |
| quality_prob: 0.785925 | learning_prob: 0.337913 |
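The reader helpers above infer the separator from the file extension (tab for .tsv/.tsv.gz, comma otherwise) and return the texts, the class matrix and the label names taken from the column headers. A short sketch with a placeholder file name:

# Sketch only: 'train.csv' is a placeholder; the file is expected to follow the
# id,text,class_0,...,class_n layout described in the docstrings above.
from sciencebeam_trainer_delft.text_classification.reader import load_texts_and_classes_pandas

texts, classes, label_names = load_texts_and_classes_pandas(
    'train.csv',  # a .tsv or .tsv.gz path would switch the separator to tabs
    limit=1000    # optional cap on the number of rows read
)
print(len(texts), classes.shape, label_names)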
import logging
import math
import os
from typing import List
import numpy as np
from sklearn.metrics import log_loss, roc_auc_score
from keras.models import Model
from keras.callbacks import Callback
from sciencebeam_trainer_delft.text_classification.saving import (
ModelSaver
)
from sciencebeam_trainer_delft.text_classification.callbacks import (
ModelWithMetadataCheckpoint
)
LOGGER = logging.getLogger(__name__)
def get_callbacks(
model_saver: ModelSaver,
log_dir: str = None,
meta: dict = None) -> List[Callback]:
callbacks = []
if log_dir:
epoch_dirname = 'epoch-{epoch:05d}'
assert model_saver
save_callback = ModelWithMetadataCheckpoint(
os.path.join(log_dir, epoch_dirname),
model_saver=model_saver,
monitor='f1',
meta=meta
)
callbacks.append(save_callback)
return callbacks
# mostly copied from:
# https://github.com/kermitt2/delft/blob/v0.2.3/delft/textClassification/models.py
def train_model( # pylint: disable=too-many-statements
model: Model,
list_classes: List[str],
batch_size: int, # pylint: disable=unused-argument
max_epoch: int,
use_roc_auc: bool,
class_weights,
training_generator,
validation_generator,
val_y,
use_ELMo=False,
use_BERT=False,
multiprocessing: bool = True,
nb_workers: int = 6,
callbacks: List[Callback] = None):
best_loss = -1.0
best_roc_auc = -1.0
best_weights = None
best_epoch = 0
current_epoch = 1
if use_ELMo or use_BERT:
# worker at 0 means the training will be executed in the main thread
nb_workers = 0
multiprocessing = False
while current_epoch <= max_epoch:
model.fit_generator(
generator=training_generator,
use_multiprocessing=multiprocessing,
workers=nb_workers,
class_weight=class_weights,
epochs=current_epoch,
initial_epoch=(current_epoch - 1),
callbacks=callbacks)
y_pred = model.predict_generator(
generator=validation_generator,
use_multiprocessing=multiprocessing,
workers=nb_workers)
total_loss = 0.0
total_roc_auc = 0.0
# we distinguish 1-class and multiclass problems
if len(list_classes) == 1:
total_loss = log_loss(val_y, y_pred)
total_roc_auc = roc_auc_score(val_y, y_pred)
else:
for j in range(0, len(list_classes)):
labels = [0, 1]
loss = log_loss(val_y[:, j], y_pred[:, j], labels=labels)
total_loss += loss
try:
roc_auc = roc_auc_score(val_y[:, j], y_pred[:, j])
except ValueError as e:
LOGGER.debug('could not calculate roc (index=%d): %s', j, e)
roc_auc = np.nan
total_roc_auc += roc_auc
total_loss /= len(list_classes)
total_roc_auc /= len(list_classes)
if np.isnan(total_roc_auc):
use_roc_auc = False
if use_roc_auc:
LOGGER.info(
"Epoch %s loss %s best_loss %s (for info)",
current_epoch, total_loss, best_loss
)
LOGGER.info(
"Epoch %s roc_auc %s best_roc_auc %s (for early stop)",
current_epoch, total_roc_auc, best_roc_auc
)
else:
LOGGER.info(
"Epoch %s loss %s best_loss %s (for early stop)",
current_epoch, total_loss, best_loss
)
LOGGER.info(
"Epoch %s roc_auc %s best_roc_auc %s (for info)",
current_epoch, total_roc_auc, best_roc_auc
)
current_epoch += 1
if total_loss < best_loss or best_loss == -1 or math.isnan(best_loss) is True:
best_loss = total_loss
if use_roc_auc is False:
best_weights = model.get_weights()
best_epoch = current_epoch
elif use_roc_auc is False:
if current_epoch - best_epoch == 5:
break
if total_roc_auc > best_roc_auc or best_roc_auc == -1:
best_roc_auc = total_roc_auc
if use_roc_auc:
best_weights = model.get_weights()
best_epoch = current_epoch
elif use_roc_auc:
if current_epoch - best_epoch == 5:
break
model.set_weights(best_weights)
if use_roc_auc:
return model, best_roc_auc
else:
return model, best_loss
| sciencebeam-trainer-delft | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/text_classification/models.py | models.py |
| quality_prob: 0.699357 | learning_prob: 0.183942 |
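train_model above drives Keras one epoch at a time (epochs=current_epoch with initial_epoch=current_epoch - 1) so it can compute validation loss and ROC AUC itself, keep the best weights in memory, and stop once the monitored metric (ROC AUC when use_roc_auc is True, otherwise loss) has not improved for five consecutive epochs; if ROC AUC becomes NaN it falls back to monitoring loss. A small sketch of wiring up the per-epoch checkpoint callbacks, where the log directory and metadata dict are placeholders and the ModelSaver instance is assumed to come from the package's saving module:

# Sketch only: 'output/checkpoints' and the meta dict are placeholders.
from sciencebeam_trainer_delft.text_classification.models import get_callbacks


def make_training_callbacks(model_saver, log_dir='output/checkpoints'):
    # with a log_dir set, get_callbacks adds a ModelWithMetadataCheckpoint that
    # writes the model plus metadata into an epoch-{epoch:05d} sub-directory
    return get_callbacks(
        model_saver=model_saver,
        log_dir=log_dir,
        meta={'note': 'hypothetical run metadata'}
    )

The returned list can then be passed as the callbacks argument of train_model.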
from __future__ import absolute_import
import logging
import sys
from io import StringIO
from contextlib import contextmanager
from typing import Callable, IO, List, Optional, Sequence, TextIO, cast
LOGGER = logging.getLogger(__name__)
def configure_logging(level='INFO', secondary_level='WARN'):
logging.basicConfig(level=secondary_level)
logging.getLogger('delft').setLevel(level)
logging.getLogger('sciencebeam_trainer_delft').setLevel(level)
def reset_logging(**kwargs):
logging.root.handlers = []
configure_logging(**kwargs)
class TeeStreamToLineWriter:
def __init__(
self,
*line_writers: Callable[[str], None],
raw_fp: IO = None,
append_line_feed: bool = False):
self.line_writers = line_writers
self.raw_fp = raw_fp
self.line_buffer = StringIO()
self.append_line_feed = append_line_feed
def _write_line(self, line: str):
if self.append_line_feed:
line += '\n'
for line_writer in self.line_writers:
line_writer(line)
def _flush_message(self, message: str):
self._write_line(message.split('\r')[-1].rstrip())
def write(self, message: str):
if self.raw_fp:
self.raw_fp.write(message)
if not message:
return
if message.startswith('\n'):
self._flush_message(self.line_buffer.getvalue())
self.line_buffer = StringIO()
message = message[1:]
if not message:
return
lines = message.split('\n')
complete_lines = lines[:-1]
remaining_message = lines[-1]
if complete_lines:
self.line_buffer.write(complete_lines[0])
complete_lines[0] = self.line_buffer.getvalue()
self.line_buffer = StringIO()
else:
self.line_buffer.write(remaining_message)
for complete_line in complete_lines:
self._flush_message(complete_line)
def flush(self):
if self.raw_fp:
self.raw_fp.flush()
@contextmanager
def tee_stdout_lines_to(
*line_writers: Callable[[str], None],
**kwargs):
prev_stdout = sys.stdout
try:
sys.stdout = cast(TextIO, TeeStreamToLineWriter(
*line_writers,
raw_fp=prev_stdout,
**kwargs
))
yield sys.stdout
finally:
sys.stdout = prev_stdout
@contextmanager
def tee_stderr_lines_to(
*line_writers: Callable[[str], None],
**kwargs):
prev_stderr = sys.stderr
try:
sys.stderr = cast(TextIO, TeeStreamToLineWriter(
*line_writers,
raw_fp=prev_stderr,
**kwargs
))
yield sys.stderr
finally:
sys.stderr = prev_stderr
@contextmanager
def tee_stdout_and_stderr_lines_to(
*line_writers: Callable[[str], None],
**kwargs):
with tee_stdout_lines_to(*line_writers, **kwargs) as stdout:
with tee_stderr_lines_to(*line_writers, **kwargs) as stderr:
yield (stdout, stderr)
class LineWriterLoggingHandler(logging.Handler):
def __init__(
self,
*line_writers: Callable[[str], None],
append_line_feed: bool = False,
**kwargs):
self.line_writers: Sequence[Callable[[str], None]] = line_writers
self.append_line_feed = append_line_feed
self._logging = False
super().__init__(**kwargs)
def _write_line(self, line: str):
if self.append_line_feed:
line += '\n'
for line_writer in self.line_writers:
try:
line_writer(line)
except Exception as exc: # pylint: disable=broad-except
LOGGER.warning(
'failed to write: %r due to %s', line, exc, exc_info=exc
)
def emit(self, record: logging.LogRecord):
if self._logging:
return
try:
self._logging = True
self._write_line(self.format(record))
finally:
self._logging = False
def get_default_logging_formatter() -> Optional[logging.Formatter]:
for root_handler in logging.root.handlers:
if isinstance(root_handler, logging.StreamHandler):
return root_handler.formatter
return None
def flush_logging_handlers(handlers: List[logging.Handler]):
for handler in handlers:
handler.flush()
@contextmanager
def tee_logging_lines_to(
*line_writers: Callable[[str], None],
logger: logging.Logger = None,
formatter: logging.Formatter = None,
**kwargs):
if logger is None:
logger = logging.root
if formatter is None:
formatter = get_default_logging_formatter()
prev_handlers = logger.handlers
try:
handler = LineWriterLoggingHandler(*line_writers, **kwargs)
if formatter is not None:
handler.setFormatter(formatter)
logger.addHandler(handler)
yield logger
finally:
flush_logging_handlers(logger.handlers)
flush_logging_handlers(logging.root.handlers)
logger.handlers = prev_handlers
| sciencebeam-trainer-delft | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/utils/logging.py | logging.py |
| quality_prob: 0.542621 | learning_prob: 0.058346 |
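The tee utilities above mirror stdout/stderr and log records line by line to arbitrary writer callables while the original stream still receives the raw output, collapsing carriage-return progress updates to their final state before forwarding them. A minimal sketch that captures completed stdout lines into a list:

# Minimal sketch: each completed line is appended to 'captured' while the real
# stdout still prints the raw output.
from sciencebeam_trainer_delft.utils.logging import tee_stdout_lines_to

captured = []
with tee_stdout_lines_to(captured.append):
    print('epoch 1/10')
    print('epoch 2/10')
print(captured)  # ['epoch 1/10', 'epoch 2/10']

tee_logging_lines_to works the same way for log records, temporarily attaching a LineWriterLoggingHandler to the given logger.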