id | file_name | file_path | content | size | language | extension | total_lines | avg_line_length | max_line_length | alphanum_fraction | repo_name | repo_stars | repo_forks | repo_open_issues | repo_license | repo_extraction_date
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
300 | test_processors.py | internetarchive_openlibrary/openlibrary/tests/core/test_processors.py | from openlibrary.core.processors import readableurls as processors
from infogami.infobase import client, common
import web
class MockSite:
def __init__(self):
self.docs = {}
self.olids = {}
def get(self, key):
return self.docs.get(key)
def add(self, doc):
# @@ UGLY!
doc = common.parse_query(doc)
doc = client.Site(None, None)._process_dict(doc)
key = doc['key']
self.docs[key] = client.create_thing(self, key, doc)
olid = key.split("/")[-1]
if web.re_compile(r'OL\d+[A-Z]').match(olid):
self.olids[olid] = key
def _request(self, path, method=None, data=None):
if path == "/olid_to_key":
olid = data['olid']
return web.storage(key=self.olids.get(olid))
def _get_backreferences(self):
return {}
def test_MockSite():
site = MockSite()
assert site.get("/books/OL1M") is None
book = {"key": "/books/OL1M", "type": {"key": "/type/edition"}, "title": "foo"}
site.add(book)
assert site.get("/books/OL1M") is not None
assert site.get("/books/OL1M").dict() == book
assert site._request("/olid_to_key", data={"olid": "OL1M"}) == {
"key": "/books/OL1M"
}
def _get_mock_site():
site = MockSite()
book = {"key": "/books/OL1M", "type": {"key": "/type/edition"}, "title": "foo"}
site.add(book)
list = {
"key": "/people/joe/lists/OL1L",
"type": {"key": "/type/list"},
"name": "foo",
}
site.add(list)
return site
def test_get_object():
site = _get_mock_site()
def f(key):
doc = processors._get_object(site, key)
return doc and doc.key
assert f("/books/OL1M") == "/books/OL1M"
assert f("/b/OL1M") == "/books/OL1M"
assert f("/whatever/OL1M") == "/books/OL1M"
assert f("/not-there") is None
_mock_site = _get_mock_site()
def get_readable_path(path, encoding=None):
patterns = processors.ReadableUrlProcessor.patterns
return processors.get_readable_path(_mock_site, path, patterns, encoding=encoding)
def test_book_urls():
f = get_readable_path
# regular pages
assert f("/books/OL1M") == ("/books/OL1M", "/books/OL1M/foo")
assert f("/books/OL1M/foo") == ("/books/OL1M", "/books/OL1M/foo")
assert f("/books/OL1M/foo/edit") == ("/books/OL1M/edit", "/books/OL1M/foo/edit")
# with bad title
assert f("/books/OL1M/bar") == ("/books/OL1M", "/books/OL1M/foo")
assert f("/books/OL1M/bar/edit") == ("/books/OL1M/edit", "/books/OL1M/foo/edit")
# test /b/ redirects
assert f("/b/OL1M") == ("/books/OL1M", "/books/OL1M/foo")
assert f("/b/OL1M/foo/edit") == ("/books/OL1M/edit", "/books/OL1M/foo/edit")
# test olid redirects
assert f("/whatever/OL1M") == ("/books/OL1M", "/books/OL1M/foo")
# test encoding
assert f("/books/OL1M.json") == ("/books/OL1M.json", "/books/OL1M.json")
assert f("/books/OL1M", encoding="json") == ("/books/OL1M", "/books/OL1M")
def test_list_urls():
f = get_readable_path
print(f("/people/joe/lists/OL1L"))
assert f("/people/joe/lists/OL1L") == (
"/people/joe/lists/OL1L",
"/people/joe/lists/OL1L/foo",
)
assert f("/people/joe/lists/OL1L/bar") == (
"/people/joe/lists/OL1L",
"/people/joe/lists/OL1L/foo",
)
assert f("/people/joe/lists/OL1L/bar/edit") == (
"/people/joe/lists/OL1L/edit",
"/people/joe/lists/OL1L/foo/edit",
)
| 3,490 | Python | .py | 90 | 32.511111 | 86 | 0.593694 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
301 | test_wikidata.py | internetarchive_openlibrary/openlibrary/tests/core/test_wikidata.py | import pytest
from unittest.mock import patch
from openlibrary.core import wikidata
from datetime import datetime, timedelta
EXAMPLE_WIKIDATA_DICT = {
'id': "Q42",
'type': 'str',
'labels': {'en': ''},
'descriptions': {'en': ''},
'aliases': {'en': ['']},
'statements': {'': {}},
'sitelinks': {'': {}},
}
def createWikidataEntity(
qid: str = "Q42", expired: bool = False
) -> wikidata.WikidataEntity:
merged_dict = EXAMPLE_WIKIDATA_DICT.copy()
merged_dict['id'] = qid
updated_days_ago = wikidata.WIKIDATA_CACHE_TTL_DAYS + 1 if expired else 0
return wikidata.WikidataEntity.from_dict(
merged_dict, datetime.now() - timedelta(days=updated_days_ago)
)
EXPIRED = "expired"
MISSING = "missing"
VALID_CACHE = ""
@pytest.mark.parametrize(
"bust_cache, fetch_missing, status, expected_web_call, expected_cache_call",
[
# if bust_cache, always call web, never call cache
(True, True, VALID_CACHE, True, False),
(True, False, VALID_CACHE, True, False),
# if not fetch_missing, only call web when expired
(False, False, VALID_CACHE, False, True),
(False, False, EXPIRED, True, True),
# if fetch_missing, only call web when missing or expired
(False, True, VALID_CACHE, False, True),
(False, True, MISSING, True, True),
(False, True, EXPIRED, True, True),
],
)
def test_get_wikidata_entity(
bust_cache: bool,
fetch_missing: bool,
status: str,
expected_web_call: bool,
expected_cache_call: bool,
) -> None:
with (
patch.object(wikidata, "_get_from_cache") as mock_get_from_cache,
patch.object(wikidata, "_get_from_web") as mock_get_from_web,
):
if status == EXPIRED:
mock_get_from_cache.return_value = createWikidataEntity(expired=True)
elif status == MISSING:
mock_get_from_cache.return_value = None
else:
mock_get_from_cache.return_value = createWikidataEntity()
wikidata.get_wikidata_entity(
'Q42', bust_cache=bust_cache, fetch_missing=fetch_missing
)
if expected_web_call:
mock_get_from_web.assert_called_once()
else:
mock_get_from_web.assert_not_called()
if expected_cache_call:
mock_get_from_cache.assert_called_once()
else:
mock_get_from_cache.assert_not_called()
| 2,429 | Python | .py | 68 | 29.235294 | 81 | 0.630952 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
302 | test_models.py | internetarchive_openlibrary/openlibrary/tests/core/test_models.py | from openlibrary.core import models
import pytest
class MockSite:
def get(self, key):
return models.Thing(self, key, data={})
def _get_backreferences(self, thing):
return {}
class MockLendableEdition(models.Edition):
def get_ia_collections(self):
return ['lendinglibrary']
class MockPrivateEdition(models.Edition):
def get_ia_collections(self):
return ['lendinglibrary', 'georgetown-university-law-library-rr']
class TestEdition:
def mock_edition(self, edition_class):
data = {"key": "/books/OL1M", "type": {"key": "/type/edition"}, "title": "foo"}
return edition_class(MockSite(), "/books/OL1M", data=data)
def test_url(self):
e = self.mock_edition(models.Edition)
assert e.url() == "/books/OL1M/foo"
assert e.url(v=1) == "/books/OL1M/foo?v=1"
assert e.url(suffix="/add-cover") == "/books/OL1M/foo/add-cover"
data = {
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
}
e = models.Edition(MockSite(), "/books/OL1M", data=data)
assert e.url() == "/books/OL1M/untitled"
def test_get_ebook_info(self):
e = self.mock_edition(models.Edition)
assert e.get_ebook_info() == {}
def test_is_not_in_private_collection(self):
e = self.mock_edition(MockLendableEdition)
assert not e.is_in_private_collection()
def test_in_borrowable_collection_cuz_not_in_private_collection(self):
e = self.mock_edition(MockLendableEdition)
assert e.in_borrowable_collection()
def test_is_in_private_collection(self):
e = self.mock_edition(MockPrivateEdition)
assert e.is_in_private_collection()
def test_not_in_borrowable_collection_cuz_in_private_collection(self):
e = self.mock_edition(MockPrivateEdition)
assert not e.in_borrowable_collection()
@pytest.mark.parametrize(
["isbn_or_asin", "expected"],
[
("1111111111", ("1111111111", "")), # ISBN 10
("9780747532699", ("9780747532699", "")), # ISBN 13
("B06XYHVXVJ", ("", "B06XYHVXVJ")), # ASIN
("b06xyhvxvj", ("", "B06XYHVXVJ")), # Lower case ASIN
("", ("", "")), # Nothing at all.
],
)
def test_get_isbn_or_asin(self, isbn_or_asin, expected) -> None:
e: models.Edition = self.mock_edition(MockPrivateEdition)
got = e.get_isbn_or_asin(isbn_or_asin)
assert got == expected
@pytest.mark.parametrize(
["isbn", "asin", "expected"],
[
("1111111111", "", True), # ISBN 10
("", "B06XYHVXVJ", True), # ASIN
("9780747532699", "", True), # ISBN 13
("0", "", False), # Invalid ISBN length
("", "0", False), # Invalid ASIN length
("", "", False), # Nothing at all.
],
)
def test_is_valid_identifier(self, isbn, asin, expected) -> None:
e: models.Edition = self.mock_edition(MockPrivateEdition)
got = e.is_valid_identifier(isbn=isbn, asin=asin)
assert got == expected
@pytest.mark.parametrize(
["isbn", "asin", "expected"],
[
("1111111111", "", ["1111111111", "9781111111113"]),
("9780747532699", "", ["0747532699", "9780747532699"]),
("", "B06XYHVXVJ", ["B06XYHVXVJ"]),
(
"9780747532699",
"B06XYHVXVJ",
["0747532699", "9780747532699", "B06XYHVXVJ"],
),
("", "", []),
],
)
def test_get_identifier_forms(
self, isbn: str, asin: str, expected: list[str]
) -> None:
e: models.Edition = self.mock_edition(MockPrivateEdition)
got = e.get_identifier_forms(isbn=isbn, asin=asin)
assert got == expected
class TestAuthor:
def test_url(self):
data = {"key": "/authors/OL1A", "type": {"key": "/type/author"}, "name": "foo"}
e = models.Author(MockSite(), "/authors/OL1A", data=data)
assert e.url() == "/authors/OL1A/foo"
assert e.url(v=1) == "/authors/OL1A/foo?v=1"
assert e.url(suffix="/add-photo") == "/authors/OL1A/foo/add-photo"
data = {
"key": "/authors/OL1A",
"type": {"key": "/type/author"},
}
e = models.Author(MockSite(), "/authors/OL1A", data=data)
assert e.url() == "/authors/OL1A/unnamed"
class TestSubject:
def test_url(self):
subject = models.Subject({"key": "/subjects/love"})
assert subject.url() == "/subjects/love"
assert subject.url("/lists") == "/subjects/love/lists"
class TestWork:
def test_resolve_redirect_chain(self, monkeypatch):
# e.g. https://openlibrary.org/works/OL2163721W.json
# Chain:
type_redir = {"key": "/type/redirect"}
type_work = {"key": "/type/work"}
work1_key = "/works/OL123W"
work2_key = "/works/OL234W"
work3_key = "/works/OL345W"
work4_key = "/works/OL456W"
work1 = {"key": work1_key, "location": work2_key, "type": type_redir}
work2 = {"key": work2_key, "location": work3_key, "type": type_redir}
work3 = {"key": work3_key, "location": work4_key, "type": type_redir}
work4 = {"key": work4_key, "type": type_work}
import web
from openlibrary.mocks import mock_infobase
site = mock_infobase.MockSite()
site.save(web.storage(work1))
site.save(web.storage(work2))
site.save(web.storage(work3))
site.save(web.storage(work4))
monkeypatch.setattr(web.ctx, "site", site, raising=False)
work_key = "/works/OL123W"
redirect_chain = models.Work.get_redirect_chain(work_key)
assert redirect_chain
resolved_work = redirect_chain[-1]
assert (
str(resolved_work.type) == type_work['key']
), f"{resolved_work} of type {resolved_work.type} should be {type_work['key']}"
assert resolved_work.key == work4_key, f"Should be work4.key: {resolved_work}"
| 6,092 | Python | .py | 140 | 34.678571 | 87 | 0.5827 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
303 | test_i18n.py | internetarchive_openlibrary/openlibrary/tests/core/test_i18n.py | import web
from openlibrary.mocks.mock_infobase import MockSite
# The i18n module should be moved to core.
from openlibrary import i18n
class MockTranslations(dict):
def gettext(self, message):
return self.get(message, message)
def ungettext(self, message1, message2, n):
if n == 1:
return self.gettext(message1)
else:
return self.gettext(message2)
class MockLoadTranslations(dict):
def __call__(self, lang):
return self.get(lang)
def init(self, lang, translations):
self[lang] = MockTranslations(translations)
class Test_ungettext:
def setup_monkeypatch(self, monkeypatch):
self.d = MockLoadTranslations()
ctx = web.storage()
monkeypatch.setattr(i18n, "load_translations", self.d)
monkeypatch.setattr(web, "ctx", ctx)
monkeypatch.setattr(web.webapi, "ctx", web.ctx)
self._load_fake_context()
web.ctx.lang = 'en'
web.ctx.site = MockSite()
def _load_fake_context(self):
self.app = web.application()
self.env = {
"PATH_INFO": "/",
"HTTP_METHOD": "GET",
}
self.app.load(self.env)
def test_ungettext(self, monkeypatch):
self.setup_monkeypatch(monkeypatch)
assert i18n.ungettext("book", "books", 1) == "book"
assert i18n.ungettext("book", "books", 2) == "books"
web.ctx.lang = 'fr'
self.d.init(
'fr',
{
'book': 'libre',
'books': 'libres',
},
)
assert i18n.ungettext("book", "books", 1) == "libre"
assert i18n.ungettext("book", "books", 2) == "libres"
web.ctx.lang = 'te'
assert i18n.ungettext("book", "books", 1) == "book"
assert i18n.ungettext("book", "books", 2) == "books"
def test_ungettext_with_args(self, monkeypatch):
self.setup_monkeypatch(monkeypatch)
assert i18n.ungettext("one book", "%(n)d books", 1, n=1) == "one book"
assert i18n.ungettext("one book", "%(n)d books", 2, n=2) == "2 books"
web.ctx.lang = 'fr'
self.d.init(
'fr',
{
'one book': 'un libre',
'%(n)d books': '%(n)d libres',
},
)
assert i18n.ungettext("one book", "%(n)d books", 1, n=1) == "un libre"
assert i18n.ungettext("one book", "%(n)d books", 2, n=2) == "2 libres"
| 2,465 | Python | .py | 65 | 28.846154 | 78 | 0.566204 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
304 | test_model.py | internetarchive_openlibrary/openlibrary/tests/core/lists/test_model.py | from openlibrary.mocks.mock_infobase import MockSite
import openlibrary.core.lists.model as list_model
class TestList:
def test_owner(self):
list_model.register_models()
self._test_list_owner("/people/anand")
self._test_list_owner("/people/anand-test")
self._test_list_owner("/people/anand_test")
def _test_list_owner(self, user_key):
site = MockSite()
list_key = user_key + "/lists/OL1L"
self.save_doc(site, "/type/user", user_key)
self.save_doc(site, "/type/list", list_key)
list = site.get(list_key)
assert list is not None
assert isinstance(list, list_model.List)
assert list.get_owner() is not None
assert list.get_owner().key == user_key
def save_doc(self, site, type, key, **fields):
d = {"key": key, "type": {"key": type}}
d.update(fields)
site.save(d)
| 908 | Python | .py | 22 | 33.590909 | 52 | 0.625711 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
305 | mock_infobase.py | internetarchive_openlibrary/openlibrary/mocks/mock_infobase.py | """Simple implementation of mock infogami site to use in testing.
"""
import glob
import itertools
import json
import re
import pytest
import web
from datetime import datetime
from infogami.infobase import client, common, account, config as infobase_config
from infogami import config
from openlibrary.plugins.upstream.models import Changeset
from openlibrary.plugins.upstream.utils import safeget
key_patterns = {
'work': '/works/OL%dW',
'edition': '/books/OL%dM',
'author': '/authors/OL%dA',
}
class MockSite:
def __init__(self):
self.reset()
def reset(self):
self.store = MockStore()
if config.get('infobase') is None:
config.infobase = {}
infobase_config.secret_key = "foobar"
config.infobase['secret_key'] = "foobar"
self.account_manager = self.create_account_manager()
self._cache = {}
self.docs = {}
self.docs_historical = {}
self.changesets = []
self.index = []
self.keys = {'work': 0, 'author': 0, 'edition': 0}
def create_account_manager(self):
# Hack to use the accounts stuff from Infogami
infobase_config.user_root = "/people"
store = web.storage(store=self.store)
site = web.storage(store=store, save_many=self.save_many)
return account.AccountManager(site, config.infobase['secret_key'])
def _save_doc(self, query, timestamp):
key = query['key']
if key in self.docs:
rev = self.docs[key]['revision'] + 1
else:
rev = 1
doc = dict(query)
doc['revision'] = rev
doc['latest_revision'] = rev
doc['last_modified'] = {
"type": "/type/datetime",
"value": timestamp.isoformat(),
}
if rev == 1:
doc['created'] = doc['last_modified']
else:
doc['created'] = self.docs[key]['created']
self.docs[key] = doc
self.docs_historical[(key, rev)] = doc
return doc
def save(
self, query, comment=None, action=None, data=None, timestamp=None, author=None
):
timestamp = timestamp or datetime.now()
if author:
author = {"key": author.key}
doc = self._save_doc(query, timestamp)
changes = [web.storage({"key": doc['key'], "revision": doc['revision']})]
changeset = self._make_changeset(
timestamp=timestamp,
kind=action,
comment=comment,
data=data,
changes=changes,
author=author,
)
self.changesets.append(changeset)
self.reindex(doc)
def save_many(
self, query, comment=None, action=None, data=None, timestamp=None, author=None
):
timestamp = timestamp or datetime.now()
docs = [self._save_doc(doc, timestamp) for doc in query]
if author:
author = {"key": author.key}
changes = [
web.storage({"key": doc['key'], "revision": doc['revision']})
for doc in docs
]
changeset = self._make_changeset(
timestamp=timestamp,
kind=action,
comment=comment,
data=data,
changes=changes,
author=author,
)
self.changesets.append(changeset)
for doc in docs:
self.reindex(doc)
def quicksave(self, key, type="/type/object", **kw):
"""Handy utility to save an object with less code and get the saved object as return value.
foo = mock_site.quicksave("/books/OL1M", "/type/edition", title="Foo")
"""
query = {
"key": key,
"type": {"key": type},
}
query.update(kw)
self.save(query)
return self.get(key)
def _make_changeset(self, timestamp, kind, comment, data, changes, author=None):
id = len(self.changesets)
return {
"id": id,
"kind": kind or "update",
"comment": comment,
"data": data or {},
"changes": changes,
"timestamp": timestamp.isoformat(),
"author": author,
"ip": "127.0.0.1",
"bot": False,
}
def get_change(self, cid: int) -> Changeset:
return Changeset(self, self.changesets[cid])
def recentchanges(self, query):
limit = query.pop("limit", 1000)
offset = query.pop("offset", 0)
author = query.pop("author", None)
if not author:
raise NotImplementedError(
"MockSite.recentchanges without author not implemented"
)
result = list(
itertools.islice(
(
Changeset(self, c)
for c in reversed(self.changesets)
if safeget(lambda: c['author']['key']) == author
),
offset,
offset + limit,
)
)
return result
def get(self, key, revision=None, lazy=False):
if revision:
data = self.docs_historical.get((key, revision))
else:
data = self.docs.get(key)
data = data and web.storage(common.parse_query(data))
return data and client.create_thing(self, key, self._process_dict(data))
def _process(self, value):
if isinstance(value, list):
return [self._process(v) for v in value]
elif isinstance(value, dict):
d = {}
for k, v in value.items():
d[k] = self._process(v)
return client.create_thing(self, d.get('key'), d)
elif isinstance(value, common.Reference):
return client.create_thing(self, str(value), None)
else:
return value
def _process_dict(self, data):
d = {}
for k, v in data.items():
d[k] = self._process(v)
return d
def get_many(self, keys):
return [self.get(k) for k in keys if k in self.docs]
def things(self, query):
limit = query.pop('limit', 100)
offset = query.pop('offset', 0)
keys = set(self.docs)
for k, v in query.items():
if isinstance(v, dict):
# query keys need to be flattened properly,
# this corrects any nested keys that have been included
# in values.
flat = common.flatten_dict(v)[0]
k = web.rstrips(k + '.' + flat[0], '.key')
v = flat[1]
keys = {k for k in self.filter_index(self.index, k, v) if k in keys}
keys = sorted(keys)
return keys[offset : offset + limit]
def regex_ilike(self, pattern: str, text: str) -> bool:
"""Construct a regex pattern for ILIKE operation and match against the text."""
# Remove '_' to ignore single character matches, the same as Infobase.
regex_pattern = re.escape(pattern).replace(r"\*", ".*").replace("_", "")
return bool(re.match(regex_pattern, text, re.IGNORECASE))
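    # Illustrative values (not in the original module), mirroring
    # test_mock_infobase.test_ilike_compatibility: for the pattern "B. Baggins",
    #
    #     MockSite().regex_ilike("B. Baggins", "b. BaGGinS")   # -> True
    #     MockSite().regex_ilike("B. Baggins", "b_ BaGGinS")   # -> False
    #     MockSite().regex_ilike("B. Baggins", ".B. Baggins")  # -> False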
def filter_index(self, index, name, value):
operations = {
"~": lambda i, value: isinstance(i.value, str)
and self.regex_ilike(value, i.value),
"<": lambda i, value: i.value < value,
">": lambda i, value: i.value > value,
"!": lambda i, value: i.value != value,
"=": lambda i, value: i.value == value,
}
pattern = ".*([%s])$" % "".join(operations)
rx = web.re_compile(pattern)
if m := rx.match(name):
op = m.group(1)
name = name[:-1]
else:
op = "="
f = operations[op]
if name == 'isbn_':
names = ['isbn_10', 'isbn_13']
else:
names = [name]
if isinstance(value, list): # Match any of the elements in value if it's a list
for n in names:
for i in index:
if i.name == n and any(f(i, v) for v in value):
yield i.key
else: # Otherwise just match directly
for n in names:
for i in index:
if i.name == n and f(i, value):
yield i.key
def compute_index(self, doc):
key = doc['key']
index = common.flatten_dict(doc)
for k, v in index:
# for handling last_modified.value
if k.endswith(".value"):
k = web.rstrips(k, ".value")
if k.endswith(".key"):
yield web.storage(
key=key, datatype="ref", name=web.rstrips(k, ".key"), value=v
)
elif isinstance(v, str):
yield web.storage(key=key, datatype="str", name=k, value=v)
elif isinstance(v, int):
yield web.storage(key=key, datatype="int", name=k, value=v)
def reindex(self, doc):
self.index = [i for i in self.index if i.key != doc['key']]
self.index.extend(self.compute_index(doc))
def find_user_by_email(self, email):
return None
def versions(self, q):
return []
def _get_backreferences(self, doc):
return {}
def _load(self, key, revision=None):
doc = self.get(key, revision=revision)
data = doc.dict()
data = web.storage(common.parse_query(data))
return self._process_dict(data)
def new(self, key, data=None):
"""Creates a new thing in memory."""
data = common.parse_query(data)
data = self._process_dict(data or {})
return client.create_thing(self, key, data)
def new_key(self, type):
assert type.startswith('/type/')
t = type[6:]
self.keys[t] += 1
return key_patterns[t] % self.keys[t]
def register(self, username, displayname, email, password):
try:
self.account_manager.register(
username=username,
email=email,
password=password,
data={"displayname": displayname},
)
except common.InfobaseException as e:
raise client.ClientException("bad_data", str(e))
def activate_account(self, username):
try:
self.account_manager.activate(username=username)
except common.InfobaseException as e:
raise client.ClientException(str(e))
def update_account(self, username, **kw):
status = self.account_manager.update(username, **kw)
if status != "ok":
raise client.ClientException("bad_data", "Account activation failed.")
def login(self, username, password):
status = self.account_manager.login(username, password)
if status == "ok":
self.account_manager.set_auth_token("/people/" + username)
else:
d = {"code": status}
raise client.ClientException(
"bad_data", msg="Login failed", json=json.dumps(d)
)
def find_account(self, username=None, email=None):
if username is not None:
return self.store.get("account/" + username)
else:
try:
return self.store.values(type="account", name="email", value=email)[0]
except IndexError:
return None
def get_user(self):
if auth_token := web.ctx.get("infobase_auth_token", ""):
try:
user_key, login_time, digest = auth_token.split(',')
except ValueError:
return
a = self.account_manager
if a._check_salted_hash(a.secret_key, user_key + "," + login_time, digest):
return self.get(user_key)
class MockConnection:
def get_auth_token(self):
return web.ctx.infobase_auth_token
def set_auth_token(self, token):
web.ctx.infobase_auth_token = token
class MockStore(dict):
def __setitem__(self, key, doc):
doc['_key'] = key
dict.__setitem__(self, key, doc)
put = __setitem__
def put_many(self, docs):
self.update((doc['_key'], doc) for doc in docs)
def _query(self, type=None, name=None, value=None, limit=100, offset=0):
for doc in dict.values(self):
if type is not None and doc.get("type", "") != type:
continue
if name is not None and doc.get(name) != value:
continue
yield doc
def keys(self, **kw):
return [doc['_key'] for doc in self._query(**kw)]
def values(self, **kw):
return list(self._query(**kw))
def items(self, **kw):
return [(doc["_key"], doc) for doc in self._query(**kw)]
@pytest.fixture
def mock_site(request):
"""mock_site funcarg.
Creates a mock site, assigns it to web.ctx.site and returns it.
"""
def read_types():
for path in glob.glob("openlibrary/plugins/openlibrary/types/*.type"):
text = open(path).read()
doc = eval(text, {'true': True, 'false': False})
if isinstance(doc, list):
yield from doc
else:
yield doc
def setup_models():
from openlibrary.plugins.upstream import models
models.setup()
site = MockSite()
setup_models()
for doc in read_types():
site.save(doc)
old_ctx = dict(web.ctx)
web.ctx.clear()
web.ctx.site = site
web.ctx.conn = MockConnection()
web.ctx.env = web.ctx.environ = web.storage()
web.ctx.headers = []
def undo():
web.ctx.clear()
web.ctx.update(old_ctx)
request.addfinalizer(undo)
return site
| 13,718 | Python | .py | 363 | 27.633609 | 99 | 0.554173 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
306 | mock_ol.py | internetarchive_openlibrary/openlibrary/mocks/mock_ol.py | import pytest
import re
import web
from infogami import config
from infogami.utils import delegate
try: # newer versions of web.py
from web.browser import AppBrowser
except ImportError: # older versions of web.py
from web import AppBrowser
from openlibrary.mocks.mock_infobase import mock_site, MockConnection
from openlibrary.plugins import ol_infobase
@pytest.fixture
def ol(request):
"""ol funcarg for pytest tests.
    The ol object exposes the following:
* ol.browser(): web.py browser object that works with OL webapp
* ol.site: mock site (also accessible from web.ctx.site)
* ol.sentmail: Last mail sent
"""
return OL(request)
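# A minimal usage sketch (not part of the original module): a test that takes
# the `ol` funcarg can drive the mocked webapp through ol.browser() and inspect
# outgoing mail via ol.sentmail. The route "/" and the assertions below are
# assumptions for illustration, not real Open Library tests.
def _example_ol_usage(ol):
    browser = ol.browser()
    browser.open("/")  # fetch a page from the mocked OL webapp
    assert ol.site is web.ctx.site  # the mock site is installed globally
    assert ol.sentmail is None  # nothing has been emailed yet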
@web.memoize
def load_plugins():
config.plugin_path = ["openlibrary.plugins", ""]
config.plugins = ["openlibrary", "worksearch", "upstream", "admin"]
delegate._load()
class EMail(web.storage):
def extract_links(self):
"""Extracts link from the email message."""
return re.findall(r"http://[^\s]*", self.message)
class OLBrowser(AppBrowser):
def get_text(self, e=None, name=None, **kw):
if name or kw:
e = self.get_soup().find(name=name, **kw)
return AppBrowser.get_text(self, e)
class OL:
"""Mock OL object for all tests."""
@pytest.fixture
def __init__(self, request, monkeypatch):
self.request = request
self.monkeypatch = monkeypatch(request)
self.site = mock_site(request)
self.monkeypatch.setattr(ol_infobase, "init_plugin", lambda: None)
self._load_plugins(request)
self._mock_sendmail(request)
self.setup_config()
def browser(self):
return OLBrowser(delegate.app)
def setup_config(self):
config.from_address = "Open Library <[email protected]>"
def _load_plugins(self, request):
def create_site():
web.ctx.conn = MockConnection()
if web.ctx.get('env'):
auth_token = web.cookies().get(config.login_cookie_name)
web.ctx.conn.set_auth_token(auth_token)
return self.site
self.monkeypatch.setattr(delegate, "create_site", create_site)
load_plugins()
def _mock_sendmail(self, request):
self.sentmail = None
def sendmail(from_address, to_address, subject, message, headers=None, **kw):
headers = headers or {}
self.sentmail = EMail(
kw,
from_address=from_address,
to_address=to_address,
subject=subject,
message=message,
headers=headers,
)
self.monkeypatch.setattr(web, "sendmail", sendmail)
| 2,716 | Python | .py | 71 | 30.366197 | 85 | 0.642584 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
307 | mock_memcache.py | internetarchive_openlibrary/openlibrary/mocks/mock_memcache.py | """Library to mock memcache functionality.
"""
import memcache
import pytest
class Client:
"""Mock memcache client."""
def __init__(self, servers=None):
servers = servers or []
self.servers = servers
self.cache = {}
def set(self, key, value):
self.cache[key] = value
def get(self, key):
return self.cache.get(key)
def add(self, key, value):
if key not in self.cache:
self.cache[key] = value
return True
else:
return False
def delete(self, key):
self.cache.pop(key, None)
@pytest.fixture
def mock_memcache(request, monkeypatch):
"""This patches all the existing memcache connections to use mock memcache instance."""
m = monkeypatch
request.addfinalizer(m.undo)
mock_memcache = Client()
def proxy(name):
method = getattr(mock_memcache, name)
def f(self, *a, **kw):
return method(*a, **kw)
return f
m.setattr(memcache.Client, "get", proxy("get"))
m.setattr(memcache.Client, "set", proxy("set"))
m.setattr(memcache.Client, "add", proxy("add"))
return mock_memcache
| 1,174 | Python | .py | 37 | 25 | 91 | 0.621766 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
308 | mock_ia.py | internetarchive_openlibrary/openlibrary/mocks/mock_ia.py | """Mock of openlibrary.core.ia module.
"""
import pytest
from openlibrary.core import ia
@pytest.fixture
def mock_ia(request, monkeypatch):
"""pytest funcarg to mock openlibrary.core.ia module.
from openlibrary.core import ia
def test_ia(mock_ia):
assert ia.get_metadata("foo") == {}
mock_ia.set_metadata("foo", {"collection": ["a", "b"]})
assert ia.get_metadata("foo") == {"collection": ["a", "b"]}
"""
metadata = {}
class IA:
def set_metadata(self, itemid, meta):
metadata[itemid] = meta
def get_metadata(self, itemid):
return metadata.get(itemid, {})
mock_ia = IA()
    monkeypatch.setattr(ia, 'get_metadata', mock_ia.get_metadata)
return mock_ia
| 751 | Python | .py | 22 | 28.136364 | 67 | 0.628651 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
309 | test_mock_memcache.py | internetarchive_openlibrary/openlibrary/mocks/tests/test_mock_memcache.py | from .. import mock_memcache
import memcache
class Test_mock_memcache:
def test_set(self):
m = mock_memcache.Client([])
m.set("a", 1)
assert m.get("a") == 1
m.set("a", "foo")
assert m.get("a") == "foo"
m.set("a", ["foo", "bar"])
assert m.get("a") == ["foo", "bar"]
def test_add(self):
m = mock_memcache.Client([])
assert m.add("a", 1) is True
assert m.get("a") == 1
assert m.add("a", 2) is False
mc = memcache.Client(servers=[])
def test_mock_memcache_func_arg(mock_memcache):
mc.set("a", 1)
assert mc.get("a") == 1
| 632 | Python | .py | 20 | 24.8 | 47 | 0.535 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
310 | test_mock_infobase.py | internetarchive_openlibrary/openlibrary/mocks/tests/test_mock_infobase.py | import datetime
import web
class TestMockSite:
def test_new_key(self, mock_site):
ekey = mock_site.new_key('/type/edition')
assert ekey == '/books/OL1M'
ekey = mock_site.new_key('/type/edition')
assert ekey == '/books/OL2M'
wkey = mock_site.new_key('/type/work')
assert wkey == '/works/OL1W'
wkey = mock_site.new_key('/type/work')
assert wkey == '/works/OL2W'
akey = mock_site.new_key('/type/author')
assert akey == '/authors/OL1A'
akey = mock_site.new_key('/type/author')
assert akey == '/authors/OL2A'
def test_get(self, mock_site):
doc = {
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
"title": "The Test Book",
}
timestamp = datetime.datetime(2010, 1, 2, 3, 4, 5)
mock_site.save(doc, timestamp=timestamp)
assert mock_site.get("/books/OL1M").dict() == {
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
"title": "The Test Book",
"revision": 1,
"latest_revision": 1,
"last_modified": {"type": "/type/datetime", "value": "2010-01-02T03:04:05"},
"created": {"type": "/type/datetime", "value": "2010-01-02T03:04:05"},
}
assert mock_site.get("/books/OL1M").__class__.__name__ == "Edition"
def test_query(self, mock_site):
doc = {
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
"title": "The Test Book",
"subjects": ["love", "san_francisco"],
"isbn_10": ["0123456789"],
"isbn_13": ["0123456789abc"],
}
timestamp = datetime.datetime(2010, 1, 2, 3, 4, 5)
mock_site.reset()
mock_site.save(doc, timestamp=timestamp)
assert mock_site.things({"type": "/type/edition"}) == ["/books/OL1M"]
assert mock_site.things({"type": "/type/work"}) == []
assert mock_site.things({"type": "/type/edition", "subjects": "love"}) == [
"/books/OL1M"
]
assert mock_site.things({"type": "/type/edition", "subjects": "hate"}) == []
assert mock_site.things({"key~": "/books/*"}) == ["/books/OL1M"]
assert mock_site.things({"key~": "/works/*"}) == []
assert mock_site.things({"last_modified>": "2010-01-01"}) == ["/books/OL1M"]
assert mock_site.things({"last_modified>": "2010-01-03"}) == []
assert mock_site.things({"isbn_10": ["nomatch", "0123456789"]}) == [
"/books/OL1M"
]
assert mock_site.things({"isbn_10": "0123456789"}) == ["/books/OL1M"]
assert mock_site.things({"isbn_": "0123456789"}) == ["/books/OL1M"]
assert mock_site.things({"isbn_": ["0123456789abc"]}) == ["/books/OL1M"]
def test_work_authors(self, mock_site):
a2 = mock_site.quicksave("/authors/OL2A", "/type/author", name="A2")
work = mock_site.quicksave(
"/works/OL1W",
"/type/work",
title="Foo",
authors=[{"author": {"key": "/authors/OL2A"}}],
)
book = mock_site.quicksave(
"/books/OL1M", "/type/edition", title="Foo", works=[{"key": "/works/OL1W"}]
)
w = book.works[0]
assert w.dict() == work.dict()
a = w.authors[0].author
assert a.dict() == a2.dict()
assert a.key == '/authors/OL2A'
assert a.type.key == '/type/author'
assert a.name == 'A2'
assert [a.type.key for a in work.get_authors()] == ['/type/author']
assert [a.type.key for a in work.get_authors()] == ['/type/author']
# this is the query format used in openlibrary/openlibrary/catalog/works/find_works.py get_existing_works(akey)
# and https://github.com/internetarchive/openlibrary/blob/dabd7b8c0c42e3ac2700779da9f303a6344073f6/openlibrary/plugins/openlibrary/api.py#L228
author_works_q = {'type': '/type/work', 'authors': {'author': {'key': a.key}}}
assert mock_site.things(author_works_q) == ['/works/OL1W']
def test_ilike_compatibility(self, mock_site) -> None:
name = "B. Baggins"
mock_site.quicksave("/authors/OL1A", "/type/author", name="B. R. Mc Baggins")
mock_site.quicksave("/authors/OL2A", "/type/author", name="B* Baggins")
mock_site.quicksave("/authors/OL3A", "/type/author", name=".B. Baggins")
mock_site.quicksave("/authors/OL4A", "/type/author", name="b_ BaGGinS")
mock_site.quicksave("/authors/OL5A", "/type/author", name="b. BaGGinS")
query = {"type": "/type/author", "name~": name}
reply = web.ctx.site.things(query)
assert reply == ["/authors/OL5A"]
| 4,736 | Python | .py | 96 | 39.59375 | 150 | 0.553823 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
311 | formats.py | internetarchive_openlibrary/openlibrary/core/formats.py | """Library for loading and dumping data to json and yaml.
"""
import json
import yaml
from openlibrary.core.helpers import NothingEncoder
__all__ = ["load_yaml", "dump_yaml"]
def load_yaml(text):
return yaml.safe_load(text)
def dump_yaml(data):
return yaml.safe_dump(data, indent=4, allow_unicode=True, default_flow_style=False)
def load(text, format):
if format == "json":
return json.loads(text)
elif format == "yaml":
return load_yaml(text)
else:
raise Exception("unsupported format %r" % format)
def dump(data, format):
if format == "json":
return json.dumps(data, cls=NothingEncoder)
elif format == "yml":
return dump_yaml(data)
else:
raise Exception("unsupported format %r" % format)
| 781 | Python | .py | 24 | 27.75 | 87 | 0.679625 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
312 | db.py | internetarchive_openlibrary/openlibrary/core/db.py | """Interface to access the database of openlibrary.
"""
import web
import sqlite3
from datetime import datetime
from sqlite3 import IntegrityError
from psycopg2.errors import UniqueViolation
from infogami.utils import stats
@web.memoize
def _get_db():
return web.database(**web.config.db_parameters)
def get_db():
"""Returns an instance of webpy database object.
The database object is cached so that one object is used everywhere.
"""
return _get_db()
class CommonExtras:
"""
A set of methods used by bookshelves, booknotes, ratings, and observations tables
"""
@classmethod
def update_work_id(cls, current_work_id, new_work_id, _test=False):
"""This method allows all instances of a work_id (such as that of a
redirect) to be updated to a new work_id.
"""
oldb = get_db()
t = oldb.transaction()
rows_changed = 0
rows_deleted = 0
failed_deletes = 0
try:
rows_changed = oldb.update(
cls.TABLENAME,
where="work_id=$work_id",
work_id=new_work_id,
vars={"work_id": current_work_id},
)
except (UniqueViolation, IntegrityError):
(
rows_changed,
rows_deleted,
failed_deletes,
) = cls.update_work_ids_individually(
current_work_id, new_work_id, _test=_test
)
t.rollback() if _test else t.commit()
return {
'rows_changed': rows_changed,
'rows_deleted': rows_deleted,
'failed_deletes': failed_deletes,
}
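    # Illustrative sketch (not in the original module): when /works/OL123W is
    # merged into /works/OL456W, each table mixing in CommonExtras can migrate
    # its rows with, for example,
    #
    #     Bookshelves.update_work_id("123", "456")
    #     # -> {'rows_changed': ..., 'rows_deleted': ..., 'failed_deletes': ...}
    #
    # Work.resolve_redirect_chain() in core/models.py issues exactly this call
    # for reading-log entries, ratings, booknotes, and observations.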
@classmethod
def update_work_ids_individually(cls, current_work_id, new_work_id, _test=False):
oldb = get_db()
rows_changed = 0
rows_deleted = 0
failed_deletes = 0
# get records with old work_id
# `list` used to solve sqlite cursor test
rows = list(
oldb.select(
cls.TABLENAME,
where="work_id=$work_id",
vars={"work_id": current_work_id},
)
)
for row in rows:
where = " AND ".join(
[f"{k}='{v}'" for k, v in row.items() if k in cls.PRIMARY_KEY]
)
try:
# try to update the row to new_work_id
t_update = oldb.transaction()
oldb.query(
f"UPDATE {cls.TABLENAME} set work_id={new_work_id} where {where}"
)
rows_changed += 1
t_update.rollback() if _test else t_update.commit()
except (UniqueViolation, IntegrityError):
t_delete = oldb.transaction()
# otherwise, delete row with current_work_id if failed
oldb.query(f"DELETE FROM {cls.TABLENAME} WHERE {where}")
rows_deleted += 1
if _test or not cls.ALLOW_DELETE_ON_CONFLICT:
t_delete.rollback()
else:
t_delete.commit()
if not cls.ALLOW_DELETE_ON_CONFLICT:
failed_deletes += 1
rows_deleted -= 1
return rows_changed, rows_deleted, failed_deletes
@classmethod
def select_all_by_username(cls, username, _test=False):
oldb = get_db()
return list(
oldb.select(
cls.TABLENAME, where="username=$username", vars={"username": username}
)
)
@classmethod
def update_username(cls, username, new_username, _test=False):
oldb = get_db()
t = oldb.transaction()
try:
rows_changed = oldb.update(
cls.TABLENAME,
where="username=$username",
username=new_username,
vars={"username": username},
)
except (UniqueViolation, IntegrityError):
            # if any of the records would conflict with an existing
            # record associated with new_username
            rows_changed = 0  # assuming impossible for now, not a great assumption
t.rollback() if _test else t.commit()
return rows_changed
@classmethod
def delete_all_by_username(cls, username, _test=False):
oldb = get_db()
t = oldb.transaction()
try:
rows_deleted = oldb.delete(
cls.TABLENAME, where="username=$username", vars={"username": username}
)
except (UniqueViolation, IntegrityError):
            rows_deleted = 0  # keep the return value defined if the delete fails
t.rollback() if _test else t.commit()
return rows_deleted
def _proxy(method_name):
"""Create a new function that call method with given name on the
database object.
The new function also takes care of recording the stats about how
long it took to execute this query etc.
"""
def f(*args, **kwargs):
stats.begin("db", method=method_name, args=list(args), kwargs=kwargs)
m = getattr(get_db(), method_name)
result = m(*args, **kwargs)
stats.end()
return result
f.__name__ = method_name
f.__doc__ = f"Equivalent to get_db().{method_name}(*args, **kwargs)."
return f
def adapt_datetime_iso(date_time: datetime) -> str:
"""
Convert a Python datetime.datetime into a timezone-naive ISO 8601 date string.
>>> adapt_datetime_iso(datetime(2023, 4, 5, 6, 7, 8, 9))
'2023-04-05 06:07:08.000009'
"""
return date_time.isoformat(" ")
sqlite3.register_adapter(datetime, adapt_datetime_iso)
query = _proxy("query")
select = _proxy("select")
where = _proxy("where")
insert = _proxy("insert")
update = _proxy("update")
delete = _proxy("delete")
transaction = _proxy("transaction")
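# Illustrative usage sketch (not part of the original module): each proxy above
# forwards to the corresponding method on get_db() and records timing through
# infogami.utils.stats. The table and column names below are hypothetical.
def _example_select():
    rows = select(
        "bookshelves_books",
        where="username=$username",
        vars={"username": "example"},
        limit=10,
    )
    return list(rows)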
| 5,780 | Python | .py | 158 | 26.78481 | 86 | 0.572961 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
313 | models.py | internetarchive_openlibrary/openlibrary/core/models.py | """Models of various OL objects.
"""
from datetime import datetime, timedelta
import logging
from openlibrary.core.vendors import get_amazon_metadata
import web
import json
import requests
from typing import Any, TypedDict
from collections import defaultdict
from dataclasses import dataclass, field
from infogami.infobase import client
from openlibrary.core.helpers import parse_datetime, safesort, urlsafe
# TODO: fix this. openlibrary.core should not import plugins.
from openlibrary import accounts
from openlibrary.core import lending
from openlibrary.catalog import add_book
from openlibrary.core.booknotes import Booknotes
from openlibrary.core.bookshelves import Bookshelves
from openlibrary.core.follows import PubSub
from openlibrary.core.helpers import private_collection_in
from openlibrary.core.imports import ImportItem
from openlibrary.core.observations import Observations
from openlibrary.core.ratings import Ratings
from openlibrary.utils import extract_numeric_id_from_olid, dateutil
from openlibrary.utils.isbn import to_isbn_13, isbn_13_to_isbn_10, canonical
from openlibrary.core.wikidata import WikidataEntity, get_wikidata_entity
from . import cache, waitinglist
from urllib.parse import urlencode
from pydantic import ValidationError
from .ia import get_metadata
from .waitinglist import WaitingLoan
from ..accounts import OpenLibraryAccount
from ..plugins.upstream.utils import get_coverstore_url, get_coverstore_public_url
logger = logging.getLogger("openlibrary.core")
def _get_ol_base_url() -> str:
# Anand Oct 2013
# Looks like the default value when called from script
if "[unknown]" in web.ctx.home:
return "https://openlibrary.org"
else:
return web.ctx.home
class Image:
def __init__(self, site, category, id):
self._site = site
self.category = category
self.id = id
def info(self):
url = f'{get_coverstore_url()}/{self.category}/id/{self.id}.json'
if url.startswith("//"):
url = "http:" + url
try:
d = requests.get(url).json()
d['created'] = parse_datetime(d['created'])
if d['author'] == 'None':
d['author'] = None
d['author'] = d['author'] and self._site.get(d['author'])
return web.storage(d)
except OSError:
# coverstore is down
return None
def url(self, size="M"):
"""Get the public URL of the image."""
coverstore_url = get_coverstore_public_url()
return f"{coverstore_url}/{self.category}/id/{self.id}-{size.upper()}.jpg"
def __repr__(self):
return "<image: %s/%d>" % (self.category, self.id)
ThingKey = str
class Thing(client.Thing):
"""Base class for all OL models."""
key: ThingKey
@cache.method_memoize
def get_history_preview(self):
"""Returns history preview."""
history = self._get_history_preview()
history = web.storage(history)
history.revision = self.revision
history.lastest_revision = self.revision
history.created = self.created
def process(v):
"""Converts entries in version dict into objects."""
v = web.storage(v)
v.created = parse_datetime(v.created)
v.author = v.author and self._site.get(v.author, lazy=True)
return v
history.initial = [process(v) for v in history.initial]
history.recent = [process(v) for v in history.recent]
return history
@cache.memoize(engine="memcache", key=lambda self: ("d" + self.key, "h"))
def _get_history_preview(self):
h = {}
if self.revision < 5:
h['recent'] = self._get_versions(limit=5)
h['initial'] = h['recent'][-1:]
h['recent'] = h['recent'][:-1]
else:
h['initial'] = self._get_versions(limit=1, offset=self.revision - 1)
h['recent'] = self._get_versions(limit=4)
return h
def _get_versions(self, limit, offset=0):
q = {"key": self.key, "limit": limit, "offset": offset}
versions = self._site.versions(q)
for v in versions:
v.created = v.created.isoformat()
v.author = v.author and v.author.key
# XXX-Anand: hack to avoid too big data to be stored in memcache.
            # v.changes is not used and it contributes to memcache bloat in a big way.
v.changes = '[]'
return versions
def get_most_recent_change(self):
"""Returns the most recent change."""
preview = self.get_history_preview()
if preview.recent:
return preview.recent[0]
else:
return preview.initial[0]
def prefetch(self):
"""Prefetch all the anticipated data."""
preview = self.get_history_preview()
authors = {v.author.key for v in preview.initial + preview.recent if v.author}
# preload them
self._site.get_many(list(authors))
def _make_url(self, label: str | None, suffix: str, relative=True, **params):
"""Make url of the form $key/$label$suffix?$params."""
if label is not None:
u = self.key + "/" + urlsafe(label) + suffix
else:
u = self.key + suffix
if params:
u += '?' + urlencode(params)
if not relative:
u = _get_ol_base_url() + u
return u
def get_url(self, suffix="", **params) -> str:
"""Constructs a URL for this page with given suffix and query params.
The suffix is added to the URL of the page and query params are appended after adding "?".
"""
return self._make_url(label=self.get_url_suffix(), suffix=suffix, **params)
def get_url_suffix(self) -> str | None:
"""Returns the additional suffix that is added to the key to get the URL of the page.
Models of Edition, Work etc. should extend this to return the suffix.
        This is used to construct the URL of the page. By default, the URL is the
key of the page. If this method returns None, nothing is added to the
key. If this method returns a string, it is sanitized and added to key
after adding a "/".
"""
return None
def _get_lists(self, limit=50, offset=0, sort=True):
# cache the default case
if limit == 50 and offset == 0:
keys = self._get_lists_cached()
else:
keys = self._get_lists_uncached(limit=limit, offset=offset)
lists = self._site.get_many(keys)
if sort:
lists = safesort(lists, reverse=True, key=lambda list: list.last_modified)
return lists
@cache.memoize(engine="memcache", key=lambda self: ("d" + self.key, "l"))
def _get_lists_cached(self):
return self._get_lists_uncached(limit=50, offset=0)
def _get_lists_uncached(self, limit, offset):
q = {
"type": "/type/list",
"seeds": {"key": self.key},
"limit": limit,
"offset": offset,
}
return self._site.things(q)
def _get_d(self):
"""Returns the data that goes into memcache as d/$self.key.
Used to measure the memcache usage.
"""
return {
"h": self._get_history_preview(),
"l": self._get_lists_cached(),
}
class ThingReferenceDict(TypedDict):
key: ThingKey
class Edition(Thing):
"""Class to represent /type/edition objects in OL."""
table_of_contents: list[dict] | list[str] | list[str | dict] | None
"""
Should be a list of dict; the other types are legacy
"""
def url(self, suffix="", **params):
return self.get_url(suffix, **params)
def get_url_suffix(self):
return self.title or "untitled"
def __repr__(self):
return "<Edition: %s>" % repr(self.title)
__str__ = __repr__
def full_title(self):
# retained for backward-compatibility. Is anybody using this really?
return self.title
def get_publish_year(self):
if self.publish_date:
m = web.re_compile(r"(\d\d\d\d)").search(self.publish_date)
return m and int(m.group(1))
def get_lists(self, limit=50, offset=0, sort=True):
return self._get_lists(limit=limit, offset=offset, sort=sort)
def get_ebook_info(self):
"""Returns the ebook info with the following fields.
* read_url - url to read the book
* borrow_url - url to borrow the book
* borrowed - True if the book is already borrowed
* daisy_url - url to access the daisy format of the book
        * daisy_only - a boolean indicating whether the book is available
          exclusively as DAISY
Sample return values:
{
"read_url": "http://www.archive.org/stream/foo00bar",
"daisy_url": "/books/OL1M/foo/daisy"
}
{
"daisy_url": "/books/OL1M/foo/daisy",
"borrow_url": "/books/OL1M/foo/borrow",
"borrowed": False
}
"""
d = {}
if self.ocaid:
d['has_ebook'] = True
d['daisy_url'] = self.url('/daisy')
d['daisy_only'] = True
collections = self.get_ia_collections()
borrowable = self.in_borrowable_collection()
if borrowable:
d['borrow_url'] = self.url("/borrow")
key = "ebooks" + self.key
doc = self._site.store.get(key) or {}
# caution, solr borrow status may be stale!
d['borrowed'] = doc.get("borrowed") == "true"
d['daisy_only'] = False
elif 'printdisabled' not in collections:
d['read_url'] = "https://archive.org/stream/%s" % self.ocaid
d['daisy_only'] = False
return d
def get_ia_collections(self):
return self.get_ia_meta_fields().get("collection", [])
def is_access_restricted(self):
collections = self.get_ia_collections()
return bool(collections) and (
'printdisabled' in collections
or 'lendinglibrary' in collections
or self.get_ia_meta_fields().get("access-restricted") is True
)
def is_in_private_collection(self):
"""Private collections are lendable books that should not be
linked/revealed from OL
"""
return private_collection_in(self.get_ia_collections())
def in_borrowable_collection(self):
collections = self.get_ia_collections()
return (
'lendinglibrary' in collections or 'inlibrary' in collections
) and not self.is_in_private_collection()
def get_waitinglist(self):
"""Returns list of records for all users currently waiting for this book."""
return waitinglist.get_waitinglist_for_book(self.key)
@property # type: ignore[misc]
def ia_metadata(self):
ocaid = self.get('ocaid')
return get_metadata(ocaid) if ocaid else {}
def get_waitinglist_size(self, ia=False):
"""Returns the number of people on waiting list to borrow this book."""
return waitinglist.get_waitinglist_size(self.key)
def get_loans(self):
from ..plugins.upstream import borrow
return borrow.get_edition_loans(self)
def get_ia_download_link(self, suffix):
"""Returns IA download link for given suffix.
The suffix is usually one of '.pdf', '.epub', '.mobi', '_djvu.txt'
"""
if self.ocaid:
metadata = self.get_ia_meta_fields()
# The _filenames field is set by ia.get_metadata function
filenames = metadata.get("_filenames")
if filenames:
filename = next((f for f in filenames if f.endswith(suffix)), None)
else:
# filenames is not in cache.
# This is required only until all the memcache entries expire
filename = self.ocaid + suffix
if filename is None and self.is_ia_scan():
# IA scans will have all the required suffixes.
# Sometimes they are generated on the fly.
filename = self.ocaid + suffix
if filename:
return f"https://archive.org/download/{self.ocaid}/{filename}"
@staticmethod
def get_isbn_or_asin(isbn_or_asin: str) -> tuple[str, str]:
"""
Return a tuple with an ISBN or an ASIN, accompanied by an empty string.
If the identifier is an ISBN, it appears in index 0.
If the identifier is an ASIN, it appears in index 1.
"""
isbn = canonical(isbn_or_asin)
asin = isbn_or_asin.upper() if isbn_or_asin.upper().startswith("B") else ""
return (isbn, asin)
@staticmethod
def is_valid_identifier(isbn: str, asin: str) -> bool:
"""Return `True` if there is a valid identifier."""
return len(isbn) in [10, 13] or len(asin) == 10
@staticmethod
def get_identifier_forms(isbn: str, asin: str) -> list[str]:
"""Make a list of ISBN 10, ISBN 13, and ASIN, insofar as each is available."""
isbn_13 = to_isbn_13(isbn)
isbn_10 = isbn_13_to_isbn_10(isbn_13) if isbn_13 else None
return [id_ for id_ in [isbn_10, isbn_13, asin] if id_]
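    # Illustrative values (not in the original module), mirroring
    # openlibrary/tests/core/test_models.py:
    #
    #     Edition.get_isbn_or_asin("b06xyhvxvj")     # -> ("", "B06XYHVXVJ")
    #     Edition.get_isbn_or_asin("9780747532699")  # -> ("9780747532699", "")
    #     Edition.is_valid_identifier(isbn="0", asin="")  # -> False
    #     Edition.get_identifier_forms(isbn="1111111111", asin="")
    #     # -> ["1111111111", "9781111111113"]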
@classmethod
def from_isbn(
cls, isbn_or_asin: str, high_priority: bool = False
) -> "Edition | None":
"""
        Attempts to fetch an edition by ISBN or ASIN; if no edition is found,
        check the import_item table for a match, then, as a last resort, attempt
        to import from Amazon.
:param bool high_priority: If `True`, (1) any AMZ import requests will block
until AMZ has fetched data, and (2) the AMZ request will go to
the front of the queue. If `False`, the import will simply be
queued up if the item is not in the AMZ cache, and the affiliate
server will return a promise.
:return: an open library edition for this ISBN or None.
"""
# Determine if we've got an ISBN or ASIN and if it's facially valid.
isbn, asin = cls.get_isbn_or_asin(isbn_or_asin)
if not cls.is_valid_identifier(isbn=isbn, asin=asin):
return None
# Create a list of ISBNs (or an ASIN) to match.
if not (book_ids := cls.get_identifier_forms(isbn=isbn, asin=asin)):
return None
# Attempt to fetch book from OL
for book_id in book_ids:
if book_id == asin:
query = {"type": "/type/edition", 'identifiers': {'amazon': asin}}
else:
query = {"type": "/type/edition", 'isbn_%s' % len(book_id): book_id}
if matches := web.ctx.site.things(query):
return web.ctx.site.get(matches[0])
# Attempt to fetch the book from the import_item table
if edition := ImportItem.import_first_staged(identifiers=book_ids):
return edition
# Finally, try to fetch the book data from Amazon + import.
# If `high_priority=True`, then the affiliate-server, which `get_amazon_metadata()`
# uses, will block + wait until the Product API responds and the result, if any,
# is staged in `import_item`.
try:
id_ = asin or book_ids[0]
id_type = "asin" if asin else "isbn"
get_amazon_metadata(id_=id_, id_type=id_type, high_priority=high_priority)
return ImportItem.import_first_staged(identifiers=book_ids)
except requests.exceptions.ConnectionError:
logger.exception("Affiliate Server unreachable")
except requests.exceptions.HTTPError:
logger.exception(f"Affiliate Server: id {id_} not found")
return None
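    # Minimal usage sketch (not in the original module; assumes web.ctx.site is
    # bound, as in the tests). Looks up an edition by ISBN-13, falling back to
    # staged imports and, with high_priority=True, to a blocking Amazon lookup:
    #
    #     edition = Edition.from_isbn("9780747532699", high_priority=True)
    #     if edition:
    #         print(edition.key)  # e.g. "/books/OL1M"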
def is_ia_scan(self):
metadata = self.get_ia_meta_fields()
# all IA scans will have scanningcenter field set
return bool(metadata.get("scanningcenter"))
def make_work_from_orphaned_edition(self):
"""
Create a dummy work from an orphaned_edition.
"""
return web.ctx.site.new(
'',
{
'key': '',
'type': {'key': '/type/work'},
'title': self.title,
'authors': [
{'type': {'key': '/type/author_role'}, 'author': {'key': a['key']}}
for a in self.get('authors', [])
],
'editions': [self],
'subjects': self.get('subjects', []),
},
)
class Work(Thing):
"""Class to represent /type/work objects in OL."""
def url(self, suffix="", **params):
return self.get_url(suffix, **params)
def get_url_suffix(self):
return self.title or "untitled"
def __repr__(self):
return "<Work: %s>" % repr(self.key)
__str__ = __repr__
@property # type: ignore[misc]
@cache.method_memoize
@cache.memoize(engine="memcache", key=lambda self: ("d" + self.key, "e"))
def edition_count(self):
return self._site._request("/count_editions_by_work", data={"key": self.key})
def get_lists(self, limit=50, offset=0, sort=True):
return self._get_lists(limit=limit, offset=offset, sort=sort)
def get_users_rating(self, username: str) -> int | None:
if not username:
return None
work_id = extract_numeric_id_from_olid(self.key)
rating = Ratings.get_users_rating_for_work(username, work_id)
return rating
def get_patrons_who_also_read(self):
key = self.key.split('/')[-1][2:-1]
return Bookshelves.patrons_who_also_read(key)
def get_users_read_status(self, username):
if not username:
return None
work_id = extract_numeric_id_from_olid(self.key)
status_id = Bookshelves.get_users_read_status_of_work(username, work_id)
return status_id
def get_users_notes(self, username, edition_olid=None):
if not username:
return None
work_id = extract_numeric_id_from_olid(self.key)
edition_id = extract_numeric_id_from_olid(edition_olid) if edition_olid else -1
return Booknotes.get_patron_booknote(username, work_id, edition_id=edition_id)
def has_book_note(self, username, edition_olid):
if not username:
return False
work_id = extract_numeric_id_from_olid(self.key)
edition_id = extract_numeric_id_from_olid(edition_olid)
return (
len(Booknotes.get_patron_booknote(username, work_id, edition_id=edition_id))
> 0
)
def get_users_observations(self, username):
if not username:
return None
work_id = extract_numeric_id_from_olid(self.key)
raw_observations = Observations.get_patron_observations(username, work_id)
formatted_observations = defaultdict(list)
for r in raw_observations:
kv_pair = Observations.get_key_value_pair(r['type'], r['value'])
formatted_observations[kv_pair.key].append(kv_pair.value)
return formatted_observations
def get_num_users_by_bookshelf(self):
if not self.key: # a dummy work
return {'want-to-read': 0, 'currently-reading': 0, 'already-read': 0}
work_id = extract_numeric_id_from_olid(self.key)
num_users_by_bookshelf = Bookshelves.get_num_users_by_bookshelf_by_work_id(
work_id
)
return {
'want-to-read': num_users_by_bookshelf.get(
Bookshelves.PRESET_BOOKSHELVES['Want to Read'], 0
),
'currently-reading': num_users_by_bookshelf.get(
Bookshelves.PRESET_BOOKSHELVES['Currently Reading'], 0
),
'already-read': num_users_by_bookshelf.get(
Bookshelves.PRESET_BOOKSHELVES['Already Read'], 0
),
}
def get_rating_stats(self):
if not self.key: # a dummy work
return {'avg_rating': 0, 'num_ratings': 0}
work_id = extract_numeric_id_from_olid(self.key)
rating_stats = Ratings.get_rating_stats(work_id)
if rating_stats and rating_stats['num_ratings'] > 0:
return {
'avg_rating': round(rating_stats['avg_rating'], 2),
'num_ratings': rating_stats['num_ratings'],
}
def _get_d(self):
"""Returns the data that goes into memcache as d/$self.key.
Used to measure the memcache usage.
"""
return {
"h": self._get_history_preview(),
"l": self._get_lists_cached(),
"e": self.edition_count,
}
def _make_subject_link(self, title, prefix=""):
slug = web.safestr(title.lower().replace(' ', '_').replace(',', ''))
key = f"/subjects/{prefix}{slug}"
return web.storage(key=key, title=title, slug=slug)
def get_subject_links(self, type="subject"):
"""Returns all the subjects as link objects.
Each link is a web.storage object with title and key fields.
The type should be one of subject, place, person or time.
"""
if type == 'subject':
return [self._make_subject_link(s) for s in self.get_subjects()]
elif type == 'place':
return [self._make_subject_link(s, "place:") for s in self.subject_places]
elif type == 'person':
return [self._make_subject_link(s, "person:") for s in self.subject_people]
elif type == 'time':
return [self._make_subject_link(s, "time:") for s in self.subject_times]
else:
return []
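    # Illustrative sketch (not part of the original source): how the link
    # objects returned above are typically consumed. Assumes a Work instance
    # `work` whose record carries a "subjects" field.
    #
    #     for link in work.get_subject_links(type="subject"):
    #         print(link.key, link.title)  # e.g. "/subjects/science_fiction", "Science Fiction"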
def get_ebook_info(self):
"""Returns the ebook info with the following fields.
* read_url - url to read the book
* borrow_url - url to borrow the book
* borrowed - True if the book is already borrowed
* daisy_url - url to access the daisy format of the book
Sample return values:
{
"read_url": "http://www.archive.org/stream/foo00bar",
"daisy_url": "/books/OL1M/foo/daisy"
}
{
"daisy_url": "/books/OL1M/foo/daisy",
"borrow_url": "/books/OL1M/foo/borrow",
"borrowed": False
}
"""
solrdata = web.storage(self._solr_data or {})
d = {}
if solrdata.get('has_fulltext') and solrdata.get('public_scan_b'):
d['read_url'] = f"https://archive.org/stream/{solrdata.ia[0]}"
d['has_ebook'] = True
elif solrdata.get('lending_edition_s'):
d['borrow_url'] = f"/books/{solrdata.lending_edition_s}/x/borrow"
d['has_ebook'] = True
if solrdata.get('ia'):
d['ia'] = solrdata.get('ia')
return d
@staticmethod
def get_redirect_chain(work_key: str) -> list:
resolved_key = None
redirect_chain = []
key = work_key
while not resolved_key:
thing = web.ctx.site.get(key)
redirect_chain.append(thing)
if thing.type.key == "/type/redirect":
key = thing.location
else:
resolved_key = thing.key
return redirect_chain
@classmethod
def resolve_redirect_chain(
cls, work_key: str, test: bool = False
) -> dict[str, Any]:
summary: dict[str, Any] = {
'key': work_key,
'redirect_chain': [],
'resolved_key': None,
'modified': False,
}
redirect_chain = cls.get_redirect_chain(work_key)
summary['redirect_chain'] = [
{"key": thing.key, "occurrences": {}, "updates": {}}
for thing in redirect_chain
]
summary['resolved_key'] = redirect_chain[-1].key
for r in summary['redirect_chain']:
olid = r['key'].split('/')[-1][2:-1] # 'OL1234x' --> '1234'
new_olid = summary['resolved_key'].split('/')[-1][2:-1]
# count reading log entries
r['occurrences']['readinglog'] = len(Bookshelves.get_works_shelves(olid))
r['occurrences']['ratings'] = len(Ratings.get_all_works_ratings(olid))
r['occurrences']['booknotes'] = len(Booknotes.get_booknotes_for_work(olid))
r['occurrences']['observations'] = len(
Observations.get_observations_for_work(olid)
)
if new_olid != olid:
# track updates
r['updates']['readinglog'] = Bookshelves.update_work_id(
olid, new_olid, _test=test
)
r['updates']['ratings'] = Ratings.update_work_id(
olid, new_olid, _test=test
)
r['updates']['booknotes'] = Booknotes.update_work_id(
olid, new_olid, _test=test
)
r['updates']['observations'] = Observations.update_work_id(
olid, new_olid, _test=test
)
summary['modified'] = summary['modified'] or any(
any(r['updates'][group].values())
for group in ['readinglog', 'ratings', 'booknotes', 'observations']
)
return summary
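    # Illustrative usage sketch (not part of the original source). The work key
    # below is hypothetical; `test=True` reports what would change without
    # updating the reading-log, ratings, booknotes, or observations tables.
    #
    #     summary = Work.resolve_redirect_chain('/works/OL123W', test=True)
    #     print(summary['resolved_key'], summary['modified'])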
@classmethod
def get_redirects(cls, day, batch_size=1000, batch=0):
tomorrow = day + timedelta(days=1)
work_redirect_ids = web.ctx.site.things(
{
"type": "/type/redirect",
"key~": "/works/*",
"limit": batch_size,
"offset": (batch * batch_size),
"sort": "-last_modified",
"last_modified>": day.strftime('%Y-%m-%d'),
"last_modified<": tomorrow.strftime('%Y-%m-%d'),
}
)
more = len(work_redirect_ids) == batch_size
logger.info(
f"[update-redirects] batch: {batch}, size {batch_size}, offset {batch * batch_size}, more {more}, len {len(work_redirect_ids)}"
)
return work_redirect_ids, more
@classmethod
def resolve_redirects_bulk(
cls,
days: int = 1,
batch_size: int = 1000,
grace_period_days: int = 7,
cutoff_date: datetime = datetime(year=2017, month=1, day=1),
test: bool = False,
):
"""
batch_size - how many records to fetch per batch
start_offset - what offset to start from
grace_period_days - ignore redirects created within period of days
cutoff_date - ignore redirects created before this date
test - don't resolve stale redirects, just identify them
"""
fixed = 0
total = 0
current_date = datetime.today() - timedelta(days=grace_period_days)
cutoff_date = (current_date - timedelta(days)) if days else cutoff_date
while current_date > cutoff_date:
has_more = True
batch = 0
while has_more:
logger.info(
f"[update-redirects] {current_date}, batch {batch+1}: #{total}",
)
work_redirect_ids, has_more = cls.get_redirects(
current_date, batch_size=batch_size, batch=batch
)
work_redirect_batch = web.ctx.site.get_many(work_redirect_ids)
for work in work_redirect_batch:
total += 1
chain = Work.resolve_redirect_chain(work.key, test=test)
if chain['modified']:
fixed += 1
logger.info(
f"[update-redirects] {current_date}, Update: #{total} fix#{fixed} batch#{batch} <{work.key}> {chain}"
)
else:
logger.info(
f"[update-redirects] No Update Required: #{total} <{work.key}>"
)
batch += 1
current_date = current_date - timedelta(days=1)
logger.info(f"[update-redirects] Done, processed {total}, fixed {fixed}")
class Author(Thing):
"""Class to represent /type/author objects in OL."""
def url(self, suffix="", **params):
return self.get_url(suffix, **params)
def get_url_suffix(self):
return self.name or "unnamed"
def wikidata(
self, bust_cache: bool = False, fetch_missing: bool = False
) -> WikidataEntity | None:
if wd_id := self.remote_ids.get("wikidata"):
return get_wikidata_entity(
qid=wd_id, bust_cache=bust_cache, fetch_missing=fetch_missing
)
return None
def __repr__(self):
return "<Author: %s>" % repr(self.key)
__str__ = __repr__
def foaf_agent(self):
"""
Friend of a friend ontology Agent type. http://xmlns.com/foaf/spec/#term_Agent
https://en.wikipedia.org/wiki/FOAF_(ontology)
"""
if self.get('entity_type') == 'org':
return 'Organization'
elif self.get('birth_date') or self.get('death_date'):
return 'Person'
return 'Agent'
def get_edition_count(self):
return self._site._request('/count_editions_by_author', data={'key': self.key})
edition_count = property(get_edition_count)
def get_lists(self, limit=50, offset=0, sort=True):
return self._get_lists(limit=limit, offset=offset, sort=sort)
class User(Thing):
DEFAULT_PREFERENCES = {
'updates': 'no',
'public_readlog': 'no',
# New users are now public by default for new patrons
# As of 2020-05, OpenLibraryAccount.create will
# explicitly set public_readlog: 'yes'.
# Legacy accounts w/ no public_readlog key
# will continue to default to 'no'
}
def get_status(self):
account = self.get_account() or {}
return account.get("status")
def get_usergroups(self):
keys = self._site.things({'type': '/type/usergroup', 'members': self.key})
return self._site.get_many(keys)
usergroups = property(get_usergroups)
def get_account(self):
username = self.get_username()
return accounts.find(username=username)
def get_email(self):
account = self.get_account() or {}
return account.get("email")
def get_username(self):
return self.key.split("/")[-1]
def preferences(self):
key = "%s/preferences" % self.key
prefs = web.ctx.site.get(key)
return (prefs and prefs.dict().get('notifications')) or self.DEFAULT_PREFERENCES
def save_preferences(self, new_prefs, msg='updating user preferences'):
key = '%s/preferences' % self.key
old_prefs = web.ctx.site.get(key)
prefs = (old_prefs and old_prefs.dict()) or {
'key': key,
'type': {'key': '/type/object'},
}
if 'notifications' not in prefs:
prefs['notifications'] = self.DEFAULT_PREFERENCES
prefs['notifications'].update(new_prefs)
web.ctx.site.save(prefs, msg)
def is_usergroup_member(self, usergroup):
if not usergroup.startswith('/usergroup/'):
usergroup = '/usergroup/%s' % usergroup
return usergroup in [g.key for g in self.usergroups]
def is_subscribed_user(self, username):
my_username = self.get_username()
return (
PubSub.is_subscribed(my_username, username)
if my_username != username
else -1
)
def has_cookie(self, name):
return web.cookies().get(name, False)
def is_printdisabled(self):
return web.cookies().get('pd')
def is_admin(self):
return self.is_usergroup_member('/usergroup/admin')
def is_librarian(self):
return self.is_usergroup_member('/usergroup/librarians')
def is_super_librarian(self):
return self.is_usergroup_member('/usergroup/super-librarians')
def is_beta_tester(self):
return self.is_usergroup_member('/usergroup/beta-testers')
def is_read_only(self):
return self.is_usergroup_member('/usergroup/read-only')
def get_lists(self, seed=None, limit=100, offset=0, sort=True):
"""Returns all the lists of this user.
When seed is specified, this returns all the lists which contain the
given seed.
seed could be an object or a string like "subject:cheese".
"""
# cache the default case
if seed is None and limit == 100 and offset == 0:
keys = self._get_lists_cached()
else:
keys = self._get_lists_uncached(seed=seed, limit=limit, offset=offset)
lists = self._site.get_many(keys)
if sort:
lists = safesort(lists, reverse=True, key=lambda list: list.last_modified)
return lists
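    # Illustrative sketch (not part of the original source): fetching a patron's
    # lists that contain a given seed. The user key and seed are hypothetical.
    #
    #     user = web.ctx.site.get('/people/example')
    #     cheese_lists = user.get_lists(seed="subject:cheese", limit=10)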
@classmethod
# @cache.memoize(engine="memcache", key="user-avatar")
def get_avatar_url(cls, username):
username = username.split('/people/')[-1]
user = web.ctx.site.get('/people/%s' % username)
itemname = user.get_account().get('internetarchive_itemname')
return f'https://archive.org/services/img/{itemname}'
@cache.memoize(engine="memcache", key=lambda self: ("d" + self.key, "l"))
def _get_lists_cached(self):
return self._get_lists_uncached(limit=100, offset=0)
def _get_lists_uncached(self, seed=None, limit=100, offset=0):
q = {
"type": "/type/list",
"key~": self.key + "/lists/*",
"limit": limit,
"offset": offset,
}
if seed:
if isinstance(seed, Thing):
seed = {"key": seed.key}
q['seeds'] = seed
return self._site.things(q)
    def new_list(self, name, description, seeds, tags=None):
        """Creates a new list object with given name, description, and seeds.
seeds must be a list containing references to author, edition, work or subject strings.
Sample seeds:
{"key": "/authors/OL1A"}
{"key": "/books/OL1M"}
{"key": "/works/OL1W"}
"subject:love"
"place:san_francisco"
"time:1947"
"person:gerge"
The caller must call list._save(...) to save the list.
"""
id = self._site.seq.next_value("list")
# since the owner is part of the URL, it might be difficult to handle
# change of ownerships. Need to think of a way to handle redirects.
key = f"{self.key}/lists/OL{id}L"
doc = {
"key": key,
"type": {"key": "/type/list"},
"name": name,
"description": description,
"seeds": seeds,
"tags": tags,
}
return self._site.new(key, doc)
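    # Illustrative sketch (not part of the original source): the list returned
    # by new_list() is not persisted until _save() is called, as the docstring
    # notes. The name, description, and seeds below are hypothetical.
    #
    #     lst = user.new_list(
    #         name="Favourites",
    #         description="Books I keep coming back to",
    #         seeds=[{"key": "/works/OL1W"}, "subject:love"],
    #     )
    #     lst._save(...)  # persist; exact arguments are up to the caller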
def is_waiting_for(self, book):
"""Returns True if this user is waiting to loan given book."""
return waitinglist.is_user_waiting_for(self.key, book.key)
def get_waitinglist(self):
"""Returns list of records for all the books the user is currently waiting for."""
return waitinglist.get_waitinglist_for_user(self.key)
def has_borrowed(self, book):
"""Returns True if this user has borrowed given book."""
loan = self.get_loan_for(book.ocaid)
return loan is not None
def get_loan_for(self, ocaid, use_cache=False):
"""Returns the loan object for given ocaid.
Returns None if this user hasn't borrowed the given book.
"""
from ..plugins.upstream import borrow
loans = (
lending.get_cached_loans_of_user(self.key)
if use_cache
else lending.get_loans_of_user(self.key)
)
for loan in loans:
if ocaid == loan['ocaid']:
return loan
def get_waiting_loan_for(self, ocaid):
"""
:param str or None ocaid: edition ocaid
:rtype: dict (e.g. {position: number})
"""
return ocaid and WaitingLoan.find(self.key, ocaid)
def get_user_waiting_loans(self, ocaid=None, use_cache=False):
"""
Similar to get_waiting_loan_for, but fetches and caches all of user's waiting loans
:param str or None ocaid: edition ocaid
:rtype: dict (e.g. {position: number})
"""
all_user_waiting_loans = (
lending.get_cached_user_waiting_loans
if use_cache
else lending.get_user_waiting_loans
)(self.key)
if ocaid:
return next(
(
loan
for loan in all_user_waiting_loans
if loan['identifier'] == ocaid
),
None,
)
return all_user_waiting_loans
def __repr__(self):
return "<User: %s>" % repr(self.key)
__str__ = __repr__
def render_link(self, cls=None):
"""
Generate an HTML link of this user
:param str cls: HTML class to add to the link
:rtype: str
"""
extra_attrs = ''
if cls:
extra_attrs += 'class="%s" ' % cls
# Why nofollow?
return f'<a rel="nofollow" href="{self.key}" {extra_attrs}>{web.net.htmlquote(self.displayname)}</a>'
def set_data(self, data):
self._data = data
self._save()
class UserGroup(Thing):
@classmethod
def from_key(cls, key: str):
"""
:param str key: e.g. /usergroup/foo
:rtype: UserGroup | None
"""
if not key.startswith('/usergroup/'):
key = "/usergroup/%s" % key
return web.ctx.site.get(key)
def add_user(self, userkey: str) -> None:
"""Administrative utility (designed to be used in conjunction with
accounts.RunAs) to add a patron to a usergroup
:param str userkey: e.g. /people/mekBot
"""
if not web.ctx.site.get(userkey):
raise KeyError("Invalid userkey")
# Make sure userkey not already in group members:
members = self.get('members', [])
if not any(userkey == member['key'] for member in members):
members.append({'key': userkey})
self.members = members
web.ctx.site.save(self.dict(), f"Adding {userkey} to {self.key}")
def remove_user(self, userkey):
if not web.ctx.site.get(userkey):
raise KeyError("Invalid userkey")
members = self.get('members', [])
# find index of userkey and remove user
for i, m in enumerate(members):
if m.get('key', None) == userkey:
members.pop(i)
break
self.members = members
web.ctx.site.save(self.dict(), f"Removing {userkey} from {self.key}")
class Subject(web.storage):
key: str
def get_lists(self, limit=1000, offset=0, sort=True):
q = {
"type": "/type/list",
"seeds": self.get_seed(),
"limit": limit,
"offset": offset,
}
keys = web.ctx.site.things(q)
lists = web.ctx.site.get_many(keys)
if sort:
lists = safesort(lists, reverse=True, key=lambda list: list.last_modified)
return lists
def get_seed(self):
seed = self.key.split("/")[-1]
if seed.split(":")[0] not in ["place", "person", "time"]:
seed = "subject:" + seed
return seed
def url(self, suffix="", relative=True, **params):
u = self.key + suffix
if params:
u += '?' + urlencode(params)
if not relative:
u = _get_ol_base_url() + u
return u
# get_url is a common method available in all Models.
# Calling it `get_url` instead of `url` because there are some types that
# have a property with name `url`.
get_url = url
def get_default_cover(self):
for w in self.works:
cover_id = w.get("cover_id")
if cover_id:
return Image(web.ctx.site, "b", cover_id)
class Tag(Thing):
"""Class to represent /type/tag objects in OL."""
def url(self, suffix="", **params):
return self.get_url(suffix, **params)
def get_url_suffix(self):
return self.name or "unnamed"
@classmethod
def find(cls, tag_name, tag_type):
"""Returns a Tag key for a given tag name and tag type."""
q = {'type': '/type/tag', 'name': tag_name, 'tag_type': tag_type}
match = list(web.ctx.site.things(q))
return match[0] if match else None
@classmethod
def create(
cls,
tag_name,
tag_description,
tag_type,
tag_plugins,
ip='127.0.0.1',
comment='New Tag',
):
"""Creates a new Tag object."""
current_user = web.ctx.site.get_user()
patron = current_user.get_username() if current_user else 'ImportBot'
key = web.ctx.site.new_key('/type/tag')
from openlibrary.accounts import RunAs
with RunAs(patron):
web.ctx.ip = web.ctx.ip or ip
web.ctx.site.save(
{
'key': key,
'name': tag_name,
'tag_description': tag_description,
'tag_type': tag_type,
'tag_plugins': json.loads(tag_plugins or "[]"),
'type': {"key": '/type/tag'},
},
comment=comment,
)
return key
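    # Illustrative find-or-create sketch (not part of the original source); the
    # tag name, description, and type below are hypothetical.
    #
    #     key = Tag.find("dystopian", "subject") or Tag.create(
    #         "dystopian", "Dystopian fiction", "subject", "[]"
    #     )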
@dataclass
class LoggedBooksData:
"""
LoggedBooksData contains data used for displaying a page of the reading log, such
as the page size for pagination, the docs returned from the reading log DB for
a particular shelf, query, sorting, etc.
param page_size specifies how many results per page should display in the
reading log.
param shelf_totals holds the counts for books on the three default shelves.
param docs holds the documents returned from Solr.
param q holds an optional query string (len >= 3, per my_books_view in mybooks.py)
for filtering the reading log.
param ratings holds a list of ratings such that the index of each rating corresponds
to the index of each doc/work in self.docs.
"""
username: str
page_size: int
total_results: int
shelf_totals: dict[int, int]
docs: list[web.storage]
q: str = ""
ratings: list[int] = field(default_factory=list)
def load_ratings(self) -> None:
"""
Load the ratings into self.ratings from the storage docs, such that the index
of each returned rating corresponds to the index of each web storage doc. This
allows them to be zipped together if needed. E.g. in a template.
The intent of this is so that there is no need to query ratings from the
template, as the docs and ratings are together when needed.
"""
for doc in self.docs:
work_id = extract_numeric_id_from_olid(doc.key)
rating = Ratings.get_users_rating_for_work(self.username, work_id)
self.ratings.append(rating or 0)
def register_models():
client.register_thing_class(None, Thing) # default
client.register_thing_class('/type/edition', Edition)
client.register_thing_class('/type/work', Work)
client.register_thing_class('/type/author', Author)
client.register_thing_class('/type/user', User)
client.register_thing_class('/type/usergroup', UserGroup)
client.register_thing_class('/type/tag', Tag)
def register_types():
"""Register default types for various path patterns used in OL."""
from infogami.utils import types
types.register_type('^/authors/[^/]*$', '/type/author')
types.register_type('^/books/[^/]*$', '/type/edition')
types.register_type('^/works/[^/]*$', '/type/work')
types.register_type('^/languages/[^/]*$', '/type/language')
types.register_type('^/tags/[^/]*$', '/type/tag')
types.register_type('^/usergroup/[^/]*$', '/type/usergroup')
types.register_type('^/permission/[^/]*$', '/type/permission')
types.register_type('^/(css|js)/[^/]*$', '/type/rawtext')
| 44,419 | Python | .py | 1,032 | 33.119186 | 139 | 0.587445 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
314 | bookshelves_events.py | internetarchive_openlibrary/openlibrary/core/bookshelves_events.py | from datetime import datetime
from enum import IntEnum
from . import db
class BookshelfEvent(IntEnum):
START = 1
UPDATE = 2
FINISH = 3
@classmethod
def has_value(cls, value: int) -> bool:
return value in (item.value for item in BookshelfEvent.__members__.values())
class BookshelvesEvents(db.CommonExtras):
TABLENAME = 'bookshelves_events'
NULL_EDITION_ID = -1
# Create methods:
@classmethod
def create_event(
cls,
username,
work_id,
edition_id,
event_date,
event_type=BookshelfEvent.START.value,
):
oldb = db.get_db()
return oldb.insert(
cls.TABLENAME,
username=username,
work_id=work_id,
edition_id=edition_id or cls.NULL_EDITION_ID,
event_type=event_type,
event_date=event_date,
)
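    # Illustrative sketch (not part of the original source): recording a
    # "started reading" check-in for a hypothetical patron and work.
    #
    #     BookshelvesEvents.create_event(
    #         username='example_patron',
    #         work_id=123,
    #         edition_id=None,              # stored as NULL_EDITION_ID (-1)
    #         event_date='2024-01-15',
    #         event_type=BookshelfEvent.START.value,
    #     )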
# Read methods:
@classmethod
def select_by_id(cls, pid):
oldb = db.get_db()
return list(oldb.select(cls.TABLENAME, where='id=$id', vars={'id': pid}))
@classmethod
def get_latest_event_date(cls, username, work_id, event_type):
oldb = db.get_db()
data = {
'username': username,
'work_id': work_id,
'event_type': event_type,
}
query = (
f'SELECT id, event_date FROM {cls.TABLENAME}'
' WHERE username=$username AND work_id=$work_id'
' AND event_type=$event_type'
' ORDER BY event_date DESC LIMIT 1'
)
results = list(oldb.query(query, vars=data))
return results[0] if results else None
@classmethod
def select_by_book_user_and_type(cls, username, work_id, edition_id, event_type):
oldb = db.get_db()
data = {
'username': username,
'work_id': work_id,
'edition_id': edition_id,
'event_type': event_type,
}
where = """
username=$username AND
work_id=$work_id AND
edition_id=$edition_id AND
event_type=$event_type
"""
return list(oldb.select(cls.TABLENAME, where=where, vars=data))
@classmethod
def select_by_user_type_and_year(cls, username, event_type, year):
oldb = db.get_db()
data = {
'username': username,
'event_type': event_type,
'event_date': f'{year}%',
}
where = """
username=$username AND
event_type=$event_type AND
event_date LIKE $event_date
"""
return list(oldb.select(cls.TABLENAME, where=where, vars=data))
@classmethod
def select_distinct_by_user_type_and_year(cls, username, event_type, year):
"""Returns a list of the most recent check-in events, with no repeating
work IDs. Useful for calculating one's yearly reading goal progress.
"""
oldb = db.get_db()
data = {
'username': username,
'event_type': event_type,
'event_date': f'{year}%',
}
query = (
f"select distinct on (work_id) work_id, * from {cls.TABLENAME} "
"where username=$username and event_type=$event_type and "
"event_date LIKE $event_date "
"order by work_id, updated desc"
)
return list(oldb.query(query, vars=data))
# Update methods:
@classmethod
def update_event(cls, pid, edition_id=None, event_date=None, data=None):
oldb = db.get_db()
updates = {}
if event_date:
updates['event_date'] = event_date
if data:
updates['data'] = data
if edition_id:
updates['edition_id'] = edition_id
if updates:
return oldb.update(
cls.TABLENAME,
where='id=$id',
vars={'id': pid},
updated=datetime.now(),
**updates,
)
return 0
@classmethod
def update_event_date(cls, pid, event_date):
oldb = db.get_db()
where_clause = 'id=$id'
where_vars = {'id': pid}
update_time = datetime.now()
return oldb.update(
cls.TABLENAME,
where=where_clause,
vars=where_vars,
updated=update_time,
event_date=event_date,
)
    @classmethod
    def update_event_data(cls, pid, data):
oldb = db.get_db()
where_clause = 'id=$id'
where_vars = {'id': pid}
update_time = datetime.now()
return oldb.update(
cls.TABLENAME,
where=where_clause,
vars=where_vars,
updated=update_time,
data=data,
)
# Delete methods:
@classmethod
def delete_by_id(cls, pid):
oldb = db.get_db()
where_clause = 'id=$id'
where_vars = {'id': pid}
return oldb.delete(cls.TABLENAME, where=where_clause, vars=where_vars)
@classmethod
def delete_by_username(cls, username):
oldb = db.get_db()
where_clause = 'username=$username'
where_vars = {'username': username}
return oldb.delete(cls.TABLENAME, where=where_clause, vars=where_vars)
@classmethod
def delete_by_username_and_work(cls, username, work_id):
oldb = db.get_db()
where_clause = 'username=$username AND work_id=$work_id'
data = {
'username': username,
'work_id': work_id,
}
return oldb.delete(cls.TABLENAME, where=where_clause, vars=data)
| 5,616 | Python | .py | 168 | 23.803571 | 85 | 0.554836 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
315 | sendmail.py | internetarchive_openlibrary/openlibrary/core/sendmail.py | from infogami import config
from infogami.utils.view import render_template
import web
def sendmail_with_template(template, to, cc=None, frm=None, **kwargs):
msg = render_template(template, **kwargs)
_sendmail(to, msg, cc=cc, frm=frm)
def _sendmail(to, msg, cc=None, frm=None):
cc = cc or []
frm = frm or config.from_address
if config.get('dummy_sendmail'):
message = (
f'To: {to}\n'
f'From:{config.from_address}\n'
f'Subject:{msg.subject}\n'
f'\n{web.safestr(msg)}'
)
print("sending email", message, file=web.debug)
else:
web.sendmail(
frm, to, subject=msg.subject.strip(), message=web.safestr(msg), cc=cc
)
| 738 | Python | .py | 21 | 28 | 81 | 0.61236 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
316 | yearly_reading_goals.py | internetarchive_openlibrary/openlibrary/core/yearly_reading_goals.py | from datetime import date, datetime
from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO
from . import db
class YearlyReadingGoals:
TABLENAME = 'yearly_reading_goals'
@classmethod
def summary(cls):
return {
'total_yearly_reading_goals': {
'total': YearlyReadingGoals.total_yearly_reading_goals(),
'month': YearlyReadingGoals.total_yearly_reading_goals(
since=DATE_ONE_MONTH_AGO
),
'week': YearlyReadingGoals.total_yearly_reading_goals(
since=DATE_ONE_WEEK_AGO
),
},
}
# Create methods:
@classmethod
def create(cls, username: str, year: int, target: int):
oldb = db.get_db()
return oldb.insert(cls.TABLENAME, username=username, year=year, target=target)
# Read methods:
@classmethod
def select_by_username(cls, username: str, order='year ASC'):
oldb = db.get_db()
where = 'username=$username'
data = {
'username': username,
}
return list(oldb.select(cls.TABLENAME, where=where, order=order, vars=data))
@classmethod
def select_by_username_and_year(cls, username: str, year: int):
oldb = db.get_db()
where = 'username=$username AND year=$year'
data = {
'username': username,
'year': year,
}
return list(oldb.select(cls.TABLENAME, where=where, vars=data))
@classmethod
def has_reached_goal(cls, username: str, year: int) -> bool:
oldb = db.get_db()
where = 'username=$username AND year=$year'
data = {
'username': username,
'year': year,
}
results = list(oldb.select(cls.TABLENAME, where=where, vars=data))
if not results:
return False
else:
return results[0]['current'] >= results[0]['target']
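    # Illustrative sketch (not part of the original source): setting a goal,
    # recording progress, and checking it, for a hypothetical patron.
    #
    #     YearlyReadingGoals.create('example_patron', 2024, target=12)
    #     YearlyReadingGoals.update_current_count('example_patron', 2024, 12)
    #     YearlyReadingGoals.has_reached_goal('example_patron', 2024)  # True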
@classmethod
def total_yearly_reading_goals(cls, since: date | None = None) -> int:
"""Returns the number reading goals that were set. `since` may be used
number reading goals updated. `since` may be used
to limit the result to those reading goals updated since a specific
date. Any python datetime.date type should work.
:param since: returns all reading goals after date
"""
oldb = db.get_db()
query = f"SELECT count(*) from {cls.TABLENAME}"
if since:
query += " WHERE updated >= $since"
results = oldb.query(query, vars={'since': since})
return results[0]['count'] if results else 0
# Update methods:
@classmethod
def update_current_count(cls, username: str, year: int, current_count: int):
oldb = db.get_db()
where = 'username=$username AND year=$year'
data = {
'username': username,
'year': year,
}
return oldb.update(
cls.TABLENAME,
where=where,
vars=data,
current=current_count,
updated=datetime.now(),
)
@classmethod
def update_target(cls, username: str, year: int, new_target: int):
oldb = db.get_db()
where = 'username=$username AND year=$year'
data = {
'username': username,
'year': year,
}
return oldb.update(
cls.TABLENAME,
where=where,
vars=data,
target=new_target,
updated=datetime.now(),
)
# Delete methods:
@classmethod
def delete_by_username(cls, username):
oldb = db.get_db()
where = 'username=$username'
data = {'username': username}
return oldb.delete(cls.TABLENAME, where=where, vars=data)
@classmethod
def delete_by_username_and_year(cls, username, year):
oldb = db.get_db()
data = {
'username': username,
'year': year,
}
where = 'username=$username AND year=$year'
return oldb.delete(cls.TABLENAME, where=where, vars=data)
| 4,148 | Python | .py | 115 | 26.504348 | 86 | 0.577173 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
317 | batch_imports.py | internetarchive_openlibrary/openlibrary/core/batch_imports.py | from dataclasses import dataclass
import json
import hashlib
from json.decoder import JSONDecodeError
from pydantic import ValidationError
from pydantic_core import ErrorDetails
from openlibrary import accounts
from openlibrary.core.imports import Batch
from openlibrary.plugins.importapi.import_edition_builder import import_edition_builder
from openlibrary.catalog.add_book import (
IndependentlyPublished,
PublicationYearTooOld,
PublishedInFutureYear,
SourceNeedsISBN,
validate_record,
)
def generate_hash(data: bytes, length: int = 20):
"""
Generate a SHA256 hash of data and truncate it to length.
    This is used to help uniquely identify a batch_import. Truncating to 20 hex
    characters (80 bits) keeps collisions unlikely: the birthday bound puts a
    50% collision chance at roughly a trillion hashed imports.
"""
hash = hashlib.sha256()
hash.update(data)
return hash.hexdigest()[:length]
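# Illustrative sketch (not part of the original module): identical payloads
# always map to the same truncated digest, which keeps batch names stable.
#
#     digest = generate_hash(b'{"title": "Example"}')  # 20-character hex string
#     batch_name = f"batch-{digest}"                   # same form used in batch_import()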
@dataclass
class BatchImportError:
"""
Represents a Batch Import error item, containing a line number and error message.
As the JSONL in a batch import file is parsed, it's validated and errors are
recorded by line number and the error thrown, and this item represents that information,
which is returned to the uploading patron.
"""
line_number: int
error_message: str
@classmethod
def from_pydantic_error(
cls, line_number: int, error: ErrorDetails
) -> "BatchImportError":
"""Create a BatchImportError object from Pydantic's ErrorDetails."""
if loc := error.get("loc"):
loc_str = ", ".join(map(str, loc))
else:
loc_str = ""
msg = error["msg"]
error_type = error["type"]
error_message = f"{loc_str} field: {msg}. (Type: {error_type})"
return BatchImportError(line_number=line_number, error_message=error_message)
@dataclass
class BatchResult:
"""
Represents the response item from batch_import().
"""
batch: Batch | None = None
errors: list[BatchImportError] | None = None
def batch_import(raw_data: bytes, import_status: str) -> BatchResult:
"""
This processes raw byte data from a JSONL POST to the batch import endpoint.
Each line in the JSONL file (i.e. import item) must pass the same validation as
required for a valid import going through load().
The return value has `batch` and `errors` attributes for the caller to use to
access the batch and any errors, respectively. See BatchResult for more.
The line numbers errors use 1-based counting because they are line numbers in a file.
import_status: "pending", "needs_review", etc.
"""
user = accounts.get_current_user()
username = user.get_username() if user else None
batch_name = f"batch-{generate_hash(raw_data)}"
errors: list[BatchImportError] = []
raw_import_records: list[dict] = []
for index, line in enumerate(raw_data.decode("utf-8").splitlines()):
# Allow comments with `#` in these import records, even though they're JSONL.
if line.startswith("#"):
continue
try:
raw_record = json.loads(line)
# Validate the JSON + convert to import rec format and use validation from load().
edition_builder = import_edition_builder(init_dict=raw_record)
validate_record(edition_builder.get_dict())
            # Add the raw_record for later processing; it still must go through load() independently.
raw_import_records.append(raw_record)
except (
JSONDecodeError,
PublicationYearTooOld,
PublishedInFutureYear,
IndependentlyPublished,
SourceNeedsISBN,
) as e:
errors.append(BatchImportError(line_number=index + 1, error_message=str(e)))
except ValidationError as e:
errors.extend(
[
BatchImportError.from_pydantic_error(
line_number=index + 1, error=error
)
for error in e.errors()
]
)
if errors:
return BatchResult(errors=errors)
# Format data for queueing via batch import.
batch_data = [
{
'ia_id': item['source_records'][0],
'data': item,
'submitter': username,
'status': import_status,
}
for item in raw_import_records
]
# Create the batch
batch = Batch.find(batch_name) or Batch.new(name=batch_name, submitter=username)
batch.add_items(batch_data)
return BatchResult(batch=batch)
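# Illustrative sketch (not part of the original module): feeding JSONL bytes
# through batch_import(). The file name is hypothetical, and each record must
# satisfy the same validation as a normal load() import.
#
#     raw = open("batch.jsonl", "rb").read()  # hypothetical JSONL upload
#     result = batch_import(raw, import_status="pending")
#     if result.errors:
#         for err in result.errors:
#             print(err.line_number, err.error_message)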
| 4,647 | Python | .py | 115 | 32.869565 | 101 | 0.664964 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
318 | observations.py | internetarchive_openlibrary/openlibrary/core/observations.py | """Module for handling patron observation functionality"""
from collections import defaultdict, namedtuple
from infogami import config
from infogami.utils.view import public
from openlibrary.utils import extract_numeric_id_from_olid
from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO
from . import cache
from . import db
ObservationIds = namedtuple('ObservationIds', ['type_id', 'value_id'])
ObservationKeyValue = namedtuple('ObservationKeyValue', ['key', 'value'])
OBSERVATIONS = {
'observations': [
{
'id': 1,
'label': 'Pace',
'description': 'How would you rate the pacing of this book?',
'multi_choice': True,
'order': [4, 5, 6, 7],
'values': [
{'id': 1, 'name': 'Slow', 'deleted': True},
{'id': 2, 'name': 'Medium', 'deleted': True},
{'id': 3, 'name': 'Fast', 'deleted': True},
{'id': 4, 'name': 'Slow paced'},
{'id': 5, 'name': 'Medium paced'},
{'id': 6, 'name': 'Fast paced'},
{'id': 7, 'name': 'Meandering'},
],
},
{
'id': 2,
'label': 'Enjoyability',
'description': 'How much did you enjoy reading this book?',
'multi_choice': True,
'order': [3, 7, 8, 9],
'values': [
{'id': 1, 'name': 'Not applicable', 'deleted': True},
{'id': 2, 'name': 'Very boring', 'deleted': True},
{'id': 4, 'name': 'Neither entertaining nor boring', 'deleted': True},
{'id': 5, 'name': 'Entertaining', 'deleted': True},
{'id': 6, 'name': 'Very entertaining', 'deleted': True},
{'id': 3, 'name': 'Boring'},
{'id': 7, 'name': 'Engaging'},
{'id': 8, 'name': 'Exciting'},
{'id': 9, 'name': 'Neutral'},
],
},
{
'id': 3,
'label': 'Clarity',
'description': 'How clearly was this book written and presented?',
'multi_choice': True,
'order': [6, 7, 8, 9, 10, 11, 12, 13],
'values': [
{'id': 1, 'name': 'Not applicable', 'deleted': True},
{'id': 2, 'name': 'Very unclearly', 'deleted': True},
{'id': 3, 'name': 'Unclearly', 'deleted': True},
{'id': 4, 'name': 'Clearly', 'deleted': True},
{'id': 5, 'name': 'Very clearly', 'deleted': True},
{'id': 6, 'name': 'Succinct'},
{'id': 7, 'name': 'Dense'},
{'id': 8, 'name': 'Incomprehensible'},
{'id': 9, 'name': 'Confusing'},
{'id': 10, 'name': 'Clearly written'},
{'id': 11, 'name': 'Effective explanations'},
{'id': 12, 'name': 'Well organized'},
{'id': 13, 'name': 'Disorganized'},
],
},
{
'id': 4,
'label': 'Jargon',
'description': 'How technical is the content?',
'multi_choice': False,
'order': [1, 2, 3, 4, 5],
'values': [
{'id': 1, 'name': 'Not applicable'},
{'id': 2, 'name': 'Not technical'},
{'id': 3, 'name': 'Somewhat technical'},
{'id': 4, 'name': 'Technical'},
{'id': 5, 'name': 'Very technical'},
],
'deleted': True,
},
{
'id': 5,
'label': 'Originality',
'description': 'How original is this book?',
'multi_choice': False,
'order': [1, 2, 3, 4, 5],
'values': [
{'id': 1, 'name': 'Not applicable'},
{'id': 2, 'name': 'Very unoriginal'},
{'id': 3, 'name': 'Somewhat unoriginal'},
{'id': 4, 'name': 'Somewhat original'},
{'id': 5, 'name': 'Very original'},
],
'deleted': True,
},
{
'id': 6,
'label': 'Difficulty',
'description': 'How would you rate the difficulty of '
'this book for a general audience?',
'multi_choice': True,
'order': [6, 7, 8, 9, 10, 11, 12],
'values': [
{'id': 1, 'name': 'Not applicable', 'deleted': True},
{'id': 2, 'name': 'Requires domain expertise', 'deleted': True},
{'id': 3, 'name': 'A lot of prior knowledge needed', 'deleted': True},
{'id': 4, 'name': 'Some prior knowledge needed', 'deleted': True},
{'id': 5, 'name': 'No prior knowledge needed', 'deleted': True},
{'id': 6, 'name': 'Beginner'},
{'id': 7, 'name': 'Intermediate'},
{'id': 8, 'name': 'Advanced'},
{'id': 9, 'name': 'Expert'},
{'id': 10, 'name': 'University'},
{'id': 11, 'name': 'Layman'},
{'id': 12, 'name': 'Juvenile'},
],
},
{
'id': 7,
'label': 'Usefulness',
'description': 'How useful is the content of this book?',
'multi_choice': False,
'order': [1, 2, 3, 4, 5],
'values': [
{'id': 1, 'name': 'Not applicable'},
{'id': 2, 'name': 'Not useful'},
{'id': 3, 'name': 'Somewhat useful'},
{'id': 4, 'name': 'Useful'},
{'id': 5, 'name': 'Very useful'},
],
'deleted': True,
},
{
'id': 8,
'label': 'Breadth',
'description': "How would you describe the breadth and depth of this book?",
'multi_choice': True,
'order': [7, 8, 9, 10, 11, 12, 13],
'values': [
{'id': 1, 'name': 'Not applicable', 'deleted': True},
{'id': 2, 'name': 'Much more deep', 'deleted': True},
{'id': 3, 'name': 'Somewhat more deep', 'deleted': True},
{'id': 4, 'name': 'Equally broad and deep', 'deleted': True},
{'id': 5, 'name': 'Somewhat more broad', 'deleted': True},
{'id': 6, 'name': 'Much more broad', 'deleted': True},
{'id': 7, 'name': 'Comprehensive'},
{'id': 8, 'name': 'Not comprehensive'},
{'id': 9, 'name': 'Focused'},
{'id': 10, 'name': 'Interdisciplinary'},
{'id': 11, 'name': 'Extraneous'},
{'id': 12, 'name': 'Shallow'},
{'id': 13, 'name': 'Introductory'},
],
},
{
'id': 9,
'label': 'Objectivity',
'description': 'Are there causes to question the accuracy of this book?',
'multi_choice': True,
'order': [1, 2, 3, 4, 5, 6, 7, 8],
'values': [
{'id': 1, 'name': 'Not applicable'},
{'id': 2, 'name': 'No, it seems accurate'},
{'id': 3, 'name': 'Yes, it needs citations'},
{'id': 4, 'name': 'Yes, it is inflammatory'},
{'id': 5, 'name': 'Yes, it has typos'},
{'id': 6, 'name': 'Yes, it is inaccurate'},
{'id': 7, 'name': 'Yes, it is misleading'},
{'id': 8, 'name': 'Yes, it is biased'},
],
'deleted': True,
},
{
'id': 10,
'label': 'Genres',
'description': 'What are the genres of this book?',
'multi_choice': True,
'order': [
1,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
],
'values': [
{'id': 1, 'name': 'Sci-fi'},
{'id': 2, 'name': 'Philosophy', 'deleted': True},
{'id': 3, 'name': 'Satire'},
{'id': 4, 'name': 'Poetry'},
{'id': 5, 'name': 'Memoir'},
{'id': 6, 'name': 'Paranormal'},
{'id': 7, 'name': 'Mystery'},
{'id': 8, 'name': 'Humor'},
{'id': 9, 'name': 'Horror'},
{'id': 10, 'name': 'Fantasy'},
{'id': 11, 'name': 'Drama'},
{'id': 12, 'name': 'Crime'},
{'id': 13, 'name': 'Graphical'},
{'id': 14, 'name': 'Classic'},
{'id': 15, 'name': 'Anthology'},
{'id': 16, 'name': 'Action'},
{'id': 17, 'name': 'Romance'},
{'id': 18, 'name': 'How-to'},
{'id': 19, 'name': 'Encyclopedia'},
{'id': 20, 'name': 'Dictionary'},
{'id': 21, 'name': 'Technical'},
{'id': 22, 'name': 'Reference'},
{'id': 23, 'name': 'Textbook'},
{'id': 24, 'name': 'Biographical'},
{'id': 25, 'name': 'Fiction'},
{'id': 26, 'name': 'Nonfiction'},
{'id': 27, 'name': 'Biography'},
{'id': 28, 'name': 'Based on a true story'},
{'id': 29, 'name': 'Exploratory'},
{'id': 30, 'name': 'Research'},
{'id': 31, 'name': 'Philosophical'},
{'id': 32, 'name': 'Essay'},
{'id': 33, 'name': 'Review'},
],
},
{
'id': 11,
'label': 'Fictionality',
'description': "Is this book a work of fact or fiction?",
'multi_choice': False,
'order': [1, 2, 3],
'values': [
{'id': 1, 'name': 'Nonfiction'},
{'id': 2, 'name': 'Fiction'},
{'id': 3, 'name': 'Biography'},
],
'deleted': True,
},
{
'id': 12,
'label': 'Audience',
'description': "What are the intended age groups for this book?",
'multi_choice': True,
'order': [1, 2, 3, 4, 5, 6, 7],
'values': [
{'id': 1, 'name': 'Experts'},
{'id': 2, 'name': 'College'},
{'id': 3, 'name': 'High school'},
{'id': 4, 'name': 'Elementary'},
{'id': 5, 'name': 'Kindergarten'},
{'id': 6, 'name': 'Baby'},
{'id': 7, 'name': 'General audiences'},
],
'deleted': True,
},
{
'id': 13,
'label': 'Mood',
'description': 'What are the moods of this book?',
'multi_choice': True,
'order': [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
],
'values': [
{'id': 1, 'name': 'Scientific'},
{'id': 2, 'name': 'Dry'},
{'id': 3, 'name': 'Emotional'},
{'id': 4, 'name': 'Strange'},
{'id': 5, 'name': 'Suspenseful'},
{'id': 6, 'name': 'Sad'},
{'id': 7, 'name': 'Dark'},
{'id': 8, 'name': 'Lonely'},
{'id': 9, 'name': 'Tense'},
{'id': 10, 'name': 'Fearful'},
{'id': 11, 'name': 'Angry'},
{'id': 12, 'name': 'Hopeful'},
{'id': 13, 'name': 'Lighthearted'},
{'id': 14, 'name': 'Calm'},
{'id': 15, 'name': 'Informative'},
{'id': 16, 'name': 'Ominous'},
{'id': 17, 'name': 'Mysterious'},
{'id': 18, 'name': 'Romantic'},
{'id': 19, 'name': 'Whimsical'},
{'id': 20, 'name': 'Idyllic'},
{'id': 21, 'name': 'Melancholy'},
{'id': 22, 'name': 'Humorous'},
{'id': 23, 'name': 'Gloomy'},
{'id': 24, 'name': 'Reflective'},
{'id': 25, 'name': 'Inspiring'},
{'id': 26, 'name': 'Cheerful'},
],
},
{
'id': 14,
'label': 'Impressions',
'description': 'How did you feel about this book and do you recommend it?',
'multi_choice': True,
'order': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
'values': [
{'id': 1, 'name': 'Recommend'},
{'id': 2, 'name': 'Highly recommend'},
{'id': 3, 'name': "Don't recommend"},
{'id': 4, 'name': 'Field defining'},
{'id': 5, 'name': 'Actionable'},
{'id': 6, 'name': 'Forgettable'},
{'id': 7, 'name': 'Quotable'},
{'id': 8, 'name': 'Citable'},
{'id': 9, 'name': 'Original'},
{'id': 10, 'name': 'Unremarkable'},
{'id': 11, 'name': 'Life changing'},
{'id': 12, 'name': 'Best in class'},
{'id': 13, 'name': 'Overhyped'},
{'id': 14, 'name': 'Underrated'},
],
},
{
'id': 15,
'label': 'Type',
'description': 'How would you classify this work?',
'multi_choice': True,
'order': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
'values': [
{'id': 1, 'name': 'Fiction'},
{'id': 2, 'name': 'Nonfiction'},
{'id': 3, 'name': 'Biography'},
{'id': 4, 'name': 'Based on a true story'},
{'id': 5, 'name': 'Textbook'},
{'id': 6, 'name': 'Reference'},
{'id': 7, 'name': 'Exploratory'},
{'id': 8, 'name': 'Research'},
{'id': 9, 'name': 'Philosophical'},
{'id': 10, 'name': 'Essay'},
{'id': 11, 'name': 'Review'},
{'id': 12, 'name': 'Classic'},
],
'deleted': True,
},
{
'id': 16,
'label': 'Length',
'description': 'How would you rate or describe the length of this book?',
'multi_choice': True,
'order': [1, 2, 3],
'values': [
{'id': 1, 'name': 'Short'},
{'id': 2, 'name': 'Medium'},
{'id': 3, 'name': 'Long'},
],
},
{
'id': 17,
'label': 'Credibility',
'description': 'How factually accurate and reliable '
'is the content of this book?',
'multi_choice': True,
'order': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
'values': [
{'id': 1, 'name': 'Accurate'},
{'id': 2, 'name': 'Inaccurate'},
{'id': 3, 'name': 'Outdated'},
{'id': 4, 'name': 'Evergreen'},
{'id': 5, 'name': 'Biased'},
{'id': 6, 'name': 'Objective'},
{'id': 7, 'name': 'Subjective'},
{'id': 8, 'name': 'Rigorous'},
{'id': 9, 'name': 'Misleading'},
{'id': 10, 'name': 'Controversial'},
{'id': 11, 'name': 'Trendy'},
],
},
{
'id': 18,
'label': 'Features',
            'description': 'What text features does this book make use of?',
'multi_choice': True,
'order': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
'values': [
{'id': 1, 'name': 'Tables, diagrams, and figures'},
{'id': 2, 'name': 'Problem sets'},
{'id': 3, 'name': 'Proofs'},
{'id': 4, 'name': 'Interviews'},
{'id': 5, 'name': 'Table of contents'},
{'id': 6, 'name': 'Illustrations'},
{'id': 7, 'name': 'Index'},
{'id': 8, 'name': 'Glossary'},
{'id': 9, 'name': 'Chapters'},
{'id': 10, 'name': 'Appendix'},
{'id': 11, 'name': 'Bibliography'},
],
},
{
'id': 19,
'label': 'Content Warnings',
'description': 'Does this book contain objectionable content?',
'multi_choice': True,
            'order': [1, 2, 3, 4, 5, 6, 7, 8],
'values': [
{'id': 1, 'name': 'Adult themes'},
{'id': 2, 'name': 'Trigger warnings'},
{'id': 3, 'name': 'Offensive language'},
{'id': 4, 'name': 'Graphic imagery'},
{'id': 5, 'name': 'Insensitivity'},
{'id': 6, 'name': 'Racism'},
{'id': 7, 'name': 'Sexual themes'},
{'id': 8, 'name': 'Drugs'},
],
},
{
'id': 20,
'label': 'Style',
'description': 'What type of verbiage, nomenclature, '
'or symbols are employed in this book?',
'multi_choice': True,
'order': [1, 2, 3, 4, 5],
'values': [
{'id': 1, 'name': 'Technical'},
{'id': 2, 'name': 'Jargony'},
{'id': 3, 'name': 'Neologisms'},
{'id': 4, 'name': 'Slang'},
{'id': 5, 'name': 'Olde'},
],
},
{
'id': 21,
'label': 'Purpose',
'description': 'Why should someone read this book?',
'multi_choice': True,
'order': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'values': [
{'id': 1, 'name': 'Entertainment'},
{'id': 2, 'name': 'Broaden perspective'},
{'id': 3, 'name': 'How-to'},
{'id': 4, 'name': 'Learn about'},
{'id': 5, 'name': 'Self-help'},
{'id': 6, 'name': 'Hope'},
{'id': 7, 'name': 'Inspiration'},
{'id': 8, 'name': 'Fact checking'},
{'id': 9, 'name': 'Problem solving'},
],
},
]
}
cache_duration = config.get('observation_cache_duration') or 86400
@public
@cache.memoize(engine="memcache", key="observations", expires=cache_duration)
def get_observations():
"""
Returns a dictionary of observations that are used to populate forms for patron feedback about a book.
Dictionary has the following structure:
{
'observations': [
{
'id': 1,
'label': 'pace',
'description': 'What is the pace of this book?',
'multi_choice': False,
'values': [
'slow',
'medium',
'fast'
]
}
]
}
If an observation is marked as deleted, it will not be included in the dictionary.
return: Dictionary of all possible observations that can be made about a book.
"""
observations_list = []
for o in OBSERVATIONS['observations']:
if 'deleted' not in o:
list_item = {
'id': o['id'],
'label': o['label'],
'description': o['description'],
'multi_choice': o['multi_choice'],
'values': _sort_values(o['order'], o['values']),
}
observations_list.append(list_item)
return {'observations': observations_list}
def _sort_values(order_list, values_list):
"""
Given a list of ordered value IDs and a list of value dictionaries, returns an ordered list of
values.
return: An ordered list of values.
"""
ordered_values = []
for id in order_list:
value = next(
(
v['name']
for v in values_list
if v['id'] == id and not v.get('deleted', False)
),
None,
)
if value:
ordered_values.append(value)
return ordered_values
def _get_deleted_types_and_values():
"""
Returns a dictionary containing all deleted observation types and values.
return: Deleted types and values dictionary.
"""
results = {'types': [], 'values': defaultdict(list)}
for o in OBSERVATIONS['observations']:
if o.get('deleted'):
results['types'].append(o['id'])
else:
for v in o['values']:
if v.get('deleted'):
results['values'][o['id']].append(v['id'])
return results
def convert_observation_ids(id_dict):
"""
Given a dictionary of type and value IDs, returns a dictionary of equivalent
type and value strings.
return: Dictionary of type and value strings
"""
types_and_values = _get_all_types_and_values()
conversion_results = {}
for k in id_dict:
if not types_and_values[str(k)].get('deleted', False):
conversion_results[types_and_values[str(k)]['type']] = [
types_and_values[str(k)]['values'][str(i)]['name']
for i in id_dict[k]
if not types_and_values[str(k)]['values'][str(i)].get('deleted', False)
]
# Remove types with no values (all values of type were marked 'deleted'):
return {k: v for (k, v) in conversion_results.items() if len(v)}
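# Illustrative sketch (not part of the original module), using IDs from the
# OBSERVATIONS table above: type 1 is "Pace" (value 5 is "Medium paced") and
# type 16 is "Length" (values 1 and 3 are "Short" and "Long").
#
#     convert_observation_ids({1: [5], 16: [1, 3]})
#     # -> {'Pace': ['Medium paced'], 'Length': ['Short', 'Long']}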
@cache.memoize(
engine="memcache", key="all_observation_types_and_values", expires=cache_duration
)
def _get_all_types_and_values():
"""
Returns a dictionary of observation types and values mappings. The keys for the
dictionary are the id numbers.
return: A dictionary of observation id to string value mappings.
"""
types_and_values = {}
for o in OBSERVATIONS['observations']:
types_and_values[str(o['id'])] = {
'type': o['label'],
'deleted': o.get('deleted', False),
'values': {
str(v['id']): {'name': v['name'], 'deleted': v.get('deleted', False)}
for v in o['values']
},
}
return types_and_values
@public
def get_observation_metrics(work_olid):
"""
Returns a dictionary of observation statistics for the given work. Statistics
will be used to populate a book's "Reader Observations" component.
Dictionary will have the following structure:
{
'work_id': 12345,
'total_respondents': 100,
'observations': [
{
'label': 'pace',
'description': 'What is the pace of this book?',
'multi_choice': False,
'total_respondents_for_type': 10,
'total_responses': 10,
'values': [
{
'value': 'fast',
'count': 6
},
{
'value': 'medium',
'count': 4
}
]
}
... Other observations omitted for brevity ...
]
}
If no observations were made for a specific type, that type will be excluded from
the 'observations' list. Items in the 'observations.values' list will be
ordered from greatest count to least.
return: A dictionary of observation statistics for a work.
"""
work_id = extract_numeric_id_from_olid(work_olid)
total_respondents = Observations.total_unique_respondents(work_id)
metrics = {}
metrics['work_id'] = work_id
metrics['total_respondents'] = total_respondents
metrics['observations'] = []
if total_respondents > 0:
respondents_per_type_dict = Observations.count_unique_respondents_by_type(
work_id
)
observation_totals = Observations.count_observations(work_id)
if not observation_totals:
# It is possible to have a non-zero number of respondents and no
# observation totals if deleted book tags are present in the
# observations table.
return metrics
current_type_id = observation_totals[0]['type_id']
observation_item = next(
o for o in OBSERVATIONS['observations'] if current_type_id == o['id']
)
current_observation = {
'label': observation_item['label'],
'description': observation_item['description'],
'multi_choice': observation_item['multi_choice'],
'total_respondents_for_type': respondents_per_type_dict[current_type_id],
'values': [],
}
total_responses = 0
for i in observation_totals:
if i['type_id'] != current_type_id:
current_observation['total_responses'] = total_responses
total_responses = 0
metrics['observations'].append(current_observation)
current_type_id = i['type_id']
observation_item = next(
o
for o in OBSERVATIONS['observations']
if current_type_id == o['id']
)
current_observation = {
'label': observation_item['label'],
'description': observation_item['description'],
'multi_choice': observation_item['multi_choice'],
'total_respondents_for_type': respondents_per_type_dict[
current_type_id
],
'values': [],
}
current_observation['values'].append(
{
'value': next(
v['name']
for v in observation_item['values']
if v['id'] == i['value_id']
),
'count': i['total'],
}
)
total_responses += i['total']
current_observation['total_responses'] = total_responses
metrics['observations'].append(current_observation)
return metrics
class Observations(db.CommonExtras):
TABLENAME = "observations"
NULL_EDITION_VALUE = -1
PRIMARY_KEY = [
"work_id",
"edition_id",
"username",
"observation_value",
"observation_type",
]
ALLOW_DELETE_ON_CONFLICT = True
@classmethod
def summary(cls):
return {
'total_reviews': {
'total': Observations.total_reviews(),
'month': Observations.total_reviews(since=DATE_ONE_MONTH_AGO),
'week': Observations.total_reviews(since=DATE_ONE_WEEK_AGO),
},
'total_books_reviewed': {
'total': Observations.total_books_reviewed(),
'month': Observations.total_books_reviewed(since=DATE_ONE_MONTH_AGO),
'week': Observations.total_books_reviewed(since=DATE_ONE_WEEK_AGO),
},
'total_reviewers': {
'total': Observations.total_unique_respondents(),
'month': Observations.total_unique_respondents(
since=DATE_ONE_MONTH_AGO
),
'week': Observations.total_unique_respondents(since=DATE_ONE_WEEK_AGO),
},
}
@classmethod
def total_reviews(cls, since=None):
oldb = db.get_db()
query = "SELECT COUNT(*) from observations"
if since:
query += " WHERE created >= $since"
return oldb.query(query, vars={'since': since})[0]['count']
@classmethod
def total_books_reviewed(cls, since=None):
oldb = db.get_db()
query = "SELECT COUNT(DISTINCT(work_id)) from observations"
if since:
query += " WHERE created >= $since"
return oldb.query(query, vars={'since': since})[0]['count']
@classmethod
def total_unique_respondents(cls, work_id=None, since=None):
"""
Returns total number of patrons who have submitted observations for the given work ID.
If no work ID is passed, returns total number of patrons who have submitted observations
for any work.
return: Total number of patrons who have made an observation.
"""
oldb = db.get_db()
data = {
'work_id': work_id,
'since': since,
}
query = "SELECT COUNT(DISTINCT(username)) FROM observations"
if work_id:
query += " WHERE work_id = $work_id"
if since:
query += " AND created >= $since"
elif since:
query += " WHERE created >= $since"
return oldb.query(query, vars=data)[0]['count']
@classmethod
def count_unique_respondents_by_type(cls, work_id):
"""
Returns the total number of respondents who have made an observation per each type of a work
ID.
return: Dictionary of total patron respondents per type id for the given work ID.
"""
oldb = db.get_db()
data = {
'work_id': work_id,
}
query = """
SELECT
observation_type AS type,
count(distinct(username)) AS total_respondents
FROM observations
WHERE work_id = $work_id """
deleted_observations = _get_deleted_types_and_values()
if len(deleted_observations['types']):
deleted_type_ids = ', '.join(str(i) for i in deleted_observations['types'])
query += f'AND observation_type not in ({deleted_type_ids}) '
if len(deleted_observations['values']):
for key in deleted_observations['values']:
deleted_value_ids = ', '.join(
str(i) for i in deleted_observations['values'][key]
)
query += f'AND NOT (observation_type = {key} AND observation_value IN ({deleted_value_ids})) '
query += 'GROUP BY type'
return {
i['type']: i['total_respondents']
for i in list(oldb.query(query, vars=data))
}
@classmethod
def count_observations(cls, work_id):
"""
For a given work, fetches the count of each observation made for the work. Counts are returned in
a list, grouped by observation type and ordered from highest count to lowest.
return: A list of value counts for the given work.
"""
oldb = db.get_db()
data = {'work_id': work_id}
query = """
SELECT
observation_type as type_id,
observation_value as value_id,
COUNT(observation_value) AS total
FROM observations
WHERE observations.work_id = $work_id """
deleted_observations = _get_deleted_types_and_values()
if len(deleted_observations['types']):
deleted_type_ids = ', '.join(str(i) for i in deleted_observations['types'])
query += f'AND observation_type not in ({deleted_type_ids}) '
if len(deleted_observations['values']):
for key in deleted_observations['values']:
deleted_value_ids = ', '.join(
str(i) for i in deleted_observations['values'][key]
)
query += f'AND NOT (observation_type = {key} AND observation_value IN ({deleted_value_ids})) '
query += """
GROUP BY type_id, value_id
ORDER BY type_id, total DESC"""
return list(oldb.query(query, vars=data))
@classmethod
def count_distinct_observations(cls, username):
"""
Fetches the number of works in which the given user has made at least
one observation.
return: The number of works for which the given user has made at least
one observation
"""
oldb = db.get_db()
data = {'username': username}
query = """
SELECT
COUNT(DISTINCT(work_id))
FROM observations
WHERE observations.username = $username
"""
return oldb.query(query, vars=data)[0]['count']
@classmethod
def get_key_value_pair(cls, type_id, value_id):
"""
Given a type ID and value ID, returns a key-value pair of the observation's type and value.
return: Type and value key-value pair
"""
observation = next(
o for o in OBSERVATIONS['observations'] if o['id'] == type_id
)
key = observation['label']
value = next(v['name'] for v in observation['values'] if v['id'] == value_id)
return ObservationKeyValue(key, value)
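    # Illustrative sketch (not part of the original source), using IDs defined
    # in OBSERVATIONS above:
    #
    #     Observations.get_key_value_pair(1, 6)
    #     # -> ObservationKeyValue(key='Pace', value='Fast paced')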
@classmethod
def get_patron_observations(cls, username, work_id=None):
"""
Returns a list of observation records containing only type and value IDs.
Gets all of a patron's observation records by default. Returns only the observations for
the given work if work_id is passed.
return: A list of a patron's observations
"""
oldb = db.get_db()
data = {'username': username, 'work_id': work_id}
query = """
SELECT
observations.observation_type AS type,
observations.observation_value AS value
FROM observations
WHERE observations.username=$username"""
if work_id:
query += " AND work_id=$work_id"
return list(oldb.query(query, vars=data))
@classmethod
def get_observations_for_work(cls, work_id):
oldb = db.get_db()
query = "SELECT * from observations where work_id=$work_id"
return list(oldb.query(query, vars={"work_id": work_id}))
@classmethod
def get_observations_grouped_by_work(cls, username, limit=25, page=1):
"""
Returns a list of records which contain a work id and a JSON string
containing all of the observations for that work_id.
"""
oldb = db.get_db()
data = {'username': username, 'limit': limit, 'offset': limit * (page - 1)}
query = """
SELECT
work_id,
JSON_AGG(ROW_TO_JSON(
(SELECT r FROM
(SELECT observation_type, observation_values) r)
)
) AS observations
FROM (
SELECT
work_id,
observation_type,
JSON_AGG(observation_value) AS observation_values
FROM observations
WHERE username=$username
GROUP BY work_id, observation_type) s
GROUP BY work_id
LIMIT $limit OFFSET $offset
"""
return list(oldb.query(query, vars=data))
    @classmethod
    def get_multi_choice(cls, type):
"""Searches for the given type in the observations object, and
returns the type's 'multi_choice' value.
return: The multi_choice value for the given type
"""
for o in OBSERVATIONS['observations']:
if o['label'] == type:
return o['multi_choice']
@classmethod
def persist_observation(
cls, username, work_id, observation, action, edition_id=NULL_EDITION_VALUE
):
"""Inserts or deletes a single observation, depending on the given action.
If the action is 'delete', the observation will be deleted
from the observations table.
        If the action is 'add', and the observation type only allows a
        single value (multi_choice == False), an attempt is made to
delete previous observations of the same type before the new
observation is persisted.
Otherwise, the new observation is stored in the DB.
"""
def get_observation_ids(observation):
"""
Given an observation key-value pair, returns an ObservationIds tuple.
            return: An ObservationIds tuple
"""
key = next(iter(observation))
item = next(o for o in OBSERVATIONS['observations'] if o['label'] == key)
return ObservationIds(
item['id'],
next(v['id'] for v in item['values'] if v['name'] == observation[key]),
)
oldb = db.get_db()
observation_ids = get_observation_ids(observation)
data = {
'username': username,
'work_id': work_id,
'edition_id': edition_id,
'observation_type': observation_ids.type_id,
'observation_value': observation_ids.value_id,
}
where_clause = 'username=$username AND work_id=$work_id AND observation_type=$observation_type '
if action == 'delete':
# Delete observation and return:
where_clause += 'AND observation_value=$observation_value'
return oldb.delete('observations', vars=data, where=where_clause)
elif not cls.get_multi_choice(next(iter(observation))):
# A radio button value has changed. Delete old value, if one exists:
oldb.delete('observations', vars=data, where=where_clause)
# Insert new value and return:
return oldb.insert(
'observations',
username=username,
work_id=work_id,
edition_id=edition_id,
observation_type=observation_ids.type_id,
observation_value=observation_ids.value_id,
)
@classmethod
def remove_observations(
cls,
username,
work_id,
edition_id=NULL_EDITION_VALUE,
observation_type=None,
observation_value=None,
):
"""Deletes observations from the observations table. If both
observation_type and observation_value are passed, only one
row will be deleted from the table. Otherwise, all of a
patron's observations for an edition are deleted.
return: A list of deleted rows.
"""
oldb = db.get_db()
data = {
'username': username,
'work_id': work_id,
'edition_id': edition_id,
'observation_type': observation_type,
'observation_value': observation_value,
}
where_clause = (
'username=$username AND work_id=$work_id AND edition_id=$edition_id'
)
if observation_type and observation_value:
where_clause += ' AND observation_type=$observation_type AND observation_value=$observation_value'
return oldb.delete('observations', where=(where_clause), vars=data)
@classmethod
def select_all_by_username(cls, username, _test=False):
rows = super().select_all_by_username(username, _test=_test)
types_and_values = _get_all_types_and_values()
for row in rows:
type_id = f"{row['observation_type']}"
value_id = f"{row['observation_value']}"
row['observation_type'] = types_and_values[type_id]['type']
row['observation_value'] = types_and_values[type_id]['values'][value_id][
'name'
]
return rows
| 40,055 | Python | .py | 1,006 | 26.978131 | 110 | 0.470984 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
319 | vendors.py | internetarchive_openlibrary/openlibrary/core/vendors.py | from __future__ import annotations
import logging
import re
import time
from datetime import date
from typing import Any, Literal
import requests
from dateutil import parser as isoparser
from infogami.utils.view import public
from paapi5_python_sdk.api.default_api import DefaultApi
from paapi5_python_sdk.get_items_request import GetItemsRequest
from paapi5_python_sdk.get_items_resource import GetItemsResource
from paapi5_python_sdk.partner_type import PartnerType
from paapi5_python_sdk.rest import ApiException
from paapi5_python_sdk.search_items_request import SearchItemsRequest
from openlibrary import accounts
from openlibrary.catalog.add_book import load
from openlibrary.core import cache
from openlibrary.core import helpers as h
from openlibrary.utils import dateutil
from openlibrary.utils.isbn import (
isbn_10_to_isbn_13,
isbn_13_to_isbn_10,
normalize_isbn,
)
logger = logging.getLogger("openlibrary.vendors")
BETTERWORLDBOOKS_BASE_URL = 'https://betterworldbooks.com'
BETTERWORLDBOOKS_API_URL = (
'https://products.betterworldbooks.com/service.aspx?IncludeAmazon=True&ItemId='
)
affiliate_server_url = None
BWB_AFFILIATE_LINK = 'http://www.anrdoezrs.net/links/{}/type/dlg/http://www.betterworldbooks.com/-id-%s'.format(
h.affiliate_id('betterworldbooks')
)
AMAZON_FULL_DATE_RE = re.compile(r'\d{4}-\d\d-\d\d')
ISBD_UNIT_PUNCT = ' : ' # ISBD cataloging title-unit separator punctuation
def setup(config):
global affiliate_server_url
affiliate_server_url = config.get('affiliate_server')
def get_lexile(isbn):
try:
url = 'https://atlas-fab.lexile.com/free/books/' + str(isbn)
headers = {'accept': 'application/json; version=1.0'}
lexile = requests.get(url, headers=headers)
lexile.raise_for_status() # this will raise an error for us if the http status returned is not 200 OK
data = lexile.json()
return data, data.get("error_msg")
except Exception as e: # noqa: BLE001
if e.response.status_code not in [200, 404]:
raise Exception(f"Got bad response back from server: {e}")
return {}, e
class AmazonAPI:
"""
Amazon Product Advertising API 5.0 wrapper for Python
See https://webservices.amazon.com/paapi5/documentation/
"""
RESOURCES = {
'all': [ # Hack: pulls all resource consts from GetItemsResource
getattr(GetItemsResource, v) for v in vars(GetItemsResource) if v.isupper()
],
'import': [
GetItemsResource.IMAGES_PRIMARY_LARGE,
GetItemsResource.ITEMINFO_BYLINEINFO,
GetItemsResource.ITEMINFO_CONTENTINFO,
GetItemsResource.ITEMINFO_MANUFACTUREINFO,
GetItemsResource.ITEMINFO_PRODUCTINFO,
GetItemsResource.ITEMINFO_TITLE,
GetItemsResource.ITEMINFO_CLASSIFICATIONS,
GetItemsResource.OFFERS_LISTINGS_PRICE,
],
'prices': [GetItemsResource.OFFERS_LISTINGS_PRICE],
}
def __init__(
self,
key: str,
secret: str,
tag: str,
host: str = 'webservices.amazon.com',
region: str = 'us-east-1',
throttling: float = 0.9,
) -> None:
"""
Creates an instance containing your API credentials.
:param str key: affiliate key
:param str secret: affiliate secret
:param str tag: affiliate string
:param str host: which server to query
:param str region: which regional host to query
:param float throttling: Reduce this value to wait longer between API calls.
"""
self.tag = tag
self.throttling = throttling
self.last_query_time = time.time()
self.api = DefaultApi(
access_key=key, secret_key=secret, host=host, region=region
)
def search(self, keywords):
"""Adding method to test amz searches from the CLI, unused otherwise"""
return self.api.search_items(
SearchItemsRequest(
partner_tag=self.tag,
partner_type=PartnerType.ASSOCIATES,
keywords=keywords,
)
)
def get_product(self, asin: str, serialize: bool = False, **kwargs):
if products := self.get_products([asin], **kwargs):
return next(self.serialize(p) if serialize else p for p in products)
def get_products(
self,
asins: list | str,
serialize: bool = False,
marketplace: str = 'www.amazon.com',
resources: Any | None = None,
**kwargs,
) -> list | None:
"""
:param str asins: One or more ItemIds like ASIN that uniquely identify an item
or product URL. (Max 10) Separated by comma or as a list.
"""
# Wait before doing the request
wait_time = 1 / self.throttling - (time.time() - self.last_query_time)
if wait_time > 0:
time.sleep(wait_time)
self.last_query_time = time.time()
item_ids = asins if isinstance(asins, list) else [asins]
_resources = self.RESOURCES[resources or 'import']
try:
request = GetItemsRequest(
partner_tag=self.tag,
partner_type=PartnerType.ASSOCIATES,
marketplace=marketplace,
item_ids=item_ids,
resources=_resources,
**kwargs,
)
except ApiException:
logger.error(
f"Amazon fetch failed for: {', '.join(item_ids)}", exc_info=True
)
return None
response = self.api.get_items(request)
products = (
[p for p in response.items_result.items if p]
if response.items_result
else []
)
return products if not serialize else [self.serialize(p) for p in products]
@staticmethod
def serialize(product: Any) -> dict:
"""Takes a full Amazon product Advertising API returned AmazonProduct
with multiple ResponseGroups, and extracts the data we are
interested in.
:param AmazonAPI product:
:return: Amazon metadata for one product
{
'price': '$54.06',
'price_amt': 5406,
'physical_format': 'hardcover',
'authors': [{'name': 'Guterson, David'}],
'publish_date': 'Jan 21, 2020',
#'dimensions': {
# 'width': [1.7, 'Inches'],
# 'length': [8.5, 'Inches'],
# 'weight': [5.4, 'Pounds'],
# 'height': [10.875, 'Inches']
# },
'publishers': ['Victory Belt Publishing'],
'source_records': ['amazon:1628603976'],
'title': 'Boundless: Upgrade Your Brain, Optimize Your Body & Defy Aging',
'url': 'https://www.amazon.com/dp/1628603976/?tag=internetarchi-20',
'number_of_pages': 640,
'cover': 'https://m.media-amazon.com/images/I/51IT9MV3KqL._AC_.jpg',
'languages': ['English']
'edition_num': '1'
}
"""
if not product:
return {} # no match?
item_info = getattr(product, 'item_info')
images = getattr(product, 'images')
edition_info = item_info and getattr(item_info, 'content_info')
attribution = item_info and getattr(item_info, 'by_line_info')
price = (
getattr(product, 'offers')
and product.offers.listings
and product.offers.listings[0].price
)
brand = (
attribution
and getattr(attribution, 'brand')
and getattr(attribution.brand, 'display_value')
)
manufacturer = (
item_info
and getattr(item_info, 'by_line_info')
and getattr(item_info.by_line_info, 'manufacturer')
and item_info.by_line_info.manufacturer.display_value
)
product_group = (
item_info
and getattr(
item_info,
'classifications',
)
and getattr(item_info.classifications, 'product_group')
and item_info.classifications.product_group.display_value
)
try:
publish_date = (
edition_info
and edition_info.publication_date
and isoparser.parse(
edition_info.publication_date.display_value
).strftime('%b %d, %Y')
)
except Exception:
logger.exception(f"serialize({product})")
publish_date = None
asin_is_isbn10 = not product.asin.startswith("B")
isbn_13 = isbn_10_to_isbn_13(product.asin) if asin_is_isbn10 else None
book = {
'url': "https://www.amazon.com/dp/{}/?tag={}".format(
product.asin, h.affiliate_id('amazon')
),
'source_records': ['amazon:%s' % product.asin],
'isbn_10': [product.asin] if asin_is_isbn10 else [],
'isbn_13': [isbn_13] if isbn_13 else [],
'price': price and price.display_amount,
'price_amt': price and price.amount and int(100 * price.amount),
'title': (
item_info
and item_info.title
and getattr(item_info.title, 'display_value')
),
'cover': (
images.primary.large.url
if images
and images.primary
and images.primary.large
and images.primary.large.url
and '/01RmK+J4pJL.' not in images.primary.large.url
else None
),
'authors': attribution
and [{'name': contrib.name} for contrib in attribution.contributors or []],
'publishers': list({p for p in (brand, manufacturer) if p}),
'number_of_pages': (
edition_info
and edition_info.pages_count
and edition_info.pages_count.display_value
),
'edition_num': (
edition_info
and edition_info.edition
and edition_info.edition.display_value
),
'publish_date': publish_date,
'product_group': product_group,
'physical_format': (
item_info
and item_info.classifications
and getattr(
item_info.classifications.binding, 'display_value', ''
).lower()
),
}
if is_dvd(book):
return {}
return book
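# Minimal usage sketch for AmazonAPI (the credentials are placeholders; the ASIN is the
# one used in the serialize() docstring above). serialize=True yields the trimmed dict
# documented there:
#
#     api = AmazonAPI(key='KEY', secret='SECRET', tag='tag-20')
#     book = api.get_product('1628603976', serialize=True, resources='import')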
def is_dvd(book) -> bool:
"""
If product_group or physical_format is a dvd, it will return True.
"""
product_group = book['product_group']
physical_format = book['physical_format']
try:
product_group = product_group.lower()
except AttributeError:
product_group = None
try:
physical_format = physical_format.lower()
except AttributeError:
physical_format = None
return 'dvd' in [product_group, physical_format]
@public
def get_amazon_metadata(
id_: str,
id_type: Literal['asin', 'isbn'] = 'isbn',
resources: Any = None,
high_priority: bool = False,
stage_import: bool = True,
) -> dict | None:
"""Main interface to Amazon LookupItem API. Will cache results.
:param str id_: The item id: isbn (10/13), or Amazon ASIN.
:param str id_type: 'isbn' or 'asin'.
:param bool high_priority: Priority in the import queue. High priority
goes to the front of the queue.
    :param bool stage_import: stage the id_ for import if not in the cache.
:return: A single book item's metadata, or None.
"""
return cached_get_amazon_metadata(
id_,
id_type=id_type,
resources=resources,
high_priority=high_priority,
stage_import=stage_import,
)
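# Example call (the ISBN is illustrative): look up cached Amazon metadata, asking the
# affiliate server to treat it as high priority and to stage it for import if missing:
#
#     get_amazon_metadata('1628603976', id_type='isbn', high_priority=True)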
def search_amazon(title: str = '', author: str = '') -> dict: # type: ignore[empty-body]
"""Uses the Amazon Product Advertising API ItemSearch operation to search for
books by author and/or title.
https://docs.aws.amazon.com/AWSECommerceService/latest/DG/ItemSearch.html
XXX! Broken while migrating from paapi 4.0 to 5.0
:return: dict of "results", a list of one or more found books, with metadata.
"""
pass
def _get_amazon_metadata(
id_: str,
id_type: Literal['asin', 'isbn'] = 'isbn',
resources: Any = None,
high_priority: bool = False,
stage_import: bool = True,
) -> dict | None:
"""Uses the Amazon Product Advertising API ItemLookup operation to locate a
specific book by identifier; either 'isbn' or 'asin'.
https://webservices.amazon.com/paapi5/documentation/get-items.html
:param str id_: The item id: isbn (10/13), or Amazon ASIN.
:param str id_type: 'isbn' or 'asin'.
:param Any resources: Used for AWSE Commerce Service lookup
See https://webservices.amazon.com/paapi5/documentation/get-items.html
:param bool high_priority: Priority in the import queue. High priority
goes to the front of the queue.
    :param bool stage_import: stage the id_ for import if not in the cache.
:return: A single book item's metadata, or None.
"""
if not affiliate_server_url:
return None
if id_type == 'isbn':
isbn = normalize_isbn(id_)
if isbn is None:
return None
id_ = isbn
if len(id_) == 13 and id_.startswith('978'):
isbn = isbn_13_to_isbn_10(id_)
if isbn is None:
return None
id_ = isbn
try:
priority = "true" if high_priority else "false"
stage = "true" if stage_import else "false"
r = requests.get(
f'http://{affiliate_server_url}/isbn/{id_}?high_priority={priority}&stage_import={stage}'
)
r.raise_for_status()
if data := r.json().get('hit'):
return data
else:
return None
except requests.exceptions.ConnectionError:
logger.exception("Affiliate Server unreachable")
except requests.exceptions.HTTPError:
logger.exception(f"Affiliate Server: id {id_} not found")
return None
def stage_bookworm_metadata(identifier: str | None) -> dict | None:
"""
    Stage metadata, if found, into `import_item` via BookWorm.
:param str identifier: ISBN 10, ISBN 13, or B*ASIN. Spaces, hyphens, etc. are fine.
"""
if not identifier:
return None
try:
r = requests.get(
f"http://{affiliate_server_url}/isbn/{identifier}?high_priority=true&stage_import=true"
)
r.raise_for_status()
if data := r.json().get('hit'):
return data
else:
return None
except requests.exceptions.ConnectionError:
logger.exception("Affiliate Server unreachable")
except requests.exceptions.HTTPError:
logger.exception(f"Affiliate Server: id {identifier} not found")
return None
def split_amazon_title(full_title: str) -> tuple[str, str | None]:
"""
Splits an Amazon title into (title, subtitle | None) and strips parenthetical
tags.
"""
# strip parenthetical blocks wherever they occur
# can handle 1 level of nesting
re_parens_strip = re.compile(r'\(([^\)\(]*|[^\(]*\([^\)]*\)[^\)]*)\)')
full_title = re.sub(re_parens_strip, '', full_title)
titles = full_title.split(':')
subtitle = titles.pop().strip() if len(titles) > 1 else None
title = ISBD_UNIT_PUNCT.join([unit.strip() for unit in titles])
return (title, subtitle)
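# Worked example (the title is made up): the parenthetical tag is stripped before
# splitting, so
#     split_amazon_title('Boundless (Enhanced Edition): Upgrade Your Brain')
# returns ('Boundless', 'Upgrade Your Brain').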
def clean_amazon_metadata_for_load(metadata: dict) -> dict:
"""This is a bootstrapping helper method which enables us to take the
results of get_amazon_metadata() and create an OL book catalog record.
:param dict metadata: Metadata representing an Amazon product.
:return: A dict representing a book suitable for importing into OL.
"""
# TODO: convert languages into /type/language list
conforming_fields = [
'title',
'authors',
'publish_date',
'source_records',
'number_of_pages',
'publishers',
'cover',
'isbn_10',
'isbn_13',
'physical_format',
]
conforming_metadata = {}
for k in conforming_fields:
# if valid key and value not None
if metadata.get(k) is not None:
conforming_metadata[k] = metadata[k]
if source_records := metadata.get('source_records'):
asin = source_records[0].replace('amazon:', '')
if asin[0].isalpha():
# Only store asin if it provides more information than ISBN
conforming_metadata['identifiers'] = {'amazon': [asin]}
title, subtitle = split_amazon_title(metadata['title'])
conforming_metadata['title'] = title
if subtitle:
conforming_metadata['full_title'] = f'{title}{ISBD_UNIT_PUNCT}{subtitle}'
conforming_metadata['subtitle'] = subtitle
# Record original title if some content has been removed (i.e. parentheses)
if metadata['title'] != conforming_metadata.get('full_title', title):
conforming_metadata['notes'] = "Source title: %s" % metadata['title']
return conforming_metadata
def create_edition_from_amazon_metadata(
id_: str, id_type: Literal['asin', 'isbn'] = 'isbn'
) -> str | None:
"""Fetches Amazon metadata by id from Amazon Product Advertising API, attempts to
create OL edition from metadata, and returns the resulting edition key `/key/OL..M`
if successful or None otherwise.
:param str id_: The item id: isbn (10/13), or Amazon ASIN.
:param str id_type: 'isbn' or 'asin'.
:return: Edition key '/key/OL..M' or None
"""
md = get_amazon_metadata(id_, id_type=id_type)
if md and md.get('product_group') == 'Book':
with accounts.RunAs('ImportBot'):
reply = load(
clean_amazon_metadata_for_load(md), account_key='account/ImportBot'
)
if reply and reply.get('success'):
return reply['edition'].get('key')
return None
def cached_get_amazon_metadata(*args, **kwargs):
"""If the cached data is `None`, it's likely a 503 throttling occurred on
Amazon's side. Try again to fetch the value instead of using the
cached value. It may 503 again, in which case the next access of
this page will trigger another re-cache. If the Amazon API call
succeeds but the book has no price data, then {"price": None} will
    be cached so as not to trigger a re-cache (only the value `None`
    will cause a re-cache)
"""
# fetch/compose a cache controller obj for
# "upstream.code._get_amazon_metadata"
memoized_get_amazon_metadata = cache.memcache_memoize(
_get_amazon_metadata,
"upstream.code._get_amazon_metadata",
timeout=dateutil.WEEK_SECS,
)
# fetch cached value from this controller
result = memoized_get_amazon_metadata(*args, **kwargs)
# if no result, then recache / update this controller's cached value
return result or memoized_get_amazon_metadata.update(*args, **kwargs)[0]
@public
def get_betterworldbooks_metadata(isbn: str) -> dict | None:
"""
    :param str isbn: Unnormalized ISBN10 or ISBN13
    :return: Metadata for a single BWB book currently listed in their catalog, or
an error dict.
"""
isbn = normalize_isbn(isbn) or isbn
if isbn is None:
return None
try:
return _get_betterworldbooks_metadata(isbn)
except Exception:
logger.exception(f"_get_betterworldbooks_metadata({isbn})")
return betterworldbooks_fmt(isbn)
def _get_betterworldbooks_metadata(isbn: str) -> dict | None:
"""Returns price and other metadata (currently minimal)
for a book currently available on betterworldbooks.com
:param str isbn: Normalised ISBN10 or ISBN13
:return: Metadata for a single BWB book currently listed on their catalog,
or an error dict.
"""
url = BETTERWORLDBOOKS_API_URL + isbn
response = requests.get(url)
if response.status_code != requests.codes.ok:
return {'error': response.text, 'code': response.status_code}
text = response.text
new_qty = re.findall("<TotalNew>([0-9]+)</TotalNew>", text)
new_price = re.findall(r"<LowestNewPrice>\$([0-9.]+)</LowestNewPrice>", text)
used_price = re.findall(r"<LowestUsedPrice>\$([0-9.]+)</LowestUsedPrice>", text)
used_qty = re.findall("<TotalUsed>([0-9]+)</TotalUsed>", text)
market_price = re.findall(
r"<LowestMarketPrice>\$([0-9.]+)</LowestMarketPrice>", text
)
price = qlt = None
if used_qty and used_qty[0] and used_qty[0] != '0':
price = used_price[0] if used_price else ''
qlt = 'used'
if new_qty and new_qty[0] and new_qty[0] != '0':
_price = new_price[0] if new_price else None
if _price and (not price or float(_price) < float(price)):
price = _price
qlt = 'new'
market_price = ('$' + market_price[0]) if market_price else None
return betterworldbooks_fmt(isbn, qlt, price, market_price)
def betterworldbooks_fmt(
isbn: str,
qlt: str | None = None,
price: str | None = None,
market_price: list[str] | None = None,
) -> dict | None:
"""Defines a standard interface for returning bwb price info
:param str qlt: Quality of the book, e.g. "new", "used"
:param str price: Price of the book as a decimal str, e.g. "4.28"
"""
price_fmt = f"${price} ({qlt})" if price and qlt else None
return {
'url': BWB_AFFILIATE_LINK % isbn,
'isbn': isbn,
'market_price': market_price,
'price': price_fmt,
'price_amt': price,
'qlt': qlt,
}
cached_get_betterworldbooks_metadata = cache.memcache_memoize(
_get_betterworldbooks_metadata,
"upstream.code._get_betterworldbooks_metadata",
timeout=dateutil.HALF_DAY_SECS,
)
| 22,018 | Python | .py | 543 | 32.186004 | 112 | 0.620741 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
320 | fulltext.py | internetarchive_openlibrary/openlibrary/core/fulltext.py | import json
import logging
import requests
import web
from infogami import config
from openlibrary.core.lending import get_availability_of_ocaids
from openlibrary.plugins.openlibrary.home import format_book_data
from urllib.parse import urlencode
logger = logging.getLogger("openlibrary.inside")
def fulltext_search_api(params):
if not hasattr(config, 'plugin_inside'):
return {'error': 'Unable to prepare search engine'}
search_endpoint = config.plugin_inside['search_endpoint']
search_select = search_endpoint + '?' + urlencode(params, 'utf-8')
logger.debug('URL: ' + search_select)
try:
response = requests.get(search_select, timeout=30)
response.raise_for_status()
return response.json()
except requests.HTTPError:
return {'error': 'Unable to query search engine'}
except json.decoder.JSONDecodeError:
return {'error': 'Error converting search engine data to JSON'}
def fulltext_search(q, page=1, limit=100, js=False, facets=False):
offset = (page - 1) * limit
params = {
'q': q,
'from': offset,
'size': limit,
**({'nofacets': 'true'} if not facets else {}),
'olonly': 'true',
}
ia_results = fulltext_search_api(params)
if 'error' not in ia_results and ia_results['hits']:
hits = ia_results['hits'].get('hits', [])
ocaids = [hit['fields'].get('identifier', [''])[0] for hit in hits]
availability = get_availability_of_ocaids(ocaids)
if 'error' in availability:
return []
editions = web.ctx.site.get_many(
[
'/books/%s' % availability[ocaid].get('openlibrary_edition')
for ocaid in availability
if availability[ocaid].get('openlibrary_edition')
]
)
for ed in editions:
if ed.ocaid in ocaids:
idx = ocaids.index(ed.ocaid)
ia_results['hits']['hits'][idx]['edition'] = (
format_book_data(ed, fetch_availability=False) if js else ed
)
ia_results['hits']['hits'][idx]['availability'] = availability[ed.ocaid]
return ia_results
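# Illustrative call (the query string is arbitrary): returns the search-inside response,
# with each matching hit annotated with 'edition' and 'availability' where known:
#
#     results = fulltext_search('moby dick', page=1, limit=20)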
| 2,209 | Python | .py | 54 | 32.796296 | 88 | 0.624127 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
321 | booknotes.py | internetarchive_openlibrary/openlibrary/core/booknotes.py | from . import db
from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO
class Booknotes(db.CommonExtras):
TABLENAME = "booknotes"
PRIMARY_KEY = ["username", "work_id", "edition_id"]
NULL_EDITION_VALUE = -1
ALLOW_DELETE_ON_CONFLICT = False
@classmethod
def summary(cls) -> dict:
return {
'total_notes_created': {
'total': cls.total_booknotes(),
'month': cls.total_booknotes(since=DATE_ONE_MONTH_AGO),
'week': cls.total_booknotes(since=DATE_ONE_WEEK_AGO),
},
'total_note_takers': {
'total': cls.total_unique_users(),
'month': cls.total_unique_users(since=DATE_ONE_MONTH_AGO),
'week': cls.total_unique_users(since=DATE_ONE_WEEK_AGO),
},
}
@classmethod
def total_booknotes(cls, since=None) -> int:
oldb = db.get_db()
query = f"SELECT count(*) from {cls.TABLENAME}"
if since:
query += " WHERE created >= $since"
results = oldb.query(query, vars={'since': since})
return results[0]['count'] if results else 0
@classmethod
def total_unique_users(cls, since=None) -> int:
"""Returns the total number of unique patrons who have made
booknotes. `since` may be provided to only return the number of users after
a certain datetime.date.
XXX: This function is identical in all but docstring and db
tablename from Bookshelves. This makes @mek think both classes
could inherit a common BookDBModel class. Will try to keep
this in mind and design accordingly
"""
oldb = db.get_db()
query = "select count(DISTINCT username) from booknotes"
if since:
query += " WHERE created >= $since"
results = oldb.query(query, vars={'since': since})
return results[0]['count'] if results else 0
@classmethod
def most_notable_books(cls, limit=10, since=False):
"""Across all patrons"""
oldb = db.get_db()
query = "select work_id, count(*) as cnt from booknotes"
if since:
query += " AND created >= $since"
query += ' group by work_id order by cnt desc limit $limit'
return list(oldb.query(query, vars={'limit': limit, 'since': since}))
@classmethod
def get_booknotes_for_work(cls, work_id):
oldb = db.get_db()
query = "SELECT * from booknotes where work_id=$work_id"
return list(oldb.query(query, vars={"work_id": work_id}))
@classmethod
def count_total_booksnotes_by_user(cls, username):
"""Counts the (int) total number of books logged by this `username`"""
oldb = db.get_db()
data = {'username': username}
query = "SELECT count(*) from booknotes WHERE username=$username"
return oldb.query(query, vars=data)[0]['count']
@classmethod
def count_works_with_notes_by_user(cls, username):
"""
Counts the total number of works logged by this 'username'
"""
oldb = db.get_db()
data = {'username': username}
query = """
SELECT
COUNT(DISTINCT(work_id))
FROM booknotes
WHERE username=$username
"""
return oldb.query(query, vars=data)[0]['count']
@classmethod
def get_patron_booknote(cls, username, work_id, edition_id=NULL_EDITION_VALUE):
note = cls.get_patron_booknotes(
username, work_id=work_id, edition_id=edition_id
)
return note and note[0]
@classmethod
def get_patron_booknotes(
cls,
username,
work_id=None,
edition_id=NULL_EDITION_VALUE,
search=None,
limit=100,
page=1,
):
"""By default, get all a patron's booknotes. if work_id, get book
note for that work_id and edition_id.
"""
oldb = db.get_db()
page = int(page) if page else 1
data = {
'username': username,
'work_id': work_id,
'edition_id': edition_id,
'limit': limit,
'offset': limit * (page - 1),
'search': search,
}
query = "SELECT * from booknotes WHERE username=$username "
if work_id:
query += "AND work_id=$work_id AND edition_id=$edition_id "
if search:
query += "AND notes LIKE '%$search%' "
query += "LIMIT $limit OFFSET $offset"
return list(oldb.query(query, vars=data))
@classmethod
def get_notes_grouped_by_work(cls, username, limit=25, page=1):
"""
Returns a list of book notes records, which are grouped by work_id.
The 'notes' field contains a JSON string consisting of 'edition_id'/
book note key-value pairs.
return: List of records grouped by works.
"""
oldb = db.get_db()
data = {'username': username, 'limit': limit, 'offset': limit * (page - 1)}
query = """
SELECT
work_id,
json_agg(row_to_json(
(SELECT r FROM (SELECT edition_id, notes) r)
)
) AS notes
FROM booknotes
WHERE username=$username
GROUP BY work_id
LIMIT $limit OFFSET $offset
"""
return list(oldb.query(query, vars=data))
@classmethod
def add(cls, username, work_id, notes, edition_id=NULL_EDITION_VALUE):
"""Insert or update booknote. Create a new booknote if one doesn't
exist, or gracefully update the record otherwise.
        return: the updated booknote record from the db.
"""
oldb = db.get_db()
data = {
"work_id": work_id,
"username": username,
"notes": notes,
"edition_id": edition_id,
}
records = cls.get_patron_booknotes(
username, work_id=work_id, edition_id=edition_id
)
if not records:
return oldb.insert(
'booknotes',
username=username,
work_id=work_id,
notes=notes,
edition_id=edition_id,
)
return oldb.update(
'booknotes',
where="work_id=$work_id AND username=$username AND edition_id=$edition_id",
notes=notes,
edition_id=edition_id,
vars=data,
)
@classmethod
def remove(cls, username, work_id, edition_id=NULL_EDITION_VALUE):
"""Remove a patron's specific booknote by work_id.
Technical note: work_id is not an optional argument and
intentionally does not default to None (to reduce
accidents/risk), however if one passes None as a value to
work_id, this method will remove all booknotes for a patron
        (useful for a patron who may decide to close their account).
Q: Is there a way to add a dryrun=False param to make this safer?
return: a list of the IDs affected
"""
oldb = db.get_db()
where = {
'username': username,
'work_id': int(work_id),
'edition_id': edition_id,
}
try:
return oldb.delete(
'booknotes',
where=(
'work_id=$work_id AND username=$username AND edition_id=$edition_id'
),
vars=where,
)
except: # we want to catch no entry exists
return None
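    # Usage sketch (identifiers are illustrative): create or update a note, then read it
    # back for the same work and edition:
    #     Booknotes.add('patron', work_id=123, notes='Loved chapter 3', edition_id=456)
    #     Booknotes.get_patron_booknote('patron', 123, edition_id=456)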
| 7,617 | Python | .py | 197 | 28.456853 | 88 | 0.568262 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
322 | cache.py | internetarchive_openlibrary/openlibrary/core/cache.py | """Caching utilities.
"""
import hashlib
import random
import string
import time
import threading
import functools
from typing import Any, Literal, cast
from collections.abc import Callable
import memcache
import json
import web
from infogami import config
from infogami.utils import stats
from infogami.infobase.client import Nothing
from openlibrary.utils import olmemcache
from openlibrary.utils.dateutil import MINUTE_SECS
from openlibrary.core.helpers import NothingEncoder
__all__ = [
"cached_property",
"Cache",
"MemoryCache",
"MemcacheCache",
"RequestCache",
"memoize",
"memcache_memoize",
"get_memcache",
]
DEFAULT_CACHE_LIFETIME = 2 * MINUTE_SECS
class memcache_memoize:
"""Memoizes a function, caching its return values in memcached for each input.
After the timeout, a new thread is spawned to update the value and the old
value is used while the update is in progress.
This expects that both the args and return value are json encodable.
This uses the memcache servers specified in the configuration.
    :param f: function to be memoized
:param key_prefix: key prefix used in memcache to store memoized results. A random value will be used if not specified.
:param servers: list of memcached servers, each specified as "ip:port"
:param timeout: timeout in seconds after which the return value must be updated
:param prethread: Function to call on the new thread to set it up
"""
def __init__(
self,
f: Callable,
key_prefix: str | None = None,
timeout: int = MINUTE_SECS,
prethread: Callable | None = None,
hash_args: bool = False,
):
"""Creates a new memoized function for ``f``."""
self.f = f
self.key_prefix = key_prefix or self._generate_key_prefix()
self.timeout = timeout
self._memcache = None
self.stats = web.storage(calls=0, hits=0, updates=0, async_updates=0)
self.active_threads: dict = {}
self.prethread = prethread
self.hash_args = hash_args
def _get_memcache(self):
if self._memcache is None:
servers = config.get("memcache_servers")
if servers:
self._memcache = memcache.Client(servers)
else:
web.debug(
"Could not find memcache_servers in the configuration. Used dummy memcache."
)
try:
import mockcache # Only supports legacy Python
self._memcache = mockcache.Client()
except ImportError: # Python 3
from pymemcache.test.utils import MockMemcacheClient
self._memcache = MockMemcacheClient()
return self._memcache
memcache = property(_get_memcache)
def _generate_key_prefix(self):
try:
prefix = self.f.__name__ + "_"
except (AttributeError, TypeError):
prefix = ""
return prefix + self._random_string(10)
def _random_string(self, n):
chars = string.ascii_letters + string.digits
return "".join(random.choice(chars) for i in range(n))
def __call__(self, *args, **kw):
"""Memoized function call.
Returns the cached value when available. Computes and adds the result
to memcache when not available. Updates asynchronously after timeout.
"""
_cache = kw.pop("_cache", None)
if _cache == "delete":
self.memcache_delete(args, kw)
return None
self.stats.calls += 1
value_time = self.memcache_get(args, kw)
if value_time is None:
self.stats.updates += 1
value, t = self.update(*args, **kw)
else:
self.stats.hits += 1
value, t = value_time
if t + self.timeout < time.time():
self.stats.async_updates += 1
self.update_async(*args, **kw)
return value
def update_async(self, *args, **kw):
"""Starts the update process asynchronously."""
t = threading.Thread(target=self._update_async_worker, args=args, kwargs=kw)
self.active_threads[t.name] = t
t.start()
def _update_async_worker(self, *args, **kw):
key = self.compute_key(args, kw) + "/flag"
if not self.memcache.add(key, "true"):
            # somebody else is already computing this value.
return
try:
if self.prethread:
self.prethread()
self.update(*args, **kw)
finally:
# Remove current thread from active threads
self.active_threads.pop(threading.current_thread().name, None)
# remove the flag
self.memcache.delete(key)
def update(self, *args, **kw):
"""Computes the value and adds it to memcache.
Returns the computed value.
"""
value = self.f(*args, **kw)
t = time.time()
self.memcache_set(args, kw, value, t)
return value, t
def join_threads(self):
"""Waits for all active threads to finish.
Used only in testing.
"""
for name, thread in list(self.active_threads.items()):
thread.join()
def encode_args(self, args, kw=None):
"""Encodes arguments to construct the memcache key."""
kw = kw or {}
# strip [ and ] from key
a = self.json_encode(list(args))[1:-1]
if kw:
a = a + "-" + self.json_encode(kw)
if self.hash_args:
return f"{hashlib.md5(a.encode('utf-8')).hexdigest()}"
return a
def compute_key(self, args, kw):
"""Computes memcache key for storing result of function call with given arguments."""
key = self.key_prefix + "$" + self.encode_args(args, kw)
return key.replace(
" ", "_"
) # XXX: temporary fix to handle spaces in the arguments
def json_encode(self, value):
"""json.dumps without extra spaces.
memcache doesn't like spaces in the key.
"""
return json.dumps(
[] if isinstance(value, Nothing) else value,
separators=(",", ":"),
cls=NothingEncoder,
)
def memcache_set(self, args, kw, value, time):
"""Adds value and time to memcache. Key is computed from the arguments."""
key = self.compute_key(args, kw)
json_data = self.json_encode([value, time])
stats.begin("memcache.set", key=key)
self.memcache.set(key, json_data)
stats.end()
def memcache_delete(self, args, kw):
key = self.compute_key(args, kw)
stats.begin("memcache.delete", key=key)
self.memcache.delete(key)
stats.end()
def memcache_get(self, args, kw):
"""Reads the value from memcache. Key is computed from the arguments.
Returns (value, time) when the value is available, None otherwise.
"""
key = self.compute_key(args, kw)
stats.begin("memcache.get", key=key)
json_str = self.memcache.get(key)
stats.end(hit=bool(json_str))
return json_str and json.loads(json_str)
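# Usage sketch (the function name and timeout are illustrative): wrap an expensive
# function so each distinct argument list is cached in memcache and refreshed
# asynchronously after the timeout, mirroring how vendors.py memoizes its lookups:
#
#     def _fetch_stats(day):
#         ...  # slow computation
#
#     fetch_stats = memcache_memoize(_fetch_stats, "stats.fetch_stats", timeout=5 * 60)
#     fetch_stats("2024-01-01")                    # cached per argument list
#     fetch_stats("2024-01-01", _cache="delete")   # drop the cached entry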
####
def cached_property(getter):
"""Decorator like `property`, but the value is computed on first call and cached.
class Foo:
@cached_property
def memcache_client(self):
...
"""
name = getter.__name__
def g(self):
if name in self.__dict__:
return self.__dict__[name]
value = getter(self)
self.__dict__[name] = value
return value
return property(g)
class Cache:
"""Cache interface."""
def get(self, key):
"""Returns the value for given key. Returns None if that key is not present in the cache."""
raise NotImplementedError()
def set(self, key, value, expires=0):
"""Sets a value in the cache.
If expires is non-zero, the cache may delete that entry from the cache after expiry.
The implementation can choose to ignore the expires argument.
"""
raise NotImplementedError()
def add(self, key, value, expires=0):
"""Adds a new entry in the cache. Nothing is done if there is already an entry with the same key.
Returns True if a new entry is added to the cache.
"""
raise NotImplementedError()
def delete(self, key):
"""Deletes an entry from the cache. No error is raised if there is no entry in present in the cache with that key.
Returns True if the key is deleted.
"""
raise NotImplementedError()
class MemoryCache(Cache):
"""Cache implementation in memory."""
def __init__(self):
self.d = {}
def get(self, key):
return self.d.get(key)
def set(self, key, value, expires=0):
self.d[key] = value
def add(self, key, value, expires=0):
return self.d.setdefault(key, value) is value
def delete(self, key):
return self.d.pop(key, None) is not None
def clear(self):
self.d.clear()
class MemcacheCache(Cache):
"""Cache implementation using memcache.
The values are json-encoded before adding to memcache and json-decoded on get.
Expects that the memcache servers are specified in web.config.memcache_servers.
"""
@cached_property
def memcache(self):
if servers := config.get("memcache_servers", None):
return olmemcache.Client(servers)
else:
web.debug(
"Could not find memcache_servers in the configuration. Used dummy memcache."
)
try:
import mockcache
return mockcache.Client()
except ImportError:
from pymemcache.test.utils import MockMemcacheClient
return MockMemcacheClient()
def _encode_key(self, key: str) -> str:
return cast(str, web.safestr(key))
def get(self, key: str) -> Any:
key = self._encode_key(key)
stats.begin("memcache.get", key=key)
value = self.memcache.get(key)
stats.end(hit=value is not None)
return value and json.loads(value)
def get_multi(self, keys: list[str]) -> dict[str, Any]:
keys = [self._encode_key(k) for k in keys]
stats.begin("memcache.get_multi")
d = self.memcache.get_multi(keys)
stats.end(hit=bool(d))
return {k: json.loads(v) for k, v in d.items()}
def set_multi(self, mapping: dict[str, Any], expires=0):
mapping = {self._encode_key(k): json.dumps(v) for k, v in mapping.items()}
stats.begin("memcache.set_multi")
d = self.memcache.set_multi(mapping, expires)
stats.end()
return d
def set(self, key: str, value: Any, expires=0):
key = cast(str, web.safestr(key))
value = json.dumps(value)
stats.begin("memcache.set", key=key)
value = self.memcache.set(key, value, expires)
stats.end()
return value
def add(self, key, value, expires=0):
key = web.safestr(key)
value = json.dumps(value)
stats.begin("memcache.add", key=key)
value = self.memcache.add(key, value, expires)
stats.end()
return value
def delete(self, key):
key = web.safestr(key)
stats.begin("memcache.delete", key=key)
value = self.memcache.delete(key)
stats.end()
return value
class RequestCache(Cache):
"""Request-Local cache.
The values are cached only in the context of the current request.
"""
@property
def d(self):
return web.ctx.setdefault("request-local-cache", {})
def get(self, key):
return self.d.get(key)
def set(self, key, value, expires=0):
self.d[key] = value
def add(self, key, value, expires=0):
return self.d.setdefault(key, value) is value
def delete(self, key):
return self.d.pop(key, None) is not None
memory_cache = MemoryCache()
memcache_cache = MemcacheCache()
request_cache = RequestCache()
def get_memcache():
return memcache_cache
def _get_cache(engine):
d = {
"memory": memory_cache,
"memcache": memcache_cache,
"memcache+memory": memcache_cache,
"request": request_cache,
}
return d.get(engine)
class memoize:
"""Memoize decorator to cache results in various cache engines.
Usage::
@cache.memoize(engine="memcache")
def some_func(args):
pass
Arguments::
* engine:
Engine to store the results. Available options are:
* memory: stores the result in memory.
* memcache: stores the result in memcached.
* request: stores the result only in the context of the current request.
* key:
key to be used in the cache. If this is a string, arguments are append
to it before making the cache-key. If this is a function, it's
return-value is used as cache-key and this function is called with the
arguments. If not specified, the default value is computed using the
function name and module name.
* expires:
The amount of time in seconds the value should be cached. Pass expires=0 to cache indefinitely.
(Not yet implemented)
* background:
Indicates that the value must be recomputed in the background after
the timeout. Until the new value is ready, the function continue to
return the same old value.
(not yet implemented)
* cacheable:
Function to determine if the returned value is cacheable. Sometimes it
is desirable to not cache return values generated due to error
conditions. The cacheable function is called with (key, value) as
arguments.
Advanced Usage:
Sometimes, it is desirable to store results of related functions in the
same cache entry to reduce memory usage. It can be achieved by making the
``key`` function return a tuple of two values. (Not Implemented yet)
@cache.memoize(engine="memcache", key=lambda page: (page.key, "history"))
def get_history(page):
pass
@cache.memoize(engine="memory", key=lambda key: (key, "doc"))
def get_page(key):
pass
"""
def __init__(
self,
engine: Literal["memory", "memcache", "request"],
key: str | Callable[..., str | tuple],
expires: int = 0,
background: bool = False,
cacheable: Callable | None = None,
):
self.cache = _get_cache(engine)
self.keyfunc = (
key if callable(key) else functools.partial(build_memcache_key, key)
)
self.cacheable = cacheable
self.expires = expires
def __call__(self, f):
"""Returns the memoized version of f."""
@functools.wraps(f)
def func(*args, **kwargs):
"""The memoized function.
If this is the first call with these arguments, function :attr:`f` is called and the return value is cached.
Otherwise, value from the cache is returned.
"""
key = self.keyfunc(*args, **kwargs)
value = self.cache_get(key)
if value is None:
value = f(*args, **kwargs)
self.cache_set(key, value)
return value
return func
def cache_get(self, key: str | tuple):
"""Reads value of a key from the cache.
        When key is a string, this is equivalent to::
return cache[key]
        When key is a 2-tuple, this is equivalent to::
k0, k1 = key
return cache[k0][k1]
"""
if isinstance(key, tuple):
k0, k1 = key
d = self.cache.get(k0)
return d and d.get(k1)
else:
return self.cache.get(key)
def cache_set(self, key: str | tuple, value):
"""Sets a key to a given value in the cache.
        When key is a string, this is equivalent to::
cache[key] = value
        When key is a 2-tuple, this is equivalent to::
k0, k1 = key
cache[k0][k1] = value
"""
# When cacheable is provided, use it to determine whether or not the cache should be updated.
if self.cacheable and self.cacheable(key, value) is False:
return
if isinstance(key, tuple):
k1, k2 = key
d = self.cache.get(k1) or {}
d[k2] = value
return self.cache.set(k1, d, expires=self.expires)
else:
return self.cache.set(key, value, expires=self.expires)
def build_memcache_key(prefix: str, *args, **kw) -> str:
key = prefix
if args:
key += "-" + json.dumps(args, separators=(",", ":"), sort_keys=True)[1:-1]
if kw:
key += "-" + json.dumps(kw, separators=(",", ":"), sort_keys=True)
return key
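# For example (the values are illustrative), build_memcache_key("prefix", "0441013597",
# high_priority=True) yields:
#     prefix-"0441013597"-{"high_priority":true}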
def method_memoize(f):
"""
object-local memoize.
Works only for functions with simple arguments; i.e. JSON serializeable
"""
@functools.wraps(f)
def g(self, *args, **kwargs):
cache = self.__dict__.setdefault('_memoize_cache', {})
key = json.dumps(
{
'function': f.__name__,
'args': args,
'kwargs': kwargs,
},
sort_keys=True,
)
if key not in cache:
cache[key] = f(self, *args, **kwargs)
return cache[key]
return g
| 17,630 | Python | .py | 453 | 30.154525 | 123 | 0.60674 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
323 | olmarkdown.py | internetarchive_openlibrary/openlibrary/core/olmarkdown.py | """Open Library Flavored Markdown, inspired by [Github Flavored Markdown][GFM].
GFM: http://github.github.com/github-flavored-markdown/
Differences from traditional Markdown:
* new lines in paragraph are treated as line breaks
* URLs are autolinked
* generated HTML is sanitized
The custom changes made to markdown here are also repeated in the WMD editor,
the JavaScript markdown editor used in OL.
"""
import re
from infogami.utils.markdown import markdown
from openlibrary.core import helpers as h
# regexp to match urls and emails.
# Adopted from github-flavored-markdown (BSD-style open source license)
# http://github.com/github/github-flavored-markdown/blob/gh-pages/scripts/showdown.js#L158
AUTOLINK_RE = r'''(^|\s)(https?\:\/\/[^"\s<>]*[^.,;'">\:\s\<\>\)\]\!]|[a-z0-9_\-+=.]+@[a-z0-9\-]+(?:\.[a-z0-9-]+)+)'''
LINK_REFERENCE_RE = re.compile(r' *\[[^\[\] ]*\] *:')
class LineBreaksPreprocessor(markdown.Preprocessor):
def run(self, lines):
for i in range(len(lines) - 1):
            # append <br/> to all lines except blank lines and the line before a blank line.
if (
lines[i].strip()
and lines[i + 1].strip()
and not markdown.RE.regExp['tabbed'].match(lines[i])
and not LINK_REFERENCE_RE.match(lines[i])
):
lines[i] += "<br />"
return lines
LINE_BREAKS_PREPROCESSOR = LineBreaksPreprocessor()
class AutolinkPreprocessor(markdown.Preprocessor):
rx = re.compile(AUTOLINK_RE)
def run(self, lines):
for i in range(len(lines)):
if not markdown.RE.regExp['tabbed'].match(lines[i]):
lines[i] = self.rx.sub(r'\1<\2>', lines[i])
return lines
AUTOLINK_PREPROCESSOR = AutolinkPreprocessor()
class OLMarkdown(markdown.Markdown):
"""Open Library flavored Markdown, inspired by [Github Flavored Markdown][GFM].
GFM: http://github.github.com/github-flavored-markdown/
Differences from traditional Markdown:
* new lines in paragraph are treated as line breaks
* URLs are autolinked
* generated HTML is sanitized
"""
def __init__(self, *a, **kw):
markdown.Markdown.__init__(self, *a, **kw)
self._patch()
def _patch(self):
patterns = self.inlinePatterns
autolink = markdown.AutolinkPattern(
markdown.AUTOLINK_RE.replace('http', 'https?')
)
patterns[patterns.index(markdown.AUTOLINK_PATTERN)] = autolink
p = self.preprocessors
p[p.index(markdown.LINE_BREAKS_PREPROCESSOR)] = LINE_BREAKS_PREPROCESSOR
p.append(AUTOLINK_PREPROCESSOR)
def convert(self):
html = markdown.Markdown.convert(self)
return h.sanitize(html)
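# Usage sketch (the input text is arbitrary, and passing it via the `source` keyword is
# an assumption about the bundled markdown API used here):
#
#     md = OLMarkdown(source="See https://openlibrary.org\nfor details.")
#     html = md.convert()  # URL autolinked, line break rendered as <br />, output sanitized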
| 2,735 | Python | .py | 61 | 38.098361 | 118 | 0.661011 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
324 | __init__.py | internetarchive_openlibrary/openlibrary/core/__init__.py | """Core functionality of Open Library.
This is a set of reusable, easily testable modules.
"""
| 96 | Python | .py | 3 | 30.666667 | 51 | 0.771739 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
325 | waitinglist.py | internetarchive_openlibrary/openlibrary/core/waitinglist.py | """Implementation of waiting-list feature for OL loans.
Each waiting instance is represented as a document in the store as follows:
{
"_key": "waiting-loan-OL123M-anand",
"type": "waiting-loan",
"user": "/people/anand",
"book": "/books/OL123M",
"status": "waiting",
"since": "2013-09-16T06:09:16.577942",
"last-update": "2013-10-01T06:09:16.577942"
}
"""
import datetime
import logging
import web
from openlibrary.accounts.model import OpenLibraryAccount
from . import helpers as h
from .sendmail import sendmail_with_template
from . import db
from . import lending
logger = logging.getLogger("openlibrary.waitinglist")
_wl_api = lending.ia_lending_api
def _get_book(identifier):
if keys := web.ctx.site.things({"type": '/type/edition', "ocaid": identifier}):
return web.ctx.site.get(keys[0])
else:
key = "/books/ia:" + identifier
return web.ctx.site.get(key)
class WaitingLoan(dict):
def get_book(self):
return _get_book(self['identifier'])
def get_user_key(self):
if user_key := self.get("user_key"):
return user_key
userid = self.get("userid")
username = ""
if userid.startswith('@'):
account = OpenLibraryAccount.get(link=userid)
username = account.username
elif userid.startswith('ol:'):
username = userid[len("ol:") :]
return "/people/%s" % username
def get_user(self):
user_key = self.get_user_key()
return user_key and web.ctx.site.get(user_key)
def get_position(self):
return self['position']
def get_waitinglist_size(self):
return self['wl_size']
def get_waiting_in_days(self):
since = h.parse_datetime(self['since'])
delta = datetime.datetime.utcnow() - since
        # Adding 1 to round off the extra seconds in the delta
return delta.days + 1
def get_expiry_in_hours(self):
if "expiry" in self:
delta = h.parse_datetime(self['expiry']) - datetime.datetime.utcnow()
delta_seconds = delta.days * 24 * 3600 + delta.seconds
delta_hours = delta_seconds / 3600
return max(0, delta_hours)
return 0
def is_expired(self):
return (
self['status'] == 'available'
and self['expiry'] < datetime.datetime.utcnow().isoformat()
)
def dict(self):
"""Converts this object into JSON-able dict.
Converts all datetime objects into strings.
"""
def process_value(v):
if isinstance(v, datetime.datetime):
v = v.isoformat()
return v
return {k: process_value(v) for k, v in self.items()}
@classmethod
def query(cls, **kw):
# kw.setdefault('order', 'since')
# # as of web.py 0.33, the version used by OL,
# # db.where doesn't work with no conditions
# if len(kw) > 1: # if has more keys other than "order"
# result = db.where("waitingloan", **kw)
# else:
# result = db.select('waitingloan')
# return [cls(row) for row in result]
rows = _wl_api.query(**kw) or []
return [cls(row) for row in rows]
@classmethod
def new(cls, **kw):
user_key = kw['user_key']
itemname = kw.get('itemname', '')
if not itemname:
account = OpenLibraryAccount.get(key=user_key)
itemname = account.itemname
_wl_api.join_waitinglist(kw['identifier'], itemname)
return cls.find(user_key, kw['identifier'], itemname=itemname)
@classmethod
def find(cls, user_key, identifier, itemname=None):
"""Returns the waitingloan for given book_key and user_key.
Returns None if there is no such waiting loan.
"""
if not itemname:
account = OpenLibraryAccount.get(key=user_key)
itemname = account.itemname
result = cls.query(userid=itemname, identifier=identifier)
if result:
return result[0]
@classmethod
def prune_expired(cls, identifier=None):
"""Deletes the expired loans from database and returns WaitingLoan objects
for each deleted row.
If book_key is specified, it deletes only the expired waiting loans of that book.
"""
return
def delete(self):
"""Delete this waiting loan from database."""
# db.delete("waitingloan", where="id=$id", vars=self)
_wl_api.leave_waitinglist(self['identifier'], self['userid'])
pass
def update(self, **kw):
# db.update("waitingloan", where="id=$id", vars=self, **kw)
_wl_api.update_waitinglist(
identifier=self['identifier'], userid=self['userid'], **kw
)
dict.update(self, kw)
def get_waitinglist_for_book(book_key):
"""Returns the list of records for the users waiting for the given book.
This is an admin-only feature. It works only if the current user is an admin.
"""
book = web.ctx.site.get(book_key)
if book and book.ocaid:
return WaitingLoan.query(identifier=book.ocaid)
else:
return []
def get_waitinglist_size(book_key):
"""Returns size of the waiting list for given book."""
return len(get_waitinglist_for_book(book_key))
def get_waitinglist_for_user(user_key):
"""Returns the list of records for all the books that a user is waiting for."""
waitlist = []
account = OpenLibraryAccount.get(key=user_key)
if account.itemname:
waitlist.extend(WaitingLoan.query(userid=account.itemname))
waitlist.extend(WaitingLoan.query(userid=lending.userkey2userid(user_key)))
return waitlist
def is_user_waiting_for(user_key, book_key):
"""Returns True if the user is waiting for specified book."""
book = web.ctx.site.get(book_key)
if book and book.ocaid:
return WaitingLoan.find(user_key, book.ocaid) is not None
def join_waitinglist(user_key, book_key, itemname=None):
"""Adds a user to the waiting list of given book.
It is done by creating a new record in the store.
"""
book = web.ctx.site.get(book_key)
if book and book.ocaid:
WaitingLoan.new(user_key=user_key, identifier=book.ocaid, itemname=itemname)
def leave_waitinglist(user_key, book_key, itemname=None):
"""Removes the given user from the waiting list of the given book."""
book = web.ctx.site.get(book_key)
if book and book.ocaid:
w = WaitingLoan.find(user_key, book.ocaid, itemname=itemname)
if w:
w.delete()
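# Illustrative flow (the keys are placeholders): a patron joins, checks, and then leaves
# the waiting list for an edition that has an ocaid:
#     join_waitinglist('/people/example', '/books/OL123M')
#     is_user_waiting_for('/people/example', '/books/OL123M')  # True while waiting
#     leave_waitinglist('/people/example', '/books/OL123M')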
def on_waitinglist_update(identifier):
"""Triggered when a waiting list is updated."""
waitinglist = WaitingLoan.query(identifier=identifier)
if waitinglist:
book = _get_book(identifier)
checkedout = lending.is_loaned_out(identifier)
# If some people are waiting and the book is checked out,
# send email to the person who borrowed the book.
#
# If the book is not checked out, inform the first person
# in the waiting list
if not checkedout:
sendmail_book_available(book)
def update_ebook(ebook_key, **data):
ebook = web.ctx.site.store.get(ebook_key) or {}
# update ebook document.
ebook2 = dict(ebook, _key=ebook_key, type="ebook")
ebook2.update(data)
if ebook != ebook2: # save if modified
web.ctx.site.store[ebook_key] = dict(ebook2, _rev=None) # force update
def sendmail_book_available(book):
"""Informs the first person in the waiting list that the book is available.
Safe to call multiple times. This'll make sure the email is sent only once.
"""
wl = book.get_waitinglist()
if wl and wl[0]['status'] == 'available' and not wl[0].get('available_email_sent'):
record = wl[0]
user = record.get_user()
if not user:
return
email = user.get_email()
sendmail_with_template(
"email/waitinglist_book_available",
to=email,
user=user,
book=book,
waitinglist_record=record,
)
record.update(available_email_sent=True)
logger.info(
"%s is available, send email to the first person in WL. wl-size=%s",
book.key,
len(wl),
)
def _get_expiry_in_days(loan):
if loan.get("expiry"):
delta = h.parse_datetime(loan['expiry']) - datetime.datetime.utcnow()
# +1 to count the partial day
return delta.days + 1
def _get_loan_timestamp_in_days(loan):
t = datetime.datetime.fromtimestamp(loan['loaned_at'])
delta = datetime.datetime.utcnow() - t
return delta.days
def prune_expired_waitingloans():
"""Removes all the waiting loans that are expired.
    A waiting loan expires if the person fails to borrow a book within
    24 hours after their waiting loan becomes "available".
"""
return
def update_all_waitinglists():
rows = WaitingLoan.query(limit=10000)
identifiers = {row['identifier'] for row in rows}
for identifier in identifiers:
try:
_wl_api.request("loan.sync", identifier=identifier)
except Exception:
logger.error(
"failed to update waitinglist for %s", identifier, exc_info=True
)
def update_all_ebooks():
rows = WaitingLoan.query(limit=10000)
identifiers = {row['identifier'] for row in rows}
loan_keys = web.ctx.site.store.keys(type='/type/loan', limit=-1)
for k in loan_keys:
id = k[len("loan-") :]
# would have already been updated
if id in identifiers:
continue
logger.info("updating ebooks/" + id)
update_ebook('ebooks/' + id, borrowed='true', wl_size=0)
| 9,833 | Python | .py | 244 | 32.643443 | 89 | 0.634646 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
326 | follows.py | internetarchive_openlibrary/openlibrary/core/follows.py | import logging
from typing import cast
from openlibrary.core.bookshelves import Bookshelves
from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO
from . import db
logger = logging.getLogger(__name__)
class PubSub:
TABLENAME = "follows"
PRIMARY_KEY = ["subscriber", "publisher"]
@classmethod
def subscribe(cls, subscriber, publisher):
oldb = db.get_db()
return oldb.insert(cls.TABLENAME, subscriber=subscriber, publisher=publisher)
@classmethod
def unsubscribe(cls, subscriber, publisher):
oldb = db.get_db()
return oldb.delete(
cls.TABLENAME,
where='subscriber=$subscriber AND publisher=$publisher',
vars={'subscriber': subscriber, 'publisher': publisher},
)
@classmethod
def is_subscribed(cls, subscriber, publisher):
oldb = db.get_db()
subscription = oldb.select(
cls.TABLENAME,
where='subscriber=$subscriber AND publisher=$publisher',
vars={'subscriber': subscriber, 'publisher': publisher},
limit=1, # Limiting to 1 result to check if the subscription exists
)
return len(subscription)
@classmethod
def get_followers(cls, publisher, limit=None, offset=0):
"""Get publishers subscribers"""
oldb = db.get_db()
where = 'publisher=$publisher'
subscribers = oldb.select(
cls.TABLENAME,
where=where,
vars={'publisher': publisher},
limit=limit,
offset=offset,
)
return subscribers
@classmethod
def get_following(cls, subscriber, limit=None, offset=0, exclude_disabled=False):
"""Get subscriber's subscriptions"""
oldb = db.get_db()
where = 'subscriber=$subscriber'
if exclude_disabled:
where += " AND disabled=false"
subscriptions = oldb.select(
cls.TABLENAME,
where=where,
vars={'subscriber': subscriber},
limit=limit,
offset=offset,
)
return [dict(s) for s in subscriptions]
@classmethod
def toggle_privacy(cls, publisher, private=True):
oldb = db.get_db()
return oldb.update(
cls.TABLENAME,
disabled=private,
where="publisher=$publisher",
vars={"publisher": publisher},
)
@classmethod
def get_feed(cls, subscriber, limit=25, offset=0):
oldb = db.get_db()
# Get subscriber's subscriptions
subscriptions = cls.get_following(subscriber, exclude_disabled=True)
# Extract usernames from subscriptions
usernames = [sub['publisher'] for sub in subscriptions]
if not usernames:
return []
        # Formulate the SQL query to get the latest entries for subscribed users
query = (
"SELECT * FROM bookshelves_books WHERE username IN $usernames"
" ORDER BY created DESC LIMIT $limit OFFSET $offset"
)
# Fetch the recent books for subscribed users
recent_books = list(
oldb.query(
query,
vars={'usernames': usernames, 'limit': limit, 'offset': offset},
)
)
# Add keys
for i, rb in enumerate(recent_books):
recent_books[i].key = f'/works/OL{rb.work_id}W'
return Bookshelves.fetch(recent_books)
@classmethod
def count_following(cls, subscriber):
oldb = db.get_db()
count = oldb.select(
cls.TABLENAME,
what='count(*) as count',
where='subscriber=$subscriber',
vars={'subscriber': subscriber},
)
return cast(tuple[int], count)[0].get('count', 0)
@classmethod
def count_followers(cls, publisher):
oldb = db.get_db()
count = oldb.select(
cls.TABLENAME,
what='count(*) as count',
where='publisher=$publisher',
vars={'publisher': publisher},
)
return cast(tuple[int], count)[0].get('count', 0)
@classmethod
def total_followers(cls, since=None) -> int:
oldb = db.get_db()
query = f"SELECT count(DISTINCT subscriber) from {cls.TABLENAME}"
if since:
query += " WHERE created >= $since"
results = oldb.query(query, vars={'since': since})
return results[0]['count'] if results else 0
@classmethod
def summary(cls):
return {
"total_following_count": {
"total": cls.total_followers(),
"month": cls.total_followers(since=DATE_ONE_MONTH_AGO),
"week": cls.total_followers(since=DATE_ONE_WEEK_AGO),
}
}
@classmethod
def count_total_subscribers(cls):
oldb = db.get_db()
count = oldb.query("SELECT COUNT(DISTINCT subscriber) AS count FROM follows")
return cast(tuple[int], count)[0].get('count', 0)
@classmethod
def count_total_publishers(cls):
oldb = db.get_db()
count = oldb.query("SELECT COUNT(DISTINCT publisher) AS count FROM follows")
return cast(tuple[int], count)[0].get('count', 0)
@classmethod
def most_followed(cls, limit=100):
oldb = db.get_db()
top_publishers = oldb.query(
"SELECT publisher, COUNT(*) AS count FROM follows WHERE disabled=false GROUP BY publisher ORDER BY count DESC LIMIT $limit",
vars={'limit': limit},
)
return top_publishers
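# Usage sketch (illustrative): the basic follow/unfollow flow built from the
# PubSub methods above. Assumes a configured database behind db.get_db() and
# that the hypothetical usernames below exist.
def _example_follow_flow():
    PubSub.subscribe(subscriber='alice', publisher='bob')
    assert PubSub.is_subscribed('alice', 'bob')
    # Recent reading-log entries from everyone alice follows
    feed = PubSub.get_feed('alice', limit=10)
    PubSub.unsubscribe('alice', 'bob')
    return feed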
# file: openlibrary/core/admin.py
"""Admin functionality."""
import calendar
import datetime
import requests
import web
from infogami import config
from openlibrary.core import cache
class Stats:
def __init__(self, docs, key, total_key):
self.key = key
self.docs = docs
try:
self.latest = docs[-1].get(key, 0)
except IndexError:
self.latest = 0
try:
self.previous = docs[-2].get(key, 0)
except IndexError:
self.previous = 0
try:
# Last available total count
self.total = next(x for x in reversed(docs) if total_key in x)[total_key]
except (KeyError, StopIteration):
self.total = ""
def get_counts(self, ndays=28, times=False):
"""Returns the stats for last n days as an array useful for
plotting. i.e. an array of [x, y] tuples where y is the value
and `x` the x coordinate.
If times is True, the x coordinate in the tuple will be
timestamps for the day.
"""
def _convert_to_milli_timestamp(d):
"""Uses the `_id` of the document `d` to create a UNIX
timestamp and converts it to milliseconds"""
t = datetime.datetime.strptime(d, "counts-%Y-%m-%d")
return calendar.timegm(t.timetuple()) * 1000
if times:
return [
[_convert_to_milli_timestamp(x['_key']), x.get(self.key, 0)]
for x in self.docs[-ndays:]
]
else:
return zip(
range(0, ndays * 5, 5), (x.get(self.key, 0) for x in self.docs[-ndays:])
) # The *5 and 5 are for the bar widths
def get_summary(self, ndays=28):
"""Returns the summary of counts for past n days.
Summary can be either sum or average depending on the type of stats.
This is used to find counts for last 7 days and last 28 days.
"""
return sum(x[1] for x in self.get_counts(ndays))
@cache.memoize(
engine="memcache", key="admin._get_loan_counts_from_graphite", expires=5 * 60
)
def _get_loan_counts_from_graphite(ndays: int) -> list[list[int]] | None:
try:
r = requests.get(
'http://graphite.us.archive.org/render',
params={
'target': 'hitcount(stats.ol.loans.bookreader, "1d")',
'from': '-%ddays' % ndays,
'tz': 'UTC',
'format': 'json',
},
)
return r.json()[0]['datapoints']
except (requests.exceptions.RequestException, ValueError, AttributeError):
return None
class LoanStats(Stats):
"""
Temporary (2020-03-19) override of Stats for loans, due to bug
which caused 1mo of loans stats to be missing from regular
stats db. This implementation uses graphite, but only on prod,
so that we don't forget.
"""
def get_counts(self, ndays=28, times=False):
# Let dev.openlibrary.org show the true state of things
if 'dev' in config.features:
return Stats.get_counts(self, ndays, times)
if graphite_data := _get_loan_counts_from_graphite(ndays):
# convert timestamp seconds to ms (as required by API)
return [[timestamp * 1000, count] for [count, timestamp] in graphite_data]
else:
return Stats.get_counts(self, ndays, times)
@cache.memoize(
engine="memcache", key="admin._get_visitor_counts_from_graphite", expires=5 * 60
)
def _get_visitor_counts_from_graphite(ndays: int = 28) -> list[list[int]]:
"""
Read the unique visitors (IP addresses) per day for the last ndays from graphite.
:param ndays: number of days to read
:return: list containing [count, timestamp] for ndays
"""
try:
response = requests.get(
"http://graphite.us.archive.org/render/",
params={
"target": "summarize(stats.uniqueips.openlibrary, '1d')",
"from": f"-{ndays}days",
"tz": "UTC",
"format": "json",
},
)
response.raise_for_status()
visitors = response.json()[0]['datapoints']
except requests.exceptions.RequestException:
visitors = []
return visitors
class VisitorStats(Stats):
def get_counts(self, ndays: int = 28, times: bool = False) -> list[tuple[int, int]]:
visitors = _get_visitor_counts_from_graphite(ndays)
# Flip the order, convert timestamp to msec, and convert count==None to zero
return [
(int(timestamp * 1000), int(count or 0)) for count, timestamp in visitors
]
@cache.memoize(engine="memcache", key="admin._get_count_docs", expires=5 * 60)
def _get_count_docs(ndays):
"""Returns the count docs from admin stats database.
This function is memoized to avoid accessing the db for every request.
"""
today = datetime.datetime.utcnow().date()
dates = [today - datetime.timedelta(days=i) for i in range(ndays)]
# we want the dates in reverse order
dates = dates[::-1]
docs = [web.ctx.site.store.get(d.strftime("counts-%Y-%m-%d")) for d in dates]
return [d for d in docs if d]
def get_stats(ndays=30):
"""Returns the stats for the past `ndays`"""
docs = _get_count_docs(ndays)
return {
'human_edits': Stats(docs, "human_edits", "human_edits"),
'bot_edits': Stats(docs, "bot_edits", "bot_edits"),
'lists': Stats(docs, "lists", "total_lists"),
'visitors': VisitorStats(docs, "visitors", "visitors"),
'loans': LoanStats(docs, "loans", "loans"),
'members': Stats(docs, "members", "total_members"),
'works': Stats(docs, "works", "total_works"),
'editions': Stats(docs, "editions", "total_editions"),
'ebooks': Stats(docs, "ebooks", "total_ebooks"),
'covers': Stats(docs, "covers", "total_covers"),
'authors': Stats(docs, "authors", "total_authors"),
'subjects': Stats(docs, "subjects", "total_subjects"),
}
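# Usage sketch (illustrative): reading one metric out of get_stats() for the
# admin graphs. Assumes the "counts-YYYY-MM-DD" documents exist in the store.
def _example_human_edit_counts():
    stats = get_stats(ndays=30)
    human_edits = stats['human_edits']
    last_week_total = human_edits.get_summary(ndays=7)  # sum over the last 7 days
    points = list(human_edits.get_counts(ndays=7))      # [(x, y), ...] for plotting
    return last_week_total, points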
# file: openlibrary/core/middleware.py
"""WSGI middleware used in Open Library.
"""
import web
from io import BytesIO
import gzip
class GZipMiddleware:
"""WSGI middleware to gzip the response."""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
accept_encoding = environ.get("HTTP_ACCEPT_ENCODING", "")
if 'gzip' not in accept_encoding:
return self.app(environ, start_response)
response = web.storage(compress=False)
def get_response_header(name, default=None):
for hdr, value in response.headers:
if hdr.lower() == name.lower():
return value
return default
def compress(text, level=9):
f = BytesIO()
gz = gzip.GzipFile(None, 'wb', level, fileobj=f)
gz.write(text)
gz.close()
return f.getvalue()
def new_start_response(status, headers):
response.status = status
response.headers = headers
if status.startswith("200") and get_response_header(
"Content-Type", ""
).startswith("text/"):
headers.append(("Content-Encoding", "gzip"))
headers.append(("Vary", "Accept-Encoding"))
response.compress = True
return start_response(status, headers)
data = self.app(environ, new_start_response)
if response.compress:
return [compress(b"".join(data), 9)]
else:
return data
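# Usage sketch (illustrative): wrapping a minimal WSGI app with the middleware
# above. Any WSGI-compliant application object can be wrapped the same way.
def _example_plain_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello, compressed world"]
_example_wrapped_app = GZipMiddleware(_example_plain_app)
# e.g. web.httpserver.runsimple(_example_wrapped_app, ("0.0.0.0", 8080))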
# file: openlibrary/core/ia.py
"""Library for interacting with archive.org.
"""
import datetime
import logging
from urllib.parse import urlencode
import requests
import web
from infogami import config
from infogami.utils import stats
from openlibrary.core import cache
logger = logging.getLogger('openlibrary.ia')
# FIXME: We can't reference `config` in module scope like this; it will always be undefined!
# See lending.py for an example of how to do it correctly.
IA_BASE_URL = config.get('ia_base_url', 'https://archive.org')
VALID_READY_REPUB_STATES = ['4', '19', '20', '22']
def get_api_response(url: str, params: dict | None = None) -> dict:
"""
Makes an API GET request to archive.org, collects stats
Returns a JSON dict.
:param dict params: url parameters
"""
api_response = {}
stats.begin('archive.org', url=url)
try:
r = requests.get(url, params=params)
if r.status_code == requests.codes.ok:
api_response = r.json()
else:
logger.info(f'{r.status_code} response received from {url}')
    except Exception:
        logger.exception('Exception occurred accessing %s.', url)
stats.end()
return api_response
def get_metadata_direct(
itemid: str, only_metadata: bool = True, cache: bool = True
) -> dict:
"""
Fetches metadata by querying the archive.org metadata API, without local caching.
:param bool cache: if false, requests uncached metadata from archive.org
:param bool only_metadata: whether to get the metadata without any processing
"""
url = f'{IA_BASE_URL}/metadata/{web.safestr(itemid.strip())}'
params = {}
if cache is False:
params['dontcache'] = 1
full_json = get_api_response(url, params)
return extract_item_metadata(full_json) if only_metadata else full_json
get_metadata = cache.memcache_memoize(
get_metadata_direct, key_prefix='ia.get_metadata', timeout=5 * cache.MINUTE_SECS
)
def extract_item_metadata(item_json):
metadata = process_metadata_dict(item_json.get('metadata', {}))
if metadata:
# if any of the files is access restricted, consider it as
# an access-restricted item.
files = item_json.get('files', [])
metadata['access-restricted'] = any(f.get('private') == 'true' for f in files)
# remember the filenames to construct download links
metadata['_filenames'] = [f['name'] for f in files]
return metadata
def process_metadata_dict(metadata):
"""Process metadata dict to make sure multi-valued fields like
collection and external-identifier are always lists.
The metadata API returns a list only if a field has more than one value
in _meta.xml. This puts burden on the application to handle both list and
non-list cases. This function makes sure the known multi-valued fields are
always lists.
"""
multivalued = {'collection', 'external-identifier', 'isbn', 'subject', 'oclc-id'}
def process_item(k, v):
if k in multivalued and not isinstance(v, list):
v = [v]
elif k not in multivalued and isinstance(v, list):
v = v[0]
return (k, v)
return dict(process_item(k, v) for k, v in metadata.items() if v)
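# Illustrative example of the normalization above: single values of the known
# multi-valued fields become lists, and stray lists elsewhere are collapsed to
# their first element. The metadata shown is hypothetical.
def _example_process_metadata():
    raw = {
        'title': ['Example Title'],  # unexpected list -> first value
        'collection': 'inlibrary',   # known multi-valued field -> list
        'isbn': ['9780140328721'],
        'imagecount': '150',
    }
    return process_metadata_dict(raw)
    # -> {'title': 'Example Title', 'collection': ['inlibrary'],
    #     'isbn': ['9780140328721'], 'imagecount': '150'}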
def locate_item(itemid):
"""Returns (hostname, path) for the item."""
d = get_metadata_direct(itemid, only_metadata=False)
return d.get('server'), d.get('dir')
def edition_from_item_metadata(itemid, metadata):
"""Converts the item metadata into a form suitable to be used as edition
in Open Library.
    This is used to show fake edition pages like '/books/ia:foo00bar' when
that item is not yet imported into Open Library.
"""
if ItemEdition.is_valid_item(itemid, metadata):
e = ItemEdition(itemid)
e.add_metadata(metadata)
return e
def get_cover_url(item_id):
"""Gets the URL of the archive.org item's title (or cover) page."""
base_url = f'{IA_BASE_URL}/download/{item_id}/page/'
title_response = requests.head(base_url + 'title.jpg', allow_redirects=True)
if title_response.status_code == 404:
return base_url + 'cover.jpg'
return base_url + 'title.jpg'
def get_item_manifest(item_id, item_server, item_path):
url = 'https://%s/BookReader/BookReaderJSON.php' % item_server
url += f'?itemPath={item_path}&itemId={item_id}&server={item_server}'
return get_api_response(url)
def get_item_status(itemid, metadata, **server):
item_server = server.pop('item_server', None)
item_path = server.pop('item_path', None)
return ItemEdition.get_item_status(
itemid, metadata, item_server=item_server, item_path=item_path
)
class ItemEdition(dict):
"""Class to convert item metadata into edition dict."""
def __init__(self, itemid):
dict.__init__(self)
self.itemid = itemid
timestamp = {"type": "/type/datetime", "value": "2010-01-01T00:00:00"}
self.update(
{
"key": "/books/ia:" + itemid,
"type": {"key": "/type/edition"},
"title": itemid,
"ocaid": itemid,
"revision": 1,
"created": timestamp,
"last_modified": timestamp,
}
)
@classmethod
def get_item_status(cls, itemid, metadata, item_server=None, item_path=None):
"""Returns the status of the item related to importing it in OL.
Possible return values are:
* ok
* not-texts-item
* bad-repub-state
* no-imagecount
* prefix-blacklisted
* noindex-true
* no-ol-import
"""
# Not a book, or scan not complete or no images uploaded
if metadata.get("mediatype") != "texts":
return "not-texts-item"
if metadata.get("repub_state", "4") not in VALID_READY_REPUB_STATES:
return "bad-repub-state"
if "imagecount" not in metadata:
if not (item_server and item_path):
return "no-imagecount"
else:
manifest = get_item_manifest(itemid, item_server, item_path)
if not manifest.get('numPages'):
return "no-imagecount"
        # items starting with these prefixes are not books
ignore_prefixes = config.get("ia_ignore_prefixes", [])
for prefix in ignore_prefixes:
# ignore all JSTOR items
if itemid.startswith(prefix):
return "prefix-blacklisted"
# Anand - Oct 2013
# If an item is with noindex=true and it is not marked as
# lending or printdisabled, ignore it. It would have been
# marked as noindex=true for some reason.
collections = metadata.get("collection", [])
if not isinstance(collections, list):
collections = [collections]
if (
metadata.get("noindex") == "true"
and "printdisabled" not in collections
and "inlibrary" not in collections
and "lendinglibrary" not in collections
):
return "noindex-true"
# Gio - April 2016
# items with metadata no_ol_import=true will be not imported
if metadata.get("no_ol_import", '').lower() == 'true':
return "no-ol-import"
return "ok"
@classmethod
def is_valid_item(cls, itemid, metadata):
"""Returns True if the item with metadata can be usable as edition
in Open Library.
Items that are not book scans, darked or with noindex=true etc. are
not eligible to be shown in Open Library.
"""
return cls.get_item_status(itemid, metadata) == 'ok'
def add_metadata(self, metadata):
self.metadata = metadata
self.add('title')
self.add('description', 'description')
self.add_list('publisher', 'publishers')
self.add_list('creator', 'author_names')
self.add('date', 'publish_date')
self.add_isbns()
def add(self, key, key2=None):
metadata = self.metadata
key2 = key2 or key
        if value := metadata.get(key):
if isinstance(value, list):
value = [v for v in value if v != {}]
if value:
if isinstance(value[0], str):
value = "\n\n".join(value)
else:
value = value[0]
else:
# empty list. Ignore.
return
self[key2] = value
def add_list(self, key, key2):
metadata = self.metadata
key2 = key2 or key
        if value := metadata.get(key):
if not isinstance(value, list):
value = [value]
self[key2] = value
def add_isbns(self):
isbn_10 = []
isbn_13 = []
if isbns := self.metadata.get('isbn'):
for isbn in isbns:
isbn = isbn.replace("-", "").strip()
if len(isbn) == 13:
isbn_13.append(isbn)
elif len(isbn) == 10:
isbn_10.append(isbn)
if isbn_10:
self["isbn_10"] = isbn_10
if isbn_13:
self["isbn_13"] = isbn_13
def get_candidates_url(
day: datetime.date,
marcs: bool = True,
) -> str:
DAY = datetime.timedelta(days=1)
hard_requirements = ' AND '.join(
[
"mediatype:texts",
f'indexdate:{day}*',
'!collection:litigationworks',
'!is_dark:true',
# Fetch back to items added before the day of interest, since items
# sometimes take a few days to process into the collection.
f'addeddate:[{day - 60 * DAY} TO {day + 1 * DAY}]',
]
)
repub_states = ' OR '.join(
f'repub_state:{state}' for state in VALID_READY_REPUB_STATES
)
soft_requirements = ' AND '.join(
[
f'({repub_states})',
'scanningcenter:*',
'scanner:*',
'scandate:*',
'format:pdf',
# TODO: format:marc seems to be getting more records than expected
*(['format:marc'] if marcs else []),
'!collection:opensource',
'!collection:additional_collections',
'!noindex:true',
]
)
exempt_collections = ' OR '.join( # noqa: FLY002
["collection:thoth-archiving-network"]
)
params = {
'q': f'({hard_requirements}) AND (({soft_requirements}) OR ({exempt_collections}))',
'fl': 'identifier,format',
'service': 'metadata__unlimited',
'rows': '100000', # This is the max, I believe
'output': 'json',
}
return f'{IA_BASE_URL}/advancedsearch.php?' + urlencode(params)
def get_candidate_ocaids(
day: datetime.date,
marcs: bool = True,
):
"""
    Yields the identifiers that were finalized on the provided
    day, which may need to be imported into Open Library.
:param day: only find items modified on this given day
:param marcs: require MARCs present?
"""
url = get_candidates_url(day, marcs=marcs)
results = requests.get(url).json()['response']['docs']
assert len(results) < 100_000, f'100,000 results returned for {day}'
for row in results:
if marcs:
# Exclude MARC Source since this doesn't contain the actual MARC data
formats = {fmt.lower() for fmt in row.get('format', [])}
if not formats & {'marc', 'marc binary'}:
continue
yield row['identifier']
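# Usage sketch (illustrative): how the candidate helpers above are typically
# driven from a daily import job. Running this hits archive.org.
def _example_yesterdays_candidates():
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    url = get_candidates_url(yesterday, marcs=True)  # inspectable search URL
    ocaids = list(get_candidate_ocaids(yesterday, marcs=True))
    return url, ocaids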
# file: openlibrary/core/statsdb.py
"""Interface to Open Library stats database.
The stats table in the openlibrary database is of the following schema:
CREATE TABLE stats (
id serial primary key,
key text unique,
type text,
timestamp timestamp without time zone,
json text
);
see schema.py for more details.
"""
import logging
import json
import datetime
from .db import get_db
logger = logging.getLogger("openlibrary.statsdb")
def add_entry(key, data, timestamp=None):
"""Adds a new entry to the stats table.
    If an entry is already present in the table, a warning is logged
and no changes will be made to the database.
"""
jsontext = json.dumps(data)
timestamp = timestamp or datetime.datetime.utcnow()
t = timestamp.isoformat()
db = get_db()
result = db.query("SELECT * FROM stats WHERE key=$key", vars=locals())
if result:
logger.warning(
"Failed to add stats entry with key %r. An entry is already present."
)
else:
db.insert("stats", type='loan', key=key, created=t, updated=t, json=jsontext)
def get_entry(key):
result = get_db().query("SELECT * FROM stats WHERE key=$key", vars=locals())
if result:
return result[0]
def update_entry(key, data, timestamp=None):
"""Updates an already existing entry in the stats table.
If there is no entry with the given key, a new one will be added
after logging a warn message.
"""
jsontext = json.dumps(data)
timestamp = timestamp or datetime.datetime.utcnow()
t = timestamp.isoformat()
db = get_db()
result = db.query("SELECT * FROM stats WHERE key=$key", vars=locals())
if result:
db.update("stats", json=jsontext, updated=t, where="key=$key", vars=locals())
else:
logger.warning(
"stats entry with key %r doesn't exist to update. adding new entry...", key
)
db.insert("stats", type='loan', key=key, created=t, updated=t, json=jsontext)
# file: openlibrary/core/wikidata.py
"""
The purpose of this file is to:
1. Interact with the Wikidata API
2. Store the results
3. Make the results easy to access from other files
"""
import requests
import logging
from dataclasses import dataclass
from openlibrary.core.helpers import days_since
from datetime import datetime
import json
from openlibrary.core import db
logger = logging.getLogger("core.wikidata")
WIKIDATA_API_URL = 'https://www.wikidata.org/w/rest.php/wikibase/v0/entities/items/'
WIKIDATA_CACHE_TTL_DAYS = 30
@dataclass
class WikidataEntity:
"""
    This is the model of the API response from Wikidata, plus the _updated field
https://www.wikidata.org/wiki/Wikidata:REST_API
"""
id: str
type: str
labels: dict[str, str]
descriptions: dict[str, str]
aliases: dict[str, list[str]]
statements: dict[str, dict]
sitelinks: dict[str, dict]
_updated: datetime # This is when we fetched the data, not when the entity was changed in Wikidata
def get_description(self, language: str = 'en') -> str | None:
"""If a description isn't available in the requested language default to English"""
return self.descriptions.get(language) or self.descriptions.get('en')
@classmethod
def from_dict(cls, response: dict, updated: datetime):
return cls(
**response,
_updated=updated,
)
def to_wikidata_api_json_format(self) -> str:
"""
        Transforms the dataclass into a JSON string like the one we get from the Wikidata API.
This is used for storing the json in the database.
"""
entity_dict = {
'id': self.id,
'type': self.type,
'labels': self.labels,
'descriptions': self.descriptions,
'aliases': self.aliases,
'statements': self.statements,
'sitelinks': self.sitelinks,
}
return json.dumps(entity_dict)
def _cache_expired(entity: WikidataEntity) -> bool:
return days_since(entity._updated) > WIKIDATA_CACHE_TTL_DAYS
def get_wikidata_entity(
qid: str, bust_cache: bool = False, fetch_missing: bool = False
) -> WikidataEntity | None:
"""
    This only supports QIDs; if we want to support PIDs we need to use different endpoints.
By default this will only use the cache (unless it is expired).
This is to avoid overwhelming Wikidata servers with requests from every visit to an author page.
bust_cache must be set to True if you want to fetch new items from Wikidata.
# TODO: After bulk data imports we should set fetch_missing to true (or remove it).
"""
if bust_cache:
return _get_from_web(qid)
if entity := _get_from_cache(qid):
if _cache_expired(entity):
return _get_from_web(qid)
return entity
elif fetch_missing:
return _get_from_web(qid)
return None
def _get_from_web(id: str) -> WikidataEntity | None:
response = requests.get(f'{WIKIDATA_API_URL}{id}')
if response.status_code == 200:
entity = WikidataEntity.from_dict(
response=response.json(), updated=datetime.now()
)
_add_to_cache(entity)
return entity
else:
logger.error(f'Wikidata Response: {response.status_code}, id: {id}')
return None
# Responses documented here https://doc.wikimedia.org/Wikibase/master/js/rest-api/
def _get_from_cache_by_ids(ids: list[str]) -> list[WikidataEntity]:
response = list(
db.get_db().query(
'select * from wikidata where id IN ($ids)',
vars={'ids': ids},
)
)
return [
WikidataEntity.from_dict(response=r.data, updated=r.updated) for r in response
]
def _get_from_cache(id: str) -> WikidataEntity | None:
"""
The cache is OpenLibrary's Postgres instead of calling the Wikidata API
"""
if result := _get_from_cache_by_ids([id]):
return result[0]
return None
def _add_to_cache(entity: WikidataEntity) -> None:
# TODO: after we upgrade to postgres 9.5+ we should use upsert here
oldb = db.get_db()
json_data = entity.to_wikidata_api_json_format()
if _get_from_cache(entity.id):
return oldb.update(
"wikidata",
where="id=$id",
vars={'id': entity.id},
data=json_data,
updated=entity._updated,
)
else:
# We don't provide the updated column on insert because postgres defaults to the current time
return oldb.insert("wikidata", id=entity.id, data=json_data)
# file: openlibrary/core/seq.py
"""Library to generate keys for new documents using database sequences.
Currently new keys are generated for author, edition and work types.
"""
__all__ = ["get_new_key", "get_new_keys"]
def get_new_key(site, type):
"""Returns a new key for the given type of document."""
return site.new_key(type)
def get_new_keys(site, type, n):
"""Returns n new keys for given type of documents.
Example: (TODO: Not a valid doctest!)
# >>> get_new_keys(site, "/type/edition", 2)
# ["/books/OL12M", "/books/OL13M"]
"""
return [get_new_key(site, type) for i in range(n)]
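# Usage sketch matching the docstring above (illustrative): `site` must be an
# Infobase site object exposing new_key(), e.g. web.ctx.site during a request.
def _example_new_edition_keys(site):
    return get_new_keys(site, "/type/edition", 2)
    # e.g. ["/books/OL12M", "/books/OL13M"]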
# file: openlibrary/core/stats.py
"""
StatsD client to be used in the application to log various metrics
Based on the code in http://www.monkinetic.com/2011/02/statsd.html (pystatsd client)
"""
# statsd.py
# Steve Ivy <[email protected]>
# http://monkinetic.com
import logging
from statsd import StatsClient
from infogami import config
pystats_logger = logging.getLogger("openlibrary.pystats")
def create_stats_client(cfg=config):
"Create the client which can be used for logging statistics"
logger = logging.getLogger("pystatsd.client")
logger.addHandler(logging.StreamHandler())
try:
stats_server = cfg.get("admin", {}).get("statsd_server", None)
if stats_server:
host, port = stats_server.rsplit(":", 1)
return StatsClient(host, port)
else:
logger.critical("Couldn't find statsd_server section in config")
return False
except Exception as e:
logger.critical("Couldn't create stats client - %s", e, exc_info=True)
return False
def put(key, value, rate=1.0):
"Records this ``value`` with the given ``key``. It is stored as a millisecond count"
global client
if client:
pystats_logger.debug(f"Putting {value} as {key}")
client.timing(key, value, rate)
def increment(key, n=1, rate=1.0):
"Increments the value of ``key`` by ``n``"
global client
if client:
pystats_logger.debug("Incrementing %s" % key)
for i in range(n):
try:
client.increment(key, sample_rate=rate)
except AttributeError:
client.incr(key, rate=rate)
def gauge(key: str, value: int, rate: float = 1.0) -> None:
"""
Gauges are a constant data type. Ordinarily the rate should be 1.0.
See https://statsd.readthedocs.io/en/v3.3/types.html#gauges
"""
global client
if client:
pystats_logger.debug(f"Updating gauge {key} to {value}")
client.gauge(key, value, rate=rate)
client = create_stats_client()
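# Usage sketch (illustrative): the module-level helpers above are called
# fire-and-forget from request-handling code; they silently no-op when no
# statsd server is configured. The metric names below are hypothetical.
def _example_record_metrics():
    increment('ol.example.event')        # count an event
    put('ol.example.pageload', 123)      # timing, in milliseconds
    gauge('ol.example.queue_depth', 42)  # absolute value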
# file: openlibrary/core/imports.py
"""Interface to import queue.
"""
from collections import defaultdict
from collections.abc import Iterable
from typing import TYPE_CHECKING, Any, Final
import logging
import datetime
import time
import web
import json
from psycopg2.errors import UndefinedTable, UniqueViolation
from pydantic import ValidationError
from web.db import ResultSet
from . import db
import contextlib
from openlibrary.catalog import add_book
from openlibrary.core import cache
logger = logging.getLogger("openlibrary.imports")
STAGED_SOURCES: Final = ('amazon', 'idb', 'google_books')
if TYPE_CHECKING:
from openlibrary.core.models import Edition
class Batch(web.storage):
def __init__(self, mapping, *requireds, **defaults):
"""
        Initialize some statistics instance attributes while retaining web.storage's __init__ behavior.
"""
super().__init__(mapping, *requireds, **defaults)
self.total_submitted: int = 0
self.total_queued: int = 0
self.total_skipped: int = 0
self.items_skipped: set = set()
@staticmethod
def find(name: str, create: bool = False) -> "Batch": # type: ignore[return]
result = db.query("SELECT * FROM import_batch where name=$name", vars=locals())
if result:
return Batch(result[0])
elif create:
return Batch.new(name)
@staticmethod
def new(name: str, submitter: str | None = None) -> "Batch":
db.insert("import_batch", name=name, submitter=submitter)
return Batch.find(name=name)
def load_items(self, filename):
"""Adds all the items specified in the filename to this batch."""
items = [line.strip() for line in open(filename) if line.strip()]
self.add_items(items)
def dedupe_items(self, items):
ia_ids = [item.get('ia_id') for item in items if item.get('ia_id')]
already_present = {
row.ia_id
for row in db.query(
"SELECT ia_id FROM import_item WHERE ia_id IN $ia_ids",
vars={"ia_ids": ia_ids},
)
}
# ignore already present
logger.info(
"batch %s: %d items are already present, ignoring...",
self.name,
len(already_present),
)
# Update batch counts
self.total_submitted = len(ia_ids)
self.total_skipped = len(already_present)
self.total_queued = self.total_submitted - self.total_skipped
self.items_skipped = already_present
# Those unique items whose ia_id's aren't already present
return [item for item in items if item.get('ia_id') not in already_present]
def normalize_items(self, items):
return [
(
{'batch_id': self.id, 'ia_id': item}
if isinstance(item, str)
else {
'batch_id': self.id,
# Partner bots set ia_id to eg "partner:978..."
'ia_id': item.get('ia_id'),
'status': item.get('status', 'pending'),
'data': (
json.dumps(item.get('data'), sort_keys=True)
if item.get('data')
else None
),
'submitter': (
item.get('submitter') if item.get('submitter') else None
),
}
)
for item in items
]
def add_items(self, items: list[str] | list[dict]) -> None:
"""
:param items: either a list of `ia_id` (legacy) or a list of dicts
containing keys `ia_id` and book `data`. In the case of
the latter, `ia_id` will be of form e.g. "isbn:1234567890";
i.e. of a format id_type:value which cannot be a valid IA id.
"""
if not items:
return None
logger.info("batch %s: adding %d items", self.name, len(items))
items = self.dedupe_items(self.normalize_items(items))
if items:
try:
# TODO: Upgrade psql and use `INSERT OR IGNORE`
# otherwise it will fail on UNIQUE `data`
# https://stackoverflow.com/questions/1009584
db.get_db().multiple_insert("import_item", items)
except UniqueViolation:
for item in items:
with contextlib.suppress(UniqueViolation):
db.get_db().insert("import_item", **item)
logger.info("batch %s: added %d items", self.name, len(items))
return None
def get_items(self, status="pending"):
result = db.where("import_item", batch_id=self.id, status=status)
return [ImportItem(row) for row in result]
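# Usage sketch (illustrative): how an import bot typically queues items with
# the Batch class above. Assumes a configured database; the batch name and
# identifiers are hypothetical.
def _example_queue_partner_batch():
    batch = Batch.find("partner-2024-01", create=True)
    batch.add_items(
        [
            {'ia_id': 'isbn:9780140328721', 'data': {'title': 'Example'}},
            'examplescan00unse',  # plain IA identifier (legacy form)
        ]
    )
    return batch.get_items(status="pending")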
class ImportItem(web.storage):
@staticmethod
def find_pending(limit=1000):
if result := db.where("import_item", status="pending", order="id", limit=limit):
return map(ImportItem, result)
return None
@staticmethod
def find_staged_or_pending(
identifiers: Iterable[str], sources: Iterable[str] = STAGED_SOURCES
) -> ResultSet:
"""
Find staged or pending items in import_item matching the ia_id identifiers.
Given a list of ISBNs as identifiers, creates list of `ia_ids` and
queries the import_item table for them.
Generated `ia_ids` have the form `{source}:{identifier}` for each `source`
in `sources` and `identifier` in `identifiers`.
"""
ia_ids = [
f"{source}:{identifier}" for identifier in identifiers for source in sources
]
query = (
"SELECT * "
"FROM import_item "
"WHERE status IN ('staged', 'pending') "
"AND ia_id IN $ia_ids"
)
return db.query(query, vars={'ia_ids': ia_ids})
@staticmethod
def import_first_staged(
identifiers: list[str], sources: Iterable[str] = STAGED_SOURCES
) -> "Edition | None":
"""
Import the first staged item in import_item matching the ia_id identifiers.
This changes the status of matching ia_id identifiers to prevent a
race condition that can result in duplicate imports.
"""
ia_ids = [
f"{source}:{identifier}" for identifier in identifiers for source in sources
]
query_start_processing = (
"UPDATE import_item "
"SET status = 'processing' "
"WHERE status = 'staged' "
"AND ia_id IN $ia_ids "
"RETURNING *"
)
# TODO: Would this be better to update by the specific ID, given
# we have the IDs? If this approach works generally, it could work for
# both `staged` and `pending` by making a dictionary of the original
# `status` values, and restoring all the original values, based on `id`,
# save for the one upon which import was tested.
query_finish_processing = (
"UPDATE import_item "
"SET status = 'staged' "
"WHERE status = 'processing' "
"AND ia_id IN $ia_ids"
)
if in_process_items := db.query(
query_start_processing, vars={'ia_ids': ia_ids}
):
item: ImportItem = ImportItem(in_process_items[0])
try:
return item.single_import()
except Exception: # noqa: BLE001
return None
finally:
db.query(query_finish_processing, vars={'ia_ids': ia_ids})
return None
def single_import(self) -> "Edition | None":
"""Import the item using load(), swallow errors, update status, and return the Edition if any."""
try:
# Avoids a circular import issue.
from openlibrary.plugins.importapi.code import parse_data
edition, _ = parse_data(self.data.encode('utf-8'))
if edition:
reply = add_book.load(edition)
if reply.get('success') and 'edition' in reply:
edition = reply['edition']
self.set_status(edition['status'], ol_key=edition['key']) # type: ignore[index]
return web.ctx.site.get(edition['key']) # type: ignore[index]
else:
error_code = reply.get('error_code', 'unknown-error')
self.set_status("failed", error=error_code)
except ValidationError:
self.set_status("failed", error="invalid-value")
return None
except Exception: # noqa: BLE001
self.set_status("failed", error="unknown-error")
return None
return None
@staticmethod
def find_by_identifier(identifier):
result = db.where("import_item", ia_id=identifier)
if result:
return ImportItem(result[0])
@staticmethod
def bulk_mark_pending(
identifiers: list[str], sources: Iterable[str] = STAGED_SOURCES
):
"""
Given a list of ISBNs, creates list of `ia_ids` and queries the import_item
table the `ia_ids`.
Generated `ia_ids` have the form `{source}:{id}` for each `source` in `sources`
and `id` in `identifiers`.
"""
ia_ids = []
for id in identifiers:
ia_ids += [f'{source}:{id}' for source in sources]
query = (
"UPDATE import_item "
"SET status = 'pending' "
"WHERE status = 'staged' "
"AND ia_id IN $ia_ids"
)
db.query(query, vars={'ia_ids': ia_ids})
def set_status(self, status, error=None, ol_key=None):
id_ = self.ia_id or f"{self.batch_id}:{self.id}"
logger.info("set-status %s - %s %s %s", id_, status, error, ol_key)
d = {
"status": status,
"error": error,
"ol_key": ol_key,
"import_time": datetime.datetime.utcnow(),
}
if status != 'failed':
d = dict(**d, data=None)
db.update("import_item", where="id=$id", vars=self, **d)
self.update(d)
def mark_failed(self, error):
self.set_status(status='failed', error=error)
def mark_found(self, ol_key):
self.set_status(status='found', ol_key=ol_key)
def mark_created(self, ol_key):
self.set_status(status='created', ol_key=ol_key)
def mark_modified(self, ol_key):
self.set_status(status='modified', ol_key=ol_key)
@classmethod
def delete_items(
cls, ia_ids: list[str], batch_id: int | None = None, _test: bool = False
):
oldb = db.get_db()
data: dict[str, Any] = {
'ia_ids': ia_ids,
}
where = 'ia_id IN $ia_ids'
if batch_id:
data['batch_id'] = batch_id
where += ' AND batch_id=$batch_id'
return oldb.delete('import_item', where=where, vars=data, _test=_test)
class Stats:
"""Import Stats."""
@staticmethod
def get_imports_per_hour():
"""Returns the number imports happened in past one hour duration."""
try:
result = db.query(
"SELECT count(*) as count FROM import_item"
" WHERE import_time > CURRENT_TIMESTAMP - interval '1' hour"
)
except UndefinedTable:
logger.exception("Database table import_item may not exist on localhost")
return 0
return result[0].count
@staticmethod
def _get_count(status=None):
where = "status=$status" if status else "1=1"
try:
rows = db.select(
"import_item", what="count(*) as count", where=where, vars=locals()
)
except UndefinedTable:
logger.exception("Database table import_item may not exist on localhost")
return 0
return rows[0].count
@classmethod
def get_count(cls, status=None, use_cache=False):
return (
cache.memcache_memoize(
cls._get_count,
"imports.get_count",
timeout=5 * 60,
)
if use_cache
else cls._get_count
)(status=status)
@staticmethod
def get_count_by_status(date=None):
rows = db.query("SELECT status, count(*) FROM import_item GROUP BY status")
return {row.status: row.count for row in rows}
@staticmethod
def _get_count_by_date_status(ndays=10):
try:
result = db.query(
"SELECT added_time::date as date, status, count(*)"
" FROM import_item "
" WHERE added_time > current_date - interval '$ndays' day"
" GROUP BY 1, 2"
" ORDER BY 1 desc",
vars=locals(),
)
except UndefinedTable:
logger.exception("Database table import_item may not exist on localhost")
return []
d = defaultdict(dict)
for row in result:
d[row.date][row.status] = row.count
date_counts = sorted(d.items(), reverse=True)
return date_counts
@classmethod
def get_count_by_date_status(cls, ndays=10, use_cache=False):
if use_cache:
date_counts = cache.memcache_memoize(
cls._get_count_by_date_status,
"imports.get_count_by_date_status",
timeout=60 * 60,
)(ndays=ndays)
# Don't cache today
date_counts[0] = cache.memcache_memoize(
cls._get_count_by_date_status,
"imports.get_count_by_date_status_today",
timeout=60 * 3,
)(ndays=1)[0]
return date_counts
return cls._get_count_by_date_status(ndays=ndays)
@staticmethod
def _get_books_imported_per_day():
def date2millis(date):
return time.mktime(date.timetuple()) * 1000
try:
query = """
SELECT import_time::date as date, count(*) as count
FROM import_item WHERE status ='created'
GROUP BY 1 ORDER BY 1
"""
rows = db.query(query)
except UndefinedTable:
logger.exception("Database table import_item may not exist on localhost")
return []
return [[date2millis(row.date), row.count] for row in rows]
@classmethod
def get_books_imported_per_day(cls, use_cache=False):
return (
cache.memcache_memoize(
cls._get_books_imported_per_day,
"import_stats.get_books_imported_per_day",
timeout=60 * 60,
)
if use_cache
else cls._get_books_imported_per_day
)()
@staticmethod
def get_items(date=None, order=None, limit=None):
"""Returns all rows with given added date."""
where = "added_time::date = $date" if date else "1 = 1"
try:
return db.select(
"import_item", where=where, order=order, limit=limit, vars=locals()
)
except UndefinedTable:
logger.exception("Database table import_item may not exist on localhost")
return []
@staticmethod
def get_items_summary(date):
"""Returns all rows with given added date."""
rows = db.query(
"SELECT status, count(*) as count"
" FROM import_item"
" WHERE added_time::date = $date"
" GROUP BY status",
vars=locals(),
)
return {"counts": {row.status: row.count for row in rows}}
# file: openlibrary/core/schema.py
"""Infobase schema for Open Library
"""
from infogami.infobase import dbstore
import web
def get_schema():
schema = dbstore.Schema()
schema.add_table_group('type', '/type/type')
schema.add_table_group('type', '/type/property')
schema.add_table_group('type', '/type/backreference')
schema.add_table_group('user', '/type/user')
schema.add_table_group('user', '/type/usergroup')
schema.add_table_group('user', '/type/permission')
datatypes = ["str", "int", "ref", "boolean"]
schema.add_table_group('edition', '/type/edition', datatypes)
schema.add_table_group('author', '/type/author', datatypes)
schema.add_table_group('scan', '/type/scan_location', datatypes)
schema.add_table_group('scan', '/type/scan_record', datatypes)
schema.add_table_group('work', '/type/work', datatypes)
schema.add_table_group('publisher', '/type/publisher', datatypes)
schema.add_table_group('subject', '/type/subject', datatypes)
# schema.add_table_group('tag', '/type/tag', datatypes)
schema.add_seq('/type/edition', '/books/OL%dM')
schema.add_seq('/type/author', '/authors/OL%dA')
schema.add_seq('/type/work', '/works/OL%dW')
schema.add_seq('/type/publisher', '/publishers/OL%dP')
schema.add_seq('/type/tag', '/tags/OL%dT')
_sql = schema.sql
# custom postgres functions required by OL.
more_sql = """
CREATE OR REPLACE FUNCTION get_olid(text) RETURNS text AS $$
select regexp_replace($1, '.*(OL[0-9]+[A-Z])', E'\\1') where $1 ~ '^/.*/OL[0-9]+[A-Z]$';
$$ LANGUAGE SQL IMMUTABLE;
CREATE INDEX thing_olid_idx ON thing(get_olid(key));
CREATE TABLE stats (
id serial primary key,
key text unique,
type text,
created timestamp without time zone,
updated timestamp without time zone,
json text
);
CREATE INDEX stats_type_idx ON stats(type);
CREATE INDEX stats_created_idx ON stats(created);
CREATE INDEX stats_updated_idx ON stats(updated);
CREATE TABLE waitingloan (
id serial primary key,
book_key text,
user_key text,
status text default 'waiting',
position integer,
wl_size integer,
since timestamp without time zone default (current_timestamp at time zone 'utc'),
last_update timestamp without time zone default (current_timestamp at time zone 'utc'),
expiry timestamp without time zone,
available_email_sent boolean default 'f',
UNIQUE (book_key, user_key)
);
CREATE INDEX waitingloan_user_key_idx ON waitingloan(user_key);
CREATE INDEX waitingloan_status_idx ON waitingloan(status);
CREATE TABLE import_batch (
id serial primary key,
name text,
submitter text,
submit_time timestamp without time zone default (current_timestamp at time zone 'utc')
);
CREATE INDEX import_batch_name ON import_batch(name);
CREATE INDEX import_batch_submitter_idx ON import_batch(submitter);
CREATE INDEX import_batch_submit_time_idx ON import_batch(submit_time);
CREATE TABLE import_item (
id serial primary key,
batch_id integer references import_batch,
added_time timestamp without time zone default (current_timestamp at time zone 'utc'),
import_time timestamp without time zone,
status text default 'pending',
error text,
ia_id text,
data text,
ol_key text,
comments text,
submitter text,
UNIQUE (batch_id, ia_id)
);
CREATE INDEX import_item_batch_id ON import_item(batch_id);
CREATE INDEX import_item_import_time ON import_item(import_time);
CREATE INDEX import_item_status ON import_item(status);
CREATE INDEX import_item_ia_id ON import_item(ia_id);
"""
# monkey patch schema.sql to include the custom functions
schema.sql = lambda: web.safestr(_sql()) + more_sql
return schema
def register_schema():
"""Register the schema defined in this module as the default schema."""
dbstore.default_schema = get_schema()
if __name__ == "__main__":
print(get_schema().sql())
# file: openlibrary/core/edits.py
import datetime
import json
import web
from sqlite3 import IntegrityError
from psycopg2.errors import UniqueViolation
from infogami.utils.view import public
from openlibrary.i18n import gettext as _
from openlibrary.core import cache
from openlibrary.utils import dateutil
from . import db
@public
def get_status_for_view(status_code: int) -> str:
"""Returns localized status string that corresponds with the given status code."""
if status_code == CommunityEditsQueue.STATUS['DECLINED']:
return _('Declined')
if status_code == CommunityEditsQueue.STATUS['PENDING']:
return _('Pending')
if status_code == CommunityEditsQueue.STATUS['MERGED']:
return _('Merged')
return _('Unknown')
class CommunityEditsQueue:
"""Schema
id: Primary identifier
submitter: username of person that made the request
reviewer: The username of the person who reviewed the request
url: URL of the merge request
status: Either "Pending", "Merged", or "Declined"
    comments: Reviewer/submitter notes, stored as a JSON blob (with timestamps, etc.)
created: created timestamp
updated: update timestamp
"""
TABLENAME = 'community_edits_queue'
TYPE = {
'WORK_MERGE': 1,
'AUTHOR_MERGE': 2,
}
STATUS = {
'DECLINED': 0,
'PENDING': 1,
'MERGED': 2,
}
MODES = {
'all': [STATUS['DECLINED'], STATUS['PENDING'], STATUS['MERGED']],
'open': [STATUS['PENDING']],
'closed': [STATUS['DECLINED'], STATUS['MERGED']],
}
@classmethod
def get_requests(
cls,
limit: int = 50,
page: int = 1,
mode: str = 'all',
order: str | None = None,
**kwargs,
):
oldb = db.get_db()
query_kwargs = {
"limit": limit,
"offset": limit * (page - 1),
"vars": {**kwargs},
}
query_kwargs['where'] = cls.where_clause(mode, **kwargs)
if order:
query_kwargs['order'] = order
return oldb.select(cls.TABLENAME, **query_kwargs)
@classmethod
def get_counts_by_mode(cls, mode='all', **kwargs):
oldb = db.get_db()
query = f'SELECT count(*) from {cls.TABLENAME}'
if where_clause := cls.where_clause(mode, **kwargs):
query = f'{query} WHERE {where_clause}'
return oldb.query(query, vars=kwargs)[0]['count']
@classmethod
def get_submitters(cls):
oldb = db.get_db()
query = f'SELECT DISTINCT submitter FROM {cls.TABLENAME}'
return list(oldb.query(query))
@classmethod
def get_reviewers(cls):
oldb = db.get_db()
query = (
f'SELECT DISTINCT reviewer FROM {cls.TABLENAME} WHERE reviewer IS NOT NULL'
)
return list(oldb.query(query))
@classmethod
def where_clause(cls, mode, **kwargs):
wheres = []
if kwargs.get('reviewer') is not None:
wheres.append(
# if reviewer="" then get all unassigned MRs
"reviewer IS NULL"
if not kwargs.get('reviewer')
else "reviewer=$reviewer"
)
if "submitter" in kwargs:
wheres.append(
# If submitter not specified, default to any
"submitter IS NOT NULL"
if kwargs.get("submitter") is None
else "submitter=$submitter"
)
# If status not specified, don't include it
if 'status' in kwargs and kwargs.get('status'):
wheres.append('status=$status')
if "url" in kwargs:
wheres.append("url=$url")
if "id" in kwargs:
wheres.append("id=$id")
status_list = (
[f'status={status}' for status in cls.MODES[mode]] if mode != 'all' else []
)
where_clause = ''
if wheres:
where_clause = f'{" AND ".join(wheres)}'
if status_list:
status_query = f'({" OR ".join(status_list)})'
if where_clause:
where_clause = f'{where_clause} AND {status_query}'
else:
where_clause = status_query
return where_clause
@classmethod
def update_submitter_name(cls, submitter: str, new_username: str, _test=False):
oldb = db.get_db()
t = oldb.transaction()
try:
rows_changed = oldb.update(
cls.TABLENAME,
where="submitter=$submitter",
submitter=new_username,
vars={"submitter": submitter},
)
except (UniqueViolation, IntegrityError):
rows_changed = 0
t.rollback() if _test else t.commit()
return rows_changed
@classmethod
def submit_delete_request(cls, olid, submitter, comment=None):
if not comment:
# some default note from submitter
pass
url = f"{olid}/-/edit?m=delete"
        cls.submit_request(url, submitter=submitter, comment=comment)
@classmethod
def submit_request(
cls,
url: str,
submitter: str,
reviewer: str | None = None,
status: int = STATUS['PENDING'],
comment: str | None = None,
title: str | None = None,
mr_type: int | None = None,
):
"""
Inserts a new record into the table.
Preconditions: All data validations should be completed before calling this method.
"""
oldb = db.get_db()
comments = [cls.create_comment(submitter, comment)] if comment else []
json_comment = json.dumps({"comments": comments})
return oldb.insert(
cls.TABLENAME,
submitter=submitter,
reviewer=reviewer,
url=url,
status=status,
comments=json_comment,
title=title,
mr_type=mr_type,
)
@classmethod
def assign_request(cls, rid: int, reviewer: str | None) -> dict[str, str | None]:
"""Changes assignees to the request with the given ID.
This method only modifies requests that are not closed.
If the given reviewer is the same as the request's reviewer, nothing is
modified
"""
request = cls.find_by_id(rid)
if request['status'] not in cls.MODES['closed']:
if request['reviewer'] == reviewer:
return {
'status': 'error',
'error': f'{reviewer} is already assigned to this request',
}
oldb = db.get_db()
oldb.update(
cls.TABLENAME,
where="id=$rid",
reviewer=reviewer,
status=cls.STATUS['PENDING'],
updated=datetime.datetime.utcnow(),
vars={"rid": rid},
)
return {
'reviewer': reviewer,
'newStatus': get_status_for_view(cls.STATUS['PENDING']),
}
return {'status': 'error', 'error': 'This request has already been closed'}
@classmethod
def unassign_request(cls, rid: int):
"""
Changes status of given request to "Pending", and sets reviewer to None.
"""
oldb = db.get_db()
oldb.update(
cls.TABLENAME,
where="id=$rid",
status=cls.STATUS['PENDING'],
reviewer=None,
updated=datetime.datetime.utcnow(),
vars={"rid": rid},
)
@classmethod
def update_request_status(
cls, rid: int, status: int, reviewer: str, comment: str | None = None
) -> int:
"""
Changes the status of the request with the given rid.
        If a comment is included, the existing comments for this request are fetched and
the new comment is appended.
"""
oldb = db.get_db()
update_kwargs = {}
# XXX Trim whitespace from comment first
if comment:
comments = cls.get_comments(rid)
comments['comments'].append(cls.create_comment(reviewer, comment))
update_kwargs['comments'] = json.dumps(comments)
return oldb.update(
cls.TABLENAME,
where="id=$rid",
status=status,
reviewer=reviewer,
updated=datetime.datetime.utcnow(),
vars={"rid": rid},
**update_kwargs,
)
@classmethod
def comment_request(cls, rid: int, username: str, comment: str) -> int:
oldb = db.get_db()
comments = cls.get_comments(rid)
comments['comments'].append(cls.create_comment(username, comment))
return oldb.update(
cls.TABLENAME,
where="id=$rid",
comments=json.dumps(comments),
updated=datetime.datetime.utcnow(),
vars={"rid": rid},
)
@classmethod
def find_by_id(cls, rid: int):
"""Returns the record with the given ID."""
return cls.get_requests(id=rid)[0] or None
@classmethod
def exists(cls, url: str) -> bool:
"""Returns True if a request with the given URL exists in the table."""
return len(cls.get_requests(limit=1, url=url)) > 0
@classmethod
def get_comments(cls, rid: int):
"""Fetches the comments for the given request, or an empty comments object."""
return cls.get_requests(id=rid)[0]['comments'] or {'comments': []}
@classmethod
def create_comment(cls, username: str, message: str) -> dict[str, str]:
"""Creates and returns a new comment with the given name and message.
Timestamp set as current time.
"""
return {
# isoformat to avoid to-json issues
"timestamp": datetime.datetime.utcnow().isoformat(),
"username": username,
"message": message,
# XXX It may be easier to update these comments if they had IDs
}
@public
def cached_get_counts_by_mode(mode='all', reviewer='', **kwargs):
return cache.memcache_memoize(
CommunityEditsQueue.get_counts_by_mode,
f"librarian_queue_counts_{mode}",
timeout=dateutil.MINUTE_SECS,
)(mode=mode, reviewer=reviewer, **kwargs)
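# Usage sketch (illustrative): the lifecycle of a merge request in the queue
# above. Assumes a configured database; the URL and usernames are hypothetical.
def _example_merge_request_lifecycle():
    rid = CommunityEditsQueue.submit_request(
        url='/works/OL1W/-/merge?records=OL1W,OL2W',
        submitter='alice',
        title='Duplicate work',
        mr_type=CommunityEditsQueue.TYPE['WORK_MERGE'],
    )
    CommunityEditsQueue.assign_request(rid, 'bob-the-librarian')
    CommunityEditsQueue.update_request_status(
        rid,
        CommunityEditsQueue.STATUS['MERGED'],
        'bob-the-librarian',
        comment='Merged.',
    )
    return CommunityEditsQueue.find_by_id(rid)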
# file: openlibrary/core/bookshelves.py
import logging
import web
from dataclasses import dataclass
from datetime import date, datetime
from typing import Literal, cast, Any, Final, TypedDict
from collections.abc import Iterable
from openlibrary.plugins.worksearch.search import get_solr
from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO
from infogami.infobase.utils import flatten
from . import db
logger = logging.getLogger(__name__)
FILTER_BOOK_LIMIT: Final = 30_000
class WorkReadingLogSummary(TypedDict):
want_to_read: int
currently_reading: int
already_read: int
class Bookshelves(db.CommonExtras):
TABLENAME = "bookshelves_books"
PRIMARY_KEY = ["username", "work_id", "bookshelf_id"]
PRESET_BOOKSHELVES = {'Want to Read': 1, 'Currently Reading': 2, 'Already Read': 3}
ALLOW_DELETE_ON_CONFLICT = True
PRESET_BOOKSHELVES_JSON = {
'want_to_read': 1,
'currently_reading': 2,
'already_read': 3,
}
@classmethod
def summary(cls):
return {
'total_books_logged': {
'total': Bookshelves.total_books_logged(),
'month': Bookshelves.total_books_logged(since=DATE_ONE_MONTH_AGO),
'week': Bookshelves.total_books_logged(since=DATE_ONE_WEEK_AGO),
},
'total_users_logged': {
'total': Bookshelves.total_unique_users(),
'month': Bookshelves.total_unique_users(since=DATE_ONE_MONTH_AGO),
'week': Bookshelves.total_unique_users(since=DATE_ONE_WEEK_AGO),
},
}
@classmethod
def total_books_logged(
cls, shelf_ids: list[str] | None = None, since: date | None = None
) -> int:
"""Returns (int) number of books logged across all Reading Log shelves (e.g. those
specified in PRESET_BOOKSHELVES). One may alternatively specify a
`list` of `shelf_ids` to isolate or span multiple
shelves. `since` may be used to limit the result to those
books logged since a specific date. Any python datetime.date
type should work.
:param shelf_ids: one or more bookshelf_id values, see also the default values
specified in PRESET_BOOKSHELVES
:param since: returns all logged books after date
"""
oldb = db.get_db()
query = "SELECT count(*) from bookshelves_books"
if shelf_ids:
query += " WHERE bookshelf_id IN ($shelf_ids)"
if since:
query += " AND created >= $since"
elif since:
query += " WHERE created >= $since"
results = cast(
tuple[int],
oldb.query(query, vars={'since': since, 'shelf_ids': shelf_ids}),
)
return results[0]
@classmethod
def total_unique_users(cls, since: date | None = None) -> int:
"""Returns the total number of unique users who have logged a
book. `since` may be provided to only return the number of users after
a certain datetime.date.
"""
oldb = db.get_db()
query = "select count(DISTINCT username) from bookshelves_books"
if since:
query += " WHERE created >= $since"
results = cast(tuple[int], oldb.query(query, vars={'since': since}))
return results[0]
@classmethod
def patrons_who_also_read(cls, work_id: str, limit: int = 15):
oldb = db.get_db()
query = "select DISTINCT username from bookshelves_books where work_id=$work_id AND bookshelf_id=3 limit $limit"
results = oldb.query(query, vars={'work_id': work_id, 'limit': limit})
# get all patrons with public reading logs
return [
p
for p in web.ctx.site.get_many(
[f'/people/{r.username}/preferences' for r in results]
)
if p.dict().get('notifications', {}).get('public_readlog') == 'yes'
]
@classmethod
def most_logged_books(
cls,
shelf_id: str = '',
limit: int = 10,
since: date | None = None,
page: int = 1,
fetch: bool = False,
sort_by_count: bool = True,
minimum: int = 0,
) -> list:
"""Returns a ranked list of work OLIDs (in the form of an integer --
i.e. OL123W would be 123) which have been most logged by
users. This query is limited to a specific shelf_id (e.g. 1
for "Want to Read").
"""
page = int(page or 1)
offset = (page - 1) * limit
oldb = db.get_db()
where = 'WHERE bookshelf_id' + ('=$shelf_id' if shelf_id else ' IS NOT NULL ')
if since:
where += ' AND created >= $since'
group_by = 'group by work_id'
if minimum:
group_by += " HAVING COUNT(*) > $minimum"
order_by = 'order by cnt desc' if sort_by_count else ''
query = f"""
select work_id, count(*) as cnt
from bookshelves_books
{where} {group_by} {order_by}
limit $limit offset $offset"""
logger.info("Query: %s", query)
data = {
'shelf_id': shelf_id,
'limit': limit,
'offset': offset,
'since': since,
'minimum': minimum,
}
logged_books = list(oldb.query(query, vars=data))
return cls.fetch(logged_books) if fetch else logged_books
@classmethod
def fetch(cls, readinglog_items):
"""Given a list of readinglog_items, such as those returned by
Bookshelves.most_logged_books, fetch the corresponding Open Library
book records from solr with availability
"""
from openlibrary.plugins.worksearch.code import get_solr_works
from openlibrary.core.lending import get_availabilities
# This gives us a dict of all the works representing
# the logged_books, keyed by work_id
work_index = get_solr_works(
f"/works/OL{i['work_id']}W" for i in readinglog_items
)
# Loop over each work in the index and inject its availability
availability_index = get_availabilities(work_index.values())
for work_key in availability_index:
work_index[work_key]['availability'] = availability_index[work_key]
# Return items from the work_index in the order
# they are represented by the trending logged books
for i, item in enumerate(readinglog_items):
key = f"/works/OL{item['work_id']}W"
if key in work_index:
readinglog_items[i]['work'] = work_index[key]
return readinglog_items
@classmethod
def count_total_books_logged_by_user(
cls, username: str, bookshelf_ids: list[str] | None = None
) -> int:
"""Counts the (int) total number of books logged by this `username`,
with the option of limiting the count to specific bookshelves
by `bookshelf_id`
"""
return sum(
cls.count_total_books_logged_by_user_per_shelf(
username, bookshelf_ids=bookshelf_ids
).values()
)
@classmethod
def count_user_books_on_shelf(
cls,
username: str,
bookshelf_id: int,
) -> int:
result = db.get_db().query(
"""
SELECT count(*) from bookshelves_books
WHERE bookshelf_id=$bookshelf_id AND username=$username
""",
vars={
'bookshelf_id': bookshelf_id,
'username': username,
},
)
return result[0].count if result else 0
@classmethod
def count_total_books_logged_by_user_per_shelf(
cls, username: str, bookshelf_ids: list[str] | None = None
) -> dict[int, int]:
"""Returns a dict mapping the specified user's bookshelves_ids to the
number of number of books logged per each shelf, i.e. {bookshelf_id:
count}. By default, we limit bookshelf_ids to those in PRESET_BOOKSHELVES
TODO: add `since` to fetch books logged after a certain
date. Useful for following/subscribing-to users and being
notified of books they log. Also add to
count_total_books_logged_by_user
"""
oldb = db.get_db()
data = {'username': username}
_bookshelf_ids = ','.join(
[str(x) for x in bookshelf_ids or cls.PRESET_BOOKSHELVES.values()]
)
query = (
"SELECT bookshelf_id, count(*) from bookshelves_books WHERE "
"bookshelf_id=ANY('{" + _bookshelf_ids + "}'::int[]) "
"AND username=$username GROUP BY bookshelf_id"
)
result = oldb.query(query, vars=data)
return {i['bookshelf_id']: i['count'] for i in result} if result else {}
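    # Illustrative return shape (counts hypothetical): {1: 12, 3: 4}, i.e. bookshelf_id -> count,
    # limited to PRESET_BOOKSHELVES unless explicit bookshelf_ids are passed in.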
    # Iterates through a list of solr docs; for every work doc with a 'logged_edition',
    # the edition doc with the matching key is removed from the returned list and
    # attached to that work doc as work_doc.editions.
def link_editions_to_works(solr_docs):
"""
:param solr_docs: Solr work/edition docs, augmented with reading log data
"""
linked_docs: list[web.storage] = []
editions_to_work_doc = {}
        # Add works to linked_docs, recording each work's logged edition key in editions_to_work_doc.
for doc in solr_docs:
if doc["key"].startswith("/works"):
linked_docs.append(doc)
if doc.get("logged_edition"):
editions_to_work_doc.update({doc["logged_edition"]: doc})
        # Attach editions to their works in a second loop, in case the docs are out of order.
for edition in solr_docs:
if edition["key"].startswith("/books/"):
if work_doc := editions_to_work_doc.get(edition["key"]):
work_doc.editions = [edition]
else:
                    # No matching work found; log an error instead of raising.
                    logger.error("No work found for edition %s", edition["key"])
return linked_docs
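    # Sketch of the linking behaviour (keys hypothetical): given a work doc
    # {'key': '/works/OL123W', 'logged_edition': '/books/OL123M'} and an edition doc with
    # key '/books/OL123M', the edition ends up as work_doc.editions == [edition] and only
    # the work docs are returned.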
@classmethod
def add_storage_items_for_redirects(
cls, reading_log_keys, solr_docs: list[web.Storage]
) -> list[web.storage]:
"""
Use reading_log_keys to fill in missing redirected items in the
the solr_docs query results.
Solr won't return matches for work keys that have been redirected. Because
we use Solr to build the lists of storage items that ultimately gets passed
to the templates, redirected items returned from the reading log DB will
'disappear' when not returned by Solr. This remedies that by filling in
dummy works, albeit with the correct work_id.
"""
from openlibrary.plugins.worksearch.code import run_solr_query
from openlibrary.plugins.worksearch.schemes.works import WorkSearchScheme
fetched_keys = {doc["key"] for doc in solr_docs}
missing_keys = {work for (work, _) in reading_log_keys} - fetched_keys
"""
Provides a proper 1-to-1 connection between work keys and edition keys; needed, in order to fill in the appropriate 'logged_edition' data
for the correctly pulled work later on, as well as to correctly update the post-redirect version back to the pre_redirect key.
Without this step, processes may error due to the 'post-redirect key' not actually existing within a user's reading log.
"""
work_to_edition_keys = {
work: edition for (work, edition) in reading_log_keys if edition
}
edition_to_work_keys = {
edition: work for (work, edition) in reading_log_keys if edition
}
        # Add dummy works for cases where no edition key is present but the work key points to a redirect.
        # Ideally this is rare, as there is no way to recover the relevant information through Solr.
for key in missing_keys.copy():
if not work_to_edition_keys.get(key):
missing_keys.remove(key)
solr_docs.append(web.storage({"key": key}))
edition_keys_to_query = [
work_to_edition_keys[key].split("/")[2] for key in missing_keys
]
        if not edition_keys_to_query:
            return solr_docs
        fq = f'edition_key:({" OR ".join(edition_keys_to_query)})'
solr_resp = run_solr_query(
scheme=WorkSearchScheme(),
param={'q': '*:*'},
rows=len(edition_keys_to_query),
fields=list(
WorkSearchScheme.default_fetched_fields
| {'subject', 'person', 'place', 'time', 'edition_key'}
),
facet=False,
extra_params=[("fq", fq)],
)
"""
Now, we add the correct 'logged_edition' information to each document retrieved by the query, and substitute the work_key in
each doc for the original one.
"""
for doc in solr_resp.docs:
for edition_key in doc["edition_key"]:
if pre_redirect_key := edition_to_work_keys.get(
'/books/%s' % edition_key
):
doc["key"] = pre_redirect_key
doc["logged_edition"] = work_to_edition_keys.get(pre_redirect_key)
solr_docs.append(web.storage(doc))
break
return solr_docs
@classmethod
def get_users_logged_books(
cls,
username: str,
bookshelf_id: int = 0,
limit: int = 100,
page: int = 1, # Not zero-based counting!
sort: Literal['created asc', 'created desc'] = 'created desc',
checkin_year: int | None = None,
q: str = "",
) -> Any: # Circular imports prevent type hinting LoggedBooksData
"""
Returns LoggedBooksData containing Reading Log database records for books that
the user has logged. Also allows filtering/searching the reading log shelves,
and sorting reading log shelves (when not filtering).
The returned records ultimately come from Solr so that, as much as possible,
these query results may be used by anything relying on logged book data.
:param username: who logged this book
:param bookshelf_id: the ID of the bookshelf, see: PRESET_BOOKSHELVES.
If bookshelf_id is None, return books from all bookshelves.
:param q: an optional query string to filter the results.
"""
from openlibrary.core.models import LoggedBooksData
from openlibrary.plugins.worksearch.code import run_solr_query
from openlibrary.plugins.worksearch.schemes.works import WorkSearchScheme
        # Fetch editions as well, unless we're accessing the Want to Read shelf.
show_editions: bool = bookshelf_id != 1
shelf_totals = cls.count_total_books_logged_by_user_per_shelf(username)
oldb = db.get_db()
page = int(page or 1)
query_params: dict[str, str | int | None] = {
'username': username,
'limit': limit,
'offset': limit * (page - 1),
'bookshelf_id': bookshelf_id,
'checkin_year': checkin_year,
}
@dataclass
class ReadingLogItem:
"""Holds the datetime a book was logged and the edition ID."""
logged_date: datetime
edition_id: str
def add_reading_log_data(
reading_log_books: list[web.storage], solr_docs: list[web.storage]
):
"""
Adds data from ReadingLogItem to the Solr responses so they have the logged
date and edition ID.
"""
# Create a mapping of work keys to ReadingLogItem from the reading log DB.
reading_log_store: dict[str, ReadingLogItem] = {
f"/works/OL{book.work_id}W": ReadingLogItem(
logged_date=book.created,
edition_id=(
f"/books/OL{book.edition_id}M"
if book.edition_id is not None
else ""
),
)
for book in reading_log_books
}
# Insert {logged_edition} if present and {logged_date} into the Solr work.
# These dates are not used for sort-by-added-date. The DB handles that.
# Currently only used in JSON requests.
for doc in solr_docs:
if reading_log_record := reading_log_store.get(doc.key):
doc.logged_date = reading_log_record.logged_date
doc.logged_edition = reading_log_record.edition_id
return solr_docs
def get_filtered_reading_log_books(
q: str, query_params: dict[str, str | int | None], filter_book_limit: int
) -> LoggedBooksData:
"""
            Filter reading log books based on a query and return LoggedBooksData.
This does not work with sorting.
The reading log DB alone has access to who logged which book to their
reading log, so we need to get work IDs and logged info from there, query
Solr for more complete book information, and then put the logged info into
the Solr response.
"""
# Filtering by query needs a larger limit as we need (ideally) all of a
# user's added works from the reading log DB. The logged work IDs are used
# to query Solr, which searches for matches related to those work IDs.
query_params["limit"] = filter_book_limit
query = (
"SELECT work_id, created, edition_id from bookshelves_books WHERE "
"bookshelf_id=$bookshelf_id AND username=$username "
"LIMIT $limit"
)
reading_log_books: list[web.storage] = list(
oldb.query(query, vars=query_params)
)
assert len(reading_log_books) <= filter_book_limit
work_to_edition_keys = {
'/works/OL%sW' % i['work_id']: '/books/OL%sM' % i['edition_id']
for i in reading_log_books
}
# Separating out the filter query from the call allows us to cleanly edit it, if editions are required.
filter_query = 'key:(%s)' % " OR ".join(
'"%s"' % key for key in work_to_edition_keys
)
solr_resp = run_solr_query(
scheme=WorkSearchScheme(),
param={'q': q},
offset=query_params["offset"],
rows=limit,
facet=False,
# Putting these in fq allows them to avoid user-query processing, which
# can be (surprisingly) slow if we have ~20k OR clauses.
extra_params=[('fq', filter_query)],
)
total_results = solr_resp.num_found
solr_docs = solr_resp.docs
if show_editions:
edition_data = get_solr().get_many(
[work_to_edition_keys[work["key"]] for work in solr_resp.docs],
fields=WorkSearchScheme.default_fetched_fields
| {'subject', 'person', 'place', 'time', 'edition_key'},
)
solr_docs.extend(edition_data)
            # Many downstream consumers expect a list of web.storage docs.
            solr_docs = [web.storage(doc) for doc in solr_docs]
solr_docs = add_reading_log_data(reading_log_books, solr_docs)
# This function is only necessary if edition data was fetched.
if show_editions:
solr_docs = cls.link_editions_to_works(solr_docs)
return LoggedBooksData(
username=username,
q=q,
page_size=limit,
total_results=total_results,
shelf_totals=shelf_totals,
docs=solr_docs,
)
def get_sorted_reading_log_books(
query_params: dict[str, str | int | None],
sort: Literal['created asc', 'created desc'],
checkin_year: int | None,
):
"""
Get a page of sorted books from the reading log. This does not work with
filtering/searching the reading log.
The reading log DB alone has access to who logged which book to their
reading log, so we need to get work IDs and logged info from there, query
Solr for more complete book information, and then put the logged info into
the Solr response.
"""
if checkin_year:
query = """
SELECT b.work_id, b.created, b.edition_id
FROM bookshelves_books b
INNER JOIN bookshelves_events e
ON b.work_id = e.work_id AND b.username = e.username
WHERE b.username = $username
AND e.event_date LIKE $checkin_year || '%'
ORDER BY b.created DESC
"""
else:
query = (
"SELECT work_id, created, edition_id from bookshelves_books WHERE "
"bookshelf_id=$bookshelf_id AND username=$username "
f"ORDER BY created {'DESC' if sort == 'created desc' else 'ASC'} "
"LIMIT $limit OFFSET $offset"
)
if not bookshelf_id:
query = "SELECT * from bookshelves_books WHERE username=$username"
# XXX Removing limit, offset, etc from data looks like a bug
# unrelated / not fixing in this PR.
query_params = {'username': username}
reading_log_books: list[web.storage] = list(
oldb.query(query, vars=query_params)
)
reading_log_keys = [
(
['/works/OL%sW' % i['work_id'], '/books/OL%sM' % i['edition_id']]
if show_editions and i['edition_id']
else ['/works/OL%sW' % i['work_id'], ""]
)
for i in reading_log_books
]
solr_docs = get_solr().get_many(
[key for key in flatten(reading_log_keys) if key],
fields=WorkSearchScheme.default_fetched_fields
| {'subject', 'person', 'place', 'time', 'edition_key'},
)
solr_docs = cls.add_storage_items_for_redirects(reading_log_keys, solr_docs)
total_results = shelf_totals.get(bookshelf_id, 0)
solr_docs = add_reading_log_data(reading_log_books, solr_docs)
# Attaches returned editions to works.
if show_editions:
solr_docs = cls.link_editions_to_works(solr_docs)
assert len(solr_docs) == len(reading_log_keys), (
"solr_docs is missing an item/items from reading_log_keys; "
"see add_storage_items_for_redirects()"
)
return LoggedBooksData(
username=username,
q=q,
page_size=limit,
total_results=total_results,
shelf_totals=shelf_totals,
docs=solr_docs,
)
if q:
# checkin_year ignored :(
return get_filtered_reading_log_books(
q=q, query_params=query_params, filter_book_limit=FILTER_BOOK_LIMIT
)
else:
return get_sorted_reading_log_books(
query_params=query_params, sort=sort, checkin_year=checkin_year
)
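    # Illustrative call (username hypothetical):
    #   Bookshelves.get_users_logged_books('example-user', bookshelf_id=3, page=2, sort='created asc')
    # returns a LoggedBooksData whose .docs are web.storage Solr docs annotated with
    # logged_date/logged_edition; for shelves other than Want to Read (shelf 1), editions
    # are attached to their works via link_editions_to_works().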
@classmethod
def iterate_users_logged_books(cls, username: str) -> Iterable[dict]:
"""
Heavy users will have long lists of books which consume lots of memory and
cause performance issues. So, instead of creating a big list, let's repeatedly
get small lists like get_users_logged_books() and yield one book at a time.
"""
if not username or not isinstance(username, str):
raise ValueError(f"username must be a string, not {username}.")
oldb = db.get_db()
block = 0
LIMIT = 100 # Is there an ideal block size?!?
def get_a_block_of_books() -> list:
data = {"username": username, "limit": LIMIT, "offset": LIMIT * block}
query = (
"SELECT * from bookshelves_books WHERE username=$username "
"ORDER BY created DESC LIMIT $limit OFFSET $offset"
)
return list(oldb.query(query, vars=data))
while books := get_a_block_of_books():
block += 1
yield from books
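    # Usage sketch (username hypothetical):
    #   for row in Bookshelves.iterate_users_logged_books('example-user'):
    #       ...  # each row is one bookshelves_books record, fetched lazily in blocks of LIMIT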
@classmethod
def get_recently_logged_books(
cls,
bookshelf_id: str | None = None,
limit: int = 50,
page: int = 1,
fetch: bool = False,
) -> list:
oldb = db.get_db()
page = int(page or 1)
data = {
'bookshelf_id': bookshelf_id,
'limit': limit,
'offset': limit * (page - 1),
}
where = "WHERE bookshelf_id=$bookshelf_id " if bookshelf_id else ""
query = (
f"SELECT * from bookshelves_books {where} "
"ORDER BY created DESC LIMIT $limit OFFSET $offset"
)
logged_books = list(oldb.query(query, vars=data))
return cls.fetch(logged_books) if fetch else logged_books
@classmethod
def get_users_read_status_of_work(cls, username: str, work_id: str) -> int | None:
"""A user can mark a book as (1) want to read, (2) currently reading,
or (3) already read. Each of these states is mutually
exclusive. Returns the user's read state of this work, if one
exists.
"""
oldb = db.get_db()
data = {'username': username, 'work_id': int(work_id)}
bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])
query = (
"SELECT bookshelf_id from bookshelves_books WHERE "
"bookshelf_id=ANY('{" + bookshelf_ids + "}'::int[]) "
"AND username=$username AND work_id=$work_id"
)
result = list(oldb.query(query, vars=data))
return result[0].bookshelf_id if result else None
@classmethod
def get_users_read_status_of_works(cls, username: str, work_ids: list[str]) -> list:
oldb = db.get_db()
data = {
'username': username,
'work_ids': work_ids,
}
query = (
"SELECT work_id, bookshelf_id from bookshelves_books WHERE "
"username=$username AND "
"work_id IN $work_ids"
)
return list(oldb.query(query, vars=data))
@classmethod
def add(
cls, username: str, bookshelf_id: str, work_id: str, edition_id=None
) -> None:
"""Adds a book with `work_id` to user's bookshelf designated by
`bookshelf_id`"""
oldb = db.get_db()
work_id = int(work_id) # type: ignore
bookshelf_id = int(bookshelf_id) # type: ignore
data = {
'work_id': work_id,
'username': username,
}
users_status = cls.get_users_read_status_of_work(username, work_id)
if not users_status:
return oldb.insert(
cls.TABLENAME,
username=username,
bookshelf_id=bookshelf_id,
work_id=work_id,
edition_id=edition_id,
)
else:
where = "work_id=$work_id AND username=$username"
return oldb.update(
cls.TABLENAME,
where=where,
bookshelf_id=bookshelf_id,
edition_id=edition_id,
vars=data,
)
@classmethod
def remove(cls, username: str, work_id: str, bookshelf_id: str | None = None):
oldb = db.get_db()
where = {'username': username, 'work_id': int(work_id)}
if bookshelf_id:
where['bookshelf_id'] = int(bookshelf_id)
try:
return oldb.delete(
cls.TABLENAME,
where=('work_id=$work_id AND username=$username'),
vars=where,
)
except Exception: # we want to catch no entry exists
return None
@classmethod
def get_works_shelves(cls, work_id: str, lazy: bool = False):
"""Bookshelves this work is on"""
oldb = db.get_db()
query = f"SELECT * from {cls.TABLENAME} where work_id=$work_id"
try:
result = oldb.query(query, vars={'work_id': work_id})
return result if lazy else list(result)
except Exception:
return None
@classmethod
def get_num_users_by_bookshelf_by_work_id(cls, work_id: str) -> dict[int, int]:
"""Returns a dict mapping a work_id to the
number of number of users who have placed that work_id in each shelf,
i.e. {bookshelf_id: count}.
"""
oldb = db.get_db()
query = (
"SELECT bookshelf_id, count(DISTINCT username) as user_count "
"from bookshelves_books where"
" work_id=$work_id"
" GROUP BY bookshelf_id"
)
result = oldb.query(query, vars={'work_id': int(work_id)})
return {i['bookshelf_id']: i['user_count'] for i in result} if result else {}
@classmethod
def get_work_summary(cls, work_id: str) -> WorkReadingLogSummary:
shelf_id_to_count = Bookshelves.get_num_users_by_bookshelf_by_work_id(work_id)
result = {}
# Make sure all the fields are present
for shelf_name, shelf_id in Bookshelves.PRESET_BOOKSHELVES_JSON.items():
result[shelf_name] = shelf_id_to_count.get(shelf_id, 0)
return cast(WorkReadingLogSummary, result)
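    # Illustrative shape of the summary (the shelf names shown are assumed; the real field names
    # come from PRESET_BOOKSHELVES_JSON): {'want_to_read': 10, 'currently_reading': 2,
    # 'already_read': 7}, with shelves nobody has used defaulting to 0.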
@classmethod
def user_with_most_books(cls) -> list:
"""
Which super patrons have the most books logged?
SELECT username, count(*) AS counted from bookshelves_books
WHERE bookshelf_id=ANY('{1,3,2}'::int[]) GROUP BY username
ORDER BY counted DESC, username LIMIT 10
"""
oldb = db.get_db()
_bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])
query = (
"SELECT username, count(*) AS counted "
"FROM bookshelves_books WHERE "
"bookshelf_id=ANY('{" + _bookshelf_ids + "}'::int[]) "
"GROUP BY username "
"ORDER BY counted DESC, username LIMIT 100"
)
result = oldb.query(query)
return list(result)
| 30,768 | Python | .py | 683 | 33.68082 | 145 | 0.577828 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
338 | lending.py | internetarchive_openlibrary/openlibrary/core/lending.py | """Module for providing core functionality of lending on Open Library.
"""
from typing import TYPE_CHECKING, Literal, TypedDict, cast
import web
import datetime
import logging
import time
import uuid
import eventer
import requests
from simplejson.errors import JSONDecodeError
from infogami.utils.view import public
from infogami.utils import delegate
from openlibrary.core import cache
from openlibrary.accounts.model import OpenLibraryAccount
from openlibrary.plugins.upstream.utils import urlencode
from openlibrary.utils import dateutil, uniq
from . import ia
from . import helpers as h
if TYPE_CHECKING:
from openlibrary.plugins.upstream.models import Edition
logger = logging.getLogger(__name__)
S3_LOAN_URL = 'https://%s/services/loans/loan/'
# When we generate a loan offer (.acsm) for a user we assume that the loan has occurred.
# Once the loan is fulfilled inside Digital Editions, the book status server will know
# the loan has occurred. We allow this timeout so that we don't delete the OL loan
# record before fulfillment just because we can't yet find it in the book status server.
# $$$ If a user borrows an ACS4 book and immediately returns it, the loan will show as
# "not yet downloaded" for the duration of the timeout.
# BookReader loan status is always current.
LOAN_FULFILLMENT_TIMEOUT_SECONDS = dateutil.MINUTE_SECS * 5
# How long bookreader loans should last
BOOKREADER_LOAN_DAYS = 14
BOOKREADER_STREAM_URL_PATTERN = "https://{0}/stream/{1}"
DEFAULT_IA_RESULTS = 42
MAX_IA_RESULTS = 1000
class PatronAccessException(Exception):
def __init__(self, message="Access to this item is temporarily locked."):
self.message = message
super().__init__(self.message)
config_ia_loan_api_url = None
config_ia_xauth_api_url = None
config_ia_availability_api_v2_url = cast(str, None)
config_ia_access_secret = None
config_ia_domain = None
config_ia_ol_shared_key = None
config_ia_ol_xauth_s3 = None
config_ia_s3_auth_url = None
config_ia_ol_metadata_write_s3 = None
config_ia_users_loan_history = None
config_ia_loan_api_developer_key = None
config_http_request_timeout = None
config_loanstatus_url = None
config_bookreader_host = None
config_internal_tests_api_key = None
def setup(config):
"""Initializes this module from openlibrary config."""
global config_loanstatus_url, config_ia_access_secret, config_bookreader_host
global config_ia_ol_shared_key, config_ia_ol_xauth_s3, config_internal_tests_api_key
global config_ia_loan_api_url, config_http_request_timeout
global config_ia_availability_api_v2_url, config_ia_ol_metadata_write_s3
global config_ia_xauth_api_url, config_http_request_timeout, config_ia_s3_auth_url
global config_ia_users_loan_history, config_ia_loan_api_developer_key
global config_ia_domain
config_loanstatus_url = config.get('loanstatus_url')
config_bookreader_host = config.get('bookreader_host', 'archive.org')
config_ia_domain = config.get('ia_base_url', 'https://archive.org')
config_ia_loan_api_url = config.get('ia_loan_api_url')
config_ia_availability_api_v2_url = cast(
str, config.get('ia_availability_api_v2_url')
)
config_ia_xauth_api_url = config.get('ia_xauth_api_url')
config_ia_access_secret = config.get('ia_access_secret')
config_ia_ol_shared_key = config.get('ia_ol_shared_key')
config_ia_ol_auth_key = config.get('ia_ol_auth_key')
config_ia_ol_xauth_s3 = config.get('ia_ol_xauth_s3')
config_ia_ol_metadata_write_s3 = config.get('ia_ol_metadata_write_s3')
config_ia_s3_auth_url = config.get('ia_s3_auth_url')
config_ia_users_loan_history = config.get('ia_users_loan_history')
config_ia_loan_api_developer_key = config.get('ia_loan_api_developer_key')
config_internal_tests_api_key = config.get('internal_tests_api_key')
config_http_request_timeout = config.get('http_request_timeout')
@public
def compose_ia_url(
limit: int | None = None,
page: int = 1,
subject=None,
query=None,
sorts=None,
advanced: bool = True,
rate_limit_exempt: bool = True,
) -> str | None:
"""This needs to be exposed by a generalized API endpoint within
plugins/api/browse which lets lazy-load more items for
the homepage carousel and support the upcoming /browse view
(backed by archive.org search, so we don't have to send users to
archive.org to see more books)
Returns None if we get an empty query
"""
from openlibrary.plugins.openlibrary.home import CAROUSELS_PRESETS
query = CAROUSELS_PRESETS.get(query, query)
q = 'openlibrary_work:(*)'
# If we don't provide an openlibrary_subject and no collection is
# specified in our query, we restrict our query to the `inlibrary`
# collection (i.e. those books which are borrowable)
if (not subject) and (not query or 'collection:' not in query):
q += ' AND collection:(inlibrary)'
# In the only case where we are not restricting our search to
# borrowable books (i.e. `inlibrary`), we remove all the books
# which are `printdisabled` *outside* of `inlibrary`.
if 'collection:(inlibrary)' not in q:
q += ' AND (collection:(inlibrary) OR (!collection:(printdisabled)))'
# If no lending restrictions (e.g. borrow, read) are imposed in
# our query, we assume only borrowable books will be included in
# results (not unrestricted/open books).
lendable = (
'(lending___available_to_browse:true OR lending___available_to_borrow:true)'
)
if (not query) or lendable not in query:
q += ' AND ' + lendable
if query:
q += " AND " + query
if subject:
q += " AND openlibrary_subject:" + subject
if not advanced:
_sort = sorts[0] if sorts else ''
if ' desc' in _sort:
_sort = '-' + _sort.split(' desc')[0]
elif ' asc' in _sort:
_sort = _sort.split(' asc')[0]
simple_params = {'query': q}
if _sort:
simple_params['sort'] = _sort
return 'https://archive.org/search.php?' + urlencode(simple_params)
rows = limit or DEFAULT_IA_RESULTS
params = [
('q', q),
('fl[]', 'identifier'),
('fl[]', 'openlibrary_edition'),
('fl[]', 'openlibrary_work'),
('rows', rows),
('page', page),
('output', 'json'),
]
if rate_limit_exempt:
params.append(('service', 'metadata__unlimited'))
if not sorts or not isinstance(sorts, list):
sorts = ['']
for sort in sorts:
params.append(('sort[]', sort))
base_url = "http://%s/advancedsearch.php" % config_bookreader_host
return base_url + '?' + urlencode(params)
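# Rough shape of the generated URL (illustrative; the real query string is fully urlencoded):
#   http://{config_bookreader_host}/advancedsearch.php?q=<q built above>
#       &fl[]=identifier&fl[]=openlibrary_edition&fl[]=openlibrary_work
#       &rows=42&page=1&output=json&service=metadata__unlimited&sort[]=<sort>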
@public
@cache.memoize(
engine="memcache", key="gt-availability", expires=5 * dateutil.MINUTE_SECS
)
def get_cached_groundtruth_availability(ocaid):
return get_groundtruth_availability(ocaid)
def get_groundtruth_availability(ocaid, s3_keys=None):
"""temporary stopgap to get ground-truth availability of books
including 1-hour borrows"""
params = '?action=availability&identifier=' + ocaid
url = S3_LOAN_URL % config_bookreader_host
try:
response = requests.post(url + params, data=s3_keys)
response.raise_for_status()
except requests.HTTPError:
pass # TODO: Handle unexpected responses from the availability server.
try:
data = response.json().get('lending_status', {})
except JSONDecodeError as e:
data = {}
# For debugging
data['__src__'] = 'core.models.lending.get_groundtruth_availability'
return data
def s3_loan_api(s3_keys, ocaid=None, action='browse', **kwargs):
"""Uses patrons s3 credentials to initiate or return a browse or
borrow loan on Archive.org.
:param dict s3_keys: {'access': 'xxx', 'secret': 'xxx'}
:param str action : 'browse_book' or 'borrow_book' or 'return_loan'
:param dict kwargs : Additional data to be sent in the POST request body (limit, offset)
"""
fields = {'identifier': ocaid, 'action': action}
params = '?' + '&'.join([f"{k}={v}" for (k, v) in fields.items() if v])
url = S3_LOAN_URL % config_bookreader_host
data = s3_keys | kwargs
response = requests.post(url + params, data=data)
# We want this to be just `409` but first
# `www/common/Lending.inc#L111-114` needs to
# be updated on petabox
if response.status_code in [400, 409]:
raise PatronAccessException()
response.raise_for_status()
return response
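# Usage sketch (credentials and identifier are placeholders):
#   s3_loan_api({'access': 'xxx', 'secret': 'xxx'}, ocaid='some-identifier', action='borrow_book')
# posts the keys to .../services/loans/loan/?identifier=some-identifier&action=borrow_book and
# raises PatronAccessException if the server answers 400/409.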
def get_available(
limit=None,
page=1,
subject=None,
query=None,
sorts=None,
url=None,
):
"""Experimental. Retrieves a list of available editions from
archive.org advancedsearch which are available, in the inlibrary
collection, and optionally apart of an `openlibrary_subject`.
Returns a list of editions (one available edition per work). Is
used in such things as 'Staff Picks' carousel to retrieve a list
of unique available books.
"""
url = url or compose_ia_url(
limit=limit,
page=page,
subject=subject,
query=query,
sorts=sorts,
)
if not url:
logger.error(
'get_available failed',
extra={
'limit': limit,
'page': page,
'subject': subject,
'query': query,
'sorts': sorts,
},
)
return {'error': 'no_url'}
try:
# Internet Archive Elastic Search (which powers some of our
# carousel queries) needs Open Library to forward user IPs so
# we can attribute requests to end-users
client_ip = web.ctx.env.get('HTTP_X_FORWARDED_FOR', 'ol-internal')
headers = {
"x-client-id": client_ip,
"x-preferred-client-id": client_ip,
"x-application-id": "openlibrary",
}
response = requests.get(
url, headers=headers, timeout=config_http_request_timeout
)
items = response.json().get('response', {}).get('docs', [])
results = {}
for item in items:
if item.get('openlibrary_work'):
results[item['openlibrary_work']] = item['openlibrary_edition']
books = web.ctx.site.get_many(['/books/%s' % olid for olid in results.values()])
books = add_availability(books)
return books
except Exception: # TODO: Narrow exception scope
logger.exception("get_available(%s)" % url)
return {'error': 'request_timeout'}
class AvailabilityStatus(TypedDict):
status: Literal["borrow_available", "borrow_unavailable", "open", "error"]
error_message: str | None
available_to_browse: bool | None
available_to_borrow: bool | None
available_to_waitlist: bool | None
is_printdisabled: bool | None
is_readable: bool | None
is_lendable: bool | None
is_previewable: bool
identifier: str | None
isbn: str | None
oclc: str | None
openlibrary_work: str | None
openlibrary_edition: str | None
last_loan_date: str | None
"""e.g. 2020-07-31T19:07:55Z"""
num_waitlist: str | None
"""A number represented inexplicably as a string"""
last_waitlist_date: str | None
"""e.g. 2020-07-31T19:07:55Z"""
class AvailabilityServiceResponse(TypedDict):
success: bool
responses: dict[str, AvailabilityStatus]
class AvailabilityStatusV2(AvailabilityStatus):
is_restricted: bool
is_browseable: bool | None
__src__: str
def update_availability_schema_to_v2(
v1_resp: AvailabilityStatus,
ocaid: str | None,
) -> AvailabilityStatusV2:
"""
This function attempts to take the output of e.g. Bulk Availability
API and add/infer attributes which are missing (but are present on
Ground Truth API)
"""
v2_resp = cast(AvailabilityStatusV2, v1_resp)
# TODO: Make less brittle; maybe add simplelists/copy counts to Bulk Availability
v2_resp['identifier'] = ocaid
v2_resp['is_restricted'] = v1_resp['status'] != 'open'
v2_resp['is_browseable'] = v1_resp.get('available_to_browse', False)
# For debugging
v2_resp['__src__'] = 'core.models.lending.get_availability'
return v2_resp
def get_availability(
id_type: Literal['identifier', 'openlibrary_work', 'openlibrary_edition'],
ids: list[str],
) -> dict[str, AvailabilityStatusV2]:
ids = [id_ for id_ in ids if id_] # remove infogami.infobase.client.Nothing
if not ids:
return {}
def key_func(_id: str) -> str:
return cache.build_memcache_key('lending.get_availability', id_type, _id)
mc = cache.get_memcache()
cached_values = cast(
dict[str, AvailabilityStatusV2], mc.get_multi([key_func(_id) for _id in ids])
)
availabilities = {
_id: cached_values[key]
for _id in ids
if (key := key_func(_id)) in cached_values
}
ids_to_fetch = set(ids) - set(availabilities)
if not ids_to_fetch:
return availabilities
try:
headers = {
"x-preferred-client-id": web.ctx.env.get(
'HTTP_X_FORWARDED_FOR', 'ol-internal'
),
"x-application-id": "openlibrary",
}
if config_ia_ol_metadata_write_s3:
headers["authorization"] = "LOW {s3_key}:{s3_secret}".format(
**config_ia_ol_metadata_write_s3
)
response = cast(
AvailabilityServiceResponse,
requests.get(
config_ia_availability_api_v2_url,
params={
id_type: ','.join(ids_to_fetch),
"scope": "printdisabled",
},
headers=headers,
timeout=10,
).json(),
)
uncached_values = {
_id: update_availability_schema_to_v2(
availability,
ocaid=(
_id if id_type == 'identifier' else availability.get('identifier')
),
)
for _id, availability in response['responses'].items()
}
availabilities |= uncached_values
mc.set_multi(
{
key_func(_id): availability
for _id, availability in uncached_values.items()
},
expires=5 * dateutil.MINUTE_SECS,
)
return availabilities
except Exception as e: # TODO: Narrow exception scope
logger.exception("lending.get_availability", extra={'ids': ids})
availabilities.update(
{
_id: update_availability_schema_to_v2(
cast(AvailabilityStatus, {'status': 'error'}),
ocaid=_id if id_type == 'identifier' else None,
)
for _id in ids_to_fetch
}
)
return availabilities | {
'error': 'request_timeout',
'details': str(e),
} # type:ignore
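# Illustrative call (id hypothetical): get_availability('identifier', ['some-ocaid']) returns a
# dict keyed by the requested ids, each value an AvailabilityStatusV2; results are cached in
# memcache for roughly five minutes.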
def get_ocaid(item: dict) -> str | None:
# Circular import otherwise
from ..book_providers import is_non_ia_ocaid
possible_fields = [
'ocaid', # In editions
'identifier', # In ?? not editions/works/solr
'ia', # In solr work records and worksearch get_docs
'lending_identifier', # In solr works records + worksearch get_doc
]
# SOLR WORK RECORDS ONLY:
# Open Library only has access to a list of archive.org IDs
# and solr isn't currently equipped with the information
# necessary to determine which editions may be openly
# available. Using public domain date as a heuristic
# Long term solution is a full reindex, but this hack will work in the
# vast majority of cases for now.
    # NOTE: there is still a risk pre-1923 books will get a print-disabled-only
    # or lendable edition.
    # Note: guaranteed to be int-able if not None
US_PD_YEAR = 1923
if float(item.get('first_publish_year') or '-inf') > US_PD_YEAR:
# Prefer `lending_identifier` over `ia` (push `ia` to bottom)
possible_fields.remove('ia')
possible_fields.append('ia')
ocaids = []
for field in possible_fields:
if item.get(field):
ocaids += item[field] if isinstance(item[field], list) else [item[field]]
ocaids = uniq(ocaids)
return next((ocaid for ocaid in ocaids if not is_non_ia_ocaid(ocaid)), None)
@public
def get_availabilities(items: list) -> dict:
result = {}
ocaids = [ocaid for ocaid in map(get_ocaid, items) if ocaid]
availabilities = get_availability_of_ocaids(ocaids)
for item in items:
ocaid = get_ocaid(item)
if ocaid:
result[item['key']] = availabilities.get(ocaid)
return result
@public
def add_availability(
items: list,
mode: Literal['identifier', 'openlibrary_work'] = "identifier",
) -> list:
"""
Adds API v2 'availability' key to dicts
:param items: items with fields containing ocaids
"""
if mode == "identifier":
ocaids = [ocaid for ocaid in map(get_ocaid, items) if ocaid]
availabilities = get_availability_of_ocaids(ocaids)
for item in items:
ocaid = get_ocaid(item)
if ocaid:
item['availability'] = availabilities.get(ocaid)
elif mode == "openlibrary_work":
_ids = [item['key'].split('/')[-1] for item in items]
availabilities = get_availability('openlibrary_work', _ids)
for item in items:
olid = item['key'].split('/')[-1]
if olid:
item['availability'] = availabilities.get(olid)
return items
def get_availability_of_ocaid(ocaid):
"""Retrieves availability based on ocaid/archive.org identifier"""
return get_availability('identifier', [ocaid])
def get_availability_of_ocaids(ocaids: list[str]) -> dict[str, AvailabilityStatusV2]:
"""
Retrieves availability based on ocaids/archive.org identifiers
"""
return get_availability('identifier', ocaids)
def get_items_and_add_availability(ocaids: list[str]) -> dict[str, "Edition"]:
"""
    Get Editions from OCAIDs and attach their availability.
Returns a dict of the form: `{"ocaid1": edition1, "ocaid2": edition2, ...}`
"""
ocaid_availability = get_availability_of_ocaids(ocaids=ocaids)
editions = web.ctx.site.get_many(
[
f"/books/{item.get('openlibrary_edition')}"
for item in ocaid_availability.values()
if item.get('openlibrary_edition')
]
)
# Attach availability
for edition in editions:
if edition.ocaid in ocaids:
edition.availability = ocaid_availability.get(edition.ocaid)
return {edition.ocaid: edition for edition in editions if edition.ocaid}
def is_loaned_out(identifier: str) -> bool:
"""Returns True if the given identifier is loaned out.
This doesn't worry about waiting lists.
"""
# is_loaned_out_on_acs4 is to be deprecated, this logic (in PR)
# should be handled by is_loaned_out_on_ia which calls
# BorrowBooks.inc in petabox
return (
is_loaned_out_on_ol(identifier)
or is_loaned_out_on_acs4(identifier)
or (is_loaned_out_on_ia(identifier) is True)
)
def is_loaned_out_on_acs4(identifier: str) -> bool:
"""Returns True if the item is checked out on acs4 server."""
item = ACS4Item(identifier)
return item.has_loan()
def is_loaned_out_on_ia(identifier: str) -> bool | None:
"""Returns True if the item is checked out on Internet Archive."""
url = "https://archive.org/services/borrow/%s?action=status" % identifier
try:
response = requests.get(url).json()
return response and response.get('checkedout')
except Exception: # TODO: Narrow exception scope
logger.exception("is_loaned_out_on_ia(%s)" % identifier)
return None
def is_loaned_out_on_ol(identifier: str) -> bool:
"""Returns True if the item is checked out on Open Library."""
loan = get_loan(identifier)
return bool(loan)
def get_loan(identifier, user_key=None):
"""Returns the loan object for given identifier, if a loan exists.
    If user_key is specified, it returns the loan only if that user has
    borrowed that book.
"""
_loan = None
account = None
if user_key:
if user_key.startswith('@'):
account = OpenLibraryAccount.get(link=user_key)
else:
account = OpenLibraryAccount.get(key=user_key)
d = web.ctx.site.store.get("loan-" + identifier)
if d and (
user_key is None
or (d['user'] == account.username)
or (d['user'] == account.itemname)
):
loan = Loan(d)
if loan.is_expired():
return loan.delete()
try:
_loan = _get_ia_loan(identifier, account and userkey2userid(account.username))
except Exception: # TODO: Narrow exception scope
logger.exception("get_loan(%s) 1 of 2" % identifier)
try:
_loan = _get_ia_loan(identifier, account and account.itemname)
except Exception: # TODO: Narrow exception scope
logger.exception("get_loan(%s) 2 of 2" % identifier)
return _loan
def _get_ia_loan(identifier: str, userid: str):
ia_loan = ia_lending_api.get_loan(identifier, userid)
return ia_loan and Loan.from_ia_loan(ia_loan)
def get_loans_of_user(user_key):
"""TODO: Remove inclusion of local data; should only come from IA"""
if 'env' not in web.ctx:
"""For the get_cached_user_loans to call the API if no cache is present,
we have to fakeload the web.ctx
"""
delegate.fakeload()
account = OpenLibraryAccount.get(username=user_key.split('/')[-1])
loandata = web.ctx.site.store.values(type='/type/loan', name='user', value=user_key)
loans = [Loan(d) for d in loandata] + (_get_ia_loans_of_user(account.itemname))
# Set patron's loans in cache w/ now timestamp
get_cached_loans_of_user.memcache_set(
[user_key], {}, loans or [], time.time()
) # rehydrate cache
return loans
get_cached_loans_of_user = cache.memcache_memoize(
get_loans_of_user,
key_prefix='lending.cached_loans',
timeout=5 * dateutil.MINUTE_SECS, # time to live for cached loans = 5 minutes
)
def get_user_waiting_loans(user_key):
"""Gets the waitingloans of the patron.
Returns [] if user has no waitingloans.
"""
from .waitinglist import WaitingLoan
try:
account = OpenLibraryAccount.get(key=user_key)
itemname = account.itemname
result = WaitingLoan.query(userid=itemname)
get_cached_user_waiting_loans.memcache_set(
[user_key], {}, result or {}, time.time()
) # rehydrate cache
return result or []
except JSONDecodeError as e:
return []
get_cached_user_waiting_loans = cache.memcache_memoize(
get_user_waiting_loans,
key_prefix='waitinglist.user_waiting_loans',
timeout=10 * dateutil.MINUTE_SECS,
)
def _get_ia_loans_of_user(userid):
ia_loans = ia_lending_api.find_loans(userid=userid)
return [Loan.from_ia_loan(d) for d in ia_loans]
def create_loan(identifier, resource_type, user_key, book_key=None):
"""Creates a loan and returns it."""
ia_loan = ia_lending_api.create_loan(
identifier=identifier, format=resource_type, userid=user_key, ol_key=book_key
)
if ia_loan:
loan = Loan.from_ia_loan(ia_loan)
eventer.trigger("loan-created", loan)
sync_loan(identifier)
return loan
# loan = Loan.new(identifier, resource_type, user_key, book_key)
# loan.save()
# return loan
NOT_INITIALIZED = object()
def sync_loan(identifier, loan=NOT_INITIALIZED):
"""Updates the loan info stored in openlibrary.
The loan records are stored at the Internet Archive. There is no way for
OL to know when a loan is deleted. To handle that situation, the loan info
is stored in the ebook document and the deletion is detected by comparing
the current loan id and loan id stored in the ebook.
This function is called whenever the loan is updated.
"""
logger.info("BEGIN sync_loan %s %s", identifier, loan)
if loan is NOT_INITIALIZED:
loan = get_loan(identifier)
# The data of the loan without the user info.
loan_data = loan and {
'uuid': loan['uuid'],
'loaned_at': loan['loaned_at'],
'resource_type': loan['resource_type'],
'ocaid': loan['ocaid'],
'book': loan['book'],
}
responses = get_availability_of_ocaid(identifier)
response = responses[identifier] if responses else {}
if response:
num_waiting = int(response.get('num_waitlist', 0) or 0)
ebook = EBookRecord.find(identifier)
# The loan known to us is deleted
is_loan_completed = ebook.get("loan") and ebook.get("loan") != loan_data
# When the current loan is a OL loan, remember the loan_data
if loan and loan.is_ol_loan():
ebook_loan_data = loan_data
else:
ebook_loan_data = None
kwargs = {
"type": "ebook",
"identifier": identifier,
"loan": ebook_loan_data,
"borrowed": str(response['status'] not in ['open', 'borrow_available']).lower(),
"wl_size": num_waiting,
}
try:
ebook.update(**kwargs)
except Exception: # TODO: Narrow exception scope
# updating ebook document is sometimes failing with
# "Document update conflict" error.
# Log the error in such cases, don't crash.
logger.exception("failed to update ebook for %s", identifier)
# fire loan-completed event
if is_loan_completed and ebook.get('loan'):
_d = dict(ebook['loan'], returned_at=time.time())
eventer.trigger("loan-completed", _d)
logger.info("END sync_loan %s", identifier)
class EBookRecord(dict):
@staticmethod
def find(identifier):
key = "ebooks/" + identifier
d = web.ctx.site.store.get(key) or {"_key": key, "type": "ebook", "_rev": 1}
return EBookRecord(d)
def update(self, **kwargs):
logger.info("updating %s %s", self['_key'], kwargs)
# Nothing to update if what we have is same as what is being asked to
# update.
d = {k: self.get(k) for k in kwargs}
if d == kwargs:
return
dict.update(self, **kwargs)
web.ctx.site.store[self['_key']] = self
class Loan(dict):
"""Model for loan."""
@staticmethod
def new(identifier, resource_type, user_key, book_key=None):
"""Creates a new loan object.
The caller is expected to call save method to save the loan.
"""
if book_key is None:
book_key = "/books/ia:" + identifier
_uuid = uuid.uuid4().hex
loaned_at = time.time()
if resource_type == "bookreader":
resource_id = "bookreader:" + identifier
loan_link = BOOKREADER_STREAM_URL_PATTERN.format(
config_bookreader_host, identifier
)
expiry = (
datetime.datetime.utcnow()
+ datetime.timedelta(days=BOOKREADER_LOAN_DAYS)
).isoformat()
else:
raise Exception(
'No longer supporting ACS borrows directly from Open Library. Please go to Archive.org'
)
if not resource_id:
raise Exception(
f'Could not find resource_id for {identifier} - {resource_type}'
)
key = "loan-" + identifier
return Loan(
{
'_key': key,
'_rev': 1,
'type': '/type/loan',
'fulfilled': 1,
'user': user_key,
'book': book_key,
'ocaid': identifier,
'expiry': expiry,
'uuid': _uuid,
'loaned_at': loaned_at,
'resource_type': resource_type,
'resource_id': resource_id,
'loan_link': loan_link,
}
)
@staticmethod
def from_ia_loan(data):
if data['userid'].startswith('ol:'):
user_key = '/people/' + data['userid'][len('ol:') :]
elif data['userid'].startswith('@'):
account = OpenLibraryAccount.get_by_link(data['userid'])
user_key = ('/people/' + account.username) if account else None
else:
user_key = None
if data['ol_key']:
book_key = data['ol_key']
else:
book_key = resolve_identifier(data['identifier'])
created = h.parse_datetime(data['created'])
# For historic reasons, OL considers expiry == None as un-fulfilled
# loan.
expiry = data.get('until')
d = {
'_key': "loan-{}".format(data['identifier']),
'_rev': 1,
'type': '/type/loan',
'userid': data['userid'],
'user': user_key,
'book': book_key,
'ocaid': data['identifier'],
'expiry': expiry,
'fulfilled': data['fulfilled'],
'uuid': 'loan-{}'.format(data['id']),
'loaned_at': time.mktime(created.timetuple()),
'resource_type': data['format'],
'resource_id': data['resource_id'],
'loan_link': data['loan_link'],
'stored_at': 'ia',
}
return Loan(d)
def is_ol_loan(self):
# self['user'] will be None for IA loans
return self['user'] is not None
def get_key(self):
return self['_key']
def save(self):
# loans stored at IA are not supposed to be saved at OL.
# This call must have been made in mistake.
if self.get("stored_at") == "ia":
return
web.ctx.site.store[self['_key']] = self
        # Inform listeners that a loan is created/updated
eventer.trigger("loan-created", self)
def is_expired(self):
return (
self['expiry'] and self['expiry'] < datetime.datetime.utcnow().isoformat()
)
def is_yet_to_be_fulfilled(self):
"""Returns True if the loan is not yet fulfilled and fulfillment time
is not expired.
"""
return (
self['expiry'] is None
and (time.time() - self['loaned_at']) < LOAN_FULFILLMENT_TIMEOUT_SECONDS
)
def return_loan(self):
logger.info("*** return_loan ***")
if self['resource_type'] == 'bookreader':
self.delete()
return True
else:
return False
def delete(self):
loan = dict(self, returned_at=time.time())
user_key = self['user']
account = OpenLibraryAccount.get(key=user_key)
if self.get("stored_at") == 'ia':
ia_lending_api.delete_loan(self['ocaid'], userkey2userid(user_key))
if account.itemname:
ia_lending_api.delete_loan(self['ocaid'], account.itemname)
else:
web.ctx.site.store.delete(self['_key'])
sync_loan(self['ocaid'])
        # Inform listeners that a loan is completed
eventer.trigger("loan-completed", loan)
def resolve_identifier(identifier):
"""Returns the OL book key for given IA identifier."""
if keys := web.ctx.site.things({'type': '/type/edition', 'ocaid': identifier}):
return keys[0]
else:
return "/books/ia:" + identifier
def userkey2userid(user_key):
username = user_key.split("/")[-1]
return "ol:" + username
def get_resource_id(identifier, resource_type):
"""Returns the resource_id for an identifier for the specified resource_type.
The resource_id is found by looking at external_identifiers field in the
metadata of the item.
"""
if resource_type == "bookreader":
return "bookreader:" + identifier
metadata = ia.get_metadata(identifier)
external_identifiers = metadata.get("external-identifier", [])
for eid in external_identifiers:
# Ignore bad external identifiers
if eid.count(":") < 2:
continue
# The external identifiers will be of the format
# acs:epub:<resource_id> or acs:pdf:<resource_id>
acs, rtype, resource_id = eid.split(":", 2)
if rtype == resource_type:
return resource_id
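# Sketch: if an item's metadata lists an external-identifier such as "acs:pdf:<resource_id>"
# (value hypothetical), get_resource_id(identifier, "pdf") returns that <resource_id>; for
# "bookreader" the result is simply "bookreader:" + identifier.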
def update_loan_status(identifier):
"""Update the loan status in OL based off status in ACS4. Used to check for early returns."""
loan = get_loan(identifier)
# if the loan is from ia, it is already updated when getting the loan
if loan is None or loan.get('from_ia'):
return
if loan['resource_type'] == 'bookreader':
if loan.is_expired():
loan.delete()
return
else:
acs4_loan = ACS4Item(identifier).get_loan()
if not acs4_loan and not loan.is_yet_to_be_fulfilled():
logger.info(
"%s: loan returned or expired or timedout, deleting...", identifier
)
loan.delete()
return
if loan['expiry'] != acs4_loan['until']:
loan['expiry'] = acs4_loan['until']
loan.save()
logger.info("%s: updated expiry to %s", identifier, loan['expiry'])
class ACS4Item:
"""Represents an item on ACS4 server.
    An item can have multiple resources (epub/pdf) and any of them could be loaned out.
This class provides a way to access the loan info from ACS4 server.
"""
def __init__(self, identifier):
self.identifier = identifier
def get_data(self):
url = f'{config_loanstatus_url}/item/{self.identifier}'
try:
return requests.get(url).json()
except OSError:
logger.exception("unable to connect BSS server")
def has_loan(self):
return bool(self.get_loan())
def get_loan(self):
"""Returns the information about loan in the ACS4 server."""
d = self.get_data() or {}
if not d.get('resources'):
return
for r in d['resources']:
if r['loans']:
loan = dict(r['loans'][0])
loan['resource_id'] = r['resourceid']
loan['resource_type'] = self._format2resource_type(r['format'])
return loan
def _format2resource_type(self, format):
formats = {"application/epub+zip": "epub", "application/pdf": "pdf"}
return formats[format]
class IA_Lending_API:
"""Archive.org waiting list API."""
def get_loan(self, identifier: str, userid: str | None = None):
params = {'method': "loan.query", 'identifier': identifier}
if userid:
params['userid'] = userid
if loans := self._post(**params).get('result', []):
return loans[0]
def find_loans(self, **kw):
try:
return self._post(method="loan.query", **kw).get('result', [])
except JSONDecodeError as e:
return []
def create_loan(self, identifier, userid, format, ol_key):
response = self._post(
method="loan.create",
identifier=identifier,
userid=userid,
format=format,
ol_key=ol_key,
)
if response['status'] == 'ok':
return response['result']['loan']
def delete_loan(self, identifier, userid):
self._post(method="loan.delete", identifier=identifier, userid=userid)
def get_waitinglist_of_book(self, identifier):
return self.query(identifier=identifier)
def get_waitinglist_of_user(self, userid):
return self.query(userid=userid)
def join_waitinglist(self, identifier, userid):
return self._post(
method="waitinglist.join", identifier=identifier, userid=userid
)
def leave_waitinglist(self, identifier, userid):
return self._post(
method="waitinglist.leave", identifier=identifier, userid=userid
)
def update_waitinglist(self, identifier, userid, **kwargs):
return self._post(
method="waitinglist.update", identifier=identifier, userid=userid, **kwargs
)
def query(self, **params):
response = self._post(method="waitinglist.query", **params)
return response.get('result')
def request(self, method, **arguments):
return self._post(method=method, **arguments)
def _post(self, **payload):
logger.info("POST %s %s", config_ia_loan_api_url, payload)
if config_ia_loan_api_developer_key:
payload['developer'] = config_ia_loan_api_developer_key
payload['token'] = config_ia_ol_shared_key
try:
jsontext = requests.post(
config_ia_loan_api_url,
data=payload,
timeout=config_http_request_timeout,
).json()
logger.info("POST response: %s", jsontext)
return jsontext
except Exception: # TODO: Narrow exception scope
logger.exception("POST failed")
raise
ia_lending_api = IA_Lending_API()
| 37,165 | Python | .py | 915 | 32.893989 | 103 | 0.628724 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
339 | ratings.py | internetarchive_openlibrary/openlibrary/core/ratings.py | from math import sqrt
from typing import TypedDict
from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO
from . import db
class WorkRatingsSummary(TypedDict):
ratings_average: float
ratings_sortable: float
ratings_count: int
ratings_count_1: int
ratings_count_2: int
ratings_count_3: int
ratings_count_4: int
ratings_count_5: int
class Ratings(db.CommonExtras):
TABLENAME = "ratings"
VALID_STAR_RATINGS = range(6) # inclusive: [0 - 5] (0-5 star)
PRIMARY_KEY = ["username", "work_id"]
ALLOW_DELETE_ON_CONFLICT = True
@classmethod
def summary(cls) -> dict:
return {
'total_books_starred': {
'total': Ratings.total_num_books_rated(),
'month': Ratings.total_num_books_rated(since=DATE_ONE_MONTH_AGO),
'week': Ratings.total_num_books_rated(since=DATE_ONE_WEEK_AGO),
},
'total_star_raters': {
'total': Ratings.total_num_unique_raters(),
'month': Ratings.total_num_unique_raters(since=DATE_ONE_MONTH_AGO),
'week': Ratings.total_num_unique_raters(since=DATE_ONE_WEEK_AGO),
},
}
@classmethod
def total_num_books_rated(cls, since=None, distinct=False) -> int | None:
oldb = db.get_db()
query = "SELECT count(%s work_id) from ratings" % (
'DISTINCT' if distinct else ''
)
if since:
query += " WHERE created >= $since"
results = oldb.query(query, vars={'since': since})
return results[0]['count'] if results else 0
@classmethod
def total_num_unique_raters(cls, since=None) -> int:
oldb = db.get_db()
query = "select count(DISTINCT username) from ratings"
if since:
query += " WHERE created >= $since"
results = oldb.query(query, vars={'since': since})
return results[0]['count'] if results else 0
@classmethod
def most_rated_books(cls, limit=10, since=False) -> list:
oldb = db.get_db()
query = 'select work_id, count(*) as cnt from ratings '
if since:
query += " WHERE created >= $since"
query += ' group by work_id order by cnt desc limit $limit'
return list(oldb.query(query, vars={'limit': limit, 'since': since}))
@classmethod
def get_users_ratings(cls, username) -> list:
oldb = db.get_db()
query = 'select * from ratings where username=$username'
return list(oldb.query(query, vars={'username': username}))
@classmethod
def get_rating_stats(cls, work_id) -> dict:
oldb = db.get_db()
query = (
"SELECT AVG(rating) as avg_rating, COUNT(DISTINCT username) as num_ratings"
" FROM ratings"
" WHERE work_id = $work_id"
)
result = oldb.query(query, vars={'work_id': int(work_id)})
return result[0] if result else {}
@classmethod
def get_work_ratings_summary(cls, work_id: int) -> WorkRatingsSummary | None:
oldb = db.get_db()
# NOTE: Using some old postgres syntax here :/ for modern postgres syntax,
# see the query in solr_builder.py
query = """
SELECT
sum( CASE WHEN rating = 1 THEN 1 ELSE 0 END ) as ratings_count_1,
sum( CASE WHEN rating = 2 THEN 1 ELSE 0 END ) as ratings_count_2,
sum( CASE WHEN rating = 3 THEN 1 ELSE 0 END ) as ratings_count_3,
sum( CASE WHEN rating = 4 THEN 1 ELSE 0 END ) as ratings_count_4,
sum( CASE WHEN rating = 5 THEN 1 ELSE 0 END ) as ratings_count_5
FROM ratings
WHERE work_id = $work_id
GROUP BY work_id
"""
result = oldb.query(query, vars={'work_id': work_id})
if not result:
return None
row = result[0]
return cls.work_ratings_summary_from_counts(
[row[f'ratings_count_{i}'] for i in range(1, 6)]
)
@classmethod
def work_ratings_summary_from_counts(
cls, rating_counts: list[int]
) -> WorkRatingsSummary:
total_count = sum(rating_counts, 0)
ratings_average = (
(sum((k * n_k for k, n_k in enumerate(rating_counts, 1)), 0) / total_count)
if total_count != 0
else 0
)
return {
'ratings_average': ratings_average,
'ratings_sortable': cls.compute_sortable_rating(rating_counts),
'ratings_count': total_count,
'ratings_count_1': rating_counts[0],
'ratings_count_2': rating_counts[1],
'ratings_count_3': rating_counts[2],
'ratings_count_4': rating_counts[3],
'ratings_count_5': rating_counts[4],
}
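    # Illustrative check: work_ratings_summary_from_counts([1, 0, 0, 0, 3]) gives
    # ratings_count == 4 and ratings_average == (1*1 + 5*3) / 4 == 4.0, along with the
    # per-star counts and a ratings_sortable value from compute_sortable_rating().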
@classmethod
def compute_sortable_rating(cls, rating_counts: list[int]) -> float:
"""
Computes a rating that can be used for sorting works by rating. It takes
into account the fact that a book with only 1 rating that is 5 stars, is not
necessarily "better" than a book with 1 rating that is 1 star, and 10 ratings
that are 5 stars. The first book has an average rating of 5, but the second
book has an average rating of 4.6 .
Uses the algorithm from:
https://www.evanmiller.org/ranking-items-with-star-ratings.html
"""
n = rating_counts
N = sum(n, 0)
K = len(n)
z = 1.65
return sum(
((k + 1) * (n_k + 1) / (N + K) for k, n_k in enumerate(n)), 0
) - z * sqrt(
(
sum(
(((k + 1) ** 2) * (n_k + 1) / (N + K) for k, n_k in enumerate(n)), 0
)
- sum(((k + 1) * (n_k + 1) / (N + K) for k, n_k in enumerate(n)), 0)
** 2
)
/ (N + K + 1)
)
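    # Worked example (values computed from the formula above and rounded): with z = 1.65,
    # a single 5-star rating, rating_counts=[0, 0, 0, 0, 1], scores about 2.40, while
    # rating_counts=[1, 0, 0, 0, 10] (one 1-star plus ten 5-star) scores about 3.54,
    # matching the docstring's point that many good ratings outrank one perfect rating.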
@classmethod
def get_all_works_ratings(cls, work_id) -> list:
oldb = db.get_db()
query = 'select * from ratings where work_id=$work_id'
return list(oldb.query(query, vars={'work_id': int(work_id)}))
@classmethod
def get_users_rating_for_work(cls, username: str, work_id: str | int) -> int | None:
"""work_id must be convertible to int."""
oldb = db.get_db()
data = {'username': username, 'work_id': int(work_id)}
query = 'SELECT * from ratings where username=$username AND work_id=$work_id'
results = list(oldb.query(query, vars=data))
rating: int | None = results[0].rating if results else None
return rating
@classmethod
def remove(cls, username, work_id):
oldb = db.get_db()
where = {'username': username, 'work_id': int(work_id)}
try:
return oldb.delete(
'ratings', where=('work_id=$work_id AND username=$username'), vars=where
)
        except Exception:  # we want to catch no entry exists
return None
@classmethod
def add(cls, username, work_id, rating, edition_id=None):
from openlibrary.core.bookshelves import Bookshelves
oldb = db.get_db()
work_id = int(work_id)
data = {'work_id': work_id, 'username': username}
if rating not in cls.VALID_STAR_RATINGS:
return None
# Vote implies user read book; Update reading log status as "Already Read"
users_read_status_for_work = Bookshelves.get_users_read_status_of_work(
username, work_id
)
if users_read_status_for_work != Bookshelves.PRESET_BOOKSHELVES['Already Read']:
Bookshelves.add(
username,
Bookshelves.PRESET_BOOKSHELVES['Already Read'],
work_id,
edition_id=edition_id,
)
users_rating_for_work = cls.get_users_rating_for_work(username, work_id)
if not users_rating_for_work:
return oldb.insert(
'ratings',
username=username,
work_id=work_id,
rating=rating,
edition_id=edition_id,
)
else:
where = "work_id=$work_id AND username=$username"
return oldb.update('ratings', where=where, rating=rating, vars=data)
| 8,310 | Python | .py | 198 | 31.934343 | 88 | 0.571658 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
340 | readableurls.py | internetarchive_openlibrary/openlibrary/core/processors/readableurls.py | """Various web.py application processors used in OL.
"""
import logging
import os
import web
from infogami.utils.view import render
from openlibrary.core import helpers as h
import urllib
logger = logging.getLogger("openlibrary.readableurls")
try:
from booklending_utils.openlibrary import is_exclusion
except ImportError:
def is_exclusion(obj):
"""Processor for determining whether records require exclusion"""
return False
class ReadableUrlProcessor:
"""Open Library code works with urls like /books/OL1M and
/books/OL1M/edit. This processor seamlessly changes the urls to
/books/OL1M/title and /books/OL1M/title/edit.
The changequery function is also customized to support this.
"""
patterns = [
(r'/\w+/OL\d+M', '/type/edition', 'title', 'untitled'),
(r'/\w+/ia:[a-zA-Z0-9_\.-]+', '/type/edition', 'title', 'untitled'),
(r'/\w+/OL\d+A', '/type/author', 'name', 'noname'),
(r'/\w+/OL\d+W', '/type/work', 'title', 'untitled'),
(r'/[/\w\-]+/OL\d+L', '/type/list', 'name', 'unnamed'),
(r'/\w+/OL\d+T', '/type/tag', 'name', 'untitled'),
]
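    # Each pattern tuple is (path regex, expected thing type, property used to
    # build the readable slug, fallback slug when that property is empty);
    # get_readable_path() below unpacks the tuples in that order.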
def __call__(self, handler):
# temp hack to handle languages and users during upstream-to-www migration
if web.ctx.path.startswith("/l/"):
raise web.seeother("/languages/" + web.ctx.path[len("/l/") :])
if web.ctx.path.startswith("/user/") and not web.ctx.site.get(web.ctx.path):
raise web.seeother("/people/" + web.ctx.path[len("/user/") :])
real_path, readable_path = get_readable_path(
web.ctx.site, web.ctx.path, self.patterns, encoding=web.ctx.encoding
)
        # @@ web.ctx.path is either quoted or unquoted depending on whether the application is running
        # @@ using builtin-server. That is probably a bug in web.py.
        # @@ Take care of that case here until that is fixed.
# @@ Also, the redirection must be done only for GET requests.
if (
readable_path != web.ctx.path
and readable_path != urllib.parse.quote(web.safestr(web.ctx.path))
and web.ctx.method == "GET"
):
raise web.redirect(
web.safeunicode(readable_path) + web.safeunicode(web.ctx.query)
)
web.ctx.readable_path = readable_path
web.ctx.path = real_path
web.ctx.fullpath = web.ctx.path + web.ctx.query
out = handler()
V2_TYPES = [
'works',
'books',
'people',
'authors',
'publishers',
'languages',
'account',
]
# Exclude noindex items
if web.ctx.get('exclude'):
web.ctx.status = "404 Not Found"
return render.notfound(web.ctx.path)
return out
def _get_object(site, key):
"""Returns the object with the given key.
    If the key has an OLID and no object is found with that key, it tries to
    find an object with the same OLID. The OL database makes sure that OLIDs
    are unique.
"""
obj = site.get(key)
if obj is None and key.startswith("/a/"):
key = "/authors/" + key[len("/a/") :]
obj = key and site.get(key)
if obj is None and key.startswith("/b/"):
key = "/books/" + key[len("/b/") :]
obj = key and site.get(key)
if obj is None and key.startswith("/user/"):
key = "/people/" + key[len("/user/") :]
obj = key and site.get(key)
basename = key.split("/")[-1]
# redirect all /.*/ia:foo to /books/ia:foo
if obj is None and basename.startswith("ia:"):
key = "/books/" + basename
obj = site.get(key)
# redirect all /.*/OL123W to /works/OL123W
if obj is None and basename.startswith("OL") and basename.endswith("W"):
key = "/works/" + basename
obj = site.get(key)
# redirect all /.*/OL123M to /books/OL123M
if obj is None and basename.startswith("OL") and basename.endswith("M"):
key = "/books/" + basename
obj = site.get(key)
# redirect all /.*/OL123A to /authors/OL123A
if obj is None and basename.startswith("OL") and basename.endswith("A"):
key = "/authors/" + basename
obj = site.get(key)
    # Disabled temporarily as the index is not ready in the db
# if obj is None and web.re_compile(r"/.*/OL\d+[A-Z]"):
# olid = web.safestr(key).split("/")[-1]
# key = site._request("/olid_to_key", data={"olid": olid}).key
# obj = key and site.get(key)
return obj
def get_readable_path(site, path, patterns, encoding=None):
"""Returns real_path and readable_path from the given path.
    The patterns argument is a list of (path_regex, type, property_name, default_value)
tuples.
"""
def match(path):
for pat, _type, _property, default_title in patterns:
m = web.re_compile('^' + pat).match(path)
if m:
prefix = m.group()
extra = web.lstrips(path, prefix)
tokens = extra.split("/", 2)
# `extra` starts with "/". So first token is always empty.
middle = web.listget(tokens, 1, "")
suffix = web.listget(tokens, 2, "")
if suffix:
suffix = "/" + suffix
return _type, _property, default_title, prefix, middle, suffix
return None, None, None, None, None, None
_type, _property, default_title, prefix, middle, suffix = match(path)
if _type is None:
path = web.safeunicode(path)
return (path, path)
if encoding is not None or path.endswith((".json", ".rdf", ".yml")):
key, ext = os.path.splitext(path)
thing = _get_object(site, key)
if thing:
path = thing.key + ext
path = web.safeunicode(path)
return (path, path)
thing = _get_object(site, prefix)
# get_object may handle redirections.
if thing:
prefix = thing.key
if thing and thing.type.key == _type:
title = thing.get(_property) or default_title
try:
# Explicitly only run for python3 to solve #4033
from urllib.parse import quote_plus
middle = '/' + quote_plus(h.urlsafe(title.strip()))
except ImportError:
middle = '/' + h.urlsafe(title.strip())
else:
middle = ""
if is_exclusion(thing):
web.ctx.exclude = True
prefix = web.safeunicode(prefix)
middle = web.safeunicode(middle)
suffix = web.safeunicode(suffix)
return (prefix + suffix, prefix + middle + suffix)
| 6,631 | Python | .py | 157 | 33.828025 | 100 | 0.5925 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
341 | invalidation.py | internetarchive_openlibrary/openlibrary/core/processors/invalidation.py | import web
import datetime
from infogami.infobase import client
from openlibrary.core import helpers as h
import contextlib
__all__ = ["InvalidationProcessor"]
class InvalidationProcessor:
"""Application processor to invalidate/update locally cached documents.
The openlibrary application caches some documents like templates, macros,
javascripts etc. locally for variety of reasons. This class implements a
way to make sure those documents are kept up-to-date with the db within
some allowed constraints.
    This implements a kind of lazy consistency, which guarantees the following:
* If a client makes an update, he will continue to see that update on
subsequent requests.
* If a client sees an update made by somebody else, he will continue to
see that update on subsequent requests.
    * A client sees an older version of a document for no longer than the
      specified timeout (in seconds) after the document is updated.
It means that the following conditions will never happen:
* A client edits a page and reloading the same page shows an older
version.
* A client loads a page and reloading the same page shows an older version.
    * A client continues to see an older version of a document for a very long time.
It is implemented as follows:
* If there is an update, set a cookie with time of the update as value.
* If the cookie timestamp is more than the last_poll_time, trigger reload.
* If the cookie timestamp is less than the last_update_time, set the
cookie with last_update_time.
* If the current time is more than timeout seconds since last_poll_time,
trigger reload.
When the reload is triggered:
    * A request to the database is made to find the list of documents modified after the last_poll_time.
* Trigger on_new_version event for each modified document. The application
code that is handling the caching must listen to that event and
invalidate/update its cached copy.
How to use::
from infogami.utils import delegate
from infogami.infobase import client
p = InvalidationProcessor(["/templates/", "/macros/"])
# install the application processor
delegate.app.add_processor(p)
# add the hook to get notifications when a document is modified
client.hooks.append(p.hook)
Glossary:
* cookie_timestamp: value of the invalidation cookie.
* last_poll_time: timestamp of the latest reload
* last_update_time: timestamp of the most recent update known to this
process.
"""
def __init__(self, prefixes, timeout=60, cookie_name="lastupdate"):
self.prefixes = prefixes
self.timeout = datetime.timedelta(0, timeout)
self.cookie_name = cookie_name
self.last_poll_time = datetime.datetime.now()
self.last_update_time = self.last_poll_time
        # set expire_time comfortably larger than the timeout
self.expire_time = 3 * timeout
self.hook = _InvalidationHook(
prefixes=prefixes, cookie_name=cookie_name, expire_time=self.expire_time
)
def __call__(self, handler):
def t(date):
return date.isoformat().split("T")[-1]
cookie_time = self.get_cookie_time()
        if self.is_timeout() or (cookie_time and cookie_time > self.last_poll_time):
self.reload()
# last update in recent timeout seconds?
has_recent_update = (self.last_poll_time - self.last_update_time) < self.timeout
if has_recent_update and (
cookie_time is None or cookie_time < self.last_update_time
):
web.setcookie(
self.cookie_name,
self.last_update_time.isoformat(),
expires=self.expire_time,
)
return handler()
def is_timeout(self):
t = datetime.datetime.now()
dt = t - self.last_poll_time
return dt > self.timeout
def get_cookie_time(self):
cookies = web.cookies()
if self.cookie_name in cookies:
return self.parse_datetime(cookies[self.cookie_name])
def parse_datetime(self, datestr):
try:
return h.parse_datetime(datestr)
except ValueError:
return None
def reload(self):
"""Triggers on_new_version event for all the documents modified since last_poll_time."""
t = datetime.datetime.now()
reloaded = False
keys = []
for prefix in self.prefixes:
q = {
"key~": prefix + "*",
"last_modified>": self.last_poll_time.isoformat(),
"limit": 1000,
}
keys += web.ctx.site.things(q)
if keys:
web.ctx._invalidation_inprogress = True
docs = web.ctx.site.get_many(keys)
for doc in docs:
with contextlib.suppress(Exception):
client._run_hooks("on_new_version", doc)
self.last_update_time = max(doc.last_modified for doc in docs)
reloaded = True
del web.ctx._invalidation_inprogress
self.last_poll_time = t
return reloaded
class _InvalidationHook:
"""Infogami client hook to get notification on edits.
This sets a cookie when any of the documents under the given prefixes is modified.
"""
def __init__(self, prefixes, cookie_name, expire_time):
self.prefixes = prefixes
self.cookie_name = cookie_name
self.expire_time = expire_time
def __call__(self):
return self
def on_new_version(self, doc):
if web.ctx.get("_invalidation_inprogress"):
# This event is triggered from invalidation. ignore it.
return
if any(doc.key.startswith(prefix) for prefix in self.prefixes):
            # The supplied doc doesn't have the updated last_modified time.
# Fetch the document afresh to get the correct last_modified time.
doc = web.ctx.site.get(doc.key)
t = doc.last_modified
web.setcookie(self.cookie_name, t.isoformat(), expires=self.expire_time)
| 6,179 | Python | .py | 134 | 37.298507 | 100 | 0.659 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
342 | engine.py | internetarchive_openlibrary/openlibrary/core/lists/engine.py | """Utility functions for processing lists.
"""
import collections
import re
def reduce_seeds(values):
"""Function to reduce the seed values got from works db."""
d = {
"works": 0,
"editions": 0,
"ebooks": 0,
"last_update": "",
}
subject_processor = SubjectProcessor()
for v in values:
d["works"] += v[0]
d['editions'] += v[1]
d['ebooks'] += v[2]
d['last_update'] = max(d['last_update'], v[3])
subject_processor.add_subjects(v[4])
d['subjects'] = subject_processor.top_subjects()
return d
RE_SUBJECT = re.compile("[, _]+")
def get_seeds(work):
"""Returns all seeds of given work."""
def get_authors(work):
return [a['author'] for a in work.get('authors', []) if 'author' in a]
def _get_subject(subject, prefix):
if isinstance(subject, str):
key = prefix + RE_SUBJECT.sub("_", subject.lower()).strip("_")
return {"key": key, "name": subject}
def get_subjects(work):
subjects = [_get_subject(s, "subject:") for s in work.get("subjects", [])]
places = [_get_subject(s, "place:") for s in work.get("subject_places", [])]
people = [_get_subject(s, "person:") for s in work.get("subject_people", [])]
times = [_get_subject(s, "time:") for s in work.get("subject_times", [])]
d = {s['key']: s for s in subjects + places + people + times if s is not None}
return d.values()
def get(work):
yield work['key']
for a in get_authors(work):
yield a['key']
for e in work.get('editions', []):
yield e['key']
for s in get_subjects(work):
yield s['key']
return list(get(work))
class SubjectProcessor:
"""Processor to take a dict of subjects, places, people and times and build a list of ranked subjects."""
def __init__(self):
self.subjects = collections.defaultdict(list)
def add_subjects(self, subjects):
for s in subjects.get("subjects", []):
self._add_subject('subject:', s)
for s in subjects.get("people", []):
self._add_subject('person:', s)
for s in subjects.get("places", []):
self._add_subject('place:', s)
for s in subjects.get("times", []):
self._add_subject('time:', s)
def _add_subject(self, prefix, name):
if s := self._get_subject(prefix, name):
self.subjects[s['key']].append(s['name'])
def _get_subject(self, prefix, subject_name):
if isinstance(subject_name, str):
key = prefix + RE_SUBJECT.sub("_", subject_name.lower()).strip("_")
return {"key": key, "name": subject_name}
def _most_used(self, seq):
d = collections.defaultdict(lambda: 0)
for x in seq:
d[x] += 1
return sorted(d, key=lambda k: d[k], reverse=True)[0]
def top_subjects(self, limit=100):
subjects = [
{"key": key, "name": self._most_used(names), "count": len(names)}
for key, names in self.subjects.items()
]
subjects.sort(key=lambda s: s['count'], reverse=True)
return subjects[:limit]
| 3,220 | Python | .py | 78 | 33 | 109 | 0.568722 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
343 | model.py | internetarchive_openlibrary/openlibrary/core/lists/model.py | """Helper functions used by the List model.
"""
from collections.abc import Iterable
from functools import cached_property
from typing import TypedDict, cast
import web
import logging
from infogami import config
from infogami.infobase import client, common
from infogami.utils import stats
from openlibrary.core import helpers as h
from openlibrary.core import cache
from openlibrary.core.models import Image, Subject, Thing, ThingKey, ThingReferenceDict
from openlibrary.plugins.upstream.models import Author, Changeset, Edition, User, Work
from openlibrary.plugins.worksearch.search import get_solr
from openlibrary.plugins.worksearch.subjects import get_subject
import contextlib
logger = logging.getLogger("openlibrary.lists.model")
SeedSubjectString = str
"""
When a subject is added to a list, it's added as a string like:
- "subject:foo"
- "person:floyd_heywood"
"""
class AnnotatedSeedDict(TypedDict):
"""
The JSON friendly version of an annotated seed.
"""
thing: ThingReferenceDict
notes: str
class AnnotatedSeed(TypedDict):
"""
The database/`Thing` friendly version of an annotated seed.
"""
thing: Thing
notes: str
class AnnotatedSeedThing(Thing):
"""
Note: This isn't a real `Thing` type! This will never be constructed
or returned. It's just here to illustrate that when we get seeds from
the db, they're wrapped in this weird `Thing` object, which will have
a _data field that is the raw JSON data. That JSON data will conform
to the `AnnotatedSeedDict` type.
"""
key: None # type: ignore[assignment]
_data: AnnotatedSeed
class List(Thing):
"""Class to represent /type/list objects in OL.
List contains the following properties, theoretically:
* cover - id of the book cover. Picked from one of its editions.
* tags - list of tags to describe this list.
"""
name: str | None
"""Name of the list"""
description: str | None
"""Detailed description of the list (markdown)"""
seeds: list[Thing | SeedSubjectString | AnnotatedSeedThing]
"""Members of the list. Either references or subject strings."""
def url(self, suffix="", **params):
return self.get_url(suffix, **params)
def get_url_suffix(self):
return self.name or "unnamed"
def get_owner(self) -> User | None:
if match := web.re_compile(r"(/people/[^/]+)/lists/OL\d+L").match(self.key):
key = match.group(1)
return cast(User, self._site.get(key))
else:
return None
def get_cover(self):
"""Returns a cover object."""
return self.cover and Image(self._site, "b", self.cover)
def get_tags(self):
"""Returns tags as objects.
Each tag object will contain name and url fields.
"""
return [web.storage(name=t, url=self.key + "/tags/" + t) for t in self.tags]
def add_seed(
self, seed: ThingReferenceDict | AnnotatedSeedDict | SeedSubjectString
):
"""Adds a new seed to this list."""
seed_object = Seed.from_json(self, seed)
if self._index_of_seed(seed_object.key) >= 0:
return False
else:
self.seeds = self.seeds or []
self.seeds.append(seed_object.to_db())
return True
def remove_seed(
self, seed: ThingReferenceDict | AnnotatedSeedDict | SeedSubjectString
):
"""Removes a seed for the list."""
seed_key = Seed.from_json(self, seed).key
if (index := self._index_of_seed(seed_key)) >= 0:
self.seeds.pop(index)
return True
else:
return False
def _index_of_seed(self, seed_key: str) -> int:
for i, s in enumerate(self._get_seed_strings()):
if s == seed_key:
return i
return -1
def __repr__(self):
return f"<List: {self.key} ({self.name!r})>"
def _get_seed_strings(self) -> list[SeedSubjectString | ThingKey]:
return [seed.key for seed in self.get_seeds()]
@cached_property
def last_update(self):
last_updates = [seed.last_update for seed in self.get_seeds()]
last_updates = [x for x in last_updates if x]
if last_updates:
return max(last_updates)
else:
return None
@property
def seed_count(self):
return len(self.seeds)
def preview(self):
"""Return data to preview this list.
Used in the API.
"""
return {
"url": self.key,
"full_url": self.url(),
"name": self.name or "",
"seed_count": self.seed_count,
"last_update": self.last_update and self.last_update.isoformat() or None,
}
def get_work_keys(self) -> Iterable[ThingKey]:
"""
Gets the keys of the works in this list, or of the works of the editions in
this list. May return duplicates.
"""
return (
(seed.document.works[0].key if seed.document.works else seed.key)
for seed in self.get_seeds()
if seed.key.startswith(('/books/', '/works/'))
)
def get_editions(self) -> Iterable[Edition]:
"""Returns the editions objects belonging to this list."""
for seed in self.get_seeds():
if (
isinstance(seed.document, Thing)
and seed.document.type.key == "/type/edition"
):
yield cast(Edition, seed.document)
def get_export_list(self) -> dict[str, list[dict]]:
"""Returns all the editions, works and authors of this list in arbitrary order.
The return value is an iterator over all the entries. Each entry is a dictionary.
This works even for lists with too many seeds as it doesn't try to
return entries in the order of last-modified.
"""
# Make one db call to fetch fully loaded Thing instances. By
# default they are 'shell' instances that dynamically get fetched
# as you access their attributes.
things = cast(
list[Thing],
web.ctx.site.get_many(
[seed.key for seed in self.seeds if isinstance(seed, Thing)]
),
)
# Create the return dictionary
return {
"editions": [
thing.dict() for thing in things if isinstance(thing, Edition)
],
"works": [thing.dict() for thing in things if isinstance(thing, Work)],
"authors": [thing.dict() for thing in things if isinstance(thing, Author)],
}
def _preload(self, keys):
keys = list(set(keys))
return self._site.get_many(keys)
def preload_works(self, editions):
return self._preload(w.key for e in editions for w in e.get('works', []))
def preload_authors(self, editions):
works = self.preload_works(editions)
return self._preload(
a.author.key for w in works for a in w.get("authors", []) if "author" in a
)
def load_changesets(self, editions):
"""Adds "recent_changeset" to each edition.
The recent_changeset will be of the form:
{
"id": "...",
"author": {
"key": "..",
"displayname", "..."
},
"timestamp": "...",
"ip": "...",
"comment": "..."
}
"""
for e in editions:
if "recent_changeset" not in e:
with contextlib.suppress(IndexError):
e['recent_changeset'] = self._site.recentchanges(
{"key": e.key, "limit": 1}
)[0]
def _get_solr_query_for_subjects(self):
terms = [seed.get_solr_query_term() for seed in self.get_seeds()]
return " OR ".join(t for t in terms if t)
def _get_all_subjects(self):
solr = get_solr()
q = self._get_solr_query_for_subjects()
        # Solr has a maxBooleanClauses limit; with too many seeds the query would fail, so skip it.
if len(self.seeds) > 500:
logger.warning(
"More than 500 seeds. skipping solr query for finding subjects."
)
return []
facet_names = ['subject_facet', 'place_facet', 'person_facet', 'time_facet']
try:
result = solr.select(
q, fields=[], facets=facet_names, facet_limit=20, facet_mincount=1
)
except OSError:
logger.error(
"Error in finding subjects of list %s", self.key, exc_info=True
)
return []
def get_subject_prefix(facet_name):
name = facet_name.replace("_facet", "")
if name == 'subject':
return ''
else:
return name + ":"
def process_subject(facet_name, title, count):
prefix = get_subject_prefix(facet_name)
key = prefix + title.lower().replace(" ", "_")
url = "/subjects/" + key
return web.storage(
{"title": title, "name": title, "count": count, "key": key, "url": url}
)
def process_all():
facets = result['facets']
for k in facet_names:
for f in facets.get(k, []):
yield process_subject(f.name, f.value, f.count)
return sorted(process_all(), reverse=True, key=lambda s: s["count"])
def get_subjects(self, limit=20):
def get_subject_type(s):
if s.url.startswith("/subjects/place:"):
return "places"
elif s.url.startswith("/subjects/person:"):
return "people"
elif s.url.startswith("/subjects/time:"):
return "times"
else:
return "subjects"
d = web.storage(subjects=[], places=[], people=[], times=[])
for s in self._get_all_subjects():
kind = get_subject_type(s)
if len(d[kind]) < limit:
d[kind].append(s)
return d
def get_seeds(self, sort=False, resolve_redirects=False) -> list['Seed']:
seeds: list[Seed] = []
for s in self.seeds:
seed = Seed.from_db(self, s)
max_checks = 10
while resolve_redirects and seed.type == 'redirect' and max_checks:
seed = Seed(self, web.ctx.site.get(seed.document.location))
max_checks -= 1
seeds.append(seed)
if sort:
seeds = h.safesort(seeds, reverse=True, key=lambda seed: seed.last_update)
return seeds
def has_seed(self, seed: ThingReferenceDict | SeedSubjectString) -> bool:
if isinstance(seed, dict):
seed = seed['key']
return seed in self._get_seed_strings()
# cache the default_cover_id for 60 seconds
@cache.memoize(
"memcache", key=lambda self: ("d" + self.key, "default-cover-id"), expires=60
)
def _get_default_cover_id(self):
for s in self.get_seeds():
cover = s.get_cover()
if cover:
return cover.id
def get_default_cover(self):
from openlibrary.core.models import Image
cover_id = self._get_default_cover_id()
return Image(self._site, 'b', cover_id)
# These functions cache and retrieve the 'my lists' section for mybooks.
@cache.memoize(
"memcache",
key=lambda self: 'core.patron_lists.%s' % web.safestr(self.key),
expires=60 * 10,
)
def get_patron_showcase(self, limit=3):
return self._get_uncached_patron_showcase(limit=limit)
def _get_uncached_patron_showcase(self, limit=3):
title = self.name or "Unnamed List"
n_covers = []
seeds = self.get_seeds()
for seed in seeds[:limit]:
if cover := seed.get_cover():
n_covers.append(cover.url("s"))
else:
n_covers.append(False)
last_modified = self.last_update
return {
'title': title,
'count': self.seed_count,
'covers': n_covers,
'last_mod': (
last_modified.isoformat(sep=' ', timespec="minutes")
if self.seed_count != 0
else ""
),
}
class Seed:
"""Seed of a list.
Attributes:
* last_update
* type - "edition", "work" or "subject"
* document - reference to the edition/work document
* title
* url
* cover
"""
key: ThingKey | SeedSubjectString
value: Thing | SeedSubjectString
notes: str | None = None
def __init__(
self,
list: List,
value: Thing | SeedSubjectString | AnnotatedSeed,
):
self._list = list
self._type = None
if isinstance(value, str):
self.key = value
self.value = value
self._type = "subject"
elif isinstance(value, dict):
# AnnotatedSeed
self.key = value['thing'].key
self.value = value['thing']
self.notes = value['notes']
else:
self.key = value.key
self.value = value
@staticmethod
def from_db(list: List, seed: Thing | SeedSubjectString) -> 'Seed':
if isinstance(seed, str):
return Seed(list, seed)
# If there is a cache miss, `seed` is a client.Thing.
# See https://github.com/internetarchive/openlibrary/issues/8882#issuecomment-1983844076
elif isinstance(seed, Thing | client.Thing):
if seed.key is None:
return Seed(list, cast(AnnotatedSeed, seed._data))
else:
return Seed(list, seed)
else:
raise ValueError(f"Invalid seed: {seed!r}")
@staticmethod
def from_json(
list: List,
seed_json: SeedSubjectString | ThingReferenceDict | AnnotatedSeedDict,
):
if isinstance(seed_json, dict):
if 'thing' in seed_json:
annotated_seed = cast(AnnotatedSeedDict, seed_json) # Appease mypy
return Seed(
list,
{
'thing': Thing(
list._site, annotated_seed['thing']['key'], None
),
'notes': annotated_seed['notes'],
},
)
elif 'key' in seed_json:
thing_ref = cast(ThingReferenceDict, seed_json) # Appease mypy
return Seed(
list,
{
'thing': Thing(list._site, thing_ref['key'], None),
'notes': '',
},
)
return Seed(list, seed_json)
def to_db(self) -> Thing | SeedSubjectString:
"""
Returns a db-compatible (I.e. Thing) representation of the seed.
"""
if isinstance(self.value, str):
return self.value
if self.notes:
return Thing(
self._list._site,
None,
{
'thing': self.value,
'notes': self.notes,
},
)
else:
return self.value
def to_json(self) -> SeedSubjectString | ThingReferenceDict | AnnotatedSeedDict:
if isinstance(self.value, str):
return self.value
elif self.notes:
return {
'thing': {'key': self.key},
'notes': self.notes,
}
else:
return {'key': self.key}
@cached_property
def document(self) -> Subject | Thing:
if isinstance(self.value, str):
return get_subject(self.get_subject_url(self.value))
else:
return self.value
def get_solr_query_term(self):
if self.type == 'subject':
typ, value = self.key.split(":", 1)
# escaping value as it can have special chars like : etc.
value = get_solr().escape(value)
return f"{typ}_key:{value}"
else:
doc_basekey = self.document.key.split("/")[-1]
if self.type == 'edition':
return f"edition_key:{doc_basekey}"
elif self.type == 'work':
return f'key:/works/{doc_basekey}'
elif self.type == 'author':
return f"author_key:{doc_basekey}"
else:
logger.warning(
f"Cannot get solr query term for seed type {self.type}",
extra={'list': self._list.key, 'seed': self.key},
)
return None
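    # e.g. (hedged; the keys are hypothetical): a "subject:science_fiction"
    # seed becomes the query term "subject_key:science_fiction", while an
    # edition seed /books/OL99M becomes "edition_key:OL99M".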
@cached_property
def type(self) -> str:
if self._type:
return self._type
key = self.document.type.key
if key in ("/type/author", "/type/edition", "/type/redirect", "/type/work"):
return key.split("/")[-1]
return "unknown"
@property
def title(self) -> str:
if self.type in ("work", "edition"):
return self.document.title or self.key
elif self.type == "author":
return self.document.name or self.key
elif self.type == "subject":
return self.key.replace("_", " ")
else:
return self.key
@property
def url(self):
if self.document:
return self.document.url()
else:
if self.key.startswith("subject:"):
return "/subjects/" + web.lstrips(self.key, "subject:")
else:
return "/subjects/" + self.key
def get_subject_url(self, subject: SeedSubjectString) -> str:
if subject.startswith("subject:"):
return "/subjects/" + web.lstrips(subject, "subject:")
else:
return "/subjects/" + subject
def get_cover(self):
if self.type in ['work', 'edition']:
return self.document.get_cover()
elif self.type == 'author':
return self.document.get_photo()
elif self.type == 'subject':
return self.document.get_default_cover()
else:
return None
@cached_property
def last_update(self):
return self.document.get('last_modified')
def dict(self):
if self.type == "subject":
url = self.url
full_url = self.url
else:
url = self.key
full_url = self.url
d = {
"url": url,
"full_url": full_url,
"type": self.type,
"title": self.title,
"last_update": self.last_update and self.last_update.isoformat() or None,
}
if cover := self.get_cover():
d['picture'] = {"url": cover.url("S")}
return d
def __repr__(self):
return f"<seed: {self.type} {self.key}>"
__str__ = __repr__
class ListChangeset(Changeset):
def get_added_seed(self):
added = self.data.get("add")
if added and len(added) == 1:
return self.get_seed(added[0])
def get_removed_seed(self):
removed = self.data.get("remove")
if removed and len(removed) == 1:
return self.get_seed(removed[0])
def get_list(self) -> List:
return self.get_changes()[0]
def get_seed(self, seed):
"""Returns the seed object."""
if isinstance(seed, dict):
seed = self._site.get(seed['key'])
return Seed.from_db(self.get_list(), seed)
def register_models():
client.register_thing_class('/type/list', List)
client.register_changeset_class('lists', ListChangeset)
| 19,851 | Python | .py | 521 | 27.71977 | 96 | 0.557006 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
344 | matchers.py | internetarchive_openlibrary/openlibrary/records/matchers.py | """
Matchers
========
This module contains a list of functions that are used to search for
records in the database.
Each function will receive a dictionary that contains the search
parameters. This shouldn't be modified (make a copy if you have to
modify it). It can do whatever kinds of searches it wants to and then
should return an iterable of keys of matched things.
The `match_functions` is a list of functions in order of running. To
reduce computation, it's a good idea to put the more accurately
matching ones which require fewer queries at the top (e.g. exact ISBN
searches etc.). Adding a new matcher means creating a new function and
then adding the function to this list.
"""
import copy
from collections import defaultdict
import logging as Logging
from infogami import config
from openlibrary.utils.solr import Solr
import web
logger = Logging.getLogger(__name__)
def match_isbn(params):
"Search by ISBN for exact matches"
if "isbn" in params.get("identifiers", {}):
isbns = params["identifiers"]["isbn"]
q = {'type': '/type/edition', 'isbn_': [str(x) for x in isbns]}
logger.debug("ISBN query : %s", q)
ekeys = list(web.ctx.site.things(q))
if ekeys:
return ekeys
return []
def match_identifiers(params):
"Match by identifiers"
print(params)
counts = defaultdict(int)
identifiers = copy.deepcopy(params.get("identifiers", {}))
for i in ["oclc_numbers", "lccn", "ocaid"]:
if i in identifiers:
val = identifiers.pop(i)
query = {'type': '/type/edition', i: val}
matches = web.ctx.site.things(query)
for i in matches:
counts[i] += 1
for k, v in identifiers.items(): # Rest of the identifiers
print("Trying ", k, v)
query = {'type': '/type/edition', 'identifiers': {k: v}}
matches = web.ctx.site.things(query)
for i in matches:
counts[i] += 1
return sorted(counts, key=counts.__getitem__, reverse=True)
def match_tap_infogami(params):
"Search infogami using title, author and publishers"
return []
def match_tap_solr(params):
"""Search solr for works using title and author and narrow using
publishers.
Note:
This function is ugly and the idea is to contain ugliness here
itself so that it doesn't leak into the rest of the library.
"""
# First find author keys. (if present in query) (TODO: This could be improved)
# if "authors" in params:
# q = 'name:(%s) OR alternate_names:(%s)' % (name, name)
return []
match_functions = [match_isbn, match_identifiers, match_tap_infogami, match_tap_solr]
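# Hedged sketch of how a new matcher could be added; match_publisher and its
# query are illustrative only and not part of the current module:
#
#     def match_publisher(params):
#         "Match editions by exact publisher name"
#         if publishers := params.get("publishers"):
#             q = {'type': '/type/edition', 'publishers': publishers}
#             return list(web.ctx.site.things(q))
#         return []
#
#     match_functions.append(match_publisher)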
| 2,688 | Python | .py | 66 | 35.484848 | 85 | 0.679754 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
345 | functions.py | internetarchive_openlibrary/openlibrary/records/functions.py | """
Functions which are used by the records package. The two public ones
are `search` and `create` which are callable from the outside world.
"""
import copy
import web
from openlibrary.catalog.add_book import normalize
from openlibrary.core.models import Thing
import openlibrary.core.helpers as h
class NoQueryParam(KeyError):
"""
Exception that is used internally when a find_by_X function is
called but no X parameters were provided.
"""
pass
def search(params):
"""
Takes a search parameter and returns a result set
Input:
------
{'doc': {'authors': [{'name': 'Arthur Conan Doyle'}],
'identifiers': {'isbn': ['1234567890']},
'title': 'A study in Scarlet'}}
Output:
-------
{'doc': {'authors': [
{
'key': '/authors/OL1A',
'name': 'Arthur Conan Doyle'
}
],
'identifiers': {'isbn': ['1234567890']},
'key': '/books/OL1M',
             'title': 'A study in Scarlet',
             'work': {'key': '/works/OL1W'}
},
'matches': [{'edition': '/books/OL1M', 'work': '/works/OL1W'},
{'edition': None, 'work': '/works/OL234W'}]}
'doc' is the best fit match. It contains only the keys that were
provided as input and one extra key called 'key' which will be
openlibrary identifier if one was found or None if nothing was.
There will be two extra keys added to the 'doc'.
1. 'work' which is a dictionary with a single element 'key' that
contains a link to the work of the matched edition.
2. 'authors' is a list of dictionaries each of which contains an
element 'key' that links to the appropriate author.
If a work, author or an edition is not matched, the 'key' at that
level will be None.
To update fields in a record, add the extra keys to the 'doc' and
send the resulting structure to 'create'.
'matches' contain a list of possible matches ordered in
decreasing order of certainty. The first one will be same as
'doc' itself.
TODO: Things to change
1. For now, if there is a work match, the provided authors
will be replaced with the ones that are stored.
"""
params = copy.deepcopy(params)
doc = params.pop("doc")
matches = []
# TODO: We are looking only at edition searches here. This should be expanded to works.
if "isbn" in doc.get('identifiers', {}):
matches.extend(find_matches_by_isbn(doc['identifiers']['isbn']))
if "identifiers" in doc:
d = find_matches_by_identifiers(doc['identifiers'])
matches.extend(d['all'])
matches.extend(
d['any']
) # TODO: These are very poor matches. Maybe we should put them later.
if "publisher" in doc or "publish_date" in doc or "title" in doc:
matches.extend(find_matches_by_title_and_publishers(doc))
return massage_search_results(matches, doc)
def find_matches_by_isbn(isbns):
"Find matches using isbns."
q = {'type': '/type/edition', 'isbn_': str(isbns[0])}
print("ISBN query : ", q)
if ekeys := list(web.ctx.site.things(q)):
return ekeys[:1] # TODO: We artificially match only one item here
else:
return []
def find_matches_by_identifiers(identifiers):
"""Find matches using all the identifiers in the given doc.
We consider only oclc_numbers, lccn and ocaid. isbn is dealt with
separately.
Will return two lists of matches:
all : List of items that match all the given identifiers (better
matches).
any : List of items that match any of the given identifiers
(poorer matches).
"""
identifiers = copy.deepcopy(identifiers)
# Find matches that match everything.
q = {'type': '/type/edition'}
for i in ["oclc_numbers", "lccn", "ocaid"]:
if i in identifiers:
q[i] = identifiers[i]
matches_all = web.ctx.site.things(q)
# Find matches for any of the given parameters and take the union
# of all such matches
matches_any = set()
for i in ["oclc_numbers", "lccn", "ocaid"]:
q = {'type': '/type/edition'}
if i in identifiers:
q[i] = identifiers[i]
matches_any.update(web.ctx.site.things(q))
matches_any = list(matches_any)
return {"all": matches_all, "any": matches_any}
def find_matches_by_title_and_publishers(doc):
"Find matches using title and author in the given doc"
# TODO: Use normalised_title instead of the regular title
# TODO: Use catalog.add_book.load_book:build_query instead of this
q = {'type': '/type/edition'}
for key in ["title", 'publishers', 'publish_date']:
if key in doc:
q[key] = doc[key]
ekeys = web.ctx.site.things(q)
return ekeys
def massage_search_results(things, input_query=None):
"""Converts list of things into the output expected by users of the search API.
    If input_query is non-empty, narrow the returned keys to the ones in
    this dictionary. Also, if the things list is empty, use this to
construct a response with key = None.
"""
input_query = input_query or {} # Avoid a mutable default argument
if things:
best = things[0]
doc = thing_to_doc(best, list(input_query))
matches = things_to_matches(things)
else:
doc = build_create_input(input_query)
matches = [{"edition": None, "work": None}]
return {'doc': doc, 'matches': matches}
def build_create_input(params):
params['key'] = None
params['type'] = '/type/edition'
params['work'] = {'key': None}
params['authors'] = [
{'name': x['name'], 'key': None} for x in params.get('authors', [])
]
return params
def edition_to_doc(thing):
"""Converts an edition document from infobase into a 'doc' used by
the search API.
"""
doc = thing.dict()
# Process identifiers
identifiers = doc.get("identifiers", {})
for i in ["oclc_numbers", "lccn", "ocaid"]:
if i in doc:
identifiers[i] = doc.pop(i)
for i in ["isbn_10", "isbn_13"]:
if i in doc:
identifiers.setdefault('isbn', []).extend(doc.pop(i))
doc['identifiers'] = identifiers
# TODO : Process classifiers here too
# Unpack works and authors
if "works" in doc:
work = doc.pop("works")[0]
doc['work'] = work
authors = [{'key': str(x.author)} for x in thing.works[0].authors]
doc['authors'] = authors
return doc
def work_to_doc(thing):
"""
Converts the given work into a 'doc' used by the search API.
"""
doc = thing.dict()
# Unpack works and authors
authors = [{'key': x.author.key} for x in thing.authors]
doc['authors'] = authors
return doc
def author_to_doc(thing):
return thing.dict()
def thing_to_doc(thing, keys=None):
"""Converts an infobase 'thing' into an entry that can be used in
the 'doc' field of the search results.
    If keys are provided, it will remove all keys in the item except the
    ones specified in 'keys'.
"""
if not isinstance(thing, Thing):
thing = web.ctx.site.get(thing)
keys = keys or []
typ = str(thing['type'])
processors = {
'/type/edition': edition_to_doc,
'/type/work': work_to_doc,
'/type/author': author_to_doc,
}
doc = processors[typ](thing)
# Remove version info
for i in ['latest_revision', 'last_modified', 'revision']:
if i in doc:
doc.pop(i)
# Unpack 'type'
doc['type'] = doc['type']['key']
if keys:
keys += ['key', 'type', 'authors', 'work']
keys = set(keys)
for i in list(doc):
if i not in keys:
doc.pop(i)
return doc
def things_to_matches(things):
"""Converts a list of things into a list of 'matches' used by the search API"""
matches = []
for thing in things:
if not isinstance(thing, Thing):
thing = web.ctx.site.get(thing)
key = thing['key']
if key.startswith("/books"):
edition = key
work = thing.works[0].key
if key.startswith("/works"):
work = key
edition = None
matches.append({"edition": edition, "work": work})
return matches
# Creation/updation entry point
def create(records):
"""
Creates one or more new records in the system.
TODO: Describe Input/output
"""
if doc := records["doc"]:
things = doc_to_things(copy.deepcopy(doc))
web.ctx.site.save_many(things, 'Import new records.')
return [thing['key'] for thing in things]
# Creation helpers
def edition_doc_to_things(doc):
"""
unpack identifiers, classifiers
Process work and author fields if present
"""
retval = []
# Unpack identifiers
identifiers = doc.get("identifiers", {})
for i in ["oclc_numbers", "isbn_10", "isbn_13", "lccn", "ocaid"]:
if i in identifiers:
doc[i] = identifiers.pop(i)
if "isbn" in identifiers:
isbns = identifiers.pop("isbn")
isbn_10 = [x for x in isbns if len(x) == 10]
isbn_13 = [x for x in isbns if len(x) == 13]
if isbn_10:
doc["isbn_10"] = isbn_10
if isbn_13:
doc["isbn_13"] = isbn_13
# TODO: Unpack classifiers
work = authors = None
if 'work' in doc:
work = doc.pop('work')
work['type'] = '/type/work'
work = doc_to_things(work)
retval.extend(work)
if 'authors' in doc:
authors = doc.pop('authors')
for i in authors:
i['type'] = '/type/author'
a = []
for i in authors:
a.extend(doc_to_things(i))
retval.extend(a)
authors = a
# Attach authors to the work
# TODO: Consider updation here?
if work and authors:
for i in authors:
a = {
'type': '/type/author_role',
'author': i['key'],
} # TODO : Check this with Anandb
work[0].setdefault('authors', []).append(
a
) # Attach this author to the work
return retval
def work_doc_to_things(doc):
new_things = []
# Ugly hack to prevent Things from being processed
if 'authors' in doc and all(isinstance(x, dict) for x in doc['authors']):
authors = doc['authors']
author_entries = []
for i in authors:
i['type'] = '/type/author'
new_author = doc_to_things(i)
new_things.extend(new_author)
a = {
'type': '/type/author_role',
'author': new_author[0]['key'],
} # TODO : Check this with Anandb
author_entries.append(a)
doc['authors'] = author_entries
return new_things
def author_doc_to_things(doc):
return []
def doc_to_things(doc):
"""
Receives a 'doc' (what the search API returns and receives) and
returns a list of dictionaries that can be added into infobase.
    Expects the `type` field to figure out the type of object.
Has separate sub functions to convert editions, works and
authors. Logic is different for these three.
This function will call itself for the 'work' and 'authors' fields
if present.
If the doc has a 'key', the thing corresponding to that key will
be fetched from the database and the fields of the original doc
updated.
If the doc doesn't have a key, the function will call
web.ctx.site.new_key, generate one for it and add that as the key.
"""
retval = []
doc = copy.deepcopy(doc)
key = doc.get('key')
typ = doc['type']
# Handle key creation and updation of data
if key:
db_thing = web.ctx.site.get(key).dict()
# Remove extra version related fields
for i in ('latest_revision', 'last_modified', 'revision'):
if i in db_thing:
db_thing.pop(i)
for i in list(db_thing):
if i in doc:
db_thing.pop(i)
doc.update(db_thing)
else:
key = web.ctx.site.new_key(typ)
doc['key'] = key
# Type specific processors
processors = {
'/type/edition': edition_doc_to_things,
'/type/work': work_doc_to_things,
'/type/author': author_doc_to_things,
}
extras = processors[typ](doc)
retval.append(doc)
retval.extend(extras)
return retval
| 12,612 | Python | .py | 337 | 29.967359 | 91 | 0.606038 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
346 | driver.py | internetarchive_openlibrary/openlibrary/records/driver.py | """
Low level import API
====================
The Low level import API has 2 stages.
1. Matchers
-----------
The matchers are functions present in the ``matchers`` module and
exposed via the ``match_functions`` list. These are functions that try
to search for entries in the database that match the input criteria in
various ways. The results of all the matchings are chained into a
single iterable and returned.
The matching is done liberally. The idea is to not miss any
records. We might have extra records that are bad matches.
The idea is to isolate the ugliness and complexity of the searches
into a small set of functions inside a single module. The rest of the
API can be clean and understandable then.
2. Filter
---------
The filter is a function that consumes the output of the matchers and
discards any items that are bad matches. This narrows the list of
matches and then returns the list of good ones.
Finally, the list of matched keys is massaged into the proper output
expected from the search API and returned to the client.
"""
import itertools
import logging as Logging
import web
from .functions import massage_search_results, thing_to_doc
from .matchers import match_functions
logger = Logging.getLogger("openlibrary.importapi")
def search(params):
params = params["doc"]
matched_keys = run_matchers(params)
filtered_keys = run_filter(matched_keys, params)
return massage_search_results(list(filtered_keys))
def run_matchers(params):
"""
    Run all the matchers in the match_functions list and produce a single
    iterable of keys which match the given parameters.
"""
keys = []
for i in match_functions:
logger.debug("Running %s", i.__name__)
keys.append(i(params))
return itertools.chain.from_iterable(keys)
def run_filter(matched_keys, params):
"""
Will check all the matched keys for the following conditions and
emit only the ones that pass all of them.
This function compensates for the permissiveness of the matchers.
The rules are as follows
1. All the fields provided in params should either be matched or
missing in the record.
2. In case of the title and author, if provided in params, it
*should* match (absence is not acceptable).
TODO: Don't create if title missing
*match* needn't mean an exact match. This is especially true for
publishers and such ('Dover publishers' and 'Dover' are
equivalent).
"""
def compare(i1, i2):
"""Compares `i1` to see if it matches `i2`
according to the rules stated above.
`i1` is originally the `thing` and `i2` the search parameters.
"""
if i1 == i2: # Trivially the same
return True
if isinstance(i1, list) and isinstance(i2, list):
# i2 should be a subset of i1. Can't use plain old set
# operations since we have to match recursively using
# compare
for i in i2:
matched = False
for j in i1:
if compare(i, j):
matched = True
break
if not matched: # A match couldn't be found for at least one element
logger.debug("Couldn't match %s in %s", i, i1)
return False
return True
if isinstance(i1, dict) and isinstance(i2, dict):
# Every key in i2 should either be in i1 and matching
# OR
# In case of the 'title' and 'authors', if it's there in
# the search params, it *should* match.
for k in i2:
if k in {"title", "authors"}:
# Special case title and authors. Return False if not present in thing
# TODO: Convert author names to keys.
if k not in i1 or not compare(i1[k], i2[k]):
return False
elif k in i1:
# Recursively match for other keys
if compare(i1[k], i2[k]):
pass
else:
return False
else:
return False
return True
return False
docs = (thing_to_doc(web.ctx.site.get(x)) for x in matched_keys)
    return map(
        lambda x: web.ctx.site.get(x['key']),
        filter(lambda y: compare(y, params), docs),
    )
| 4,481 | Python | .py | 106 | 33.59434 | 95 | 0.631361 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
347 | test_functions.py | internetarchive_openlibrary/openlibrary/records/tests/test_functions.py | import json
import pytest
from ..functions import (
doc_to_things,
search,
create,
thing_to_doc,
things_to_matches,
find_matches_by_isbn,
find_matches_by_identifiers,
find_matches_by_title_and_publishers,
massage_search_results,
)
def same_dict(a, b):
"""
def same_dict(a: dict, b: dict) -> bool:
Temporary utility function because of different dict key order in Python 2 and 3
"""
key = 'created'
b[key] = b.get(key, a[key])
return a == b or json.dumps(a, sort_keys=True) == json.dumps(b, sort_keys=True)
def populate_infobase(site):
"Dumps some documents into infobase"
# Create two authors
atype = '/type/author'
akey0 = site.new_key(atype)
a0 = {'name': 'Test author 1', 'type': {'key': atype}, 'key': akey0}
akey1 = site.new_key(atype)
a1 = {'name': 'Test author 1', 'type': {'key': atype}, 'key': akey1}
# Create a work
wtype = '/type/work'
wkey = site.new_key(wtype)
w = {
'title': 'test1',
'type': {'key': wtype},
'key': wkey,
'authors': [{'author': a0}, {'author': a1}],
}
site.save(w)
# Create two editions for this work
editions = []
etype = '/type/edition'
for i in range(2):
ekey = site.new_key(etype)
e = {
'title': 'test1',
'type': {'key': etype},
'lccn': ['123%d' % i],
'oclc_numbers': ['456%d' % i],
'key': ekey,
'ocaid': "12345%d" % i,
'isbn_10': ["123456789%d" % i],
"works": [{"key": wkey}],
}
site.save(e)
editions.append(ekey)
# Now create a work without any edition
wkey = site.new_key(wtype)
w = {
'title': 'editionless',
'type': {'key': wtype},
'key': wkey,
}
site.save(w)
def test_doc_to_thing_adds_key_to_edition(mock_site):
"Test whether doc_to_things adds a key to an edition"
doc = {'type': '/type/edition'}
thing = doc_to_things(doc)
assert 'key' in thing[0]
assert thing[0]['key'] == '/books/OL1M'
def test_doc_to_thing_adds_key_to_work(mock_site):
"Test whether doc_to_things adds a key to a work"
doc = {'type': '/type/work'}
thing = doc_to_things(doc)
assert 'key' in thing[0]
assert thing[0]['key'] == '/works/OL1W'
def test_doc_to_thing_adds_key_to_author(mock_site):
"Test whether doc_to_things adds a key to an author"
doc = {'type': '/type/author'}
thing = doc_to_things(doc)
assert 'key' in thing[0]
assert thing[0]['key'] == '/authors/OL1A'
def test_doc_to_thing_updation_of_edition(mock_site):
"Tests whether edition records are populated with fields from the database"
populate_infobase(mock_site)
doc = {'type': '/type/edition', 'key': '/books/OL1M'}
thing = doc_to_things(doc)
expected = {
'title': 'test1',
'lccn': ['1230'],
'isbn_10': ['1234567890'],
'key': '/books/OL1M',
'ocaid': '123450',
'oclc_numbers': ['4560'],
'works': [{'key': '/works/OL1W'}],
'type': '/type/edition',
}
assert same_dict(thing[0], expected)
def test_doc_to_thing_updation_of_work(mock_site):
"Tests whether work records are populated with fields from the database"
populate_infobase(mock_site)
doc = {'type': '/type/work', 'key': '/works/OL1W'}
thing = doc_to_things(doc)
authors = thing[0].pop('authors')
expected = {'type': '/type/work', 'key': '/works/OL1W', 'title': 'test1'}
assert same_dict(thing[0], expected)
assert {i['author'] for i in authors} == {'/authors/OL3A', '/authors/OL4A'}
def test_doc_to_thing_unpack_work_and_authors_from_edition(mock_site):
"Tests if the 'work' and 'author' fields in a an edition doc are unpacked and converted."
doc = {
'type': '/type/edition',
'work': {'title': 'Test title for work'},
'authors': [{'name': 'Test author'}],
}
things = doc_to_things(doc)
expected = [
{'key': '/books/OL1M', 'type': '/type/edition'}, # The edition
{
'authors': [{'author': '/authors/OL1A', 'type': '/type/author_role'}],
'key': '/works/OL1W',
'title': 'Test title for work',
'type': '/type/work',
}, # The work
{
'key': '/authors/OL1A',
'name': 'Test author',
'type': '/type/author',
}, # The author
]
assert expected == things
def test_doc_to_thing_unpack_authors_from_work(mock_site):
"Tests if the 'authors' fields in a work doc are unpacked and converted."
doc = {
'type': '/type/work',
'title': 'This is a test book',
'authors': [{'name': 'Test author'}],
}
things = doc_to_things(doc)
expected = [
{
'authors': [{'author': '/authors/OL1A', 'type': '/type/author_role'}],
'key': '/works/OL1W',
'title': 'This is a test book',
'type': '/type/work',
}, # The work
{
'key': '/authors/OL1A',
'name': 'Test author',
'type': '/type/author',
}, # The author
]
assert expected == things
def test_doc_to_thing_unpack_identifiers(mock_site):
"Tests if the identifiers are unpacked from an edition"
doc = {
'type': '/type/edition',
'identifiers': {
"oclc_numbers": ['1234'],
"isbn_10": ['1234567890'],
"isbn_13": ['1234567890123'],
"lccn": ['5678'],
"ocaid": ['90'],
},
}
things = doc_to_things(doc)
for k, v in doc['identifiers'].items():
assert things[0][k] == v
def test_create(mock_site):
"Tests the create API"
doc = {
'type': '/type/edition',
'publisher': "Test publisher",
'work': {'title': 'Test title for work'},
'authors': [{'name': 'Test author'}],
'identifiers': {
"oclc_numbers": ['1234'],
"isbn_10": ['1234567890'],
"isbn_13": ['1234567890123'],
"lccn": ['5678'],
"ocaid": ['90'],
},
}
create({'doc': doc})
work = mock_site.get("/works/OL1W")
edition = mock_site.get("/books/OL1M")
author = mock_site.get("/authors/OL1A")
# Check work
assert work.title == "Test title for work"
assert len(work.authors) == 1
assert work.authors[0].author == "/authors/OL1A"
# Check edition
for k, v in doc['identifiers'].items():
assert edition[k] == v
    assert edition.publisher == "Test publisher"
# Check author
assert author.name == "Test author"
def test_thing_to_doc_edition(mock_site):
"Tests whether an edition is properly converted back into a doc"
populate_infobase(mock_site)
edition = mock_site.get('/books/OL1M')
doc = thing_to_doc(edition)
expected = {
'authors': [{'key': '/authors/OL1A'}, {'key': '/authors/OL2A'}],
'identifiers': {
'isbn': ['1234567890'],
'lccn': ['1230'],
'ocaid': '123450',
'oclc_numbers': ['4560'],
},
'key': '/books/OL1M',
'title': 'test1',
'type': '/type/edition',
'work': {'key': '/works/OL1W'},
}
assert same_dict(doc, expected)
def test_thing_to_doc_edition_key_limiting(mock_site):
"Tests whether extra keys are removed during converting an edition into a doc"
populate_infobase(mock_site)
edition = mock_site.get('/books/OL1M')
doc = thing_to_doc(edition, ["title"])
expected = {
'authors': [{'key': '/authors/OL1A'}, {'key': '/authors/OL2A'}],
'key': '/books/OL1M',
'title': 'test1',
'type': '/type/edition',
'work': {'key': '/works/OL1W'},
}
assert doc == expected
def test_thing_to_doc_work(mock_site):
"Tests whether a work is properly converted back into a doc"
populate_infobase(mock_site)
edition = mock_site.get('/works/OL1W')
doc = thing_to_doc(edition)
expected = {
'authors': [{'key': '/authors/OL1A'}, {'key': '/authors/OL2A'}],
'key': '/works/OL1W',
'title': 'test1',
'type': '/type/work',
}
assert same_dict(doc, expected)
def test_things_to_matches(mock_site):
"""Tests whether a list of keys is converted into a list of
'matches' as returned by the search API"""
populate_infobase(mock_site)
matches = things_to_matches(['/books/OL1M', '/works/OL2W'])
expected = [
{'edition': '/books/OL1M', 'work': '/works/OL1W'},
{'edition': None, 'work': '/works/OL2W'},
]
assert matches == expected
@pytest.mark.skipif('"isbn_ not supported by mock_site"')
def test_find_matches_by_isbn(mock_site):
"""Tests whether books are matched by ISBN"""
populate_infobase(mock_site)
matches = find_matches_by_isbn(['1234567890'])
assert matches == ['/books/OL1M']
def test_find_matches_by_identifiers(mock_site):
"Validates the all and any return values of find_matches_by_identifiers"
# First create 2 records
record0 = {
'doc': {
'identifiers': {
"oclc_numbers": ["1807182"],
"lccn": ["34029558"],
'isbn_10': ['1234567890'],
},
'key': None,
'title': 'THIS IS A TEST BOOK 1',
'type': '/type/edition',
}
}
record1 = {
'doc': {
'identifiers': {
"oclc_numbers": ["2817081"],
"lccn": ["34029558"],
'isbn_10': ['09876543210'],
},
'key': None,
'title': 'THIS IS A TEST BOOK 2',
'type': '/type/edition',
}
}
create(record0)
create(record1)
q = {'oclc_numbers': "1807182", 'lccn': '34029558'}
results = find_matches_by_identifiers(q)
assert results["all"] == ['/books/OL1M']
assert sorted(results["any"]) == ['/books/OL1M', '/books/OL2M']
@pytest.mark.xfail(reason="TODO: find_matches_by_title_and_publishers() needs work!")
def test_find_matches_by_title_and_publishers(mock_site):
"Try to search for a record that should match by publisher and year of publishing"
record0 = {
'doc': {
'isbn_10': ['1234567890'],
'key': None,
'title': 'Bantam book',
'type': '/type/edition',
'publishers': ['Bantam'],
'publish_year': '1992',
}
}
record1 = {
'doc': {
'isbn_10': ['0987654321'],
'key': None,
'title': 'Dover book',
'type': '/type/edition',
'publishers': ['Dover'],
'publish_year': '2000',
}
}
create(record0)
create(record1)
# A search that should fail
q = {'publishers': ["Bantam"], 'publish_year': '2000'}
result = find_matches_by_title_and_publishers(q)
assert not result, "Found a match '%s' where there should have been none" % result
# A search that should return the first entry (title, publisher and year)
q = {'title': 'Bantam book', 'publishers': ["Bantam"], 'publish_year': '1992'}
result = find_matches_by_title_and_publishers(q)
assert result == ['/books/OL1M']
# A search that should return the second entry (title only)
q = {'title': 'Dover book'}
result = find_matches_by_title_and_publishers(q)
assert result == ['/books/OL2M']
# TODO: Search by title and then filter for publisher in the application directly.
def test_search_by_title(mock_site):
"Drill the main search API using title"
populate_infobase(mock_site)
q = {'title': "test1"}
matches = search({"doc": q})
expected = {
'doc': {
'authors': [{'key': '/authors/OL1A'}, {'key': '/authors/OL2A'}],
'key': '/books/OL1M',
'title': 'test1',
'type': '/type/edition',
'work': {'key': '/works/OL1W'},
},
'matches': [
{'edition': '/books/OL1M', 'work': '/works/OL1W'},
{'edition': '/books/OL2M', 'work': '/works/OL1W'},
],
}
assert matches == expected
@pytest.mark.skipif('"isbn_ not supported by mock_site"')
def test_search_by_isbn(mock_site):
"Drill the main search API using isbn"
populate_infobase(mock_site)
q = ['1234567890']
matches = search({"doc": {"identifiers": {"isbn": q}}})
assert matches == {
'doc': {
'authors': [{'key': '/authors/OL1A'}, {'key': '/authors/OL2A'}],
'identifiers': {
'isbn': ['1234567890'],
'lccn': ['1230'],
'ocaid': '123450',
'oclc_numbers': ['4560'],
},
'key': '/books/OL1M',
'title': 'test1',
'type': '/type/edition',
'work': {'key': '/works/OL1W'},
},
'matches': [{'edition': '/books/OL1M', 'work': '/works/OL1W'}],
}
def test_massage_search_results_edition(mock_site):
"Test if search results are properly massaged"
populate_infobase(mock_site)
matches = ['/books/OL1M', '/books/OL2M']
# With limiting
massaged = massage_search_results(matches, {"title": None})
expected = {
'doc': {
'authors': [{'key': '/authors/OL1A'}, {'key': '/authors/OL2A'}],
'key': '/books/OL1M',
'title': 'test1',
'type': '/type/edition',
'work': {'key': '/works/OL1W'},
},
'matches': [
{'edition': '/books/OL1M', 'work': '/works/OL1W'},
{'edition': '/books/OL2M', 'work': '/works/OL1W'},
],
}
assert massaged == expected
# Without limiting
massaged = massage_search_results(matches)
expected = {
'doc': {
'authors': [{'key': '/authors/OL1A'}, {'key': '/authors/OL2A'}],
'created': massaged['doc']['created'],
'identifiers': {
'isbn': ['1234567890'],
'lccn': ['1230'],
'ocaid': '123450',
'oclc_numbers': ['4560'],
},
'key': '/books/OL1M',
'title': 'test1',
'type': '/type/edition',
'work': {'key': '/works/OL1W'},
},
'matches': [
{'edition': '/books/OL1M', 'work': '/works/OL1W'},
{'edition': '/books/OL2M', 'work': '/works/OL1W'},
],
}
assert massaged == expected
# TODO : Test when no matches at all are found
| 14,581 | Python | .py | 412 | 27.543689 | 93 | 0.543592 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
348 | code.py | internetarchive_openlibrary/openlibrary/admin/code.py | import os
import web
from infogami.utils.view import render_template
from openlibrary.core import admin
from openlibrary.admin import utils
app = web.auto_application()
app.add_processor(utils.admin_processor)
app.notfound = utils.notfound
class home(app.page): # type: ignore[name-defined]
path = "/admin/?"
def GET(self):
stats = admin.get_stats()
return render_template("admin/index", stats)
class static(app.page): # type: ignore[name-defined]
path = "(/(?:images|js|css)/.*)"
def GET(self, path):
raise web.seeother("/static/upstream" + path)
def setup():
# load templates from this package so that they are available via render_template
from infogami.utils import template
template.load_templates(os.path.dirname(__file__))
| 796 | Python | .py | 21 | 33.857143 | 85 | 0.724771 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
349 | utils.py | internetarchive_openlibrary/openlibrary/admin/utils.py | """utils for admin application.
"""
import web
from infogami.utils import delegate, features
from infogami.utils.view import render_template
def admin_processor(handler):
"""web.py application processor for enabling infogami and verifying admin permissions."""
delegate.initialize_context()
delegate.context.features = []
features.loadhook()
# required to give a special look and feel in site template
delegate.context.setdefault('cssfile', 'admin')
delegate.context.setdefault('usergroup', 'admin')
page = handler()
return render_template("site", page)
def notfound():
msg = render_template(
"site", render_template("notfound", web.ctx.path, create=False)
)
return web.notfound(msg)
| 748 | Python | .py | 20 | 33.2 | 93 | 0.736111 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
350 | stats.py | internetarchive_openlibrary/openlibrary/admin/stats.py | """
Script to read out data from thingdb and put it in couch so that it
can be queried by the /admin pages on openlibrary
"""
import os
import logging
import datetime
import web
import yaml
from openlibrary.admin import numbers
logger = logging.getLogger(__name__)
web.config.debug = False
class InvalidType(TypeError):
pass
def connect_to_pg(config_file):
"""Connects to the postgres database specified in the dictionary
`config`. Needs a top level key `db_parameters` and under that
`database` (or `db`) at the least. If `user` and `host` are
provided, they're used as well."""
with open(config_file) as f:
config = yaml.safe_load(f)
conf = {}
conf["db"] = config["db_parameters"].get("database") or config["db_parameters"].get(
"db"
)
if not conf['db']:
raise KeyError("database/db")
host = config["db_parameters"].get("host")
user = config["db_parameters"].get("user") or config["db_parameters"].get(
"username"
)
if host:
conf["host"] = host
if user:
conf["user"] = user
logger.debug(" Postgres Database : %(db)s" % conf)
return web.database(dbn="postgres", **conf)
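# Illustrative sketch of the YAML shape that connect_to_pg() expects; the key
# names follow the lookups above and the values are placeholders:
#
#   db_parameters:
#       database: openlibrary   # or `db`
#       host: db.example.org    # optional
#       user: openlibrary       # optional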
def get_config_info(infobase_config):
"""Parses the config file(s) to get back all the necessary pieces of data.
Add extra parameters here and change the point of calling.
"""
with open(infobase_config) as f:
config = yaml.safe_load(f)
logroot = config.get("writelog")
return logroot
def store_data(data, date):
uid = "counts-%s" % date
logger.debug(" Updating stats for %s - %s", uid, data)
doc = web.ctx.site.store.get(uid) or {}
doc.update(data)
doc['type'] = 'admin-stats'
# as per https://github.com/internetarchive/infogami/blob/master/infogami/infobase/_dbstore/store.py#L79-L83
# avoid document collisions if multiple tasks updating stats in competition (race)
doc["_rev"] = None
web.ctx.site.store[uid] = doc
def run_gathering_functions(
infobase_db, coverstore_db, start, end, logroot, prefix, key_prefix=None
):
"""Runs all the data gathering functions with the given prefix
inside the numbers module"""
funcs = [x for x in dir(numbers) if x.startswith(prefix)]
d = {}
for i in funcs:
fn = getattr(numbers, i)
key = i.replace(prefix, "")
if key_prefix:
key = f"{key_prefix}_{key}"
try:
ret = fn(
thingdb=infobase_db,
coverdb=coverstore_db,
logroot=logroot,
start=start,
end=end,
)
logger.info(" %s - %s", i, ret)
d[key] = ret
except numbers.NoStats:
logger.warning(" %s - No statistics available", i)
except Exception as k:
logger.warning(" Failed with %s", k)
return d
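# Illustrative mapping of gathering functions to stored keys, following the
# prefix/key_prefix handling above (both functions are defined in
# openlibrary.admin.numbers):
#   admin_total__works with key_prefix="total" -> stored under "total_works"
#   admin_range__covers with no key_prefix     -> stored under "covers"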
def setup_ol_config(openlibrary_config_file):
"""Setup OL configuration.
Required for storing counts in store.
"""
import infogami
from infogami import config
config.plugin_path += ['openlibrary.plugins']
config.site = "openlibrary.org"
infogami.load_config(openlibrary_config_file)
infogami.config.infobase_parameters = {"type": "ol"}
if config.get("infobase_config_file"):
dir = os.path.dirname(openlibrary_config_file)
path = os.path.join(dir, config.infobase_config_file)
config.infobase = yaml.safe_load(open(path).read())
infogami._setup()
def main(infobase_config, openlibrary_config, coverstore_config, ndays=1):
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)-8s : %(filename)-12s:%(lineno)4d : %(message)s",
)
logger.info("Parsing config file")
try:
infobase_db = connect_to_pg(infobase_config)
coverstore_db = connect_to_pg(coverstore_config)
logroot = get_config_info(infobase_config)
except KeyError as k:
logger.critical("Config file section '%s' missing", k.args[0])
return -1
setup_ol_config(openlibrary_config)
# Gather delta and total counts
# Total counts are simply computed and updated for the current day
# Delta counts are computed by subtracting the current total from yesterday's total
today = datetime.datetime.now()
yesterday = today - datetime.timedelta(days=1)
data = {}
logger.info("Gathering total data")
data.update(
run_gathering_functions(
infobase_db,
coverstore_db,
yesterday,
today,
logroot,
prefix="admin_total__",
key_prefix="total",
)
)
logger.info("Gathering data using difference between totals")
data.update(
run_gathering_functions(
infobase_db,
coverstore_db,
yesterday,
today,
logroot,
prefix="admin_delta__",
)
)
store_data(data, today.strftime("%Y-%m-%d"))
# Now gather data which can be queried based on date ranges
# The queries will be from the beginning of today till right now
# The data will be stored as the counts of the current day.
end = datetime.datetime.now() # - datetime.timedelta(days = 10)# Right now
start = datetime.datetime(
hour=0, minute=0, second=0, day=end.day, month=end.month, year=end.year
) # Beginning of the day
logger.info("Gathering range data")
data = {}
for i in range(int(ndays)):
logger.info(" %s to %s", start, end)
data.update(
run_gathering_functions(
infobase_db, coverstore_db, start, end, logroot, prefix="admin_range__"
)
)
store_data(data, start.strftime("%Y-%m-%d"))
end = start
start = end - datetime.timedelta(days=1)
return 0
| 5,870 | Python | .py | 161 | 29.391304 | 112 | 0.632394 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
351 | numbers.py | internetarchive_openlibrary/openlibrary/admin/numbers.py | """
List of functions that return various numbers which are stored in the
admin database by the stats module.
All functions prefixed with `admin_range__` will be run for each day and the
result will be stored as the part after it. e.g. the result of
admin_range__foo will be stored under the key `foo`.
All functions prefixed with `admin_delta__` will be run for the current
day and the result will be stored as the part after it. e.g. the
result of `admin_delta__foo` will be stored under the key `foo`.
All functions prefixed with `admin_total__` will be run for the current
day and the result will be stored as `total_<key>`. e.g. the result of
`admin_total__foo` will be stored under the key `total_foo`.
Functions with names other than these will not be called from the
main harness. They can be utility functions.
"""
import functools
import logging
import web
logger = logging.getLogger(__name__)
class InvalidType(TypeError):
pass
class NoStats(TypeError):
pass
# Utility functions
def query_single_thing(db, typ, start, end):
"Query the counts a single type from the things table"
q1 = "SELECT id as id from thing where key=$typ"
typ = '/type/%s' % typ
result = db.query(q1, vars=locals())
try:
kid = result[0].id
except IndexError:
raise InvalidType("No id for type '/type/%s in the database" % typ)
q2 = (
"select count(*) as count from thing where type=%d and created >= '%s' and created < '%s'"
% (kid, start, end)
)
result = db.query(q2)
count = result[0].count
return count
def single_thing_skeleton(**kargs):
"""Returns number of things of `type` added between `start` and `end`.
    `type` is partially applied for admin_range__[works, editions, users, authors, lists].
"""
try:
typ = kargs['type']
start = kargs['start'].strftime("%Y-%m-%d")
end = kargs['end'].strftime("%Y-%m-%d %H:%M:%S")
db = kargs['thingdb']
except KeyError as k:
raise TypeError(f"{k} is a required argument for admin_range__{typ}")
return query_single_thing(db, typ, start, end)
# Public functions that are used by stats.py
def admin_range__human_edits(**kargs):
"""Calculates the number of edits between the `start` and `end`
parameters done by humans. `thingdb` is the database.
"""
try:
start = kargs['start'].strftime("%Y-%m-%d")
end = kargs['end'].strftime("%Y-%m-%d %H:%M:%S")
db = kargs['thingdb']
except KeyError as k:
raise TypeError("%s is a required argument for admin_range__human_edits" % k)
q1 = (
"SELECT count(*) AS count FROM transaction WHERE created >= '%s' and created < '%s'"
% (start, end)
)
result = db.query(q1)
total_edits = result[0].count
q1 = (
"SELECT count(DISTINCT t.id) AS count FROM transaction t, version v WHERE "
f"v.transaction_id=t.id AND t.created >= '{start}' and t.created < '{end}' AND "
"t.author_id IN (SELECT thing_id FROM account WHERE bot = 't')"
)
result = db.query(q1)
bot_edits = result[0].count
return total_edits - bot_edits
def admin_range__bot_edits(**kargs):
"""Calculates the number of edits between the `start` and `end`
parameters done by bots. `thingdb` is the database.
"""
try:
start = kargs['start'].strftime("%Y-%m-%d")
end = kargs['end'].strftime("%Y-%m-%d %H:%M:%S")
db = kargs['thingdb']
except KeyError as k:
raise TypeError("%s is a required argument for admin_range__bot_edits" % k)
q1 = (
"SELECT count(*) AS count FROM transaction t, version v WHERE "
f"v.transaction_id=t.id AND t.created >= '{start}' and t.created < '{end}' AND "
"t.author_id IN (SELECT thing_id FROM account WHERE bot = 't')"
)
result = db.query(q1)
count = result[0].count
return count
def admin_range__covers(**kargs):
"Queries the number of covers added between `start` and `end`"
try:
start = kargs['start'].strftime("%Y-%m-%d")
end = kargs['end'].strftime("%Y-%m-%d %H:%M:%S")
db = kargs['coverdb']
except KeyError as k:
raise TypeError("%s is a required argument for admin_range__covers" % k)
q1 = (
"SELECT count(*) as count from cover where created>= '%s' and created < '%s'"
% (start, end)
)
result = db.query(q1)
count = result[0].count
return count
admin_range__works = functools.partial(single_thing_skeleton, type="work")
admin_range__editions = functools.partial(single_thing_skeleton, type="edition")
admin_range__users = functools.partial(single_thing_skeleton, type="user")
admin_range__authors = functools.partial(single_thing_skeleton, type="author")
admin_range__lists = functools.partial(single_thing_skeleton, type="list")
admin_range__members = functools.partial(single_thing_skeleton, type="user")
def admin_range__loans(**kargs):
"""Finds the number of loans on a given day.
Loan info is written to infobase write log. Grepping through the log file gives us the counts.
WARNING: This script must be run on the node that has infobase logs.
"""
try:
db = kargs['thingdb']
start = kargs['start']
end = kargs['end']
except KeyError as k:
raise TypeError("%s is a required argument for admin_total__ebooks" % k)
result = db.query(
"SELECT count(*) as count FROM stats"
" WHERE type='loan'"
" AND created >= $start"
" AND created < $end",
vars=locals(),
)
return result[0].count
def admin_total__authors(**kargs):
db = kargs['thingdb']
return _count_things(db, "/type/author")
def admin_total__subjects(**kargs):
# Anand - Dec 2014 - TODO
# Earlier implementation that uses couchdb is gone now
return 0
def admin_total__lists(**kargs):
try:
db = kargs['thingdb']
except KeyError as k:
raise TypeError("%s is a required argument for admin_total__lists" % k)
# Computing total number of lists
q1 = "SELECT id as id from thing where key='/type/list'"
result = db.query(q1)
try:
kid = result[0].id
except IndexError:
raise InvalidType("No id for type '/type/list' in the database")
q2 = "select count(*) as count from thing where type=%d" % kid
result = db.query(q2)
total_lists = result[0].count
return total_lists
def admin_total__covers(**kargs):
db = kargs['coverdb']
return db.query("SELECT count(*) as count FROM cover")[0].count
def admin_total__works(**kargs):
db = kargs['thingdb']
return _count_things(db, '/type/work')
def admin_total__editions(**kargs):
db = kargs['thingdb']
return _count_things(db, '/type/edition')
def _count_things(db, type):
type_id = db.where("thing", key=type)[0].id
result = db.query(
"SELECT count(*) as count FROM thing WHERE type=$type_id", vars=locals()
)
return result[0].count
def _query_count(db, table, type, property, distinct=False):
type_id = db.where("thing", key=type)[0].id
key_id = db.where('property', type=type_id, name=property)[0].id
if distinct:
what = 'count(distinct(thing_id)) as count'
else:
what = 'count(thing_id) as count'
result = db.select(
table, what=what, where='key_id=$key_id', vars={"key_id": key_id}
)
return result[0].count
def admin_total__ebooks(**kargs):
# Anand - Dec 2014
# The following implementation is too slow. Disabling for now.
return 0
db = kargs['thingdb']
return _query_count(db, "edition_str", "/type/edition", "ocaid")
def admin_total__members(**kargs):
db = kargs['thingdb']
return _count_things(db, '/type/user')
def admin_delta__ebooks(**kargs):
# Anand - Dec 2014 - TODO
# Earlier implementation that uses couchdb is gone now
return 0
def admin_delta__subjects(**kargs):
# Anand - Dec 2014 - TODO
# Earlier implementation that uses couchdb is gone now
return 0
| 8,060 | Python | .py | 201 | 34.810945 | 98 | 0.65189 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
352 | db.py | internetarchive_openlibrary/openlibrary/coverstore/db.py | import datetime
import web
from openlibrary.coverstore import config
_categories = None
_db = None
def getdb():
global _db
if _db is None:
_db = web.database(**config.db_parameters)
return _db
def get_category_id(category):
global _categories
if _categories is None:
_categories = {}
for c in getdb().select('category'):
_categories[c.name] = c.id
return _categories.get(category)
def new(
category,
olid,
filename,
filename_s,
filename_m,
filename_l,
author,
ip,
source_url,
width,
height,
):
category_id = get_category_id(category)
now = datetime.datetime.utcnow()
db = getdb()
t = db.transaction()
try:
cover_id = db.insert(
'cover',
category_id=category_id,
filename=filename,
filename_s=filename_s,
filename_m=filename_m,
filename_l=filename_l,
olid=olid,
author=author,
ip=ip,
source_url=source_url,
width=width,
height=height,
created=now,
last_modified=now,
deleted=False,
archived=False,
)
db.insert("log", action="new", timestamp=now, cover_id=cover_id)
except:
t.rollback()
raise
else:
t.commit()
return cover_id
def query(category, olid, offset=0, limit=10):
category_id = get_category_id(category)
deleted = False
if isinstance(olid, list):
if len(olid) == 0:
olid = [-1]
where = web.reparam(
'deleted=$deleted AND category_id = $category_id AND olid IN $olid',
locals(),
)
elif olid is None:
where = web.reparam('deleted=$deleted AND category_id=$category_id', locals())
else:
where = web.reparam(
'deleted=$deleted AND category_id=$category_id AND olid=$olid', locals()
)
result = getdb().select(
'cover',
what='*',
where=where,
order='last_modified desc',
offset=offset,
limit=limit,
)
return result.list()
def details(id):
try:
return getdb().select('cover', what='*', where="id=$id", vars=locals())[0]
except IndexError:
return None
def touch(id):
"""Sets the last_modified of the specified cover to the current timestamp.
    By doing so, this cover comes to the top of query results because the results are ordered by last_modified.
"""
now = datetime.datetime.utcnow()
db = getdb()
t = db.transaction()
try:
db.query("UPDATE cover SET last_modified=$now where id=$id", vars=locals())
db.insert("log", action="touch", timestamp=now, cover_id=id)
except:
t.rollback()
raise
else:
t.commit()
def delete(id):
true = True
now = datetime.datetime.utcnow()
db = getdb()
t = db.transaction()
try:
db.query(
'UPDATE cover set deleted=$true AND last_modified=$now WHERE id=$id',
vars=locals(),
)
db.insert("log", action="delete", timestamp=now, cover_id=id)
except:
t.rollback()
raise
else:
t.commit()
def get_filename(id):
d = getdb().select('cover', what='filename', where='id=$id', vars=locals())
return d and d[0].filename or None
| 3,426 | Python | .py | 124 | 20.298387 | 110 | 0.580409 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
353 | config.py | internetarchive_openlibrary/openlibrary/coverstore/config.py | image_engine = "pil"
image_sizes = {"S": (116, 58), "M": (180, 360), "L": (500, 500)}
default_image = None
data_root = None
ol_url = "http://openlibrary.org/"
# ids of the blocked covers
# this is used to block covers when someone requests
# an image to be blocked.
blocked_covers: list[str] = []
def get(name, default=None):
return globals().get(name, default)
| 371 | Python | .py | 11 | 31.909091 | 64 | 0.687324 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
354 | oldb.py | internetarchive_openlibrary/openlibrary/coverstore/oldb.py | """Library to talk directly to OL database to avoid expensive API calls.
"""
import json
import web
from openlibrary.coverstore import config
from openlibrary.utils import olmemcache
__all__ = ["query", "get"]
def is_supported():
return bool(config.get("ol_db_parameters"))
_db = None
def get_db():
global _db
if _db is None and config.get("ol_db_parameters"):
_db = web.database(**config.ol_db_parameters)
return _db
_memcache = None
def get_memcache():
global _memcache
if _memcache is None and config.get("ol_memcache_servers"):
_memcache = olmemcache.Client(config.ol_memcache_servers)
return _memcache
@web.memoize
def get_property_id(name):
db = get_db()
try:
type_id = db.query("SELECT * FROM thing WHERE key='/type/edition'")[0].id
rows = db.query(
"SELECT * FROM property WHERE name=$name AND type=$type_id", vars=locals()
)
return rows[0].id
except IndexError:
return None
def query(key, value):
key_id = get_property_id(key)
db = get_db()
rows = db.query(
"SELECT thing.key"
" FROM thing, edition_str"
" WHERE thing.id=edition_str.thing_id"
" AND key_id=$key_id"
" AND value=$value"
" ORDER BY thing.last_modified LIMIT 10",
vars=locals(),
)
return [row.key for row in rows]
def get(key):
# try memcache
if memcache := get_memcache():
json_data = memcache.get(key)
if json_data:
return json.loads(json_data)
# try db
db = get_db()
try:
thing = db.query("SELECT * FROM thing WHERE key=$key", vars=locals())[0]
data = db.query(
"SELECT * FROM data WHERE data.thing_id=$thing.id AND data.revision=$thing.latest_revision",
vars=locals(),
)[0]
return json.loads(data.data)
except IndexError:
return None
| 1,932 | Python | .py | 62 | 24.967742 | 104 | 0.625541 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
355 | archive.py | internetarchive_openlibrary/openlibrary/coverstore/archive.py | """Utility to move files from local disk to zip files and update the paths in the db"""
import glob
import os
import re
import sys
import subprocess
import time
import zipfile
import web
import internetarchive as ia
from infogami.infobase import utils
from openlibrary.coverstore import config, db
from openlibrary.coverstore.coverlib import find_image_path
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
ITEM_SIZE = 1_000_000
BATCH_SIZE = 10_000
BATCH_SIZES = ('', 's', 'm', 'l')
def log(*args):
msg = " ".join(args)
print(msg)
class Uploader:
@staticmethod
def _get_s3():
s3_keys = config.get('ia_s3_covers')
return s3_keys.get('s3_key'), s3_keys.get('s3_secret')
@classmethod
def upload(cls, itemname, filepaths):
md = {
"title": "Open Library Cover Archive",
"mediatype": "data",
"collection": ["ol_data", "ol_exports"],
}
access_key, secret_key = cls._get_s3()
return ia.get_item(itemname).upload(
filepaths,
metadata=md,
retries=10,
verbose=True,
access_key=access_key,
secret_key=secret_key,
)
@staticmethod
def is_uploaded(item: str, filename: str, verbose: bool = False) -> bool:
"""
        Looks within an archive.org item and determines whether
        a .zip file with the given filename exists.
:param item: name of archive.org item to look within, e.g. `s_covers_0008`
:param filename: filename to look for within item
"""
zip_command = fr'ia list {item} | grep "{filename}" | wc -l'
if verbose:
print(zip_command)
zip_result = subprocess.run(
zip_command, shell=True, text=True, capture_output=True, check=True
)
return int(zip_result.stdout.strip()) == 1
class Batch:
@staticmethod
def get_relpath(item_id, batch_id, ext="", size=""):
"""e.g. s_covers_0008/s_covers_0008_82.zip or covers_0008/covers_0008_82.zip"""
ext = f".{ext}" if ext else ""
prefix = f"{size.lower()}_" if size else ""
folder = f"{prefix}covers_{item_id}"
filename = f"{prefix}covers_{item_id}_{batch_id}{ext}"
return os.path.join(folder, filename)
@classmethod
def get_abspath(cls, item_id, batch_id, ext="", size=""):
"""e.g. /1/var/lib/openlibrary/coverstore/items/covers_0008/covers_0008_87.zip"""
filename = cls.get_relpath(item_id, batch_id, ext=ext, size=size)
return os.path.join(config.data_root, "items", filename)
@staticmethod
def zip_path_to_item_and_batch_id(zpath):
zfilename = zpath.split(os.path.sep)[-1]
if match := re.match(r"(?:[lsm]_)?covers_(\d+)_(\d+)\.zip", zfilename):
return match.group(1), match.group(2)
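    # Illustrative round trip between the two helpers above, reusing the
    # docstring example values:
    #   Batch.get_relpath("0008", "82", ext="zip", size="s")
    #       -> 's_covers_0008/s_covers_0008_82.zip'
    #   Batch.zip_path_to_item_and_batch_id('s_covers_0008/s_covers_0008_82.zip')
    #       -> ('0008', '82')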
@classmethod
def process_pending(cls, upload=False, finalize=False, test=True):
"""Makes a big assumption that s,m,l and full zips are all in sync...
Meaning if covers_0008 has covers_0008_01.zip, s_covers_0008
will also have s_covers_0008_01.zip.
1. Finds a list of all cover archives in data_root on disk which are pending
2. Evaluates whether the cover archive is complete and/or uploaded
        3. If uploaded, finalize: update all filenames of covers in batch from jpg -> zip, delete raw files, delete zip
4. Else, if complete, upload covers
"""
for batch in cls.get_pending():
item_id, batch_id = cls.zip_path_to_item_and_batch_id(batch)
print(f"\n## [Processing batch {item_id}_{batch_id}] ##")
batch_complete = True
for size in BATCH_SIZES:
itemname, filename = cls.get_relpath(
item_id, batch_id, ext="zip", size=size
).split(os.path.sep)
# TODO Uploader.check_item_health(itemname)
# to ensure no conflicting tasks/redrows
zip_uploaded = Uploader.is_uploaded(itemname, filename)
print(f"* {filename}: Uploaded? {zip_uploaded}")
if not zip_uploaded:
batch_complete = False
zip_complete, errors = cls.is_zip_complete(
item_id, batch_id, size=size, verbose=True
)
print(f"* Completed? {zip_complete} {errors or ''}")
if zip_complete and upload:
print(f"=> Uploading {filename} to {itemname}")
fullpath = os.path.join(
config.data_root, "items", itemname, filename
)
Uploader.upload(itemname, fullpath)
print(f"* Finalize? {finalize}")
if finalize and batch_complete:
# Finalize batch...
start_id = (ITEM_SIZE * int(item_id)) + (BATCH_SIZE * int(batch_id))
cls.finalize(start_id, test=test)
print("=> Deleting completed, uploaded zips...")
for size in BATCH_SIZES:
# Remove zips from disk
zp = cls.get_abspath(item_id, batch_id, ext="zip", size=size)
if os.path.exists(zp):
print(f"=> Deleting {zp}")
if not test:
os.remove(zp)
@staticmethod
def get_pending():
"""These are zips on disk which are presumably incomplete or have not
yet been uploaded
"""
zipfiles = []
# find any zips on disk of any size
item_dirs = glob.glob(os.path.join(config.data_root, "items", "covers_*"))
for item_dir in item_dirs:
zipfiles.extend(glob.glob(os.path.join(item_dir, "*.zip")))
return sorted(zipfiles)
@staticmethod
def is_zip_complete(item_id, batch_id, size="", verbose=False):
cdb = CoverDB()
errors = []
filepath = Batch.get_abspath(item_id, batch_id, size=size, ext="zip")
item_id, batch_id = int(item_id), int(batch_id)
start_id = (item_id * ITEM_SIZE) + (batch_id * BATCH_SIZE)
if unarchived := len(cdb.get_batch_unarchived(start_id)):
errors.append({"error": "archival_incomplete", "remaining": unarchived})
if not os.path.exists(filepath):
errors.append({'error': 'nozip'})
else:
expected_num_files = len(cdb.get_batch_archived(start_id=start_id))
num_files = ZipManager.count_files_in_zip(filepath)
if num_files != expected_num_files:
errors.append(
{
"error": "zip_discrepency",
"expected": expected_num_files,
"actual": num_files,
}
)
success = not len(errors)
return (success, errors) if verbose else success
@classmethod
def finalize(cls, start_id, test=True):
"""Update all covers in batch to point to zips, delete files, set deleted=True"""
cdb = CoverDB()
covers = (
Cover(**c)
for c in cdb._get_batch(start_id=start_id, failed=False, uploaded=False)
)
for cover in covers:
if not cover.has_valid_files():
print(f"=> {cover.id} failed")
cdb.update(cover.id, failed=True, _test=test)
continue
print(f"=> Deleting files [test={test}]")
if not test:
# XXX needs testing on 1 cover
cover.delete_files()
print(f"=> Updating cover filenames to reference uploaded zip [test={test}]")
# db.update(where=f"id>={start_id} AND id<{start_id + BATCH_SIZE}")
if not test:
# XXX needs testing
cdb.update_completed_batch(start_id)
class CoverDB:
TABLE = 'cover'
STATUS_KEYS = ('failed', 'archived', 'uploaded')
def __init__(self):
self.db = db.getdb()
@staticmethod
def _get_batch_end_id(start_id):
"""Calculates the end of the batch based on the start_id and the
batch_size
"""
return start_id - (start_id % BATCH_SIZE) + BATCH_SIZE
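    # Worked example of the arithmetic above with BATCH_SIZE = 10_000 and a
    # hypothetical start_id:
    #   start_id = 8_012_345 -> 8_012_345 - 2_345 + 10_000 = 8_020_000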
def get_covers(self, limit=None, start_id=None, end_id=None, **kwargs):
"""Utility for fetching covers from the database
start_id: explicitly define a starting id. This is significant
because an offset would define a number of rows in the db to
skip but this value may not equate to the desired
        start_id. When start_id is used, an end_id is calculated for the
        end of the current batch and limit is ignored.
limit: if no start_id is present, specifies num rows to return.
kwargs: additional specifiable cover table query arguments
like those found in STATUS_KEYS
"""
wheres = [
f"{key}=${key}"
for key in kwargs
if key in self.STATUS_KEYS and kwargs.get(key) is not None
]
if start_id:
wheres.append("id>=$start_id AND id<$end_id")
kwargs['start_id'] = start_id
kwargs['end_id'] = end_id or self._get_batch_end_id(start_id)
limit = None
return self.db.select(
self.TABLE,
where=" AND ".join(wheres) if wheres else None,
order='id asc',
vars=kwargs,
limit=limit,
)
def get_unarchived_covers(self, limit, **kwargs):
return self.get_covers(limit=limit, failed=False, archived=False, **kwargs)
def _get_current_batch_start_id(self, **kwargs):
c = self.get_covers(limit=1, **kwargs)[0]
return c.id - (c.id % BATCH_SIZE)
def _get_batch(self, start_id=None, **kwargs):
start_id = start_id or self._get_current_batch_start_id(**kwargs)
return self.get_covers(start_id=start_id, **kwargs)
def get_batch_unarchived(self, start_id=None, end_id=None):
return self._get_batch(
start_id=start_id,
failed=False,
archived=False,
end_id=end_id,
)
def get_batch_archived(self, start_id=None):
return self._get_batch(start_id=start_id, archived=True, failed=False)
def get_batch_failures(self, start_id=None):
return self._get_batch(start_id=start_id, failed=True)
def update(self, cid, **kwargs):
return self.db.update(
self.TABLE,
where="id=$cid",
vars={'cid': cid},
**kwargs,
)
def update_completed_batch(self, start_id):
end_id = start_id + BATCH_SIZE
item_id, batch_id = Cover.id_to_item_and_batch_id(start_id)
return self.db.update(
self.TABLE,
where="id>=$start_id AND id<$end_id AND archived=true AND failed=false AND uploaded=false",
vars={'start_id': start_id, 'end_id': end_id},
uploaded=True,
filename=Batch.get_relpath(item_id, batch_id, ext="zip"),
filename_s=Batch.get_relpath(item_id, batch_id, ext="zip", size="s"),
filename_m=Batch.get_relpath(item_id, batch_id, ext="zip", size="m"),
filename_l=Batch.get_relpath(item_id, batch_id, ext="zip", size="l"),
)
class Cover(web.Storage):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.files = self.get_files()
@classmethod
def get_cover_url(cls, cover_id, size="", ext="zip", protocol="https"):
pcid = "%010d" % int(cover_id)
img_filename = item_file = f"{pcid}{'-' + size.upper() if size else ''}.jpg"
item_id, batch_id = cls.id_to_item_and_batch_id(cover_id)
relpath = Batch.get_relpath(item_id, batch_id, size=size, ext=ext)
path = os.path.join(relpath, img_filename)
return f"{protocol}://archive.org/download/{path}"
@property
def timestamp(self):
t = (
utils.parse_datetime(self.created)
if isinstance(self.created, str)
else self.created
)
return time.mktime(t.timetuple())
def has_valid_files(self):
return all(f.path and os.path.exists(f.path) for f in self.files.values())
def get_files(self):
files = {
'filename': web.storage(name="%010d.jpg" % self.id, filename=self.filename),
'filename_s': web.storage(
name="%010d-S.jpg" % self.id, filename=self.filename_s
),
'filename_m': web.storage(
name="%010d-M.jpg" % self.id, filename=self.filename_m
),
'filename_l': web.storage(
name="%010d-L.jpg" % self.id, filename=self.filename_l
),
}
for file_type, f in files.items():
files[file_type].path = f.filename and os.path.join(
config.data_root, "localdisk", f.filename
)
return files
def delete_files(self):
for f in self.files.values():
print('removing', f.path)
os.remove(f.path)
@staticmethod
def id_to_item_and_batch_id(cover_id):
"""Converts a number like 987_654_321 to a 4-digit, 0-padded item_id
        representing the millions place and a 2-digit, 0-padded batch_id
        representing the ten-thousands place.
        Usage:
>>> Cover.id_to_item_and_batch_id(987_654_321)
('0987', '65')
"""
millions = cover_id // ITEM_SIZE
item_id = f"{millions:04}"
rem = cover_id - (ITEM_SIZE * millions)
ten_thousands = rem // BATCH_SIZE
batch_id = f"{ten_thousands:02}"
return item_id, batch_id
def archive(limit=None, start_id=None, end_id=None):
"""Move files from local disk to tar files and update the paths in the db."""
file_manager = ZipManager()
cdb = CoverDB()
try:
covers = (
cdb.get_unarchived_covers(limit=limit)
if limit
else cdb.get_batch_unarchived(start_id=start_id, end_id=end_id)
)
for cover in covers:
cover = Cover(**cover)
print('archiving', cover)
print(cover.files.values())
if not cover.has_valid_files():
print("Missing image file for %010d" % cover.id, file=web.debug)
cdb.update(cover.id, failed=True)
continue
for d in cover.files.values():
file_manager.add_file(d.name, filepath=d.path, mtime=cover.timestamp)
cdb.update(cover.id, archived=True)
finally:
file_manager.close()
def audit(item_id, batch_ids=(0, 100), sizes=BATCH_SIZES) -> None:
"""Check which cover batches have been uploaded to archive.org.
Checks the archive.org items pertaining to this `item_id` of up to
1 million images (4-digit e.g. 0008) for each specified size and
    verifies that every batch zip (of 10k images, 2-digit batch_id e.g. 81)
    within the specified range has been successfully uploaded.
{size}_covers_{item_id}_{batch_id}:
:param item_id: 4 digit, batches of 1M, 0000 to 9999M
:param batch_ids: (min, max) batch_id range or max_batch_id; 2 digit, batch of 10k from [00, 99]
"""
scope = range(*(batch_ids if isinstance(batch_ids, tuple) else (0, batch_ids)))
for size in sizes:
files = (
Batch.get_relpath(f"{item_id:04}", f"{i:02}", ext="zip", size=size)
for i in scope
)
missing_files = []
sys.stdout.write(f"\n{size or 'full'}: ")
for f in files:
item, filename = f.split(os.path.sep, 1)
print(filename)
if Uploader.is_uploaded(item, filename):
sys.stdout.write(".")
else:
sys.stdout.write("X")
missing_files.append(filename)
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
if missing_files:
print(
f"ia upload {item} {' '.join([f'{item}/{mf}*' for mf in missing_files])} --retries 10"
)
class ZipManager:
def __init__(self):
self.zipfiles = {}
for size in BATCH_SIZES:
self.zipfiles[size.upper()] = (None, None)
@staticmethod
def count_files_in_zip(filepath):
command = f'unzip -l {filepath} | grep "jpg" | wc -l'
result = subprocess.run(
command, shell=True, text=True, capture_output=True, check=True
)
return int(result.stdout.strip())
def get_zipfile(self, name):
cid = web.numify(name)
zipname = f"covers_{cid[:4]}_{cid[4:6]}.zip"
# for {cid}-[SML].jpg
if '-' in name:
size = name[len(cid + '-') :][0].lower()
zipname = size + "_" + zipname
else:
size = ""
_zipname, _zipfile = self.zipfiles[size.upper()]
if _zipname != zipname:
_zipname and _zipfile.close()
_zipfile = self.open_zipfile(zipname)
self.zipfiles[size.upper()] = zipname, _zipfile
log('writing', zipname)
return _zipfile
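    # Illustrative mapping of image names to zip names, following the logic
    # above (the cover id is hypothetical):
    #   "0000123456-S.jpg" -> cid "0000123456" -> "s_covers_0000_12.zip"
    #   "0000123456.jpg"   -> cid "0000123456" -> "covers_0000_12.zip"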
def open_zipfile(self, name):
path = os.path.join(config.data_root, "items", name[: -len("_XX.zip")], name)
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
return zipfile.ZipFile(path, 'a')
def add_file(self, name, filepath, **args):
zipper = self.get_zipfile(name)
if name not in zipper.namelist():
with open(filepath, 'rb') as fileobj:
                # Use ZIP_STORED so the file is stored without recompression
zipper.write(filepath, arcname=name, compress_type=zipfile.ZIP_STORED)
return os.path.basename(zipper.filename)
def close(self):
for name, _zipfile in self.zipfiles.values():
if name:
_zipfile.close()
@classmethod
def contains(cls, zip_file_path, filename):
with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
return filename in zip_file.namelist()
@classmethod
def get_last_file_in_zip(cls, zip_file_path):
with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
file_list = zip_file.namelist()
if file_list:
return max(file_list)
def main(openlibrary_yml: str, coverstore_yml: str, dry_run: bool = False):
from openlibrary.coverstore.server import load_config
load_config(openlibrary_yml)
load_config(coverstore_yml)
archive()
Batch.process_pending(upload=True, finalize=True, test=dry_run)
if __name__ == '__main__':
FnToCLI(main).run()
| 18,877 | Python | .py | 436 | 32.944954 | 118 | 0.578684 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
356 | code.py | internetarchive_openlibrary/openlibrary/coverstore/code.py | import array
import datetime
import io
import json
import logging
import os
import requests
import web
from PIL import Image, ImageDraw, ImageFont
import textwrap
from openlibrary.coverstore import config, db
from openlibrary.coverstore.coverlib import read_file, read_image, save_image
from openlibrary.coverstore.utils import (
changequery,
download,
ol_get,
ol_things,
random_string,
rm_f,
safeint,
)
from openlibrary.plugins.openlibrary.processors import CORSProcessor
logger = logging.getLogger("coverstore")
urls = (
'/',
'index',
'/([^ /]*)/upload',
'upload',
'/([^ /]*)/upload2',
'upload2',
'/([^ /]*)/([a-zA-Z]*)/(.*)-([SML]).jpg',
'cover',
'/([^ /]*)/([a-zA-Z]*)/(.*)().jpg',
'cover',
'/([^ /]*)/([a-zA-Z]*)/(.*).json',
'cover_details',
'/([^ /]*)/query',
'query',
'/([^ /]*)/touch',
'touch',
'/([^ /]*)/delete',
'delete',
)
app = web.application(urls, locals())
app.add_processor(CORSProcessor())
def get_cover_id(olkeys):
"""Return the first cover from the list of ol keys."""
for olkey in olkeys:
doc = ol_get(olkey)
if not doc:
continue
is_author = doc['key'].startswith("/authors")
covers = doc.get('photos' if is_author else 'covers', [])
# Sometimes covers is stored as [None] or [-1] to indicate no covers.
# If so, consider there are no covers.
if covers and (covers[0] or -1) >= 0:
return covers[0]
def _query(category, key, value):
if key == 'olid':
prefixes = {"a": "/authors/", "b": "/books/", "w": "/works/"}
if category in prefixes:
olkey = prefixes[category] + value
return get_cover_id([olkey])
else:
if category == 'b':
if key == 'isbn':
value = value.replace("-", "").strip()
key = "isbn_"
if key == 'oclc':
key = 'oclc_numbers'
olkeys = ol_things(key, value)
return get_cover_id(olkeys)
return None
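# Illustrative resolution flow for _query() with a hypothetical ISBN; the
# returned cover id depends entirely on what Open Library knows about it:
#   _query('b', 'isbn', '0-385-47257-9')
#     -> hyphens stripped, key rewritten to 'isbn_', matching editions fetched
#        via ol_things(), then get_cover_id() returns the first cover found.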
ERROR_EMPTY = 1, "No image found"
ERROR_INVALID_URL = 2, "Invalid URL"
ERROR_BAD_IMAGE = 3, "Invalid Image"
class index:
def GET(self):
return (
'<h1>Open Library Book Covers Repository</h1><div>See <a '
'href="https://openlibrary.org/dev/docs/api/covers">Open Library Covers '
'API</a> for details.</div>'
)
def _cleanup():
web.ctx.pop("_fieldstorage", None)
web.ctx.pop("_data", None)
web.ctx.env = {}
class upload:
def POST(self, category):
i = web.input(
'olid',
author=None,
file={},
source_url=None,
success_url=None,
failure_url=None,
)
success_url = i.success_url or web.ctx.get('HTTP_REFERRER') or '/'
failure_url = i.failure_url or web.ctx.get('HTTP_REFERRER') or '/'
def error(code__msg):
(code, msg) = code__msg
print("ERROR: upload failed, ", i.olid, code, repr(msg), file=web.debug)
_cleanup()
url = changequery(failure_url, errcode=code, errmsg=msg)
raise web.seeother(url)
if i.source_url:
try:
data = download(i.source_url)
except:
error(ERROR_INVALID_URL)
source_url = i.source_url
elif i.file is not None and i.file != {}:
data = i.file.value
source_url = None
else:
error(ERROR_EMPTY)
if not data:
error(ERROR_EMPTY)
try:
save_image(
data,
category=category,
olid=i.olid,
author=i.author,
source_url=i.source_url,
ip=web.ctx.ip,
)
except ValueError:
error(ERROR_BAD_IMAGE)
_cleanup()
raise web.seeother(success_url)
class upload2:
"""openlibrary.org POSTs here via openlibrary/plugins/upstream/covers.py upload"""
def POST(self, category):
i = web.input(
olid=None, author=None, data=None, source_url=None, ip=None, _unicode=False
)
web.ctx.pop("_fieldstorage", None)
web.ctx.pop("_data", None)
def error(code__msg):
(code, msg) = code__msg
_cleanup()
e = web.badrequest()
e.data = json.dumps({"code": code, "message": msg})
logger.exception("upload2.POST() failed: " + e.data)
raise e
source_url = i.source_url
data = i.data
if source_url:
try:
data = download(source_url)
except:
error(ERROR_INVALID_URL)
if not data:
error(ERROR_EMPTY)
try:
d = save_image(
data,
category=category,
olid=i.olid,
author=i.author,
source_url=i.source_url,
ip=i.ip,
)
except ValueError:
error(ERROR_BAD_IMAGE)
_cleanup()
return json.dumps({"ok": "true", "id": d.id})
def trim_microsecond(date):
# ignore microseconds
return datetime.datetime(*date.timetuple()[:6])
# Number of images stored in one archive.org item
IMAGES_PER_ITEM = 10_000
def zipview_url_from_id(coverid, size):
suffix = size and ("-" + size.upper())
    item_index = coverid // IMAGES_PER_ITEM
itemid = "olcovers%d" % item_index
zipfile = itemid + suffix + ".zip"
filename = "%d%s.jpg" % (coverid, suffix)
protocol = web.ctx.protocol # http or https
return f"{protocol}://archive.org/download/{itemid}/{zipfile}/{filename}"
class cover:
def GET(self, category, key, value, size):
i = web.input(default="true")
key = key.lower()
def is_valid_url(url):
return url.startswith(("http://", "https://"))
def notfound():
if (
config.default_image
and i.default.lower() != "false"
and not is_valid_url(i.default)
):
return read_file(config.default_image)
elif is_valid_url(i.default):
return web.seeother(i.default)
else:
return web.notfound("")
if key == 'isbn':
value = value.replace("-", "").strip() # strip hyphens from ISBN
value = self.query(category, key, value)
elif key == 'ia':
url = self.get_ia_cover_url(value, size)
if url:
return web.found(url)
else:
value = None # notfound or redirect to default. handled later.
elif key != 'id':
value = self.query(category, key, value)
value = safeint(value)
if value is None or value in config.blocked_covers:
return notfound()
# redirect to archive.org cluster for large size and original images whenever possible
if size in ("L", "") and self.is_cover_in_cluster(value):
url = zipview_url_from_id(value, size)
return web.found(url)
d = self.get_details(value, size.lower())
if not d:
return notfound()
# set cache-for-ever headers only when requested with ID
if key == 'id':
etag = f"{d.id}-{size.lower()}"
if not web.modified(trim_microsecond(d.created), etag=etag):
return web.notmodified()
web.header('Cache-Control', 'public')
# this image is not going to expire in next 100 years.
web.expires(100 * 365 * 24 * 3600)
else:
web.header('Cache-Control', 'public')
# Allow the client to cache the image for 10 mins to avoid further requests
web.expires(10 * 60)
web.header('Content-Type', 'image/jpeg')
try:
from openlibrary.coverstore import archive
if d.id >= 8_000_000 and d.uploaded:
return web.found(
archive.Cover.get_cover_url(
d.id, size=size, protocol=web.ctx.protocol
)
)
return read_image(d, size)
except OSError:
return web.notfound()
def get_ia_cover_url(self, identifier, size="M"):
url = "https://archive.org/metadata/%s/metadata" % identifier
try:
d = requests.get(url).json().get("result", {})
except (OSError, ValueError):
return
# Not a text item or no images or scan is not complete yet
if (
d.get("mediatype") != "texts"
or d.get("repub_state", "4") not in ("4", "6")
or "imagecount" not in d
):
return
w, h = config.image_sizes[size.upper()]
return "https://archive.org/download/%s/page/cover_w%d_h%d.jpg" % (
identifier,
w,
h,
)
def get_details(self, coverid: int, size=""):
# Use tar index if available to avoid db query. We have 0-6M images in tar balls.
if coverid < 6000000 and size in "sml":
path = self.get_tar_filename(coverid, size)
if path:
if size:
key = "filename_%s" % size
else:
key = "filename"
return web.storage(
{"id": coverid, key: path, "created": datetime.datetime(2010, 1, 1)}
)
return db.details(coverid)
def is_cover_in_cluster(self, coverid: int):
"""Returns True if the cover is moved to archive.org cluster.
It is found by looking at the config variable max_coveritem_index.
"""
try:
return coverid < IMAGES_PER_ITEM * config.get("max_coveritem_index", 0)
except (TypeError, ValueError):
return False
def get_tar_filename(self, coverid, size):
"""Returns tarfile:offset:size for given coverid."""
        tarindex = coverid // 10000
index = coverid % 10000
array_offset, array_size = get_tar_index(tarindex, size)
offset = array_offset and array_offset[index]
imgsize = array_size and array_size[index]
if size:
prefix = "%s_covers" % size
else:
prefix = "covers"
if imgsize:
name = "%010d" % coverid
return f"{prefix}_{name[:4]}_{name[4:6]}.tar:{offset}:{imgsize}"
def query(self, category, key, value):
return _query(category, key, value)
@web.memoize
def get_tar_index(tarindex, size):
path = os.path.join(config.data_root, get_tarindex_path(tarindex, size))
if not os.path.exists(path):
return None, None
return parse_tarindex(open(path))
def get_tarindex_path(index, size):
name = "%06d" % index
if size:
prefix = "%s_covers" % size
else:
prefix = "covers"
itemname = f"{prefix}_{name[:4]}"
filename = f"{prefix}_{name[:4]}_{name[4:6]}.index"
return os.path.join('items', itemname, filename)
def parse_tarindex(file):
"""Takes tarindex file as file objects and returns array of offsets and array of sizes. The size of the returned arrays will be 10000."""
array_offset = array.array('L', [0 for i in range(10000)])
array_size = array.array('L', [0 for i in range(10000)])
for line in file:
line = line.strip()
if line:
name, offset, imgsize = line.split("\t")
coverid = int(name[:10]) # First 10 chars is coverid, followed by ".jpg"
index = coverid % 10000
array_offset[index] = int(offset)
array_size[index] = int(imgsize)
return array_offset, array_size
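# Illustrative tarindex line and how parse_tarindex() interprets it (the
# offset and size are hypothetical):
#   "0000010000.jpg\t512\t28973" -> coverid 10000, slot 0 in both arrays,
#   so array_offset[0] = 512 and array_size[0] = 28973.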
class cover_details:
def GET(self, category, key, value):
d = _query(category, key, value)
if key == 'id':
web.header('Content-Type', 'application/json')
d = db.details(value)
if d:
if isinstance(d['created'], datetime.datetime):
d['created'] = d['created'].isoformat()
d['last_modified'] = d['last_modified'].isoformat()
return json.dumps(d)
else:
raise web.notfound("")
else:
value = _query(category, key, value)
if value is None:
return web.notfound("")
else:
return web.found(f"/{category}/id/{value}.json")
class query:
def GET(self, category):
i = web.input(
olid=None, offset=0, limit=10, callback=None, details="false", cmd=None
)
offset = safeint(i.offset, 0)
limit = safeint(i.limit, 10)
details = i.details.lower() == "true"
limit = min(limit, 100)
if i.olid and ',' in i.olid:
i.olid = i.olid.split(',')
result = db.query(category, i.olid, offset=offset, limit=limit)
if i.cmd == "ids":
result = {r.olid: r.id for r in result}
elif not details:
result = [r.id for r in result]
else:
def process(r):
return {
'id': r.id,
'olid': r.olid,
'created': r.created.isoformat(),
'last_modified': r.last_modified.isoformat(),
'source_url': r.source_url,
'width': r.width,
'height': r.height,
}
result = [process(r) for r in result]
json_data = json.dumps(result)
web.header('Content-Type', 'text/javascript')
if i.callback:
return f"{i.callback}({json_data});"
else:
return json_data
class touch:
def POST(self, category):
i = web.input(id=None, redirect_url=None)
redirect_url = i.redirect_url or web.ctx.get('HTTP_REFERRER')
id = i.id and safeint(i.id, None)
if id:
db.touch(id)
raise web.seeother(redirect_url)
else:
return 'no such id: %s' % id
class delete:
def POST(self, category):
i = web.input(id=None, redirect_url=None)
redirect_url = i.redirect_url
id = i.id and safeint(i.id, None)
if id:
db.delete(id)
if redirect_url:
raise web.seeother(redirect_url)
else:
return 'cover has been deleted successfully.'
else:
return 'no such id: %s' % id
def render_list_preview_image(lst_key):
"""This function takes a list of five books and puts their covers in the correct
locations to create a new image for social-card"""
lst = web.ctx.site.get(lst_key)
five_seeds = lst.seeds[0:5]
background = Image.open(
"/openlibrary/static/images/Twitter_Social_Card_Background.png"
)
logo = Image.open("/openlibrary/static/images/Open_Library_logo.png")
W, H = background.size
image = []
for seed in five_seeds:
cover = seed.get_cover()
if cover:
response = requests.get(
f"https://covers.openlibrary.org/b/id/{cover.id}-M.jpg"
)
image_bytes = io.BytesIO(response.content)
img = Image.open(image_bytes)
basewidth = 162
wpercent = basewidth / float(img.size[0])
hsize = int(float(img.size[1]) * float(wpercent))
img = img.resize((basewidth, hsize), Image.LANCZOS)
image.append(img)
max_height = 0
for img in image:
max_height = max(img.size[1], max_height)
start_width = 63 + 92 * (5 - len(image))
for img in image:
background.paste(img, (start_width, 174 + max_height - img.size[1]))
start_width += 184
logo = logo.resize((120, 74), Image.LANCZOS)
background.paste(logo, (880, 14), logo)
draw = ImageDraw.Draw(background)
font_author = ImageFont.truetype(
"/openlibrary/static/fonts/NotoSans-LightItalic.ttf", 22
)
font_title = ImageFont.truetype(
"/openlibrary/static/fonts/NotoSans-SemiBold.ttf", 28
)
para = textwrap.wrap(lst.name, width=45)
current_h = 42
author_text = "A list on Open Library"
if owner := lst.get_owner():
author_text = f"A list by {owner.displayname}"
left, top, right, bottom = font_author.getbbox(author_text)
w, h = right - left, bottom - top
draw.text(((W - w) / 2, current_h), author_text, font=font_author, fill=(0, 0, 0))
current_h += h + 5
for line in para:
left, top, right, bottom = font_title.getbbox(line)
w, h = right - left, bottom - top
draw.text(((W - w) / 2, current_h), line, font=font_title, fill=(0, 0, 0))
current_h += h
with io.BytesIO() as buf:
background.save(buf, format='PNG')
return buf.getvalue()
| 17,063 | Python | .py | 460 | 27.236957 | 141 | 0.551373 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
357 | utils.py | internetarchive_openlibrary/openlibrary/coverstore/utils.py | """Utilities for coverstore"""
import json
import mimetypes
import os
import random
import socket
import string
import requests
import web
from urllib.parse import urlsplit, urlunsplit, parse_qsl, unquote, unquote_plus # type: ignore[attr-defined]
from urllib.parse import urlencode as real_urlencode
from openlibrary.coverstore import config, oldb
from io import IOBase as file
import contextlib
socket.setdefaulttimeout(10.0)
def safeint(value, default=None):
"""
>>> safeint('1')
1
>>> safeint('x')
>>> safeint('x', 0)
0
"""
try:
return int(value)
except (TypeError, ValueError):
return default
def get_ol_url():
return web.rstrips(config.ol_url, "/")
def ol_things(key, value):
if oldb.is_supported():
return oldb.query(key, value)
else:
query = {
'type': '/type/edition',
key: value,
'sort': 'last_modified',
'limit': 10,
}
try:
d = {"query": json.dumps(query)}
result = download(get_ol_url() + '/api/things?' + real_urlencode(d))
result = json.loads(result)
return result['result']
except OSError:
import traceback
traceback.print_exc()
return []
def ol_get(olkey):
if oldb.is_supported():
return oldb.get(olkey)
else:
try:
return json.loads(download(get_ol_url() + olkey + ".json"))
except OSError:
return None
USER_AGENT = (
"Mozilla/5.0 (Compatible; coverstore downloader http://covers.openlibrary.org)"
)
def download(url):
return requests.get(url, headers={'User-Agent': USER_AGENT}).content
def urldecode(url: str) -> tuple[str, dict[str, str]]:
"""
>>> urldecode('http://google.com/search?q=bar&x=y')
('http://google.com/search', {'q': 'bar', 'x': 'y'})
>>> urldecode('http://google.com/')
('http://google.com/', {})
"""
split_url = urlsplit(url)
items = parse_qsl(split_url.query)
d = {unquote(k): unquote_plus(v) for (k, v) in items}
base = urlunsplit(split_url._replace(query=''))
return base, d
def changequery(url, **kw):
"""
>>> changequery('http://google.com/search?q=foo', q='bar', x='y')
'http://google.com/search?q=bar&x=y'
"""
base, params = urldecode(url)
params.update(kw)
return base + '?' + real_urlencode(params)
def read_file(path, offset, size, chunk=50 * 1024):
"""Returns an iterator over file data at specified offset and size.
>>> len(b"".join(read_file('/dev/urandom', 100, 10000)))
10000
"""
with open(path, "rb") as f:
f.seek(offset)
while size:
data = f.read(min(chunk, size))
size -= len(data)
if data:
yield data
else:
raise OSError("file truncated")
def rm_f(filename):
with contextlib.suppress(OSError):
os.remove(filename)
chars = string.ascii_letters + string.digits
def random_string(n):
return "".join([random.choice(chars) for i in range(n)])
def urlencode(data):
"""
    urlencodes the given data dictionary. If any of the values is a file object, the data is multipart encoded.
@@@ should go into web.browser
"""
multipart = False
for v in data.values():
if isinstance(v, file):
multipart = True
break
if not multipart:
return 'application/x-www-form-urlencoded', real_urlencode(data)
else:
# adopted from http://code.activestate.com/recipes/146306/
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def encode(key, value, out):
if isinstance(value, file):
out.append('--' + BOUNDARY)
out.append(
f'Content-Disposition: form-data; name="{key}"; filename="{value.name}"'
)
out.append('Content-Type: %s' % get_content_type(value.name))
out.append('')
out.append(value.read())
elif isinstance(value, list):
for v in value:
                    encode(key, v, out)
else:
out.append('--' + BOUNDARY)
out.append('Content-Disposition: form-data; name="%s"' % key)
out.append('')
out.append(value)
BOUNDARY = "----------ThIs_Is_tHe_bouNdaRY_$"
CRLF = '\r\n'
out = []
for k, v in data.items():
encode(k, v, out)
body = CRLF.join(out)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4,821 | Python | .py | 145 | 25.489655 | 109 | 0.582399 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
358 | __init__.py | internetarchive_openlibrary/openlibrary/coverstore/__init__.py | """Image store to store book covers and author photos for the Open Library.
"""
| 80 | Python | .py | 2 | 39 | 75 | 0.75641 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
359 | coverlib.py | internetarchive_openlibrary/openlibrary/coverstore/coverlib.py | """Cover management."""
import datetime
from logging import getLogger
import os
from io import BytesIO
from PIL import Image
import web
from openlibrary.coverstore import config, db
from openlibrary.coverstore.utils import random_string, rm_f
logger = getLogger("openlibrary.coverstore.coverlib")
__all__ = ["save_image", "read_image", "read_file"]
def save_image(data, category, olid, author=None, ip=None, source_url=None):
"""Save the provided image data, creates thumbnails and adds an entry in the database.
ValueError is raised if the provided data is not a valid image.
"""
prefix = make_path_prefix(olid)
img = write_image(data, prefix)
if img is None:
raise ValueError("Bad Image")
d = web.storage(
{
'category': category,
'olid': olid,
'author': author,
'source_url': source_url,
}
)
d['width'], d['height'] = img.size
filename = prefix + '.jpg'
d['ip'] = ip
d['filename'] = filename
d['filename_s'] = prefix + '-S.jpg'
d['filename_m'] = prefix + '-M.jpg'
d['filename_l'] = prefix + '-L.jpg'
d.id = db.new(**d)
return d
def make_path_prefix(olid, date=None):
"""Makes a file prefix for storing an image."""
date = date or datetime.date.today()
return "%04d/%02d/%02d/%s-%s" % (
date.year,
date.month,
date.day,
olid,
random_string(5),
)
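# Illustrative prefix produced by make_path_prefix(); the random suffix will
# differ on every call:
#   make_path_prefix("OL1M", datetime.date(2010, 1, 31)) -> "2010/01/31/OL1M-a1b2c"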
def write_image(data: bytes, prefix: str) -> Image.Image | None:
path_prefix = find_image_path(prefix)
dirname = os.path.dirname(path_prefix)
if not os.path.exists(dirname):
os.makedirs(dirname)
try:
# save original image
with open(path_prefix + '.jpg', 'wb') as f:
f.write(data)
img = Image.open(BytesIO(data))
if img.mode != 'RGB':
img = img.convert('RGB') # type: ignore[assignment]
for name, size in config.image_sizes.items():
path = f"{path_prefix}-{name}.jpg"
resize_image(img, size).save(path, quality=90)
return img
except OSError:
logger.exception("write_image() failed")
# cleanup
rm_f(prefix + '.jpg')
rm_f(prefix + '-S.jpg')
rm_f(prefix + '-M.jpg')
rm_f(prefix + '-L.jpg')
return None
def resize_image(image, size):
"""Resizes image to specified size while making sure that aspect ratio is maintained."""
# from PIL
x, y = image.size
if x > size[0]:
y = max(y * size[0] // x, 1)
x = size[0]
if y > size[1]:
x = max(x * size[1] // y, 1)
y = size[1]
size = x, y
return image.resize(size, Image.LANCZOS)
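# Worked example of the aspect-ratio arithmetic above: a 1000x2000 source
# resized to fit (500, 500) first becomes (500, 1000) and then (250, 500).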
def find_image_path(filename):
if ':' in filename:
return os.path.join(
config.data_root, 'items', filename.rsplit('_', 1)[0], filename
)
else:
return os.path.join(config.data_root, 'localdisk', filename)
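# Illustrative paths returned by find_image_path(); data_root comes from the
# config and the filenames are hypothetical:
#   "2010/01/31/OL1M-a1b2c.jpg"
#     -> <data_root>/localdisk/2010/01/31/OL1M-a1b2c.jpg
#   "covers_0008_82.tar:1234:5678"
#     -> <data_root>/items/covers_0008/covers_0008_82.tar:1234:5678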
def read_file(path):
if ':' in path:
path, offset, size = path.rsplit(':', 2)
with open(path, 'rb') as f:
f.seek(int(offset))
return f.read(int(size))
with open(path, 'rb') as f:
return f.read()
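# Illustrative sketch of the "path:offset:size" convention (the temporary file
# below is an assumption used only for demonstration):
#
#     import tempfile
#     with tempfile.NamedTemporaryFile(delete=False, suffix='.tar') as tmp:
#         tmp.write(b'xxmain imagexx')
#     read_file(tmp.name)            # -> b'xxmain imagexx'
#     read_file(tmp.name + ':2:10')  # -> b'main image'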
def read_image(d, size):
if size:
filename = (
d['filename_' + size.lower()] or d.filename + "-%s.jpg" % size.upper()
)
else:
filename = d.filename
path = find_image_path(filename)
return read_file(path)
| 3,483 | Python | .py | 106 | 26.037736 | 92 | 0.588112 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
360 | disk.py | internetarchive_openlibrary/openlibrary/coverstore/disk.py |
import random
import os
import string
chars = string.ascii_letters + string.digits
def random_string(n):
return "".join([random.choice(chars) for i in range(n)])
class Disk:
"""Disk interface to store files.
>>> import os, string
>>> _ = os.system("rm -rf test_disk")
>>> disk = Disk("test_disk")
>>> f1 = disk.write("hello, world!")
>>> f2 = disk.write(string.ascii_letters)
>>> f3 = disk.write(string.ascii_letters)
>>> disk.read(f1)
'hello, world!'
>>> disk.read(f2)
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
>>> disk.read(f3)
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
"""
def __init__(self, root):
self.root = root
if not os.path.exists(root):
os.makedirs(root)
def write(self, data, params=None):
params = params or {}
prefix = params.get('olid', '')
filename = self.make_filename(prefix)
path = os.path.join(self.root, filename)
f = open(path, 'w')
f.write(data)
f.close()
return filename
def read(self, filename):
path = os.path.join(self.root, filename)
if os.path.exists(path):
return open(path).read()
def make_filename(self, prefix=""):
def exists(filename):
return os.path.exists(os.path.join(self.root, filename))
filename = prefix + "_" + random_string(4)
while exists(filename):
filename = prefix + "_" + random_string(4)
return filename
class LayeredDisk:
"""Disk interface over multiple disks.
Write always happens to the first disk and
read happens on the first disk where the file is available.
"""
def __init__(self, disks):
self.disks = disks
def read(self, filename):
for disk in self.disks:
data = disk.read(filename)
if data:
return data
def write(self, data, headers=None):
headers = headers or {}
return self.disks[0].write(data, headers)
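# A minimal usage sketch (the directory names are assumptions):
#
#     disk = LayeredDisk([Disk('/tmp/disk1'), Disk('/tmp/disk2')])
#     name = disk.write('hello, world!')  # always written to the first disk
#     disk.read(name)                     # read from the first disk that has it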
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2,130 | Python | .py | 63 | 26.793651 | 68 | 0.611328 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
361 | schema.py | internetarchive_openlibrary/openlibrary/coverstore/schema.py |
"""Coverstore schema."""
from openlibrary.utils import schema
def get_schema(engine='postgres'):
s = schema.Schema()
s.add_table(
'category',
s.column('id', 'serial', primary_key=True),
s.column('name', 'string'),
)
s.add_table(
'cover',
s.column('id', 'serial', primary_key=True),
s.column('category_id', 'integer', references='category'),
s.column('olid', 'string'),
s.column('filename', 'string'),
s.column('filename_s', 'string'),
s.column('filename_m', 'string'),
s.column('filename_l', 'string'),
s.column('author', 'string'),
s.column('ip', 'string'),
s.column('source_url', 'string'),
s.column('isbn', 'string'),
s.column('width', 'integer'),
s.column('height', 'integer'),
s.column('archived', 'boolean'),
s.column('deleted', 'boolean', default=False),
s.column('created', 'timestamp', default=s.CURRENT_UTC_TIMESTAMP),
s.column('last_modified', 'timestamp', default=s.CURRENT_UTC_TIMESTAMP),
)
s.add_index('cover', 'olid')
s.add_index('cover', 'last_modified')
s.add_index('cover', 'created')
s.add_index('cover', 'deleted')
s.add_index('cover', 'archived')
s.add_table(
"log",
s.column("id", "serial", primary_key=True),
s.column("cover_id", "integer", references="cover"),
s.column("action", "text"),
s.column("timestamp", "timestamp"),
)
s.add_index("log", "timestamp")
sql = s.sql(engine)
if engine == 'sqlite':
# quick hack to fix bug in openlibrary.utils.schema
sql = sql.replace('autoincrement primary key', 'primary key autoincrement')
return sql
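# Usage sketch: the return value is a single SQL string that can be executed to
# bootstrap a coverstore database (the connection parameters are assumptions):
#
#     db = web.database(dbn='postgres', db='coverstore', user='openlibrary', pw='')
#     db.query(get_schema('postgres'))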
| 1,761 | Python | .py | 47 | 30.170213 | 83 | 0.583236 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
362 | server.py | internetarchive_openlibrary/openlibrary/coverstore/server.py |
#!/usr/bin/env python
"""coverstore server.
"""
import sys
import yaml
import web
from openlibrary.coverstore import config, code, archive
from openlibrary.utils.sentry import Sentry
def runfcgi(func, addr=('localhost', 8000)):
"""Runs a WSGI function as a FastCGI pre-fork server."""
config = dict(web.config.get("fastcgi", {}))
mode = config.pop("mode", None)
if mode == "prefork":
import flup.server.fcgi_fork as flups
else:
import flup.server.fcgi as flups
return flups.WSGIServer(func, multiplexed=True, bindAddress=addr, **config).run()
web.wsgi.runfcgi = runfcgi
def load_config(configfile):
with open(configfile) as in_file:
d = yaml.safe_load(in_file)
for k, v in d.items():
setattr(config, k, v)
if 'fastcgi' in d:
web.config.fastcgi = d['fastcgi']
def setup(configfile: str) -> None:
load_config(configfile)
sentry = Sentry(getattr(config, 'sentry', {}))
if sentry.enabled:
sentry.init()
sentry.bind_to_webpy_app(code.app)
def main(configfile, *args):
setup(configfile)
if '--archive' in args:
archive.archive()
else:
sys.argv = [sys.argv[0]] + list(args)
code.app.run()
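# Typical invocations (a sketch; the config path and port are assumptions):
#
#     python openlibrary/coverstore/server.py conf/coverstore.yml 8081
#     python openlibrary/coverstore/server.py conf/coverstore.yml --archive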
if __name__ == "__main__":
main(*sys.argv[1:])
| 1,292 | Python | .py | 40 | 27.225 | 85 | 0.656934 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
363 | test_webapp.py | internetarchive_openlibrary/openlibrary/coverstore/tests/test_webapp.py |
import json
from os import system
from os.path import abspath, dirname, join, pardir
import pytest
import web
import urllib
from openlibrary.coverstore import archive, code, config, coverlib, schema, utils
static_dir = abspath(join(dirname(__file__), pardir, pardir, pardir, 'static'))
@pytest.fixture(scope='module')
def setup_db():
"""These tests have to run as the openlibrary user."""
system('dropdb coverstore_test')
system('createdb coverstore_test')
config.db_parameters = {
'dbn': 'postgres',
'db': 'coverstore_test',
'user': 'openlibrary',
'pw': '',
}
db_schema = schema.get_schema('postgres')
db = web.database(**config.db_parameters)
db.query(db_schema)
db.insert('category', name='b')
@pytest.fixture
def image_dir(tmpdir):
tmpdir.mkdir('localdisk')
tmpdir.mkdir('items')
config.data_root = str(tmpdir)
class Mock:
def __init__(self):
self.calls = []
self.default = None
def __call__(self, *a, **kw):
for a2, kw2, _return in self.calls:
if (a, kw) == (a2, kw2):
return _return
return self.default
def setup_call(self, *a, **kw):
_return = kw.pop("_return", None)
call = a, kw, _return
self.calls.append(call)
class WebTestCase:
def setup_method(self, method):
self.browser = code.app.browser()
def jsonget(self, path):
self.browser.open(path)
return json.loads(self.browser.data)
def upload(self, olid, path):
"""Uploads an image in static dir"""
b = self.browser
path = join(static_dir, path)
content_type, data = utils.urlencode({'olid': olid, 'data': open(path).read()})
b.open('/b/upload2', data, {'Content-Type': content_type})
return json.loads(b.data)['id']
def delete(self, id, redirect_url=None):
b = self.browser
params = {'id': id}
if redirect_url:
params['redirect_url'] = redirect_url
b.open('/b/delete', urllib.parse.urlencode(params))
return b.data
def static_path(self, path):
return join(static_dir, path)
@pytest.mark.skip(
reason="Currently needs running db and openlibrary user. TODO: Make this more flexible."
)
class TestDB:
def test_write(self, setup_db, image_dir):
path = static_dir + '/logos/logo-en.png'
data = open(path).read()
d = coverlib.save_image(data, category='b', olid='OL1M')
assert 'OL1M' in d.filename
path = config.data_root + '/localdisk/' + d.filename
assert open(path).read() == data
class TestWebapp(WebTestCase):
def test_get(self):
assert code.app.request('/').status == "200 OK"
@pytest.mark.skip(
reason="Currently needs running db and openlibrary user. TODO: Make this more flexible."
)
class TestWebappWithDB(WebTestCase):
def test_touch(self):
pytest.skip('TODO: touch is no more used. Remove or fix this test later.')
b = self.browser
id1 = self.upload('OL1M', 'logos/logo-en.png')
id2 = self.upload('OL1M', 'logos/logo-it.png')
assert id1 < id2
assert (
b.open('/b/olid/OL1M.jpg').read()
== open(static_dir + '/logos/logo-it.png').read()
)
b.open('/b/touch', urllib.parse.urlencode({'id': id1}))
assert (
b.open('/b/olid/OL1M.jpg').read()
== open(static_dir + '/logos/logo-en.png').read()
)
def test_delete(self, setup_db):
b = self.browser
id1 = self.upload('OL1M', 'logos/logo-en.png')
data = self.delete(id1)
assert data == 'cover has been deleted successfully.'
def test_upload(self):
b = self.browser
path = join(static_dir, 'logos/logo-en.png')
filedata = open(path).read()
content_type, data = utils.urlencode({'olid': 'OL1234M', 'data': filedata})
b.open('/b/upload2', data, {'Content-Type': content_type})
assert b.status == 200
id = json.loads(b.data)['id']
self.verify_upload(id, filedata, {'olid': 'OL1234M'})
def test_upload_with_url(self, monkeypatch):
b = self.browser
filedata = open(join(static_dir, 'logos/logo-en.png')).read()
source_url = 'http://example.com/bookcovers/1.jpg'
mock = Mock()
mock.setup_call(source_url, _return=filedata)
monkeypatch.setattr(code, 'download', mock)
content_type, data = utils.urlencode(
{'olid': 'OL1234M', 'source_url': source_url}
)
b.open('/b/upload2', data, {'Content-Type': content_type})
assert b.status == 200
id = json.loads(b.data)['id']
self.verify_upload(id, filedata, {'source_url': source_url, 'olid': 'OL1234M'})
def verify_upload(self, id, data, expected_info=None):
expected_info = expected_info or {}
b = self.browser
b.open('/b/id/%d.json' % id)
info = json.loads(b.data)
for k, v in expected_info.items():
assert info[k] == v
response = b.open('/b/id/%d.jpg' % id)
assert b.status == 200
assert response.info().getheader('Content-Type') == 'image/jpeg'
assert b.data == data
b.open('/b/id/%d-S.jpg' % id)
assert b.status == 200
b.open('/b/id/%d-M.jpg' % id)
assert b.status == 200
b.open('/b/id/%d-L.jpg' % id)
assert b.status == 200
def test_archive_status(self):
id = self.upload('OL1M', 'logos/logo-en.png')
d = self.jsonget('/b/id/%d.json' % id)
assert d['archived'] is False
assert d['deleted'] is False
def test_archive(self):
b = self.browser
f1 = web.storage(olid='OL1M', filename='logos/logo-en.png')
f2 = web.storage(olid='OL2M', filename='logos/logo-it.png')
files = [f1, f2]
for f in files:
f.id = self.upload(f.olid, f.filename)
f.path = join(static_dir, f.filename)
assert b.open('/b/id/%d.jpg' % f.id).read() == open(f.path).read()
archive.archive()
for f in files:
d = self.jsonget('/b/id/%d.json' % f.id)
assert 'tar:' in d['filename']
assert b.open('/b/id/%d.jpg' % f.id).read() == open(f.path).read()
| 6,359 | Python | .py | 160 | 31.85 | 92 | 0.5919 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
364 | test_coverstore.py | internetarchive_openlibrary/openlibrary/coverstore/tests/test_coverstore.py |
import pytest
import web
from os.path import abspath, exists, join, dirname, pardir
from openlibrary.coverstore import config, coverlib, utils
static_dir = abspath(join(dirname(__file__), pardir, pardir, pardir, 'static'))
image_formats = [
['a', 'images/homesplash.jpg'],
['b', 'logos/logo-en.gif'],
['c', 'logos/logo-en.png'],
]
@pytest.fixture
def image_dir(tmpdir):
tmpdir.mkdir('localdisk')
tmpdir.mkdir('items')
tmpdir.mkdir('items', 'covers_0000')
tmpdir.mkdir('items', 's_covers_0000')
tmpdir.mkdir('items', 'm_covers_0000')
tmpdir.mkdir('items', 'l_covers_0000')
config.data_root = str(tmpdir)
@pytest.mark.parametrize('prefix, path', image_formats)
def test_write_image(prefix, path, image_dir):
"""Test writing jpg, gif and png images"""
data = open(join(static_dir, path), 'rb').read()
assert coverlib.write_image(data, prefix) is not None
def _exists(filename):
return exists(coverlib.find_image_path(filename))
assert _exists(prefix + '.jpg')
assert _exists(prefix + '-S.jpg')
assert _exists(prefix + '-M.jpg')
assert _exists(prefix + '-L.jpg')
assert open(coverlib.find_image_path(prefix + '.jpg'), 'rb').read() == data
def test_bad_image(image_dir):
prefix = config.data_root + '/bad'
assert coverlib.write_image(b'', prefix) is None
prefix = config.data_root + '/bad'
assert coverlib.write_image(b'not an image', prefix) is None
def test_resize_image_aspect_ratio():
"""make sure the aspect-ratio is maintained"""
from PIL import Image
img = Image.new('RGB', (100, 200))
img2 = coverlib.resize_image(img, (40, 40))
assert img2.size == (20, 40)
img2 = coverlib.resize_image(img, (400, 400))
assert img2.size == (100, 200)
img2 = coverlib.resize_image(img, (75, 100))
assert img2.size == (50, 100)
img2 = coverlib.resize_image(img, (75, 200))
assert img2.size == (75, 150)
def test_serve_file(image_dir):
path = static_dir + "/logos/logo-en.png"
assert coverlib.read_file('/dev/null') == b''
assert coverlib.read_file(path) == open(path, "rb").read()
assert coverlib.read_file(path + ":10:20") == open(path, "rb").read()[10 : 10 + 20]
def test_server_image(image_dir):
def write(filename, data):
with open(join(config.data_root, filename), 'wb') as f:
f.write(data)
def do_test(d):
def serve_image(d, size):
return "".join(coverlib.read_image(d, size).decode('utf-8'))
assert serve_image(d, '') == 'main image'
assert serve_image(d, None) == 'main image'
assert serve_image(d, 'S') == 'S image'
assert serve_image(d, 'M') == 'M image'
assert serve_image(d, 'L') == 'L image'
assert serve_image(d, 's') == 'S image'
assert serve_image(d, 'm') == 'M image'
assert serve_image(d, 'l') == 'L image'
# test with regular images
write('localdisk/a.jpg', b'main image')
write('localdisk/a-S.jpg', b'S image')
write('localdisk/a-M.jpg', b'M image')
write('localdisk/a-L.jpg', b'L image')
d = web.storage(
id=1,
filename='a.jpg',
filename_s='a-S.jpg',
filename_m='a-M.jpg',
filename_l='a-L.jpg',
)
do_test(d)
# test with offsets
write('items/covers_0000/covers_0000_00.tar', b'xxmain imagexx')
write('items/s_covers_0000/s_covers_0000_00.tar', b'xxS imagexx')
write('items/m_covers_0000/m_covers_0000_00.tar', b'xxM imagexx')
write('items/l_covers_0000/l_covers_0000_00.tar', b'xxL imagexx')
d = web.storage(
id=1,
filename='covers_0000_00.tar:2:10',
filename_s='s_covers_0000_00.tar:2:7',
filename_m='m_covers_0000_00.tar:2:7',
filename_l='l_covers_0000_00.tar:2:7',
)
do_test(d)
def test_image_path(image_dir):
assert coverlib.find_image_path('a.jpg') == config.data_root + '/localdisk/a.jpg'
assert (
coverlib.find_image_path('covers_0000_00.tar:1234:10')
== config.data_root + '/items/covers_0000/covers_0000_00.tar:1234:10'
)
def test_urldecode():
assert utils.urldecode('http://google.com/search?q=bar&x=y') == (
'http://google.com/search',
{'q': 'bar', 'x': 'y'},
)
assert utils.urldecode('google.com/search?q=bar&x=y') == (
'google.com/search',
{'q': 'bar', 'x': 'y'},
)
assert utils.urldecode('http://google.com/search') == (
'http://google.com/search',
{},
)
assert utils.urldecode('http://google.com/') == ('http://google.com/', {})
assert utils.urldecode('http://google.com/?') == ('http://google.com/', {})
assert utils.urldecode('?q=bar') == ('', {'q': 'bar'})
| 4,747 | Python | .py | 116 | 35.034483 | 87 | 0.618467 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
365 | test_archive.py | internetarchive_openlibrary/openlibrary/coverstore/tests/test_archive.py | from .. import archive
def test_get_filename():
# Basic usage
assert archive.Batch.get_relpath("0008", "80") == "covers_0008/covers_0008_80"
# Sizes
assert (
archive.Batch.get_relpath("0008", "80", size="s")
== "s_covers_0008/s_covers_0008_80"
)
assert (
archive.Batch.get_relpath("0008", "80", size="m")
== "m_covers_0008/m_covers_0008_80"
)
assert (
archive.Batch.get_relpath("0008", "80", size="l")
== "l_covers_0008/l_covers_0008_80"
)
# Ext
assert (
archive.Batch.get_relpath("0008", "80", ext="tar")
== "covers_0008/covers_0008_80.tar"
)
# Ext + Size
assert (
archive.Batch.get_relpath("0008", "80", size="l", ext="zip")
== "l_covers_0008/l_covers_0008_80.zip"
)
def test_get_batch_end_id():
assert archive.CoverDB._get_batch_end_id(start_id=8820500) == 8830000
def test_id_to_item_and_batch_id():
assert archive.Cover.id_to_item_and_batch_id(987_654_321) == ('0987', '65')
| 1,037 | Python | .py | 31 | 27.387097 | 82 | 0.593781 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
366 | test_doctests.py | internetarchive_openlibrary/openlibrary/coverstore/tests/test_doctests.py |
import doctest
import pytest
modules = [
'openlibrary.coverstore.archive',
'openlibrary.coverstore.code',
'openlibrary.coverstore.db',
'openlibrary.coverstore.server',
'openlibrary.coverstore.utils',
]
@pytest.mark.parametrize('module', modules)
def test_doctest(module):
mod = __import__(module, None, None, ['x'])
finder = doctest.DocTestFinder()
tests = finder.find(mod, mod.__name__)
print(f"Doctests found in {module}: {[len(m.examples) for m in tests]}\n")
for test in tests:
runner = doctest.DocTestRunner(verbose=True)
failures, tries = runner.run(test)
if failures:
pytest.fail("doctest failed: " + test.name)
| 698 | Python | .py | 20 | 29.95 | 78 | 0.68 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
367 | test_code.py | internetarchive_openlibrary/openlibrary/coverstore/tests/test_code.py |
from .. import code
from io import StringIO
import web
import datetime
def test_tarindex_path():
assert code.get_tarindex_path(0, "") == "items/covers_0000/covers_0000_00.index"
assert (
code.get_tarindex_path(0, "s") == "items/s_covers_0000/s_covers_0000_00.index"
)
assert (
code.get_tarindex_path(0, "m") == "items/m_covers_0000/m_covers_0000_00.index"
)
assert (
code.get_tarindex_path(0, "l") == "items/l_covers_0000/l_covers_0000_00.index"
)
assert code.get_tarindex_path(99, "") == "items/covers_0000/covers_0000_99.index"
assert code.get_tarindex_path(100, "") == "items/covers_0001/covers_0001_00.index"
assert code.get_tarindex_path(1, "") == "items/covers_0000/covers_0000_01.index"
assert code.get_tarindex_path(21, "") == "items/covers_0000/covers_0000_21.index"
assert code.get_tarindex_path(321, "") == "items/covers_0003/covers_0003_21.index"
assert code.get_tarindex_path(4321, "") == "items/covers_0043/covers_0043_21.index"
def test_parse_tarindex():
f = StringIO("")
offsets, sizes = code.parse_tarindex(f)
assert list(offsets) == [0 for i in range(10000)]
assert list(sizes) == [0 for i in range(10000)]
f = StringIO("0000010000.jpg\t0\t10\n0000010002.jpg\t512\t20\n")
offsets, sizes = code.parse_tarindex(f)
assert (offsets[0], sizes[0]) == (0, 10)
assert (offsets[1], sizes[1]) == (0, 0)
assert (offsets[2], sizes[2]) == (512, 20)
assert (offsets[42], sizes[42]) == (0, 0)
class Test_cover:
def test_get_tar_filename(self, monkeypatch):
offsets = {}
sizes = {}
def _get_tar_index(index, size):
array_offsets = [offsets.get(i, 0) for i in range(10000)]
array_sizes = [sizes.get(i, 0) for i in range(10000)]
return array_offsets, array_sizes
monkeypatch.setattr(code, "get_tar_index", _get_tar_index)
f = code.cover().get_tar_filename
assert f(42, "s") is None
offsets[42] = 1234
sizes[42] = 567
assert f(42, "s") == "s_covers_0000_00.tar:1234:567"
assert f(30042, "s") == "s_covers_0000_03.tar:1234:567"
d = code.cover().get_details(42, "s")
assert isinstance(d, web.storage)
assert d == {
"id": 42,
"filename_s": "s_covers_0000_00.tar:1234:567",
"created": datetime.datetime(2010, 1, 1),
}
| 2,434 | Python | .py | 54 | 38.12963 | 87 | 0.617435 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
368 | test_po_files.py | internetarchive_openlibrary/openlibrary/i18n/test_po_files.py |
import os
import pytest
from babel.messages.pofile import read_po
import xml.etree.ElementTree as ET
from openlibrary.i18n import get_locales
root = os.path.dirname(__file__)
def trees_equal(el1: ET.Element, el2: ET.Element, error=True):
"""
Check if the tree data is the same
>>> trees_equal(ET.fromstring('<root />'), ET.fromstring('<root />'))
True
>>> trees_equal(ET.fromstring('<root x="3" />'),
... ET.fromstring('<root x="7" />'))
True
>>> trees_equal(ET.fromstring('<root x="3" y="12" />'),
... ET.fromstring('<root x="7" />'), error=False)
False
>>> trees_equal(ET.fromstring('<root><a /></root>'),
... ET.fromstring('<root />'), error=False)
False
>>> trees_equal(ET.fromstring('<root><a /></root>'),
... ET.fromstring('<root><a>Foo</a></root>'), error=False)
True
>>> trees_equal(ET.fromstring('<root><a href="" /></root>'),
... ET.fromstring('<root><a>Foo</a></root>'), error=False)
False
"""
try:
assert el1.tag == el2.tag
assert set(el1.attrib.keys()) == set(el2.attrib.keys())
assert len(el1) == len(el2)
for c1, c2 in zip(el1, el2):
trees_equal(c1, c2)
except AssertionError as e:
if error:
raise e
else:
return False
return True
def gen_po_file_keys():
for locale in get_locales():
po_path = os.path.join(root, locale, 'messages.po')
catalog = read_po(open(po_path, 'rb'))
for key in catalog:
yield locale, key
def gen_po_msg_pairs():
for locale, key in gen_po_file_keys():
if not isinstance(key.id, str):
msgids, msgstrs = (key.id, key.string)
else:
msgids, msgstrs = ([key.id], [key.string])
for msgid, msgstr in zip(msgids, msgstrs):
if msgstr == "":
continue
yield locale, msgid, msgstr
def gen_html_entries():
for locale, msgid, msgstr in gen_po_msg_pairs():
if '</' not in msgid:
continue
yield pytest.param(locale, msgid, msgstr, id=f'{locale}-{msgid}')
@pytest.mark.parametrize("locale,msgid,msgstr", gen_html_entries())
def test_html_format(locale: str, msgid: str, msgstr: str):
    # Need this to support &nbsp;, since ET only parses XML.
# Find a better solution?
entities = '<!DOCTYPE text [ <!ENTITY nbsp " "> ]>'
id_tree = ET.fromstring(f'{entities}<root>{msgid}</root>')
str_tree = ET.fromstring(f'{entities}<root>{msgstr}</root>')
if not msgstr.startswith('<!-- i18n-lint no-tree-equal -->'):
assert trees_equal(id_tree, str_tree)
| 2,723 | Python | .py | 69 | 32.855072 | 76 | 0.579007 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
369 | __init__.py | internetarchive_openlibrary/openlibrary/i18n/__init__.py |
import os
import shutil
import sys
import subprocess
from collections.abc import Iterator
from io import BytesIO
from pathlib import Path
from datetime import datetime
import web
import babel
from babel.support import Translations
from babel.messages import Catalog, Message
from babel.messages.pofile import read_po, write_po
from babel.messages.mofile import write_mo
from babel.messages.extract import extract_from_file, extract_from_dir, extract_python
from .validators import validate
root = os.path.dirname(__file__)
def error_color_fn(text: str) -> str:
"""Styles the text for printing to console with error color."""
return '\033[91m' + text + '\033[0m'
def success_color_fn(text: str) -> str:
"""Styles the text for printing to console with success color."""
return '\033[92m' + text + '\033[0m'
def warning_color_fn(text: str) -> str:
"""Styles the text for printing to console with warning color."""
return '\033[93m' + text + '\033[0m'
def get_untracked_files(dirs: list[str], extensions: tuple[str, str] | str) -> set:
"""Returns a set of all currently untracked files with specified extension(s)."""
untracked_files = {
Path(line)
for dir in dirs
for line in subprocess.run(
['git', 'ls-files', '--others', '--exclude-standard', dir],
stdout=subprocess.PIPE,
text=True,
check=True,
).stdout.split('\n')
if line.endswith(extensions)
}
return untracked_files
def _compile_translation(po, mo):
try:
catalog = read_po(open(po, 'rb'))
f = open(mo, 'wb')
write_mo(f, catalog)
f.close()
print('compiled', po, file=web.debug)
except Exception as e:
print('failed to compile', po, file=web.debug)
raise e
def _validate_catalog(
catalog: Catalog,
) -> Iterator[tuple[Message, list[str], list[str]]]:
for message in catalog:
if message.lineno:
warnings: list[str] = []
errors: list[str] = validate(message, catalog)
if message.fuzzy:
warnings.append(f'"{message.string}" is fuzzy')
if warnings or errors:
yield message, warnings, errors
def validate_translations(args: list[str]):
"""Validates all locales passed in as arguments.
If no arguments are passed, all locales will be validated.
    Returns a dictionary mapping each locale to its validation error count.
"""
locales = args or get_locales()
results = {}
for locale in locales:
po_path = os.path.join(root, locale, 'messages.po')
if os.path.exists(po_path):
num_errors = 0
error_print: list[str] = []
catalog = read_po(open(po_path, 'rb'))
for message, warnings, errors in _validate_catalog(catalog):
for w in warnings:
print(
warning_color_fn(
f'openlibrary/i18n/{locale}/messages.po:{message.lineno}: '
)
+ w
)
if errors:
num_errors += len(errors)
error_print.append(
error_color_fn(
f'openlibrary/i18n/{locale}/messages.po:{message.lineno}: '
)
+ repr(message.string),
)
error_print.extend(errors)
if num_errors == 0:
print(
success_color_fn(f'Translations for locale "{locale}" are valid!')
)
else:
for e in error_print:
print(e)
print(error_color_fn("\nValidation failed..."))
print(error_color_fn("Please correct the errors before proceeding."))
results[locale] = num_errors
else:
print(f'Portable object file for locale "{locale}" does not exist.')
return results
def get_locales():
return [
d
for d in os.listdir(root)
if (
os.path.isdir(os.path.join(root, d))
and os.path.exists(os.path.join(root, d, 'messages.po'))
)
]
def extract_templetor(fileobj, keywords, comment_tags, options):
"""Extract i18n messages from web.py templates."""
try:
instring = fileobj.read().decode('utf-8')
# Replace/remove inline js '\$' which interferes with the Babel python parser:
cleaned_string = instring.replace(r'\$', '')
code = web.template.Template.generate_code(cleaned_string, fileobj.name)
f = BytesIO(code.encode('utf-8')) # Babel wants bytes, not strings
except Exception as e:
print('Failed to extract ' + fileobj.name + ':', repr(e), file=web.debug)
return []
return extract_python(f, keywords, comment_tags, options)
def extract_messages(dirs: list[str], verbose: bool, skip_untracked: bool):
# The creation date is fixed to prevent merge conflicts on this line as a result of i18n auto-updates
# In the unlikely event we need to update the fixed creation date, you can change the hard-coded date below
fixed_creation_date = datetime.fromisoformat('2024-05-01 18:58-0400')
catalog = Catalog(
project='Open Library',
copyright_holder='Internet Archive',
creation_date=fixed_creation_date,
)
METHODS = [("**.py", "python"), ("**.html", "openlibrary.i18n:extract_templetor")]
COMMENT_TAGS = ["NOTE:"]
skipped_files = set()
if skip_untracked:
skipped_files = get_untracked_files(dirs, ('.py', '.html'))
for d in dirs:
extracted = extract_from_dir(
d, METHODS, comment_tags=COMMENT_TAGS, strip_comment_tags=True
)
counts: dict[str, int] = {}
for filename, lineno, message, comments, context in extracted:
file_path = Path(d) / filename
if file_path in skipped_files:
continue
counts[filename] = counts.get(filename, 0) + 1
catalog.add(message, None, [(filename, lineno)], auto_comments=comments)
if verbose:
for filename, count in counts.items():
path = filename if d == filename else os.path.join(d, filename)
print(f"{count}\t{path}", file=sys.stderr)
path = os.path.join(root, 'messages.pot')
f = open(path, 'wb')
write_po(f, catalog, include_lineno=False)
f.close()
print('Updated strings written to', path)
def compile_translations(locales: list[str]):
locales_to_update = locales or get_locales()
for locale in locales_to_update:
po_path = os.path.join(root, locale, 'messages.po')
mo_path = os.path.join(root, locale, 'messages.mo')
if os.path.exists(po_path):
_compile_translation(po_path, mo_path)
def update_translations(locales: list[str]):
locales_to_update = locales or get_locales()
print(f"Updating {locales_to_update}")
pot_path = os.path.join(root, 'messages.pot')
template = read_po(open(pot_path, 'rb'))
for locale in locales_to_update:
po_path = os.path.join(root, locale, 'messages.po')
mo_path = os.path.join(root, locale, 'messages.mo')
if os.path.exists(po_path):
catalog = read_po(open(po_path, 'rb'))
catalog.update(template)
f = open(po_path, 'wb')
write_po(f, catalog)
f.close()
print('updated', po_path)
else:
print(f"ERROR: {po_path} does not exist...")
compile_translations(locales_to_update)
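# Typical maintenance flow when strings change (a sketch; the directory and
# locale arguments are assumptions):
#
#     extract_messages(['openlibrary'], verbose=False, skip_untracked=True)
#     generate_po(['de'])            # only when adding a brand-new locale
#     update_translations(['de'])    # merge messages.pot and recompile .mo files
#     validate_translations(['de'])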
def check_status(locales: list[str]):
locales_to_update = locales or get_locales()
pot_path = os.path.join(root, 'messages.pot')
with open(pot_path, 'rb') as f:
message_ids = {message.id for message in read_po(f)}
for locale in locales_to_update:
po_path = os.path.join(root, locale, 'messages.po')
if os.path.exists(po_path):
with open(po_path, 'rb') as f:
catalog = read_po(f)
ids_with_translations = {
message.id
for message in catalog
if ''.join(message.string or '').strip()
}
ids_completed = message_ids.intersection(ids_with_translations)
validation_errors = _validate_catalog(catalog)
total_warnings = 0
total_errors = 0
for message, warnings, errors in validation_errors:
total_warnings += len(warnings)
total_errors += len(errors)
percent_complete = len(ids_completed) / len(message_ids) * 100
all_green = (
percent_complete == 100 and total_warnings == 0 and total_errors == 0
)
total_color = success_color_fn if all_green else lambda x: x
warnings_color = (
warning_color_fn if total_warnings > 0 else success_color_fn
)
errors_color = error_color_fn if total_errors > 0 else success_color_fn
percent_color = (
success_color_fn
if percent_complete == 100
else warning_color_fn if percent_complete > 25 else error_color_fn
)
print(
total_color(
'\t'.join(
[
locale,
percent_color(f'{percent_complete:6.2f}% complete'),
warnings_color(f'{total_warnings:2d} warnings'),
errors_color(f'{total_errors:2d} errors'),
f'openlibrary/i18n/{locale}/messages.po',
]
)
)
)
if len(locales) == 1:
print(f'---- validate {locale} ----')
validate_translations(locales)
else:
print(f"ERROR: {po_path} does not exist...")
def generate_po(args):
if args:
po_dir = os.path.join(root, args[0])
pot_src = os.path.join(root, 'messages.pot')
po_dest = os.path.join(po_dir, 'messages.po')
if os.path.exists(po_dir):
if os.path.exists(po_dest):
print(f"Portable object file already exists at {po_dest}")
else:
shutil.copy(pot_src, po_dest)
os.chmod(po_dest, 0o666)
print(f"File created at {po_dest}")
else:
os.mkdir(po_dir)
os.chmod(po_dir, 0o777)
shutil.copy(pot_src, po_dest)
os.chmod(po_dest, 0o666)
print(f"File created at {po_dest}")
else:
print("Add failed. Missing required locale code.")
@web.memoize
def load_translations(lang):
po = os.path.join(root, lang, 'messages.po')
mo_path = os.path.join(root, lang, 'messages.mo')
if os.path.exists(mo_path):
return Translations(open(mo_path, 'rb'))
@web.memoize
def load_locale(lang):
try:
return babel.Locale(lang)
except babel.UnknownLocaleError:
pass
class GetText:
def __call__(self, string, *args, **kwargs):
"""Translate a given string to the language of the current locale."""
# Get the website locale from the global ctx.lang variable, set in i18n_loadhook
translations = load_translations(web.ctx.lang)
value = (translations and translations.ugettext(string)) or string
if args:
value = value % args
elif kwargs:
value = value % kwargs
return value
def __getattr__(self, key):
from infogami.utils.i18n import strings
        # for backward-compatibility
return strings.get('', key)
class LazyGetText:
def __call__(self, string, *args, **kwargs):
"""Translate a given string lazily."""
return LazyObject(lambda: GetText()(string, *args, **kwargs))
class LazyObject:
def __init__(self, creator):
self._creator = creator
def __str__(self):
return web.safestr(self._creator())
def __repr__(self):
return repr(self._creator())
def __add__(self, other):
return self._creator() + other
def __radd__(self, other):
return other + self._creator()
def ungettext(s1, s2, _n, *a, **kw):
# Get the website locale from the global ctx.lang variable, set in i18n_loadhook
translations = load_translations(web.ctx.lang)
value = translations and translations.ungettext(s1, s2, _n)
if not value:
# fallback when translation is not provided
if _n == 1:
value = s1
else:
value = s2
if a:
return value % a
elif kw:
return value % kw
else:
return value
def gettext_territory(code):
"""Returns the territory name in the current locale."""
# Get the website locale from the global ctx.lang variable, set in i18n_loadhook
locale = load_locale(web.ctx.lang)
return locale.territories.get(code, code)
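# Usage sketch (web.ctx.lang must already be set by the i18n load hook):
#
#     gettext('Add a book')                     # simple lookup
#     gettext('Hello %(name)s', name='reader')  # keyword interpolation
#     ungettext('%d book', '%d books', n, n)    # plural-aware lookup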
gettext = GetText()
ugettext = gettext
lgettext = LazyGetText()
_ = gettext
| 13,325 | Python | .py | 326 | 30.803681 | 111 | 0.584973 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
370 | validators.py | internetarchive_openlibrary/openlibrary/i18n/validators.py |
from itertools import groupby
import re
from babel.messages.catalog import TranslationError, Message, Catalog
from babel.messages.checkers import python_format
def validate(message: Message, catalog: Catalog) -> list[str]:
errors = [f' {err}' for err in message.check(catalog)]
if message.python_format and not message.pluralizable and message.string:
errors.extend(_validate_cfmt(str(message.id or ''), str(message.string or '')))
return errors
def _validate_cfmt(msgid: str, msgstr: str) -> list[str]:
errors = []
if _cfmt_fingerprint(msgid) != _cfmt_fingerprint(msgstr):
errors.append(' Failed custom string format validation')
return errors
def _cfmt_fingerprint(string: str):
"""
Get a fingerprint dict of the cstyle format in this string
>>> _cfmt_fingerprint('hello %s')
{'%s': 1}
>>> _cfmt_fingerprint('hello %s and %s')
{'%s': 2}
>>> _cfmt_fingerprint('hello %(title)s. %(first)s %(last)s')
{'%(title)s': 1, '%(first)s': 1, '%(last)s': 1}
"""
pieces = _parse_cfmt(string)
return {key: len(list(grp)) for key, grp in groupby(pieces)}
def _parse_cfmt(string: str):
"""
Extract e.g. '%s' from cstyle python format strings
>>> _parse_cfmt('hello %s')
['%s']
>>> _parse_cfmt(' by %(name)s')
['%(name)s']
>>> _parse_cfmt('%(count)d Lists')
['%(count)d']
>>> _parse_cfmt('100%% Complete!')
['%%']
>>> _parse_cfmt('%(name)s avez %(count)s listes.')
['%(name)s', '%(count)s']
>>> _parse_cfmt('')
[]
>>> _parse_cfmt('Hello World')
[]
"""
cfmt_re = r'''
(
%(?:
(?:\([a-zA-Z_][a-zA-Z0-9_]*?\))? # e.g. %(blah)s
(?:[-+0 #]{0,5}) # optional flags
(?:\d+|\*)? # width
(?:\.(?:\d+|\*))? # precision
(?:h|l|ll|w|I|I32|I64)? # size
[cCdiouxXeEfgGaAnpsSZ] # type
)
)
| # OR
%% # literal "%%"
'''
return [m.group(0) for m in re.finditer(cfmt_re, string, flags=re.VERBOSE)]
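# A small runnable sketch of the validators above (added for illustration, not
# part of the upstream module): a translation that drops or renames a c-format
# placeholder fails the custom validation.
if __name__ == "__main__":  # pragma: no cover
    assert _validate_cfmt("Hello %(name)s", "Bonjour %(name)s") == []
    assert _validate_cfmt("Hello %(name)s", "Bonjour") != []
    print(_parse_cfmt("%(count)d books by %(name)s"))  # ['%(count)d', '%(name)s']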
| 2,241 | Python | .py | 59 | 31.40678 | 87 | 0.513601 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
371 | ol_infobase.py | internetarchive_openlibrary/openlibrary/plugins/ol_infobase.py |
#!/usr/bin/env python
"""Open Library plugin for infobase.
"""
import datetime
import json
import logging
import logging.config
import os
import re
import sys
import traceback
import requests
import web
from infogami.infobase import cache, common, config, dbstore, server
from openlibrary.plugins.upstream.utils import strip_accents
from ..utils.isbn import isbn_10_to_isbn_13, isbn_13_to_isbn_10, normalize_isbn
# relative import
from .openlibrary import schema
logger = logging.getLogger("infobase.ol")
def init_plugin():
"""Initialize infobase plugin."""
from infogami.infobase import common, dbstore
from infogami.infobase import logger as infobase_logger
from infogami.infobase import server
dbstore.default_schema = schema.get_schema()
# Replace infobase Indexer with OL custom Indexer
dbstore.Indexer = OLIndexer
if config.get('errorlog'):
common.record_exception = lambda: save_error(config.errorlog, 'infobase') # type: ignore[attr-defined]
ol = server.get_site('openlibrary.org')
ib = server._infobase
if config.get('writelog'):
ib.add_event_listener(infobase_logger.Logger(config.writelog))
ib.add_event_listener(invalidate_most_recent_change)
setup_logging()
if ol: # noqa: SIM102
# install custom indexer
# XXX-Anand: this might create some trouble. Commenting out.
# ol.store.indexer = Indexer()
if config.get('http_listeners'):
logger.info("setting up http listeners")
ol.add_trigger(None, http_notify)
# # memcache invalidator is not required now. It was added for future use.
# _cache = config.get("cache", {})
# if _cache.get("type") == "memcache":
# logger.info("setting up memcache invalidater")
# ol.add_trigger(None, MemcacheInvalidater())
# hook to add count functionality
server.app.add_mapping(
r"/([^/]*)/count_editions_by_author", __name__ + ".count_editions_by_author"
)
server.app.add_mapping(
r"/([^/]*)/count_editions_by_work", __name__ + ".count_editions_by_work"
)
server.app.add_mapping(
r"/([^/]*)/count_edits_by_user", __name__ + ".count_edits_by_user"
)
server.app.add_mapping(r"/([^/]*)/most_recent", __name__ + ".most_recent")
server.app.add_mapping(r"/([^/]*)/clear_cache", __name__ + ".clear_cache")
server.app.add_mapping(r"/([^/]*)/stats/(\d\d\d\d-\d\d-\d\d)", __name__ + ".stats")
server.app.add_mapping(r"/([^/]*)/has_user", __name__ + ".has_user")
server.app.add_mapping(r"/([^/]*)/olid_to_key", __name__ + ".olid_to_key")
server.app.add_mapping(r"/_reload_config", __name__ + ".reload_config")
server.app.add_mapping(r"/_inspect", __name__ + "._inspect")
def setup_logging():
try:
logconfig = config.get("logging_config_file")
if logconfig and os.path.exists(logconfig):
logging.config.fileConfig(logconfig, disable_existing_loggers=False)
logger.info("logging initialized")
logger.debug("debug")
except Exception as e:
print("Unable to set logging configuration:", str(e), file=sys.stderr)
raise
class reload_config:
@server.jsonify
def POST(self):
logging.info("reloading logging config")
setup_logging()
return {"ok": "true"}
class _inspect:
"""Backdoor to inspect the running process.
    Tries to import the _inspect module and execute its inspect() function. The module is reloaded on every invocation.
"""
def GET(self):
sys.modules.pop("_inspect", None)
try:
import _inspect
return _inspect.inspect()
except Exception:
return traceback.format_exc()
def get_db():
site = server.get_site('openlibrary.org')
return site.store.db
@web.memoize
def get_property_id(type, name):
db = get_db()
type_id = get_thing_id(type)
try:
return db.where('property', type=type_id, name=name)[0].id
except IndexError:
return None
def get_thing_id(key):
try:
return get_db().where('thing', key=key)[0].id
except IndexError:
return None
def count(table, type, key, value):
pid = get_property_id(type, key)
value_id = get_thing_id(value)
if value_id is None:
return 0
return (
get_db()
.query(
"SELECT count(*) FROM " + table + " WHERE key_id=$pid AND value=$value_id",
vars=locals(),
)[0]
.count
)
class count_editions_by_author:
@server.jsonify
def GET(self, sitename):
i = server.input('key')
return count('edition_ref', '/type/edition', 'authors', i.key)
class count_editions_by_work:
@server.jsonify
def GET(self, sitename):
i = server.input('key')
return count('edition_ref', '/type/edition', 'works', i.key)
class count_edits_by_user:
@server.jsonify
def GET(self, sitename):
i = server.input('key')
author_id = get_thing_id(i.key)
return (
get_db()
.query(
"SELECT count(*) as count FROM transaction WHERE author_id=$author_id",
vars=locals(),
)[0]
.count
)
class has_user:
@server.jsonify
def GET(self, sitename):
i = server.input("username")
        # Don't allow OLIDs to be usernames
if web.re_compile(r"OL\d+[A-Z]").match(i.username.upper()):
return True
key = "/user/" + i.username.lower()
type_user = get_thing_id("/type/user")
d = get_db().query(
"SELECT * from thing WHERE lower(key) = $key AND type=$type_user",
vars=locals(),
)
return bool(d)
class stats:
@server.jsonify
def GET(self, sitename, today):
return dict(self.stats(today))
def stats(self, today):
tomorrow = self.nextday(today)
yield 'edits', self.edits(today, tomorrow)
yield 'edits_by_bots', self.edits(today, tomorrow, bots=True)
yield 'new_accounts', self.new_accounts(today, tomorrow)
def nextday(self, today):
return (
get_db().query("SELECT date($today) + 1 AS value", vars=locals())[0].value
)
def edits(self, today, tomorrow, bots=False):
tables = 'version v, transaction t'
where = 'v.transaction_id=t.id AND t.created >= date($today) AND t.created < date($tomorrow)'
if bots:
where += (
" AND t.author_id IN (SELECT thing_id FROM account WHERE bot = 't')"
)
return self.count(tables=tables, where=where, vars=locals())
def new_accounts(self, today, tomorrow):
type_user = get_thing_id('/type/user')
return self.count(
'thing',
'type=$type_user AND created >= date($today) AND created < date($tomorrow)',
vars=locals(),
)
def total_accounts(self):
type_user = get_thing_id('/type/user')
return self.count(tables='thing', where='type=$type_user', vars=locals())
def count(self, tables, where, vars):
return (
get_db()
.select(what="count(*) as value", tables=tables, where=where, vars=vars)[0]
.value
)
most_recent_change = None
def invalidate_most_recent_change(event):
global most_recent_change
most_recent_change = None
class most_recent:
@server.jsonify
def GET(self, sitename):
global most_recent_change
if most_recent_change is None:
site = server.get_site('openlibrary.org')
most_recent_change = site.versions({'limit': 1})[0]
return most_recent_change
class clear_cache:
@server.jsonify
def POST(self, sitename):
from infogami.infobase import cache
cache.global_cache.clear()
return {'done': True}
class olid_to_key:
@server.jsonify
def GET(self, sitename):
i = server.input('olid')
d = get_db().query(
'SELECT key FROM thing WHERE get_olid(key) = $i.olid', vars=locals()
)
key = d and d[0].key or None
return {'olid': i.olid, 'key': key}
def write(path, data):
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
f = open(path, 'w')
f.write(data)
f.close()
def save_error(dir, prefix):
try:
logger.error("Error", exc_info=True)
error = web.djangoerror()
now = datetime.datetime.utcnow()
path = '%s/%04d-%02d-%02d/%s-%02d%02d%02d.%06d.html' % (
dir,
now.year,
now.month,
now.day,
prefix,
now.hour,
now.minute,
now.second,
now.microsecond,
)
logger.error('Error saved to %s', path)
write(path, web.safestr(error))
except Exception:
logger.error('Exception in saving the error', exc_info=True)
def get_object_data(site, thing):
"""Return expanded data of specified object."""
def expand(value):
if isinstance(value, list):
return [expand(v) for v in value]
elif isinstance(value, common.Reference):
t = site._get_thing(value)
return t and t._get_data()
else:
return value
d = thing._get_data()
for k, v in d.items():
# save some space by not expanding type
if k != 'type':
d[k] = expand(v)
return d
def http_notify(site, old, new):
"""Notify listeners over http."""
if isinstance(new, dict):
data = new
else:
# new is a thing. call format_data to get the actual data.
data = new.format_data()
json_data = json.dumps(data)
key = data['key']
# optimize the most common case.
# The following prefixes are never cached at the client. Avoid cache invalidation in that case.
not_cached = [
'/b/',
'/a/',
'/books/',
'/authors/',
'/works/',
'/subjects/',
'/publishers/',
'/user/',
'/usergroup/',
'/people/',
]
for prefix in not_cached:
if key.startswith(prefix):
return
for url in config.http_listeners:
try:
response = requests.get(url, params=json_data)
response.raise_for_status()
print(
'http_notify', repr(url), repr(key), repr(response.text), file=web.debug
)
except Exception:
print('failed to send http_notify', repr(url), repr(key), file=web.debug)
import traceback
traceback.print_exc()
class MemcacheInvalidater:
def __init__(self):
self.memcache = self.get_memcache_client()
def get_memcache_client(self):
_cache = config.get('cache', {})
if _cache.get('type') == 'memcache' and 'servers' in _cache:
return olmemcache.Client(_cache['servers'])
def to_dict(self, d):
if isinstance(d, dict):
return d
else:
# new is a thing. call format_data to get the actual data.
return d.format_data()
def __call__(self, site, old, new):
if not old:
return
old = self.to_dict(old)
new = self.to_dict(new)
type = old['type']['key']
if type == '/type/author':
keys = self.invalidate_author(site, old)
elif type == '/type/edition':
keys = self.invalidate_edition(site, old)
elif type == '/type/work':
keys = self.invalidate_work(site, old)
else:
keys = self.invalidate_default(site, old)
self.memcache.delete_multi(['details/' + k for k in keys])
def invalidate_author(self, site, old):
yield old.key
def invalidate_edition(self, site, old):
yield old.key
for w in old.get('works', []):
if 'key' in w:
yield w['key']
def invalidate_work(self, site, old):
yield old.key
# invalidate all work.editions
editions = site.things({'type': '/type/edition', 'work': old.key})
for e in editions:
yield e['key']
# invalidate old.authors
authors = old.get('authors', [])
for a in authors:
if 'author' in a and 'key' in a['author']:
yield a['author']['key']
def invalidate_default(self, site, old):
yield old.key
# openlibrary.utils can't be imported directly because
# openlibrary.plugins.openlibrary masks openlibrary module
olmemcache = __import__('openlibrary.utils.olmemcache', None, None, ['x'])
def MemcachedDict(servers=None):
    """Cache implementation with OL customized memcache client."""
    servers = servers or []
    client = olmemcache.Client(servers)
    return cache.MemcachedDict(memcache_client=client)
cache.register_cache('memcache', MemcachedDict)
def _process_key(key):
mapping = (
'/l/',
'/languages/',
'/a/',
'/authors/',
'/b/',
'/books/',
'/user/',
'/people/',
)
for old, new in web.group(mapping, 2):
if key.startswith(old):
return new + key[len(old) :]
return key
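# For example (a quick sketch of the legacy-key mapping above):
#
#     _process_key('/b/OL1M')      # -> '/books/OL1M'
#     _process_key('/a/OL1A')      # -> '/authors/OL1A'
#     _process_key('/works/OL1W')  # -> '/works/OL1W' (unchanged)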
def _process_data(data):
if isinstance(data, list):
return [_process_data(d) for d in data]
elif isinstance(data, dict):
if 'key' in data:
data['key'] = _process_key(data['key'])
return {k: _process_data(v) for k, v in data.items()}
else:
return data
def safeint(value, default=0):
"""Convert the value to integer. Returns default, if the conversion fails."""
try:
return int(value)
except Exception:
return default
def fix_table_of_contents(table_of_contents):
"""Some books have bad table_of_contents. This function converts them in to correct format."""
def row(r):
if isinstance(r, str):
level = 0
label = ''
title = web.safeunicode(r)
pagenum = ''
elif 'value' in r:
level = 0
label = ''
title = web.safeunicode(r['value'])
pagenum = ''
elif isinstance(r, dict):
level = safeint(r.get('level', '0'), 0)
label = r.get('label', '')
title = r.get('title', '')
pagenum = r.get('pagenum', '')
else:
return {}
return {"level": level, "label": label, "title": title, "pagenum": pagenum}
d = [row(r) for r in table_of_contents]
return [row for row in d if any(row.values())]
def process_json(key, json_str):
if key is None or json_str is None:
return None
base = key[1:].split('/')[0]
if base in [
'authors',
'books',
'works',
'languages',
'people',
'usergroup',
'permission',
]:
data = json.loads(json_str)
data = _process_data(data)
if base == 'books' and 'table_of_contents' in data:
data['table_of_contents'] = fix_table_of_contents(data['table_of_contents'])
json_str = json.dumps(data)
return json_str
dbstore.process_json = process_json
_Indexer = dbstore.Indexer
re_normalize = re.compile('[^[:alphanum:] ]', re.U)
class OLIndexer(_Indexer): # type: ignore[misc,valid-type]
"""OL custom indexer to index normalized_title etc."""
def compute_index(self, doc):
type = self.get_type(doc)
if type == '/type/edition':
doc = self.process_edition_doc(doc)
return _Indexer.compute_index(self, doc)
def get_type(self, doc):
return doc.get('type', {}).get('key')
def process_edition_doc(self, doc):
"""Process edition doc to add computed fields used for import.
Make the computed field names end with an underscore to avoid conflicting with regular fields.
"""
doc = dict(doc)
title = doc.get('title', '')
doc['normalized_title_'] = self.normalize_edition_title(title)
isbns = doc.get('isbn_10', []) + doc.get('isbn_13', [])
isbns = [normalize_isbn(isbn) for isbn in isbns if normalize_isbn(isbn)]
doc['isbn_'] = self.expand_isbns(isbns)
return doc
def normalize_edition_title(self, title):
if isinstance(title, bytes):
title = title.decode('utf-8', 'ignore')
if not isinstance(title, str):
return ""
norm = strip_accents(title).lower()
norm = norm.replace(' and ', ' ')
if norm.startswith('the '):
norm = norm[4:]
elif norm.startswith('a '):
norm = norm[2:]
return norm.replace(' ', '')[:25]
def expand_isbns(self, isbns):
"""Expands the list of isbns by adding ISBN-10 for ISBN-13 and vice-verse."""
s = set(isbns)
for isbn in isbns:
if len(isbn) == 10:
s.add(isbn_10_to_isbn_13(isbn))
else:
s.add(isbn_13_to_isbn_10(isbn))
return [isbn for isbn in s if isbn is not None]
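# A quick sketch of the computed import fields (illustrative only; constructing
# the indexer directly like this is an assumption):
#
#     indexer = OLIndexer()
#     indexer.normalize_edition_title("The Lord of the Rings")  # -> 'lordoftherings'
#     indexer.expand_isbns(['0140328726'])  # -> the ISBN-10 plus its ISBN-13 form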
| 17,209 | Python | .py | 474 | 28.259494 | 118 | 0.590879 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
372 | filters.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/filters.py |
"""
Filters used to check if a certain statistic should be recorded
"""
import logging
import re
import web
from infogami import config
logger = logging.getLogger("openlibrary.stats_filters")
def all(**params):
"Returns true for all requests"
return True
def url(**params):
logger.debug("Evaluate url '%s'" % web.ctx.path)
return bool(re.search(params["pattern"], web.ctx.path))
def loggedin(**kw):
"""Returns True if any user is logged in."""
# Assuming that presence of cookie is an indication of logged-in user.
# Avoiding validation or calling web.ctx.site.get_user() as they are expensive.
return config.login_cookie_name in web.cookies()
def not_loggedin(**kw):
"""Returns True if no user is logged in."""
return not loggedin()
| 785 | Python | .py | 22 | 32.363636 | 83 | 0.727394 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
373 | processors.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/processors.py |
"""web.py application processors for Open Library.
"""
import re
import web
from openlibrary.accounts import get_current_user
from openlibrary.core import cache
from openlibrary.core.processors import ReadableUrlProcessor
from openlibrary.plugins.openlibrary.home import caching_prethread
from openlibrary.utils import dateutil
from openlibrary.core import helpers as h
urlsafe = h.urlsafe
_safepath = h.urlsafe
class ProfileProcessor:
"""Processor to profile the webpage when ?_profile=true is added to the url."""
def __call__(self, handler):
i = web.input(_method="GET", _profile="")
if i._profile.lower() == "true":
out, result = web.profile(handler)()
if isinstance(out, web.template.TemplateResult):
out.__body__ = (
out.get('__body__', '')
+ '<pre class="profile">'
+ web.websafe(result)
+ '</pre>'
)
return out
elif isinstance(out, str):
return (
out
+ '<br/>'
+ '<pre class="profile">'
+ web.websafe(result)
+ '</pre>'
)
else:
# don't know how to handle this.
return out
else:
return handler()
class CORSProcessor:
"""Processor to handle OPTIONS method to support
Cross Origin Resource Sharing.
"""
def __init__(self, cors_prefixes=None):
self.cors_prefixes = cors_prefixes
def __call__(self, handler):
if self.is_cors_path():
self.add_cors_headers()
if web.ctx.method == "OPTIONS":
raise web.ok("")
else:
return handler()
def is_cors_path(self):
if self.cors_prefixes is None or web.ctx.path.endswith(".json"):
return True
return any(
web.ctx.path.startswith(path_segment) for path_segment in self.cors_prefixes
)
def add_cors_headers(self):
# Allow anyone to access GET and OPTIONS requests
allowed = "GET, OPTIONS"
# unless the path is /account/* or /admin/*
for p in ["/account", "/admin"]:
if web.ctx.path.startswith(p):
allowed = "OPTIONS"
if (
web.ctx.path == "/account/login.json"
and web.input(auth_provider="").auth_provider == "archive"
):
allowed += ", POST"
web.header('Access-Control-Allow-Credentials', 'true')
web.header("Access-Control-Allow-Origin", "https://archive.org")
else:
web.header("Access-Control-Allow-Origin", "*")
web.header("Access-Control-Allow-Method", allowed)
web.header("Access-Control-Max-Age", 3600 * 24) # one day
class PreferenceProcessor:
"""Processor to handle unauthorized patron preference reads"""
def __init__(self):
self.pref_pattern = re.compile(r'^\/people\/([^/]+)\/preferences(.json|.yml)?$')
def __call__(self, handler):
if self.pref_pattern.match(web.ctx.path):
user = get_current_user()
if not user:
# Must be logged in to see preferences
raise web.Unauthorized()
username = web.ctx.path.split('/')[2]
if username != user.get_username() and not user.is_admin():
# Can only view preferences if page owner or admin
raise web.Forbidden()
return handler()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3,656 | Python | .py | 94 | 28.531915 | 88 | 0.564726 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
374 | opds.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/opds.py |
"""
OPDS helper class.
A lightweight version of github.com/internetarchive/bookserver
"""
import lxml.etree as ET
from infogami.infobase.utils import parse_datetime
class OPDS:
xmlns_atom = 'http://www.w3.org/2005/Atom'
xmlns_dcterms = 'http://purl.org/dc/terms/'
xmlns_opds = 'http://opds-spec.org/'
xmlns_rdvocab = 'http://RDVocab.info/elements/'
xmlns_bibo = 'http://purl.org/ontology/bibo/'
xmlns_xsi = 'http://www.w3.org/2001/XMLSchema-instance'
nsmap = {
None: xmlns_atom,
'dcterms': xmlns_dcterms,
'opds': xmlns_opds,
'rdvocab': xmlns_rdvocab,
'bibo': xmlns_bibo,
'xsi': xmlns_xsi,
}
atom = "{%s}" % xmlns_atom
dcterms = "{%s}" % xmlns_dcterms
opdsNS = "{%s}" % xmlns_opds
rdvocab = "{%s}" % xmlns_rdvocab
bibo = "{%s}" % xmlns_bibo
xsi = "{%s}" % xmlns_xsi
fileExtMap = {
'pdf': 'application/pdf',
'epub': 'application/epub+zip',
'mobi': 'application/x-mobipocket-ebook',
}
ebookTypes = (
'application/pdf',
'application/epub+zip',
'application/x-mobipocket-ebook',
)
# create_text_element()
# ___________________________________________________________________________
def create_text_element(self, parent, name, value):
element = ET.SubElement(parent, name)
element.text = value
return element
# add()
# ___________________________________________________________________________
def add(self, name, value, attrs=None):
attrs = attrs or {}
element = self.create_text_element(self.root, name, value)
for a in attrs:
element.attrib[a] = attrs[a]
# add_list()
# ___________________________________________________________________________
def add_list(self, name, values, prefix='', attrs=None):
attrs = attrs or {}
if isinstance(values, (list, tuple)):
for v in values:
self.add(name, prefix + str(v), attrs)
elif values:
self.add(name, prefix + str(values), attrs)
# add_author()
# ___________________________________________________________________________
def add_author(self, name, uri=None):
element = ET.SubElement(self.root, 'author')
self.create_text_element(element, 'name', name)
if uri:
self.create_text_element(element, 'uri', uri)
return element
# create_rel_link()
# ___________________________________________________________________________
def create_rel_link(
self, parent, rel, absurl, type='application/atom+xml', title=None
):
if parent is None:
parent = self.root
element = ET.SubElement(parent, 'link')
element.attrib['rel'] = rel
element.attrib['type'] = type
element.attrib['href'] = absurl
if title:
element.attrib['title'] = title
return element
# to_string()
# ___________________________________________________________________________
def to_string(self):
return ET.tostring(self.root, pretty_print=True)
# create_root()
# ___________________________________________________________________________
def create_root(self, root_name):
# ## TODO: add updated element and uuid element
opds = ET.Element(OPDS.atom + root_name, nsmap=OPDS.nsmap)
return opds
# __init__()
# ___________________________________________________________________________
def __init__(self, root_name="feed"):
self.root = self.create_root(root_name)
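# A minimal usage sketch for the feed builder (the values are illustrative):
#
#     feed = OPDS()
#     feed.add('title', 'Open Library OPDS feed')
#     feed.add_author('Open Library', 'https://openlibrary.org')
#     feed.create_rel_link(None, 'self', 'https://openlibrary.org/feed.opds')
#     xml_bytes = feed.to_string()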
class OPDSEntry(OPDS):
def _add_subelement(self, tagname, **attrs):
"""Adds a sub element with given tagname and attributes.
Ensures all attribute values are xml-safe before setting in the
element. Returns the element added.
"""
element = ET.SubElement(self.root, tagname)
for name, value in attrs.items():
element.attrib[name] = xmlsafe(value)
return element
# add_category()
# ___________________________________________________________________________
def add_category(self, term, label):
return self._add_subelement("category", term=term, label=label)
# add_indirect_acq()
# ___________________________________________________________________________
def add_indirect_acq(self, parent, type):
element = ET.SubElement(parent, self.opdsNS + 'indirectAcquisition')
element.attrib['type'] = type
return element
# add_acquisition_links()
# ___________________________________________________________________________
def add_acquisition_links(self, book, collection):
if not book.ocaid:
return
if 'inlibrary' in collection or 'lendinglibrary' in collection:
available_loans = book.get_available_loans()
loan_types = [loan['resource_type'] for loan in available_loans]
got_epub = 'epub' in loan_types
got_pdf = 'pdf' in loan_types
if got_epub or got_pdf:
link = self.create_rel_link(
None,
'http://opds-spec.org/acquisition/borrow',
'https://openlibrary.org' + book.url('/borrow'),
'text/html',
)
indirect_acq = self.add_indirect_acq(
link, 'application/vnd.adobe.adept+xml'
)
if got_epub:
self.add_indirect_acq(indirect_acq, 'application/epub+zip')
if got_pdf:
self.add_indirect_acq(indirect_acq, 'application/pdf')
elif 'printdisabled' not in collection:
self.create_rel_link(
None,
'http://opds-spec.org/acquisition/open-access',
f'https://archive.org/download/{book.ocaid}/{book.ocaid}.pdf',
'application/pdf',
)
self.create_rel_link(
None,
'http://opds-spec.org/acquisition/open-access',
f'https://archive.org/download/{book.ocaid}/{book.ocaid}.epub',
'application/epub+zip',
)
# add_rel_links()
# ___________________________________________________________________________
def add_rel_links(self, book, work):
links = []
if work:
self.create_rel_link(
None,
'related',
'https://openlibrary.org' + work.key,
'text/html',
'Open Library Work',
)
for name, values in book.get_identifiers().multi_items():
for id in values:
if id.url and name not in [
'oclc_numbers',
'lccn',
'ocaid',
]: # these go in other elements
self.create_rel_link(
None, 'related', id.url, 'text/html', 'View on ' + id.label
)
# __init__()
# ___________________________________________________________________________
def __init__(self, book):
self.root = self.create_root('entry')
bookID = book.key
atomID = 'https://openlibrary.org' + bookID + '.opds'
title = book.title
if book.subtitle:
title += " " + book.subtitle
updated = parse_datetime(book.last_modified).strftime('%Y-%m-%dT%H:%M:%SZ')
work = book.works and book.works[0]
if work:
authors = work.get_authors()
subjects = work.get_subjects()
else:
authors = book.get_authors()
subjects = book.get_subjects()
pages = book.pagination or book.number_of_pages
        # the collection and inlibrary check is copied from databarWork.html
collection = set()
if meta_fields := book.get_ia_meta_fields():
collection = meta_fields.get('collection', [])
contrib = meta_fields.get('contributor')
coverLarge = book.get_cover_url('L')
coverThumb = book.get_cover_url('S')
self.add('id', atomID)
self.create_rel_link(None, 'self', atomID)
self.create_rel_link(
None, 'alternate', 'https://openlibrary.org' + book.url(), 'text/html'
)
self.add('title', title)
self.add('updated', updated)
for a in authors:
self.add_author(a.name, 'https://openlibrary.org' + a.url())
self.add_list(self.dcterms + 'publisher', book.publishers)
self.add_list(self.rdvocab + 'placeOfPublication', book.publish_places)
self.add_list(self.dcterms + 'issued', book.publish_date)
self.add_list(self.dcterms + 'extent', pages)
self.add_list(self.rdvocab + 'dimensions', book.physical_dimensions)
self.add_list(self.bibo + 'edition', book.edition_name)
for subject in subjects:
self.add_category(
'/subjects/' + subject.lower().replace(' ', '_').replace(',', ''),
subject,
)
self.add_list('summary', book.description)
self.add_list(self.rdvocab + 'note', book.notes)
for lang in book.languages:
self.add_list(self.dcterms + 'language', lang.code)
self.add_list(
self.dcterms + 'identifier',
book.key,
'https://openlibrary.org',
{self.xsi + 'type': 'dcterms:URI'},
)
self.add_list(
self.dcterms + 'identifier',
book.ocaid,
'https://archive.org/details/',
{self.xsi + 'type': 'dcterms:URI'},
)
self.add_list(
self.dcterms + 'identifier',
book.isbn_10,
'urn:ISBN:',
{self.xsi + 'type': 'dcterms:ISBN'},
)
self.add_list(
self.dcterms + 'identifier',
book.isbn_13,
'urn:ISBN:',
{self.xsi + 'type': 'dcterms:ISBN'},
)
self.add_list(self.bibo + 'oclcnum', book.oclc_numbers)
self.add_list(self.bibo + 'lccn', book.lccn)
if coverLarge:
self.create_rel_link(
None, 'http://opds-spec.org/image', coverLarge, 'image/jpeg'
)
if coverThumb:
self.create_rel_link(
None, 'http://opds-spec.org/image/thumbnail', coverThumb, 'image/jpeg'
)
self.add_acquisition_links(book, collection)
self.add_rel_links(book, work)
def xmlsafe(s):
"""Removes all the XML-unsafe characters from given string.
XML cannot include certain characters mainly control ones with
byte value below 32. This function strips them all.
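    For example, control characters (such as form feed) are removed:
    >>> xmlsafe('Hello' + chr(12) + 'World')
    'HelloWorld'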
"""
if isinstance(s, bytes):
s = s.decode('utf-8')
    # drop the first 32 ASCII code points (control characters), which are not allowed in XML
return "".join(c for c in s if ord(c) >= 0x20)
| 11,027 | Python | .py | 268 | 31.074627 | 86 | 0.504668 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
375 | authors.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/authors.py | from infogami.utils import delegate
from infogami.utils.view import render_template
def setup():
pass
class author(delegate.page):
path = "/authors"
def GET(self):
return render_template("authors/index.html")
| 234 | Python | .py | 8 | 25.125 | 52 | 0.742081 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
376 | bulk_tag.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/bulk_tag.py | from infogami.utils import delegate
from openlibrary.core import stats
from openlibrary.utils import uniq
import web
import json
class bulk_tag_works(delegate.page):
path = "/tags/bulk_tag_works"
def POST(self):
i = web.input(work_ids='', tags_to_add='', tags_to_remove='')
works = i.work_ids.split(',')
tags_to_add = json.loads(i.tags_to_add or '{}')
tags_to_remove = json.loads(i.tags_to_remove or '{}')
docs_to_update = []
        # Total number of tags added across all works (for stats below):
        docs_adding = 0
        # Total number of tags removed across all works (for stats below):
        docs_removing = 0
for work in works:
w = web.ctx.site.get(f"/works/{work}")
current_subjects = {
# XXX : Should an empty list be the default for these?
'subjects': uniq(w.get('subjects', '')),
'subject_people': uniq(w.get('subject_people', '')),
'subject_places': uniq(w.get('subject_places', '')),
'subject_times': uniq(w.get('subject_times', '')),
}
for subject_type, add_list in tags_to_add.items():
if add_list:
orig_len = len(current_subjects[subject_type])
current_subjects[subject_type] = uniq( # dedupe incoming subjects
current_subjects[subject_type] + add_list
)
docs_adding += len(current_subjects[subject_type]) - orig_len
w[subject_type] = current_subjects[subject_type]
for subject_type, remove_list in tags_to_remove.items():
if remove_list:
orig_len = len(current_subjects[subject_type])
current_subjects[subject_type] = [
item
for item in current_subjects[subject_type]
if item not in remove_list
]
docs_removing += orig_len - len(current_subjects[subject_type])
w[subject_type] = current_subjects[subject_type]
docs_to_update.append(
w.dict()
) # need to convert class to raw dict in order for save_many to work
web.ctx.site.save_many(docs_to_update, comment="Bulk tagging works")
def response(msg, status="success"):
return delegate.RawText(
json.dumps({status: msg}), content_type="application/json"
)
# Number of times the handler was hit:
stats.increment('ol.tags.bulk_update')
stats.increment('ol.tags.bulk_update.add', n=docs_adding)
stats.increment('ol.tags.bulk_update.remove', n=docs_removing)
return response('Tagged works successfully')
def setup():
pass
| 2,808 | Python | .py | 59 | 34.525424 | 86 | 0.559093 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
377 | home.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/home.py | """Controller for home page.
"""
import random
import web
import logging
from infogami.utils import delegate
from infogami.utils.view import render_template, public
from infogami.infobase.client import storify
from infogami import config
from openlibrary.core import admin, cache, ia, lending
from openlibrary.i18n import gettext as _
from openlibrary.utils import dateutil
from openlibrary.plugins.upstream.utils import get_blog_feeds, get_coverstore_public_url
from openlibrary.plugins.worksearch import search, subjects
logger = logging.getLogger("openlibrary.home")
CAROUSELS_PRESETS = {
'preset:thrillers': (
'(creator:"Clancy, Tom" OR creator:"King, Stephen" OR creator:"Clive Cussler" '
'OR creator:("Cussler, Clive") OR creator:("Dean Koontz") OR creator:("Koontz, '
'Dean") OR creator:("Higgins, Jack")) AND !publisher:"Pleasantville, N.Y. : '
'Reader\'s Digest Association" AND languageSorter:"English"'
),
'preset:comics': (
'(subject:"comics" OR creator:("Gary Larson") OR creator:("Larson, Gary") '
'OR creator:("Charles M Schulz") OR creator:("Schulz, Charles M") OR '
'creator:("Jim Davis") OR creator:("Davis, Jim") OR creator:("Bill Watterson")'
'OR creator:("Watterson, Bill") OR creator:("Lee, Stan"))'
),
'preset:authorsalliance_mitpress': (
'(openlibrary_subject:(authorsalliance) OR collection:(mitpress) OR '
'publisher:(MIT Press) OR openlibrary_subject:(mitpress))'
),
}
def get_homepage():
try:
stats = admin.get_stats()
except Exception:
logger.error("Error in getting stats", exc_info=True)
stats = None
blog_posts = get_blog_feeds()
# render template should be setting ctx.cssfile
# but because get_homepage is cached, this doesn't happen
    # during subsequent calls
page = render_template("home/index", stats=stats, blog_posts=blog_posts)
# Convert to a dict so it can be cached
return dict(page)
def get_cached_homepage():
five_minutes = 5 * dateutil.MINUTE_SECS
lang = web.ctx.lang
pd = web.cookies().get('pd', False)
key = "home.homepage." + lang
if pd:
key += '.pd'
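    # e.g. "home.homepage.en", or "home.homepage.en.pd" when the 'pd' cookie is set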
mc = cache.memcache_memoize(
get_homepage, key, timeout=five_minutes, prethread=caching_prethread()
)
page = mc()
if not page:
mc(_cache='delete')
return page
# Because of caching, memcache will call `get_homepage` on another thread! So we
# need a way to carry some information to that computation on the other thread.
# We do that by using a python closure. The outer function is executed on the main
# thread, so all the web.* stuff is correct. The inner function is executed on the
# other thread, so all the web.* stuff will be dummy.
def caching_prethread():
# web.ctx.lang is undefined on the new thread, so need to transfer it over
lang = web.ctx.lang
def main():
# Leaving this in since this is a bit strange, but you can see it clearly
# in action with this debug line:
# web.debug(f'XXXXXXXXXXX web.ctx.lang={web.ctx.get("lang")}; {lang=}')
delegate.fakeload()
web.ctx.lang = lang
return main
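# Usage note (see get_cached_homepage above): passing prethread=caching_prethread()
# to cache.memcache_memoize makes the memoizer call `main` on its background thread
# before computing the value, so web.ctx.lang is populated there.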
class home(delegate.page):
path = "/"
def GET(self):
cached_homepage = get_cached_homepage()
# when homepage is cached, home/index.html template
# doesn't run ctx.setdefault to set the cssfile so we must do so here:
web.template.Template.globals['ctx']['cssfile'] = 'home'
return web.template.TemplateResult(cached_homepage)
class random_book(delegate.page):
path = "/random"
def GET(self):
solr = search.get_solr()
key = solr.select(
'type:edition AND ebook_access:[borrowable TO *]',
fields=['key'],
rows=1,
sort=f'random_{random.random()} desc',
)['docs'][0]['key']
raise web.seeother(key)
def get_ia_carousel_books(query=None, subject=None, sorts=None, limit=None):
if 'env' not in web.ctx:
delegate.fakeload()
elif query in CAROUSELS_PRESETS:
query = CAROUSELS_PRESETS[query]
limit = limit or lending.DEFAULT_IA_RESULTS
books = lending.get_available(
limit=limit,
subject=subject,
sorts=sorts,
query=query,
)
formatted_books = [
format_book_data(book, False) for book in books if book != 'error'
]
return formatted_books
def get_featured_subjects():
# web.ctx must be initialized as it won't be available to the background thread.
if 'env' not in web.ctx:
delegate.fakeload()
FEATURED_SUBJECTS = [
{'key': '/subjects/art', 'presentable_name': _('Art')},
{'key': '/subjects/science_fiction', 'presentable_name': _('Science Fiction')},
{'key': '/subjects/fantasy', 'presentable_name': _('Fantasy')},
{'key': '/subjects/biographies', 'presentable_name': _('Biographies')},
{'key': '/subjects/recipes', 'presentable_name': _('Recipes')},
{'key': '/subjects/romance', 'presentable_name': _('Romance')},
{'key': '/subjects/textbooks', 'presentable_name': _('Textbooks')},
{'key': '/subjects/children', 'presentable_name': _('Children')},
{'key': '/subjects/history', 'presentable_name': _('History')},
{'key': '/subjects/medicine', 'presentable_name': _('Medicine')},
{'key': '/subjects/religion', 'presentable_name': _('Religion')},
{
'key': '/subjects/mystery_and_detective_stories',
'presentable_name': _('Mystery and Detective Stories'),
},
{'key': '/subjects/plays', 'presentable_name': _('Plays')},
{'key': '/subjects/music', 'presentable_name': _('Music')},
{'key': '/subjects/science', 'presentable_name': _('Science')},
]
return [
{**subject, **(subjects.get_subject(subject['key'], limit=0) or {})}
for subject in FEATURED_SUBJECTS
]
@public
def get_cached_featured_subjects():
return cache.memcache_memoize(
get_featured_subjects,
f"home.featured_subjects.{web.ctx.lang}",
timeout=dateutil.HOUR_SECS,
prethread=caching_prethread(),
)()
@public
def generic_carousel(
query=None,
subject=None,
sorts=None,
limit=None,
timeout=None,
):
memcache_key = 'home.ia_carousel_books'
cached_ia_carousel_books = cache.memcache_memoize(
get_ia_carousel_books,
memcache_key,
timeout=timeout or cache.DEFAULT_CACHE_LIFETIME,
)
books = cached_ia_carousel_books(
query=query,
subject=subject,
sorts=sorts,
limit=limit,
)
if not books:
books = cached_ia_carousel_books.update(
query=query,
subject=subject,
sorts=sorts,
limit=limit,
)[0]
return storify(books) if books else books
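# Illustrative template-side call (example values only):
#     generic_carousel(query='preset:thrillers', limit=18)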
def format_list_editions(key):
"""Formats the editions of a list suitable for display in carousel."""
if 'env' not in web.ctx:
delegate.fakeload()
seed_list = web.ctx.site.get(key)
if not seed_list:
return []
editions = {}
for seed in seed_list.seeds:
if not isinstance(seed, str):
if seed.type.key == "/type/edition":
editions[seed.key] = seed
else:
try:
e = pick_best_edition(seed)
except StopIteration:
continue
editions[e.key] = e
return [format_book_data(e) for e in editions.values()]
# cache the results of format_list_editions in memcache for 5 minutes
format_list_editions = cache.memcache_memoize(
format_list_editions, "home.format_list_editions", timeout=5 * 60
)
def pick_best_edition(work):
return next(e for e in work.editions if e.ocaid)
def format_work_data(work):
d = dict(work)
key = work.get('key', '')
# New solr stores the key as /works/OLxxxW
if not key.startswith("/works/"):
key = "/works/" + key
d['url'] = key
d['title'] = work.get('title', '')
if 'author_key' in work and 'author_name' in work:
d['authors'] = [
{"key": key, "name": name}
for key, name in zip(work['author_key'], work['author_name'])
]
if 'cover_edition_key' in work:
coverstore_url = get_coverstore_public_url()
d['cover_url'] = f"{coverstore_url}/b/olid/{work['cover_edition_key']}-M.jpg"
d['read_url'] = "//archive.org/stream/" + work['ia'][0]
return d
def format_book_data(book, fetch_availability=True):
d = web.storage()
d.key = book.get('key')
d.url = book.url()
d.title = book.title or None
d.ocaid = book.get("ocaid")
d.eligibility = book.get("eligibility", {})
d.availability = book.get('availability', {})
def get_authors(doc):
return [web.storage(key=a.key, name=a.name or None) for a in doc.get_authors()]
work = book.works and book.works[0]
d.authors = get_authors(work if work else book)
d.work_key = work.key if work else book.key
cover = work.get_cover() if work and work.get_cover() else book.get_cover()
if cover:
d.cover_url = cover.url("M")
elif d.ocaid:
d.cover_url = 'https://archive.org/services/img/%s' % d.ocaid
if fetch_availability and d.ocaid:
collections = ia.get_metadata(d.ocaid).get('collection', [])
if 'lendinglibrary' in collections or 'inlibrary' in collections:
d.borrow_url = book.url("/borrow")
else:
d.read_url = book.url("/borrow")
return d
def setup():
pass
| 9,678 | Python | .py | 244 | 32.897541 | 88 | 0.63424 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
378 | sentry.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/sentry.py | import infogami
from infogami.utils import delegate
from openlibrary.utils.sentry import Sentry, InfogamiSentryProcessor
sentry: Sentry | None = None
def setup():
global sentry
sentry = Sentry(getattr(infogami.config, 'sentry', {}))
if sentry.enabled:
sentry.init()
delegate.add_exception_hook(lambda: sentry.capture_exception_webpy())
delegate.app.add_processor(InfogamiSentryProcessor(delegate.app))
| 442 | Python | .py | 11 | 35.545455 | 77 | 0.758782 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
379 | code.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/code.py | """
Open Library Plugin.
"""
from urllib.parse import parse_qs, urlparse, urlencode, urlunparse
import requests
import web
import json
import os
import socket
import random
import datetime
import logging
from time import time
import math
import infogami
from openlibrary.core import db
from openlibrary.core.batch_imports import (
batch_import,
)
from openlibrary.i18n import gettext as _
# make sure infogami.config.features is set
if not hasattr(infogami.config, 'features'):
infogami.config.features = [] # type: ignore[attr-defined]
from infogami.utils.app import metapage
from infogami.utils import delegate
from openlibrary.utils import dateutil
from infogami.utils.view import (
render,
render_template,
public,
safeint,
add_flash_message,
)
from infogami.infobase import client
from infogami.core.db import ValidationException
from openlibrary.core import cache
from openlibrary.core.vendors import create_edition_from_amazon_metadata
from openlibrary.utils.isbn import isbn_13_to_isbn_10, isbn_10_to_isbn_13, canonical
from openlibrary.core.models import Edition
from openlibrary.core.lending import get_availability
from openlibrary.core.fulltext import fulltext_search
import openlibrary.core.stats
from openlibrary.plugins.openlibrary.home import format_work_data
from openlibrary.plugins.openlibrary.stats import increment_error_count
from openlibrary.plugins.openlibrary import processors
from openlibrary.plugins.worksearch.code import do_search
delegate.app.add_processor(processors.ReadableUrlProcessor())
delegate.app.add_processor(processors.ProfileProcessor())
delegate.app.add_processor(processors.CORSProcessor(cors_prefixes={'/api/'}))
delegate.app.add_processor(processors.PreferenceProcessor())
try:
from infogami.plugins.api import code as api
except:
api = None # type: ignore[assignment]
# http header extension for OL API
infogami.config.http_ext_header_uri = 'http://openlibrary.org/dev/docs/api' # type: ignore[attr-defined]
# setup special connection with caching support
from openlibrary.plugins.openlibrary import connection
client._connection_types['ol'] = connection.OLConnection # type: ignore[assignment]
infogami.config.infobase_parameters = {'type': 'ol'}
# set up infobase schema. required when running in standalone mode.
from openlibrary.core import schema
schema.register_schema()
from openlibrary.core import models
models.register_models()
models.register_types()
import openlibrary.core.lists.model as list_models
list_models.register_models()
# Remove movefiles install hook. openlibrary manages its own files.
infogami._install_hooks = [
h for h in infogami._install_hooks if h.__name__ != 'movefiles'
]
from openlibrary.plugins.openlibrary import lists, bulk_tag
lists.setup()
bulk_tag.setup()
logger = logging.getLogger('openlibrary')
class hooks(client.hook):
def before_new_version(self, page):
user = web.ctx.site.get_user()
account = user and user.get_account()
if account and account.is_blocked():
raise ValidationException(
'Your account has been suspended. You are not allowed to make any edits.'
)
if page.key.startswith('/a/') or page.key.startswith('/authors/'):
if page.type.key == '/type/author':
return
books = web.ctx.site.things({'type': '/type/edition', 'authors': page.key})
books = books or web.ctx.site.things(
{'type': '/type/work', 'authors': {'author': {'key': page.key}}}
)
if page.type.key == '/type/delete' and books:
raise ValidationException(
'This Author page cannot be deleted as %d record(s) still reference this id. Please remove or reassign before trying again. Referenced by: %s'
% (len(books), books)
)
elif page.type.key != '/type/author' and books:
raise ValidationException(
'Changing type of author pages is not allowed.'
)
@infogami.action
def sampledump():
"""Creates a dump of objects from OL database for creating a sample database."""
def expand_keys(keys):
def f(k):
if isinstance(k, dict):
return web.ctx.site.things(k)
elif k.endswith('*'):
return web.ctx.site.things({'key~': k})
else:
return [k]
result = []
for k in keys:
d = f(k)
result += d
return result
def get_references(data, result=None):
if result is None:
result = []
if isinstance(data, dict):
if 'key' in data:
result.append(data['key'])
else:
get_references(data.values(), result)
elif isinstance(data, list):
for v in data:
get_references(v, result)
return result
visiting = {}
visited = set()
def visit(key):
if key in visited or key.startswith('/type/'):
return
elif key in visiting:
# This is a case of circular-dependency. Add a stub object to break it.
print(json.dumps({'key': key, 'type': visiting[key]['type']}))
visited.add(key)
return
thing = web.ctx.site.get(key)
if not thing:
return
d = thing.dict()
d.pop('permission', None)
d.pop('child_permission', None)
d.pop('table_of_contents', None)
visiting[key] = d
for ref in get_references(d.values()):
visit(ref)
visited.add(key)
print(json.dumps(d))
keys = [
'/scan_record',
'/scanning_center',
{'type': '/type/scan_record', 'limit': 10},
]
keys = expand_keys(keys) + ['/b/OL%dM' % i for i in range(1, 100)]
visited = set()
for k in keys:
visit(k)
@infogami.action
def sampleload(filename='sampledump.txt.gz'):
if filename.endswith('.gz'):
import gzip
f = gzip.open(filename)
else:
f = open(filename)
queries = [json.loads(line) for line in f]
print(web.ctx.site.save_many(queries))
class routes(delegate.page):
path = '/developers/routes'
def GET(self):
class ModulesToStr(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, metapage):
return obj.__module__ + '.' + obj.__name__
return super().default(obj)
from openlibrary import code
return '<pre>%s</pre>' % json.dumps(
code.delegate.pages,
sort_keys=True,
cls=ModulesToStr,
indent=4,
separators=(',', ': '),
)
class team(delegate.page):
path = '/about/team'
def GET(self):
return render_template("about/index.html")
class addbook(delegate.page):
path = '/addbook'
def GET(self):
d = {'type': web.ctx.site.get('/type/edition')}
i = web.input()
author = i.get('author') and web.ctx.site.get(i.author)
if author:
d['authors'] = [author]
page = web.ctx.site.new("", d)
return render.edit(page, self.path, 'Add Book')
def POST(self):
from infogami.core.code import edit
key = web.ctx.site.new_key('/type/edition')
web.ctx.path = key
return edit().POST(key)
class widget(delegate.page):
path = r'(/works/OL\d+W|/books/OL\d+M)/widget'
def GET(self, key: str): # type: ignore[override]
olid = key.split('/')[-1]
item = web.ctx.site.get(key)
is_work = key.startswith('/works/')
item['olid'] = olid
item['availability'] = get_availability(
'openlibrary_work' if is_work else 'openlibrary_edition',
[olid],
).get(olid)
item['authors'] = [
web.storage(key=a.key, name=a.name or None) for a in item.get_authors()
]
return delegate.RawText(
render_template('widget', format_work_data(item) if is_work else item),
content_type='text/html',
)
class addauthor(delegate.page):
path = '/addauthor'
def POST(self):
i = web.input('name')
if len(i.name) < 2:
return web.badrequest()
key = web.ctx.site.new_key('/type/author')
web.ctx.path = key
web.ctx.site.save(
{'key': key, 'name': i.name, 'type': {'key': '/type/author'}},
comment='New Author',
)
raise web.HTTPError('200 OK', {}, key)
class clonebook(delegate.page):
def GET(self):
from infogami.core.code import edit
i = web.input('key')
page = web.ctx.site.get(i.key)
if page is None:
raise web.seeother(i.key)
else:
d = page._getdata()
for k in ['isbn_10', 'isbn_13', 'lccn', 'oclc']:
d.pop(k, None)
return render.edit(page, '/addbook', 'Clone Book')
class search(delegate.page):
path = '/suggest/search'
def GET(self):
i = web.input(prefix='')
if len(i.prefix) > 2:
q = {
'type': '/type/author',
'name~': i.prefix + '*',
'sort': 'name',
'limit': 5,
}
things = web.ctx.site.things(q)
things = [web.ctx.site.get(key) for key in things]
result = [
{
'type': [{'id': t.key, 'name': t.key}],
'name': web.safestr(t.name),
'guid': t.key,
'id': t.key,
'article': {'id': t.key},
}
for t in things
]
else:
result = []
callback = i.pop('callback', None)
d = {
'status': '200 OK',
'query': dict(i, escape='html'),
'code': '/api/status/ok',
'result': result,
}
if callback:
data = f'{callback}({json.dumps(d)})'
else:
data = json.dumps(d)
raise web.HTTPError('200 OK', {}, data)
class blurb(delegate.page):
path = '/suggest/blurb/(.*)'
def GET(self, path):
i = web.input()
author = web.ctx.site.get('/' + path)
body = ''
if author.birth_date or author.death_date:
body = f'{author.birth_date} - {author.death_date}'
else:
body = '%s' % author.date
body += '<br/>'
if author.bio:
body += web.safestr(author.bio)
result = {'body': body, 'media_type': 'text/html', 'text_encoding': 'utf-8'}
d = {'status': '200 OK', 'code': '/api/status/ok', 'result': result}
if callback := i.pop('callback', None):
data = f'{callback}({json.dumps(d)})'
else:
data = json.dumps(d)
raise web.HTTPError('200 OK', {}, data)
class thumbnail(delegate.page):
path = '/suggest/thumbnail'
@public
def get_property_type(type, name):
for p in type.properties:
if p.name == name:
return p.expected_type
return web.ctx.site.get('/type/string')
def save(filename, text):
root = os.path.dirname(__file__)
path = root + filename
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
f = open(path, 'w')
f.write(text)
f.close()
def change_ext(filename, ext):
filename, _ = os.path.splitext(filename)
if ext:
filename = filename + ext
return filename
def get_pages(type, processor):
pages = web.ctx.site.things({'type': type})
for p in pages:
processor(web.ctx.site.get(p))
class robotstxt(delegate.page):
path = '/robots.txt'
def GET(self):
web.header('Content-Type', 'text/plain')
is_dev = 'dev' in infogami.config.features or web.ctx.host != 'openlibrary.org'
robots_file = 'norobots.txt' if is_dev else 'robots.txt'
return web.ok(open(f'static/{robots_file}').read())
@web.memoize
def fetch_ia_js(filename: str) -> str:
return requests.get(f'https://archive.org/includes/{filename}').text
class ia_js_cdn(delegate.page):
path = r'/cdn/archive.org/(donate\.js|analytics\.js)'
def GET(self, filename):
web.header('Content-Type', 'text/javascript')
web.header("Cache-Control", "max-age=%d" % (24 * 3600))
return web.ok(fetch_ia_js(filename))
class serviceworker(delegate.page):
path = '/sw.js'
def GET(self):
web.header('Content-Type', 'text/javascript')
return web.ok(open('static/build/sw.js').read())
class assetlinks(delegate.page):
path = '/.well-known/assetlinks'
def GET(self):
web.header('Content-Type', 'application/json')
return web.ok(open('static/.well-known/assetlinks.json').read())
class opensearchxml(delegate.page):
path = '/opensearch.xml'
def GET(self):
web.header('Content-Type', 'text/plain')
return web.ok(open('static/opensearch.xml').read())
class health(delegate.page):
path = '/health'
def GET(self):
web.header('Content-Type', 'text/plain')
return web.ok('OK')
def remove_high_priority(query: str) -> str:
"""
Remove `high_priority=true` and `high_priority=false` from query parameters,
as the API expects to pass URL parameters through to another query, and
these may interfere with that query.
>>> remove_high_priority('high_priority=true&v=1')
'v=1'
"""
query_params = parse_qs(query)
query_params.pop("high_priority", None)
new_query = urlencode(query_params, doseq=True)
return new_query
class batch_imports(delegate.page):
"""
The batch import endpoint. Expects a JSONL file POSTed with multipart/form-data.
"""
path = '/import/batch/new'
def GET(self):
return render_template("batch_import.html", batch_result=None)
def POST(self):
user_key = delegate.context.user and delegate.context.user.key
import_status = (
"pending"
if user_key in _get_members_of_group("/usergroup/admin")
else "needs_review"
)
# Get the upload from web.py. See the template for the <form> used.
batch_result = None
form_data = web.input()
if form_data.get("batchImportFile"):
batch_result = batch_import(
form_data['batchImportFile'], import_status=import_status
)
elif form_data.get("batchImportText"):
batch_result = batch_import(
form_data['batchImportText'].encode("utf-8"),
import_status=import_status,
)
else:
add_flash_message(
'error',
'Either attach a JSONL file or copy/paste JSONL into the text area.',
)
return render_template("batch_import.html", batch_result=batch_result)
class BatchImportView(delegate.page):
path = r'/import/batch/(\d+)'
def GET(self, batch_id):
i = web.input(page=1, limit=10, sort='added_time asc')
page = int(i.page)
limit = int(i.limit)
sort = i.sort
valid_sort_fields = ['added_time', 'import_time', 'status']
sort_field, sort_order = sort.split()
if sort_field not in valid_sort_fields or sort_order not in ['asc', 'desc']:
sort_field = 'added_time'
sort_order = 'asc'
offset = (page - 1) * limit
batch = db.select('import_batch', where='id=$batch_id', vars=locals())[0]
total_rows = db.query(
'SELECT COUNT(*) AS count FROM import_item WHERE batch_id=$batch_id',
vars=locals(),
)[0].count
rows = db.select(
'import_item',
where='batch_id=$batch_id',
order=f'{sort_field} {sort_order}',
limit=limit,
offset=offset,
vars=locals(),
)
status_counts = db.query(
'SELECT status, COUNT(*) AS count FROM import_item WHERE batch_id=$batch_id GROUP BY status',
vars=locals(),
)
return render_template(
'batch_import_view.html',
batch=batch,
rows=rows,
total_rows=total_rows,
page=page,
limit=limit,
sort=sort,
status_counts=status_counts,
)
class BatchImportApprove(delegate.page):
"""
Approve `batch_id`, with a `status` of `needs_review`, for import.
Making a GET as an admin to this endpoint will change a batch's status from
`needs_review` to `pending`.
"""
path = r'/import/batch/approve/(\d+)'
def GET(self, batch_id):
user_key = delegate.context.user and delegate.context.user.key
if user_key not in _get_members_of_group("/usergroup/admin"):
raise Forbidden('Permission Denied.')
db.query(
"""
UPDATE import_item
SET status = 'pending'
            WHERE batch_id = $batch_id AND status = 'needs_review';
""",
            vars={'batch_id': batch_id},
)
return web.found(f"/import/batch/{batch_id}")
class BatchImportPendingView(delegate.page):
"""
Endpoint for viewing `needs_review` batch imports.
"""
path = r"/import/batch/pending"
def GET(self):
i = web.input(page=1, limit=10, sort='added_time asc')
page = int(i.page)
limit = int(i.limit)
sort = i.sort
valid_sort_fields = ['added_time', 'import_time', 'status']
sort_field, sort_order = sort.split()
if sort_field not in valid_sort_fields or sort_order not in ['asc', 'desc']:
sort_field = 'added_time'
sort_order = 'asc'
offset = (page - 1) * limit
rows = db.query(
"""
SELECT batch_id, MIN(status) AS status, MIN(comments) AS comments, MIN(added_time) AS added_time, MAX(submitter) AS submitter
FROM import_item
WHERE status = 'needs_review'
GROUP BY batch_id;
""",
vars=locals(),
)
return render_template(
"batch_import_pending_view",
rows=rows,
page=page,
limit=limit,
)
class isbn_lookup(delegate.page):
path = r'/(?:isbn|ISBN)/(.{10,})'
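    # e.g. /isbn/<ISBN-10 or ISBN-13> (optionally with .json or query params)
    # redirects to the matching /books/OL...M record when one can be found (illustrative).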
def GET(self, isbn: str):
input = web.input(high_priority=False)
isbn = isbn if isbn.upper().startswith("B") else canonical(isbn)
high_priority = input.get("high_priority") == "true"
if "high_priority" in web.ctx.env.get('QUERY_STRING'):
web.ctx.env['QUERY_STRING'] = remove_high_priority(
web.ctx.env.get('QUERY_STRING')
)
# Preserve the url type (e.g. `.json`) and query params
ext = ''
if web.ctx.encoding and web.ctx.path.endswith('.' + web.ctx.encoding):
ext = '.' + web.ctx.encoding
if web.ctx.env.get('QUERY_STRING'):
ext += '?' + web.ctx.env['QUERY_STRING']
try:
if ed := Edition.from_isbn(isbn_or_asin=isbn, high_priority=high_priority):
return web.found(ed.key + ext)
except Exception as e:
logger.error(e)
return repr(e)
web.ctx.status = '404 Not Found'
return render.notfound(web.ctx.path, create=False)
class bookpage(delegate.page):
"""
Load an edition bookpage by identifier: isbn, oclc, lccn, or ia (ocaid).
    Otherwise, return a 404.
"""
path = r'/(oclc|lccn|ia|OCLC|LCCN|IA)/([^/]*)(/.*)?'
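    # e.g. /oclc/<number>, /lccn/<number> or /ia/<ocaid> redirects to the matching
    # /books/OL...M edition when one exists (illustrative; see GET below).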
def GET(self, key, value, suffix=''):
key = key.lower()
if key == 'oclc':
key = 'oclc_numbers'
elif key == 'ia':
key = 'ocaid'
if key != 'ocaid': # example: MN41558ucmf_6
value = value.replace('_', ' ')
if web.ctx.encoding and web.ctx.path.endswith('.' + web.ctx.encoding):
ext = '.' + web.ctx.encoding
else:
ext = ''
if web.ctx.env.get('QUERY_STRING'):
ext += '?' + web.ctx.env['QUERY_STRING']
q = {'type': '/type/edition', key: value}
result = web.ctx.site.things(q)
if result:
return web.found(result[0] + ext)
elif key == 'ocaid':
# Try a range of ocaid alternatives:
ocaid_alternatives = [
{'type': '/type/edition', 'source_records': 'ia:' + value},
{'type': '/type/volume', 'ia_id': value},
]
for q in ocaid_alternatives:
result = web.ctx.site.things(q)
if result:
return web.found(result[0] + ext)
# Perform import, if possible
from openlibrary.plugins.importapi.code import ia_importapi, BookImportError
from openlibrary import accounts
with accounts.RunAs('ImportBot'):
try:
ia_importapi.ia_import(value, require_marc=True)
except BookImportError:
logger.exception('Unable to import ia record')
            # Go to the record created, or to the dummy ia-wrapper record
return web.found('/books/ia:' + value + ext)
web.ctx.status = '404 Not Found'
return render.notfound(web.ctx.path, create=False)
delegate.media_types['application/rdf+xml'] = 'rdf'
class rdf(delegate.mode):
name = 'view'
encoding = 'rdf'
def GET(self, key):
page = web.ctx.site.get(key)
if not page:
raise web.notfound('')
else:
from infogami.utils import template
try:
result = template.typetemplate('rdf')(page)
except:
raise web.notfound('')
else:
return delegate.RawText(
result, content_type='application/rdf+xml; charset=utf-8'
)
delegate.media_types[' application/atom+xml;profile=opds'] = 'opds'
class opds(delegate.mode):
name = 'view'
encoding = 'opds'
def GET(self, key):
page = web.ctx.site.get(key)
if not page:
raise web.notfound('')
else:
from openlibrary.plugins.openlibrary import opds
try:
result = opds.OPDSEntry(page).to_string()
except:
raise web.notfound('')
else:
return delegate.RawText(
result, content_type=' application/atom+xml;profile=opds'
)
delegate.media_types['application/marcxml+xml'] = 'marcxml'
class marcxml(delegate.mode):
name = 'view'
encoding = 'marcxml'
def GET(self, key):
page = web.ctx.site.get(key)
if page is None or page.type.key != '/type/edition':
raise web.notfound('')
else:
from infogami.utils import template
try:
result = template.typetemplate('marcxml')(page)
except:
raise web.notfound('')
else:
return delegate.RawText(
result, content_type='application/marcxml+xml; charset=utf-8'
)
delegate.media_types['text/x-yaml'] = 'yml'
class _yaml(delegate.mode):
name = 'view'
encoding = 'yml'
def GET(self, key):
d = self.get_data(key)
if web.input(text='false').text.lower() == 'true':
web.header('Content-Type', 'text/plain; charset=utf-8')
else:
web.header('Content-Type', 'text/x-yaml; charset=utf-8')
raise web.ok(self.dump(d))
def get_data(self, key):
i = web.input(v=None)
v = safeint(i.v, None)
data = {'key': key, 'revision': v}
try:
d = api.request('/get', data=data)
except client.ClientException as e:
if e.json:
msg = self.dump(json.loads(e.json))
else:
msg = str(e)
raise web.HTTPError(e.status, data=msg)
return json.loads(d)
def dump(self, d):
import yaml
return yaml.safe_dump(d, indent=4, allow_unicode=True, default_flow_style=False)
def load(self, data):
import yaml
return yaml.safe_load(data)
class _yaml_edit(_yaml):
name = 'edit'
encoding = 'yml'
def is_admin(self):
u = delegate.context.user
return u and (u.is_admin() or u.is_super_librarian())
def GET(self, key):
# only allow admin users to edit yaml
if not self.is_admin():
return render.permission_denied(key, 'Permission Denied')
try:
d = self.get_data(key)
except web.HTTPError as e:
if web.ctx.status.lower() == '404 not found':
d = {'key': key}
else:
raise
return render.edit_yaml(key, self.dump(d))
def POST(self, key):
# only allow admin users to edit yaml
if not self.is_admin():
return render.permission_denied(key, 'Permission Denied')
i = web.input(body='', _comment=None)
if '_save' in i:
d = self.load(i.body)
p = web.ctx.site.new(key, d)
try:
p._save(i._comment)
except (client.ClientException, ValidationException) as e:
add_flash_message('error', str(e))
return render.edit_yaml(key, i.body)
raise web.seeother(key + '.yml')
elif '_preview' in i:
add_flash_message('Preview not supported')
return render.edit_yaml(key, i.body)
else:
add_flash_message('unknown action')
return render.edit_yaml(key, i.body)
def _get_user_root():
user_root = infogami.config.get('infobase', {}).get('user_root', '/user')
return web.rstrips(user_root, '/')
def _get_bots():
bots = web.ctx.site.store.values(type='account', name='bot', value='true')
user_root = _get_user_root()
return [user_root + '/' + account['username'] for account in bots]
def _get_members_of_group(group_key):
"""Returns keys of all members of the group identifier by group_key."""
usergroup = web.ctx.site.get(group_key) or {}
return [m.key for m in usergroup.get('members', [])]
def can_write():
"""
    Any user with the bot flag set can write.
    For backward-compatibility, all admin users and people in the api usergroup are also allowed to write.
"""
user_key = delegate.context.user and delegate.context.user.key
bots = (
_get_members_of_group('/usergroup/api')
+ _get_members_of_group('/usergroup/admin')
+ _get_bots()
)
return user_key in bots
# overwrite the implementation of can_write in the infogami API plugin with this one.
api.can_write = can_write
class Forbidden(web.HTTPError):
def __init__(self, msg=''):
web.HTTPError.__init__(self, '403 Forbidden', {}, msg)
class BadRequest(web.HTTPError):
def __init__(self, msg=''):
web.HTTPError.__init__(self, '400 Bad Request', {}, msg)
class new:
"""API to create new author/edition/work/publisher/series."""
def prepare_query(self, query):
"""
        Adds a key to the query and returns that key.
        If query is a list, a key is added to each query and the list of keys is returned.
"""
if isinstance(query, list):
return [self.prepare_query(q) for q in query]
else:
type = query['type']
if isinstance(type, dict):
type = type['key']
query['key'] = web.ctx.site.new_key(type)
return query['key']
def verify_types(self, query):
if isinstance(query, list):
for q in query:
self.verify_types(q)
else:
if 'type' not in query:
raise BadRequest('Missing type')
type = query['type']
if isinstance(type, dict):
if 'key' not in type:
raise BadRequest('Bad Type: ' + json.dumps(type))
type = type['key']
if type not in [
'/type/author',
'/type/edition',
'/type/work',
'/type/series',
'/type/publisher',
]:
raise BadRequest('Bad Type: ' + json.dumps(type))
def POST(self):
if not can_write():
raise Forbidden('Permission Denied.')
try:
query = json.loads(web.data())
h = api.get_custom_headers()
comment = h.get('comment')
action = h.get('action')
except Exception as e:
raise BadRequest(str(e))
self.verify_types(query)
keys = self.prepare_query(query)
try:
if not isinstance(query, list):
query = [query]
web.ctx.site.save_many(query, comment=comment, action=action)
except client.ClientException as e:
raise BadRequest(str(e))
# graphite/statsd tracking of bot edits
user = delegate.context.user and delegate.context.user.key
if user.lower().endswith('bot'):
botname = user.replace('/people/', '', 1)
botname = botname.replace('.', '-')
key = 'ol.edits.bots.' + botname
openlibrary.core.stats.increment(key)
return json.dumps(keys)
api and api.add_hook('new', new)
@public
def changequery(query=None, _path=None, **kw):
if query is None:
query = web.input(_method='get', _unicode=False)
for k, v in kw.items():
if v is None:
query.pop(k, None)
else:
query[k] = v
query = {
k: [web.safestr(s) for s in v] if isinstance(v, list) else web.safestr(v)
for k, v in query.items()
}
out = _path or web.ctx.get('readable_path', web.ctx.path)
if query:
out += '?' + urllib.parse.urlencode(query, doseq=True)
return out
# Hack to limit recent changes offset.
# Large offsets are blowing up the database.
from infogami.core.db import get_recent_changes as _get_recentchanges
import urllib
@public
def get_recent_changes(*a, **kw):
if 'offset' in kw and kw['offset'] > 5000:
return []
else:
return _get_recentchanges(*a, **kw)
@public
def most_recent_change():
if 'cache_most_recent' in infogami.config.features:
v = web.ctx.site._request('/most_recent')
v.thing = web.ctx.site.get(v.key)
v.author = v.author and web.ctx.site.get(v.author)
v.created = client.parse_datetime(v.created)
return v
else:
return get_recent_changes(limit=1)[0]
@public
def get_cover_id(key):
try:
_, cat, oln = key.split('/')
return requests.get(
f"https://covers.openlibrary.org/{cat}/query?olid={oln}&limit=1"
).json()[0]
except (IndexError, json.decoder.JSONDecodeError, TypeError, ValueError):
return None
local_ip = None
class invalidate(delegate.page):
path = '/system/invalidate'
def POST(self):
global local_ip
if local_ip is None:
local_ip = socket.gethostbyname(socket.gethostname())
if (
web.ctx.ip != '127.0.0.1'
and web.ctx.ip.rsplit('.', 1)[0] != local_ip.rsplit('.', 1)[0]
):
raise Forbidden('Allowed only in the local network.')
data = json.loads(web.data())
if not isinstance(data, list):
data = [data]
for d in data:
thing = client.Thing(web.ctx.site, d['key'], client.storify(d))
client._run_hooks('on_new_version', thing)
return delegate.RawText('ok')
def save_error():
t = datetime.datetime.utcnow()
name = '%04d-%02d-%02d/%02d%02d%02d%06d' % (
t.year,
t.month,
t.day,
t.hour,
t.minute,
t.second,
t.microsecond,
)
path = infogami.config.get('errorlog', 'errors') + '/' + name + '.html'
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
error = web.safestr(web.djangoerror())
f = open(path, 'w')
f.write(error)
f.close()
print('error saved to', path, file=web.debug)
return name
def internalerror():
i = web.input(_method='GET', debug='false')
name = save_error()
# TODO: move this stats stuff to plugins\openlibrary\stats.py
# Can't have sub-metrics, so can't add more info
openlibrary.core.stats.increment('ol.internal-errors')
increment_error_count('ol.internal-errors-segmented')
# TODO: move this to plugins\openlibrary\sentry.py
from openlibrary.plugins.openlibrary.sentry import sentry
if sentry.enabled:
sentry.capture_exception_webpy()
if i.debug.lower() == 'true':
raise web.debugerror()
else:
msg = render.site(render.internalerror(name))
raise web.internalerror(web.safestr(msg))
delegate.app.internalerror = internalerror
delegate.add_exception_hook(save_error)
class memory(delegate.page):
path = '/debug/memory'
def GET(self):
import guppy
h = guppy.hpy()
return delegate.RawText(str(h.heap()))
def _get_relatedcarousels_component(workid):
if 'env' not in web.ctx:
delegate.fakeload()
work = web.ctx.site.get('/works/%s' % workid) or {}
component = render_template('books/RelatedWorksCarousel', work)
return {0: str(component)}
def get_cached_relatedcarousels_component(*args, **kwargs):
memoized_get_component_metadata = cache.memcache_memoize(
_get_relatedcarousels_component,
"book.bookspage.component.relatedcarousels",
timeout=dateutil.HALF_DAY_SECS,
)
return (
memoized_get_component_metadata(*args, **kwargs)
or memoized_get_component_metadata.update(*args, **kwargs)[0]
)
class Partials(delegate.page):
path = '/partials'
encoding = 'json'
def GET(self):
# `data` is meant to be a dict with two keys: `args` and `kwargs`.
# `data['args']` is meant to be a list of a template's positional arguments, in order.
# `data['kwargs']` is meant to be a dict containing a template's keyword arguments.
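        # Illustrative shape only: data='{"args": ["..."], "kwargs": {"...": "..."}}',
        # with _component selecting one of the branches handled below.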
i = web.input(workid=None, _component=None, data=None)
component = i.pop("_component")
partial = {}
if component == "RelatedWorkCarousel":
partial = _get_relatedcarousels_component(i.workid)
elif component == "AffiliateLinks":
data = json.loads(i.data)
args = data.get('args', [])
# XXX : Throw error if args length is less than 2
macro = web.template.Template.globals['macros'].AffiliateLinks(
args[0], args[1]
)
partial = {"partials": str(macro)}
elif component == 'SearchFacets':
data = json.loads(i.data)
path = data.get('path')
query = data.get('query', '')
parsed_qs = parse_qs(query.replace('?', ''))
param = data.get('param', {})
sort = None
search_response = do_search(
param, sort, rows=0, spellcheck_count=3, facet=True
)
sidebar = render_template(
'search/work_search_facets',
param,
facet_counts=search_response.facet_counts,
async_load=False,
path=path,
query=parsed_qs,
)
active_facets = render_template(
'search/work_search_selected_facets',
param,
search_response,
param.get('q', ''),
path=path,
query=parsed_qs,
)
partial = {
"sidebar": str(sidebar),
"title": active_facets.title,
"activeFacets": str(active_facets).strip(),
}
elif component == "FulltextSearchSuggestion":
query = i.get('data', '')
data = fulltext_search(query)
hits = data.get('hits', [])
if not hits['hits']:
macro = '<div></div>'
else:
macro = web.template.Template.globals[
'macros'
].FulltextSearchSuggestion(query, data)
partial = {"partials": str(macro)}
return delegate.RawText(json.dumps(partial))
def is_bot():
r"""Generated on ol-www1 within /var/log/nginx with:
cat access.log | grep -oh "; \w*[bB]ot" | sort --unique | awk '{print tolower($2)}'
cat access.log | grep -oh "; \w*[sS]pider" | sort --unique | awk '{print tolower($2)}'
Manually removed singleton `bot` (to avoid overly complex grep regex)
"""
user_agent_bots = [
'sputnikbot',
'dotbot',
'semrushbot',
'googlebot',
'yandexbot',
'monsidobot',
'kazbtbot',
'seznambot',
'dubbotbot',
'360spider',
'redditbot',
'yandexmobilebot',
'linkdexbot',
'musobot',
'mojeekbot',
'focuseekbot',
'behloolbot',
'startmebot',
'yandexaccessibilitybot',
'uptimerobot',
'femtosearchbot',
'pinterestbot',
'toutiaospider',
'yoozbot',
'parsijoobot',
'equellaurlbot',
'donkeybot',
'paperlibot',
'nsrbot',
'discordbot',
'ahrefsbot',
'`googlebot',
'coccocbot',
'buzzbot',
'laserlikebot',
'baiduspider',
'bingbot',
'mj12bot',
'yoozbotadsbot',
'ahrefsbot',
'amazonbot',
'applebot',
'bingbot',
'brightbot',
'gptbot',
'petalbot',
'semanticscholarbot',
'yandex.com/bots',
'icc-crawler',
]
if not web.ctx.env.get('HTTP_USER_AGENT'):
return True
user_agent = web.ctx.env['HTTP_USER_AGENT'].lower()
return any(bot in user_agent for bot in user_agent_bots)
def setup_template_globals():
# must be imported here, otherwise silently messes up infogami's import execution
    # order, resulting in random errors like the /account/login.json endpoint
# defined in accounts.py being ignored, and using the infogami endpoint instead.
from openlibrary.book_providers import (
get_best_edition,
get_book_provider,
get_book_provider_by_name,
get_cover_url,
)
def get_supported_languages():
return {
"cs": {"code": "cs", "localized": _('Czech'), "native": "Čeština"},
"de": {"code": "de", "localized": _('German'), "native": "Deutsch"},
"en": {"code": "en", "localized": _('English'), "native": "English"},
"es": {"code": "es", "localized": _('Spanish'), "native": "Español"},
"fr": {"code": "fr", "localized": _('French'), "native": "Français"},
"hr": {"code": "hr", "localized": _('Croatian'), "native": "Hrvatski"},
"it": {"code": "it", "localized": _('Italian'), "native": "Italiano"},
"pt": {"code": "pt", "localized": _('Portuguese'), "native": "Português"},
"hi": {"code": "hi", "localized": _('Hindi'), "native": "हिंदी"},
"sc": {"code": "sc", "localized": _('Sardinian'), "native": "Sardu"},
"te": {"code": "te", "localized": _('Telugu'), "native": "తెలుగు"},
"uk": {"code": "uk", "localized": _('Ukrainian'), "native": "Українська"},
"zh": {"code": "zh", "localized": _('Chinese'), "native": "中文"},
}
web.template.Template.globals.update(
{
'cookies': web.cookies,
'next': next,
'sorted': sorted,
'zip': zip,
'tuple': tuple,
'hash': hash,
'urlquote': web.urlquote,
'isbn_13_to_isbn_10': isbn_13_to_isbn_10,
'isbn_10_to_isbn_13': isbn_10_to_isbn_13,
'NEWLINE': '\n',
'random': random.Random(),
'choose_random_from': random.choice,
'get_lang': lambda: web.ctx.lang,
'get_supported_languages': get_supported_languages,
'ceil': math.ceil,
'get_best_edition': get_best_edition,
'get_book_provider': get_book_provider,
'get_book_provider_by_name': get_book_provider_by_name,
'get_cover_url': get_cover_url,
# bad use of globals
'is_bot': is_bot,
'time': time,
'input': web.input,
'dumps': json.dumps,
}
)
def setup_context_defaults():
from infogami.utils import context
context.defaults.update({'features': [], 'user': None, 'MAX_VISIBLE_BOOKS': 5})
def setup():
from openlibrary.plugins.openlibrary import (
sentry,
home,
borrow_home,
stats,
support,
events,
design,
status,
authors,
swagger,
)
sentry.setup()
home.setup()
design.setup()
borrow_home.setup()
stats.setup()
support.setup()
events.setup()
status.setup()
authors.setup()
swagger.setup()
from openlibrary.plugins.openlibrary import api
delegate.app.add_processor(web.unloadhook(stats.stats_hook))
if infogami.config.get('dev_instance') is True:
from openlibrary.plugins.openlibrary import dev_instance
dev_instance.setup()
setup_context_defaults()
setup_template_globals()
setup()
| 41,921 | Python | .py | 1,123 | 28.162066 | 162 | 0.576301 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
380 | infobase_hook.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/infobase_hook.py | """Infobase hook for openlibrary.
* Log all modified book pages as required for the search engine.
"""
from infogami.infobase import config
from infogami.infobase.logger import Logger
import datetime
root = getattr(config, 'booklogroot', 'booklog')
_logger = Logger(root)
def hook(object):
"""
Add this hook to infobase.hooks to log all book modifications.
"""
site = object._site
timestamp = datetime.datetime.utcnow()
if object.type.key == '/type/edition':
d = object._get_data(expand=True)
# save some space by not expanding type
d['type'] = {'key': '/type/edition'}
_logger.write('book', site.name, timestamp, d)
# TODO: take care of author modifications
| 727 | Python | .py | 20 | 31.95 | 66 | 0.69671 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
381 | api.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/api.py | """
This file should be for internal APIs which Open Library requires for
its experience. This does not include public facing APIs with LTS
(long term support)
"""
import web
import re
import json
import qrcode
import io
from collections import defaultdict
from openlibrary.views.loanstats import get_trending_books
from infogami import config
from infogami.utils import delegate
from infogami.utils.view import render_template # noqa: F401 used for its side effects
from infogami.plugins.api.code import jsonapi
from infogami.utils.view import add_flash_message
from openlibrary import accounts
from openlibrary.plugins.openlibrary.code import can_write
from openlibrary.utils import extract_numeric_id_from_olid
from openlibrary.utils.isbn import isbn_10_to_isbn_13, normalize_isbn
from openlibrary.plugins.worksearch.subjects import get_subject
from openlibrary.accounts.model import OpenLibraryAccount
from openlibrary.core import ia, db, models, lending, helpers as h
from openlibrary.core.bookshelves_events import BookshelvesEvents
from openlibrary.core.observations import Observations, get_observation_metrics
from openlibrary.core.models import Booknotes, Work
from openlibrary.core.follows import PubSub
from openlibrary.core.vendors import (
create_edition_from_amazon_metadata,
get_amazon_metadata,
get_betterworldbooks_metadata,
)
from openlibrary.core.helpers import NothingEncoder
class book_availability(delegate.page):
path = "/availability/v2"
def GET(self):
i = web.input(type='', ids='')
id_type = i.type
ids = i.ids.split(',')
result = self.get_book_availability(id_type, ids)
return delegate.RawText(json.dumps(result), content_type="application/json")
def POST(self):
i = web.input(type='')
j = json.loads(web.data())
id_type = i.type
ids = j.get('ids', [])
result = self.get_book_availability(id_type, ids)
return delegate.RawText(json.dumps(result), content_type="application/json")
def get_book_availability(self, id_type, ids):
if id_type in ["openlibrary_work", "openlibrary_edition", "identifier"]:
return lending.get_availability(id_type, ids)
else:
return []
class trending_books_api(delegate.page):
path = "/trending(/?.*)"
# path = "/trending/(now|daily|weekly|monthly|yearly|forever)"
encoding = "json"
def GET(self, period="/daily"):
from openlibrary.views.loanstats import SINCE_DAYS
period = period[1:] # remove slash
i = web.input(
page=1, limit=100, days=0, hours=0, sort_by_count=False, minimum=0
)
days = SINCE_DAYS.get(period, int(i.days))
works = get_trending_books(
since_days=days,
since_hours=int(i.hours),
limit=int(i.limit),
page=int(i.page),
books_only=True,
sort_by_count=i.sort_by_count != "false",
minimum=i.minimum,
)
result = {
'query': f"/trending/{period}",
'works': [dict(work) for work in works],
'days': days,
'hours': i.hours,
}
return delegate.RawText(json.dumps(result), content_type="application/json")
class browse(delegate.page):
path = "/browse"
encoding = "json"
def GET(self):
i = web.input(q='', page=1, limit=100, subject='', sorts='')
sorts = i.sorts.split(',')
page = int(i.page)
limit = int(i.limit)
url = lending.compose_ia_url(
query=i.q,
limit=limit,
page=page,
subject=i.subject,
sorts=sorts,
)
works = lending.get_available(url=url) if url else []
result = {
'query': url,
'works': [work.dict() for work in works],
}
return delegate.RawText(json.dumps(result), content_type="application/json")
class ratings(delegate.page):
path = r"/works/OL(\d+)W/ratings"
encoding = "json"
@jsonapi
def GET(self, work_id):
from openlibrary.core.ratings import Ratings
if stats := Ratings.get_work_ratings_summary(work_id):
return json.dumps(
{
'summary': {
'average': stats['ratings_average'],
'count': stats['ratings_count'],
'sortable': stats['ratings_sortable'],
},
'counts': {
'1': stats['ratings_count_1'],
'2': stats['ratings_count_2'],
'3': stats['ratings_count_3'],
'4': stats['ratings_count_4'],
'5': stats['ratings_count_5'],
},
}
)
else:
return json.dumps(
{
'summary': {
'average': None,
'count': 0,
},
'counts': {
'1': 0,
'2': 0,
'3': 0,
'4': 0,
'5': 0,
},
}
)
def POST(self, work_id):
"""Registers new ratings for this work"""
user = accounts.get_current_user()
i = web.input(
edition_id=None,
rating=None,
redir=False,
redir_url=None,
page=None,
ajax=False,
)
key = (
i.redir_url
if i.redir_url
else i.edition_id if i.edition_id else ('/works/OL%sW' % work_id)
)
edition_id = (
int(extract_numeric_id_from_olid(i.edition_id)) if i.edition_id else None
)
if not user:
raise web.seeother('/account/login?redirect=%s' % key)
username = user.key.split('/')[2]
def response(msg, status="success"):
return delegate.RawText(
json.dumps({status: msg}), content_type="application/json"
)
if i.rating is None:
models.Ratings.remove(username, work_id)
r = response('removed rating')
else:
try:
rating = int(i.rating)
if rating not in models.Ratings.VALID_STAR_RATINGS:
raise ValueError
except ValueError:
return response('invalid rating', status="error")
models.Ratings.add(
username=username, work_id=work_id, rating=rating, edition_id=edition_id
)
r = response('rating added')
if i.redir and not i.ajax:
p = h.safeint(i.page, 1)
query_params = f'?page={p}' if p > 1 else ''
if i.page:
raise web.seeother(f'{key}{query_params}')
raise web.seeother(key)
return r
class booknotes(delegate.page):
path = r"/works/OL(\d+)W/notes"
encoding = "json"
def POST(self, work_id):
"""
Add a note to a work (or a work and an edition)
GET params:
- edition_id str (optional)
- redir bool: if patron not logged in, redirect back to page after login
:param str work_id: e.g. OL123W
:rtype: json
:return: the note
"""
user = accounts.get_current_user()
if not user:
raise web.seeother('/account/login?redirect=/works/%s' % work_id)
i = web.input(notes=None, edition_id=None, redir=None)
edition_id = (
int(extract_numeric_id_from_olid(i.edition_id)) if i.edition_id else -1
)
username = user.key.split('/')[2]
def response(msg, status="success"):
return delegate.RawText(
json.dumps({status: msg}), content_type="application/json"
)
if i.notes is None:
Booknotes.remove(username, work_id, edition_id=edition_id)
return response('removed note')
Booknotes.add(
username=username, work_id=work_id, notes=i.notes, edition_id=edition_id
)
if i.redir:
raise web.seeother("/works/%s" % work_id)
return response('note added')
# The GET of work_bookshelves, work_ratings, and work_likes should return some summary of likes,
# not a value tied to this logged-in user. It is currently used for debugging.
class work_bookshelves(delegate.page):
path = r"/works/OL(\d+)W/bookshelves"
encoding = "json"
@jsonapi
def GET(self, work_id):
from openlibrary.core.models import Bookshelves
return json.dumps({'counts': Bookshelves.get_work_summary(work_id)})
def POST(self, work_id):
"""
Add a work (or a work and an edition) to a bookshelf.
GET params:
- edition_id str (optional)
- action str: e.g. "add", "remove"
- redir bool: if patron not logged in, redirect back to page after login
- bookshelf_id int: which bookshelf? e.g. the ID for "want to read"?
- dont_remove bool: if book exists & action== "add", don't try removal
:param str work_id: e.g. OL123W
:rtype: json
:return: a list of bookshelves_affected
"""
from openlibrary.core.models import Bookshelves
user = accounts.get_current_user()
i = web.input(
edition_id=None,
action="add",
redir=False,
bookshelf_id=None,
dont_remove=False,
)
key = i.edition_id if i.edition_id else ('/works/OL%sW' % work_id)
if not user:
raise web.seeother('/account/login?redirect=%s' % key)
username = user.key.split('/')[2]
current_status = Bookshelves.get_users_read_status_of_work(username, work_id)
try:
bookshelf_id = int(i.bookshelf_id)
shelf_ids = Bookshelves.PRESET_BOOKSHELVES.values()
if bookshelf_id != -1 and bookshelf_id not in shelf_ids:
raise ValueError
except (TypeError, ValueError):
return delegate.RawText(
json.dumps({'error': 'Invalid bookshelf'}),
content_type="application/json",
)
if (not i.dont_remove) and bookshelf_id == current_status or bookshelf_id == -1:
work_bookshelf = Bookshelves.remove(
username=username, work_id=work_id, bookshelf_id=current_status
)
BookshelvesEvents.delete_by_username_and_work(username, work_id)
else:
edition_id = int(i.edition_id.split('/')[2][2:-1]) if i.edition_id else None
work_bookshelf = Bookshelves.add(
username=username,
bookshelf_id=bookshelf_id,
work_id=work_id,
edition_id=edition_id,
)
if i.redir:
raise web.seeother(key)
return delegate.RawText(
json.dumps({'bookshelves_affected': work_bookshelf}),
content_type="application/json",
)
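# Sketch of a typical POST to the bookshelves endpoint above (form values are
# illustrative; valid ids come from Bookshelves.PRESET_BOOKSHELVES):
#   POST /works/OL123W/bookshelves
#   bookshelf_id=1&action=add&edition_id=/books/OL456M
# A bookshelf_id of -1 removes the work from the patron's current shelf.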
class work_editions(delegate.page):
path = r"(/works/OL\d+W)/editions"
encoding = "json"
def GET(self, key):
doc = web.ctx.site.get(key)
if not doc or doc.type.key != "/type/work":
raise web.notfound('')
else:
i = web.input(limit=50, offset=0)
limit = h.safeint(i.limit) or 50
offset = h.safeint(i.offset) or 0
data = self.get_editions_data(doc, limit=limit, offset=offset)
return delegate.RawText(json.dumps(data), content_type="application/json")
def get_editions_data(self, work, limit, offset):
limit = min(limit, 1000)
keys = web.ctx.site.things(
{
"type": "/type/edition",
"works": work.key,
"limit": limit,
"offset": offset,
}
)
editions = web.ctx.site.get_many(keys, raw=True)
size = work.edition_count
links = {
"self": web.ctx.fullpath,
"work": work.key,
}
if offset > 0:
            links['prev'] = web.changequery(offset=max(0, offset - limit))
if offset + len(editions) < size:
links['next'] = web.changequery(offset=offset + limit)
return {"links": links, "size": size, "entries": editions}
class author_works(delegate.page):
path = r"(/authors/OL\d+A)/works"
encoding = "json"
def GET(self, key):
doc = web.ctx.site.get(key)
if not doc or doc.type.key != "/type/author":
raise web.notfound('')
else:
i = web.input(limit=50, offset=0)
limit = h.safeint(i.limit, 50)
offset = h.safeint(i.offset, 0)
data = self.get_works_data(doc, limit=limit, offset=offset)
return delegate.RawText(json.dumps(data), content_type="application/json")
def get_works_data(self, author, limit, offset):
limit = min(limit, 1000)
keys = web.ctx.site.things(
{
"type": "/type/work",
"authors": {"author": {"key": author.key}},
"limit": limit,
"offset": offset,
}
)
works = web.ctx.site.get_many(keys, raw=True)
size = author.get_work_count()
links = {
"self": web.ctx.fullpath,
"author": author.key,
}
if offset > 0:
            links['prev'] = web.changequery(offset=max(0, offset - limit))
if offset + len(works) < size:
links['next'] = web.changequery(offset=offset + limit)
return {"links": links, "size": size, "entries": works}
class price_api(delegate.page):
path = r'/prices'
@jsonapi
def GET(self):
i = web.input(isbn='', asin='')
if not (i.isbn or i.asin):
return json.dumps({'error': 'isbn or asin required'})
id_ = i.asin if i.asin else normalize_isbn(i.isbn)
id_type = 'asin' if i.asin else 'isbn_' + ('13' if len(id_) == 13 else '10')
metadata = {
'amazon': get_amazon_metadata(id_, id_type=id_type[:4]) or {},
'betterworldbooks': (
get_betterworldbooks_metadata(id_)
if id_type.startswith('isbn_')
else {}
),
}
# if user supplied isbn_{n} fails for amazon, we may want to check the alternate isbn
# if bwb fails and isbn10, try again with isbn13
if id_type == 'isbn_10' and metadata['betterworldbooks'].get('price') is None:
isbn_13 = isbn_10_to_isbn_13(id_)
metadata['betterworldbooks'] = (
isbn_13 and get_betterworldbooks_metadata(isbn_13) or {}
)
# fetch book by isbn if it exists
# TODO: perform existing OL lookup by ASIN if supplied, if possible
matches = web.ctx.site.things(
{
'type': '/type/edition',
id_type: id_,
}
)
book_key = matches[0] if matches else None
# if no OL edition for isbn, attempt to create
if (not book_key) and metadata.get('amazon'):
book_key = create_edition_from_amazon_metadata(id_, id_type[:4])
# include ol edition metadata in response, if available
if book_key:
ed = web.ctx.site.get(book_key)
if ed:
metadata['key'] = ed.key
if getattr(ed, 'ocaid'): # noqa: B009
metadata['ocaid'] = ed.ocaid
return json.dumps(metadata)
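# Rough flow of the /prices endpoint above (identifier is hypothetical):
# GET /prices?isbn=9780140328721 gathers Amazon and BetterWorldBooks metadata,
# retries BetterWorldBooks with the ISBN-13 when an ISBN-10 lookup has no price,
# and attaches the matching Open Library edition key (and ocaid) when one exists
# or can be created from the Amazon metadata.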
class patrons_follows_json(delegate.page):
path = r"(/people/[^/]+)/follows"
encoding = "json"
def GET(self, key):
i = web.input(publisher='', redir_url='', state='')
user = accounts.get_current_user()
if not user or user.key != key:
raise web.seeother(f'/account/login?redir_url={i.redir_url}')
username = user.key.split('/')[2]
return delegate.RawText(
json.dumps(PubSub.get_subscriptions(username), cls=NothingEncoder),
content_type="application/json",
)
def POST(self, key):
i = web.input(publisher='', redir_url='', state='')
user = accounts.get_current_user()
if not user or user.key != key:
raise web.seeother(f'/account/login?redir_url={i.redir_url}')
username = user.key.split('/')[2]
action = PubSub.subscribe if i.state == '0' else PubSub.unsubscribe
action(username, i.publisher)
raise web.seeother(i.redir_url)
class patrons_observations(delegate.page):
"""
Fetches a patron's observations for a work, requires auth, intended
to be used internally to power the My Books Page & books pages modal
"""
path = r"/works/OL(\d+)W/observations"
encoding = "json"
def GET(self, work_id):
user = accounts.get_current_user()
if not user:
raise web.seeother('/account/login')
username = user.key.split('/')[2]
existing_records = Observations.get_patron_observations(username, work_id)
patron_observations = defaultdict(list)
for r in existing_records:
kv_pair = Observations.get_key_value_pair(r['type'], r['value'])
patron_observations[kv_pair.key].append(kv_pair.value)
return delegate.RawText(
json.dumps(patron_observations), content_type="application/json"
)
def POST(self, work_id):
user = accounts.get_current_user()
if not user:
raise web.seeother('/account/login')
data = json.loads(web.data())
Observations.persist_observation(
data['username'], work_id, data['observation'], data['action']
)
def response(msg, status="success"):
return delegate.RawText(
json.dumps({status: msg}), content_type="application/json"
)
return response('Observations added')
def DELETE(self, work_id):
user = accounts.get_current_user()
        if not user:
            raise web.seeother('/account/login')
        username = user.key.split('/')[2]
Observations.remove_observations(username, work_id)
def response(msg, status="success"):
return delegate.RawText(
json.dumps({status: msg}), content_type="application/json"
)
return response('Observations removed')
class public_observations(delegate.page):
"""
Public observations fetches anonymized community reviews
for a list of works. Useful for decorating search results.
"""
path = '/observations'
encoding = 'json'
def GET(self):
i = web.input(olid=[])
works = i.olid
metrics = {w: get_observation_metrics(w) for w in works}
return delegate.RawText(
json.dumps({'observations': metrics}), content_type='application/json'
)
class work_delete(delegate.page):
path = r"/works/(OL\d+W)/[^/]+/delete"
def get_editions_of_work(self, work: Work) -> list[dict]:
i = web.input(bulk=False)
limit = 1_000 # This is the max limit of the things function
all_keys: list = []
offset = 0
while True:
keys: list = web.ctx.site.things(
{
"type": "/type/edition",
"works": work.key,
"limit": limit,
"offset": offset,
}
)
all_keys.extend(keys)
if len(keys) == limit:
if not i.bulk:
raise web.HTTPError(
'400 Bad Request',
data=json.dumps(
{
'error': f'API can only delete {limit} editions per work.',
}
),
headers={"Content-Type": "application/json"},
)
else:
offset += limit
else:
break
return web.ctx.site.get_many(all_keys, raw=True)
def POST(self, work_id: str):
if not can_write():
return web.HTTPError('403 Forbidden')
web_input = web.input(comment=None)
comment = web_input.get('comment')
work: Work = web.ctx.site.get(f'/works/{work_id}')
if work is None:
return web.HTTPError(status='404 Not Found')
editions: list[dict] = self.get_editions_of_work(work)
keys_to_delete: list = [el.get('key') for el in [*editions, work.dict()]]
delete_payload: list[dict] = [
{'key': key, 'type': {'key': '/type/delete'}} for key in keys_to_delete
]
web.ctx.site.save_many(delete_payload, comment)
return delegate.RawText(
json.dumps(
{
'status': 'ok',
}
),
content_type="application/json",
)
class hide_banner(delegate.page):
path = '/hide_banner'
def POST(self):
user = accounts.get_current_user()
data = json.loads(web.data())
        # Set a truthy cookie that expires after cookie_duration_days days (30 by default):
DAY_SECONDS = 60 * 60 * 24
cookie_duration_days = int(data.get('cookie-duration-days', 30))
if user and data['cookie-name'].startswith('yrg'):
user.save_preferences({'yrg_banner_pref': data['cookie-name']})
web.setcookie(
data['cookie-name'], '1', expires=(cookie_duration_days * DAY_SECONDS)
)
return delegate.RawText(
json.dumps({'success': 'Preference saved'}), content_type="application/json"
)
class create_qrcode(delegate.page):
path = '/qrcode'
def GET(self):
i = web.input(path='/')
page_path = i.path
qr_url = f'{web.ctx.home}{page_path}'
img = qrcode.make(qr_url)
with io.BytesIO() as buf:
img.save(buf, format='PNG')
web.header("Content-Type", "image/png")
return delegate.RawText(buf.getvalue())
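# Illustrative use of the QR endpoint above (path value is hypothetical):
#   GET /qrcode?path=/works/OL45883W
# returns a PNG of the absolute URL web.ctx.home + path.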
| 22,401 | Python | .py | 561 | 29.128342 | 96 | 0.561576 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
382 | status.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/status.py | from dataclasses import dataclass
import datetime
import functools
from pathlib import Path
import re
import socket
import sys
from typing import Any
from infogami import config
from infogami.utils import delegate
from infogami.utils.view import render_template, public
from openlibrary.core import stats
from openlibrary.utils import get_software_version
status_info: dict[str, Any] = {}
feature_flags: dict[str, Any] = {}
class status(delegate.page):
def GET(self):
return render_template(
"status",
status_info,
feature_flags,
dev_merged_status=get_dev_merged_status(),
)
@functools.cache
def get_dev_merged_status():
return DevMergedStatus.from_file()
@dataclass
class DevMergedStatus:
git_status: str
pr_statuses: 'list[PRStatus]'
footer: str
@staticmethod
def from_output(output: str) -> 'DevMergedStatus':
dev_merged_pieces = output.split('\n---\n')
return DevMergedStatus(
git_status=dev_merged_pieces[0],
pr_statuses=list(map(PRStatus.from_output, dev_merged_pieces[1:-1])),
footer=dev_merged_pieces[-1],
)
@staticmethod
def from_file() -> 'DevMergedStatus | None':
"""If we're on testing and the file exists, return staged PRs"""
fp = Path('./_dev-merged_status.txt')
if fp.exists() and (contents := fp.read_text()):
return DevMergedStatus.from_output(contents)
return None
@dataclass
class PRStatus:
pull_line: str
status: str
body: str
@property
def name(self) -> str | None:
if '#' in self.pull_line:
return self.pull_line.split(' # ')[1]
else:
return self.pull_line
@property
def pull_id(self) -> int | None:
if m := re.match(r'^origin pull/(\d+)', self.pull_line):
return int(m.group(1))
else:
return None
@property
def link(self) -> str | None:
if self.pull_id is not None:
return f'https://github.com/internetarchive/openlibrary/pull/{self.pull_id}'
else:
return None
@staticmethod
def from_output(output: str) -> 'PRStatus':
lines = output.strip().split('\n')
return PRStatus(pull_line=lines[0], status=lines[-1], body='\n'.join(lines[1:]))
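# Expected layout of ./_dev-merged_status.txt, inferred from the parsers above
# (PR number and title are made up):
#   <git status output>
#   ---
#   origin pull/1234/head # Some staged PR title
#   <body lines>
#   <status line>
#   ---
#   <footer>
# Sections are separated by "\n---\n"; the first is the git status and the last
# is treated as a footer.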
@public
def get_git_revision_short_hash():
return (
status_info.get('Software version')
if status_info and isinstance(status_info, dict)
else None
)
def get_features_enabled():
return config.features
def setup():
"Basic startup status for the server"
global status_info, feature_flags
host = socket.gethostname()
status_info = {
"Software version": get_software_version(),
"Python version": sys.version.split()[0],
"Host": host,
"Start time": datetime.datetime.now(datetime.UTC),
}
feature_flags = get_features_enabled()
# Host is e.g. ol-web4.blah.archive.org ; we just want the first subdomain
first_subdomain = host.split('.')[0] or 'unknown'
stats.increment('ol.servers.%s.started' % first_subdomain)
| 3,176 | Python | .py | 96 | 26.760417 | 88 | 0.647367 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
383 | events.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/events.py | """Handling various events triggered by Open Library.
"""
from infogami.infobase import client
import logging
import web
import eventer
logger = logging.getLogger("openlibrary.events")
def on_page_edit(page):
pass
class EditHook(client.hook):
"""Ugly Interface provided by Infobase to get event notifications."""
def on_new_version(self, page):
"""Fires page.edit event using msg broker."""
        # The argument passed by Infobase is not a thing object.
# Create a thing object to pass to event listeners.
page = web.ctx.site.get(page['key'])
eventer.trigger("page.edit", page)
def setup():
"""Installs handlers for various events."""
eventer.bind("page.edit", on_page_edit)
| 739 | Python | .py | 20 | 32.45 | 73 | 0.712271 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
384 | lists.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/lists.py | """Lists implementation.
"""
from dataclasses import dataclass, field
import json
from urllib.parse import parse_qs
import random
from typing import Literal, cast
import web
from infogami.utils import delegate
from infogami.utils.view import render_template, public, require_login
from infogami.infobase import client, common
from openlibrary.accounts import get_current_user
from openlibrary.core import formats, cache
from openlibrary.core.models import ThingKey
from openlibrary.core.lists.model import (
AnnotatedSeedDict,
List,
ThingReferenceDict,
SeedSubjectString,
)
import openlibrary.core.helpers as h
from openlibrary.i18n import gettext as _
from openlibrary.plugins.upstream.addbook import safe_seeother
from openlibrary.utils import dateutil, olid_to_key
from openlibrary.plugins.upstream import spamcheck, utils
from openlibrary.plugins.upstream.account import MyBooksTemplate
from openlibrary.plugins.worksearch import subjects
from openlibrary.coverstore.code import render_list_preview_image
def subject_key_to_seed(key: subjects.SubjectPseudoKey) -> SeedSubjectString:
name_part = key.split("/")[-1].replace(",", "_").replace("__", "_")
if name_part.split(":")[0] in ("place", "person", "time"):
return name_part
else:
return "subject:" + name_part
def is_seed_subject_string(seed: str) -> bool:
subject_type = seed.split(":")[0]
return subject_type in ("subject", "place", "person", "time")
def is_empty_annotated_seed(seed: AnnotatedSeedDict) -> bool:
"""
An empty seed can be represented as a simple SeedDict
"""
return not seed.get('notes')
Seed = ThingReferenceDict | SeedSubjectString | AnnotatedSeedDict
"""
The JSON-friendly seed representation (as opposed to `openlibrary.core.lists.model.Seed`).
Can either be a thing reference, a subject key, or an annotated seed.
"""
@dataclass
class ListRecord:
key: str | None = None
name: str = ''
description: str = ''
seeds: list[Seed] = field(default_factory=list)
@staticmethod
def normalize_input_seed(
seed: ThingReferenceDict | AnnotatedSeedDict | str,
) -> Seed:
if isinstance(seed, str):
if seed.startswith('/subjects/'):
return subject_key_to_seed(seed)
elif seed.startswith('/'):
return {'key': seed}
elif is_seed_subject_string(seed):
return seed
else:
return {'key': olid_to_key(seed)}
else:
if 'thing' in seed:
annotated_seed = cast(AnnotatedSeedDict, seed) # Appease mypy
if is_empty_annotated_seed(annotated_seed):
return ListRecord.normalize_input_seed(annotated_seed['thing'])
elif annotated_seed['thing']['key'].startswith('/subjects/'):
return subject_key_to_seed(annotated_seed['thing']['key'])
else:
return annotated_seed
elif seed['key'].startswith('/subjects/'):
thing_ref = cast(ThingReferenceDict, seed) # Appease mypy
return subject_key_to_seed(thing_ref['key'])
else:
return seed
@staticmethod
def from_input():
DEFAULTS = {
'key': None,
'name': '',
'description': '',
'seeds': [],
}
if data := web.data():
            # If the request has data, parse it and use it to populate the list
if web.ctx.env.get('CONTENT_TYPE') == 'application/json':
i = {} | DEFAULTS | json.loads(data)
else:
form_data = {
# By default all the values are lists
k: v[0]
for k, v in parse_qs(bytes.decode(data)).items()
}
i = {} | DEFAULTS | utils.unflatten(form_data)
else:
# Otherwise read from the query string
i = utils.unflatten(web.input(**DEFAULTS))
normalized_seeds = [
ListRecord.normalize_input_seed(seed)
for seed_list in i['seeds']
for seed in (
seed_list.split(',') if isinstance(seed_list, str) else [seed_list]
)
]
normalized_seeds = [
seed
for seed in normalized_seeds
if seed and (isinstance(seed, str) or seed.get('key') or seed.get('thing'))
]
return ListRecord(
key=i['key'],
name=i['name'],
description=i['description'],
seeds=normalized_seeds,
)
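    # Normalization examples for from_input()/normalize_input_seed(), using
    # hypothetical identifiers:
    #   "OL1M"                    -> {"key": "/books/OL1M"}
    #   "/subjects/place:london"  -> "place:london"
    #   {"key": "/subjects/love"} -> "subject:love"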
def to_thing_json(self):
return {
"key": self.key,
"type": {"key": "/type/list"},
"name": self.name,
"description": self.description,
"seeds": self.seeds,
}
class lists_home(delegate.page):
path = "/lists"
def GET(self):
delegate.context.setdefault('cssfile', 'lists')
return render_template("lists/home")
SeedType = Literal['subject', 'author', 'work', 'edition']
def seed_key_to_seed_type(key: str) -> SeedType:
match key.split('/')[1]:
case 'subjects':
return 'subject'
case 'authors':
return 'author'
case 'works':
return 'work'
case 'books':
return 'edition'
case _:
raise ValueError(f'Invalid seed key: {key}')
@public
def get_seed_info(doc):
"""Takes a thing, determines what type it is, and returns a seed summary"""
seed_type = seed_key_to_seed_type(doc.key)
match seed_type:
case 'subject':
seed = subject_key_to_seed(doc.key)
title = doc.name
case 'work' | 'edition':
seed = {"key": doc.key}
title = doc.get("title", "untitled")
case 'author':
seed = {"key": doc.key}
title = doc.get('name', 'name missing')
case _:
raise ValueError(f'Invalid seed type: {seed_type}')
return {
"seed": seed,
"type": seed_type,
"title": web.websafe(title),
"remove_dialog_html": _(
'Are you sure you want to remove <strong>%(title)s</strong> from your list?',
title=web.websafe(title),
),
}
@public
def get_list_data(list, seed, include_cover_url=True):
list_items = []
for s in list.get_seeds():
list_items.append(s.key)
d = web.storage(
{
"name": list.name or "",
"key": list.key,
"active": list.has_seed(seed) if seed else False,
"list_items": list_items,
}
)
if include_cover_url:
cover = list.get_cover() or list.get_default_cover()
d['cover_url'] = cover and cover.url("S") or "/images/icons/avatar_book-sm.png"
if 'None' in d['cover_url']:
d['cover_url'] = "/images/icons/avatar_book-sm.png"
d['owner'] = None
if owner := list.get_owner():
d['owner'] = web.storage(displayname=owner.displayname or "", key=owner.key)
return d
@public
def get_user_lists(seed_info):
user = get_current_user()
if not user:
return []
user_lists = user.get_lists(sort=True)
seed = seed_info['seed'] if seed_info else None
return [get_list_data(user_list, seed) for user_list in user_lists]
class lists_partials(delegate.page):
path = "/lists/partials"
encoding = "json"
def GET(self):
partials = self.get_partials()
return delegate.RawText(json.dumps(partials))
def get_partials(self):
user_lists = get_user_lists(None)
dropper = render_template('lists/dropper_lists', user_lists)
list_data = {
list_data['key']: {
'members': list_data['list_items'],
'listName': list_data['name'],
}
for list_data in user_lists
}
return {
'dropper': str(dropper),
'listData': list_data,
}
class lists(delegate.page):
"""Controller for displaying lists of a seed or lists of a person."""
path = "(/(?:people|books|works|authors|subjects)/[^/]+)/lists"
def is_enabled(self):
return "lists" in web.ctx.features
def GET(self, path):
# If logged in patron is viewing their lists page, use MyBooksTemplate
if path.startswith("/people/"):
username = path.split('/')[-1]
mb = MyBooksTemplate(username, 'lists')
if not mb.user:
raise web.notfound()
template = render_template(
"lists/lists.html", mb.user, mb.user.get_lists(), show_header=False
)
return mb.render(
template=template,
header_title=_("Lists (%(count)d)", count=len(mb.lists)),
)
else:
doc = self.get_doc(path)
if not doc:
raise web.notfound()
lists = doc.get_lists()
return render_template("lists/lists.html", doc, lists, show_header=True)
def get_doc(self, key):
if key.startswith("/subjects/"):
s = subjects.get_subject(key)
if s.work_count > 0:
return s
else:
return None
else:
return web.ctx.site.get(key)
class lists_edit(delegate.page):
path = r"(/people/[^/]+)?(/lists/OL\d+L)/edit"
def GET(self, user_key: str | None, list_key: str): # type: ignore[override]
key = (user_key or '') + list_key
if not web.ctx.site.can_write(key):
return render_template(
"permission_denied",
web.ctx.fullpath,
f"Permission denied to edit {key}.",
)
lst = cast(List | None, web.ctx.site.get(key))
if lst is None:
raise web.notfound()
return render_template("type/list/edit", lst, new=False)
def POST(self, user_key: str | None, list_key: str | None = None): # type: ignore[override]
key = (user_key or '') + (list_key or '')
if not web.ctx.site.can_write(key):
return render_template(
"permission_denied",
web.ctx.fullpath,
f"Permission denied to edit {key}.",
)
list_record = ListRecord.from_input()
if not list_record.name:
raise web.badrequest('A list name is required.')
# Creating a new list
if not list_key:
list_num = web.ctx.site.seq.next_value("list")
list_key = f"/lists/OL{list_num}L"
list_record.key = (user_key or '') + list_key
web.ctx.site.save(
list_record.to_thing_json(),
action="lists",
comment=web.input(_comment="")._comment or None,
)
# If content type json, return json response
if web.ctx.env.get('CONTENT_TYPE') == 'application/json':
return delegate.RawText(json.dumps({'key': list_record.key}))
else:
return safe_seeother(list_record.key)
class lists_add_account(delegate.page):
path = r"/account/lists/add"
@require_login
def GET(self):
return web.seeother(f'{get_current_user().key}/lists/add{web.ctx.query}')
class lists_add(delegate.page):
path = r"(/people/[^/]+)?/lists/add"
def GET(self, user_key: str | None): # type: ignore[override]
if user_key and not web.ctx.site.can_write(user_key):
return render_template(
"permission_denied",
web.ctx.fullpath,
f"Permission denied to edit {user_key}.",
)
list_record = ListRecord.from_input()
# Only admins can add global lists for now
admin_only = not user_key
return render_template(
"type/list/edit", list_record, new=True, admin_only=admin_only
)
def POST(self, user_key: str | None): # type: ignore[override]
return lists_edit().POST(user_key, None)
class lists_delete(delegate.page):
path = r"((?:/people/[^/]+)?/lists/OL\d+L)/delete"
encoding = "json"
def POST(self, key):
doc = web.ctx.site.get(key)
if doc is None or doc.type.key != '/type/list':
raise web.notfound()
# Deletes list preview from memcache, if it exists
cache_key = "core.patron_lists.%s" % web.safestr(doc.key)
cache.memcache_cache.delete(cache_key)
doc = {"key": key, "type": {"key": "/type/delete"}}
try:
result = web.ctx.site.save(doc, action="lists", comment="Deleted list.")
except client.ClientException as e:
web.ctx.status = e.status
web.header("Content-Type", "application/json")
return delegate.RawText(e.json)
web.header("Content-Type", "application/json")
return delegate.RawText('{"status": "ok"}')
class lists_json(delegate.page):
path = "(/(?:people|books|works|authors|subjects)/[^/]+)/lists"
encoding = "json"
content_type = "application/json"
def GET(self, path):
if path.startswith("/subjects/"):
doc = subjects.get_subject(path)
else:
doc = web.ctx.site.get(path)
if not doc:
raise web.notfound()
i = web.input(offset=0, limit=50)
i.offset = h.safeint(i.offset, 0)
i.limit = h.safeint(i.limit, 50)
i.limit = min(i.limit, 100)
i.offset = max(i.offset, 0)
lists = self.get_lists(doc, limit=i.limit, offset=i.offset)
return delegate.RawText(self.dumps(lists))
def get_lists(self, doc, limit=50, offset=0):
lists = doc.get_lists(limit=limit, offset=offset)
size = len(lists)
if offset or len(lists) == limit:
# There could be more lists than len(lists)
size = len(doc.get_lists(limit=1000))
d = {
"links": {"self": web.ctx.path},
"size": size,
"entries": [lst.preview() for lst in lists],
}
if offset + len(lists) < size:
d['links']['next'] = web.changequery(limit=limit, offset=offset + limit)
if offset:
offset = max(0, offset - limit)
d['links']['prev'] = web.changequery(limit=limit, offset=offset)
return d
def forbidden(self):
headers = {"Content-Type": self.get_content_type()}
data = {"message": "Permission denied."}
return web.HTTPError("403 Forbidden", data=self.dumps(data), headers=headers)
def POST(self, user_key):
# POST is allowed only for /people/foo/lists
if not user_key.startswith("/people/"):
raise web.nomethod()
site = web.ctx.site
user = site.get(user_key)
if not user:
raise web.notfound()
if not site.can_write(user_key):
raise self.forbidden()
data = self.loads(web.data())
# TODO: validate data
seeds = self.process_seeds(data.get('seeds', []))
lst = user.new_list(
name=data.get('name', ''),
description=data.get('description', ''),
tags=data.get('tags', []),
seeds=seeds,
)
if spamcheck.is_spam(lst):
raise self.forbidden()
try:
result = site.save(
lst.dict(),
comment="Created new list.",
action="lists",
data={"list": {"key": lst.key}, "seeds": seeds},
)
except client.ClientException as e:
headers = {"Content-Type": self.get_content_type()}
data = {"message": str(e)}
raise web.HTTPError(e.status, data=self.dumps(data), headers=headers)
web.header("Content-Type", self.get_content_type())
return delegate.RawText(self.dumps(result))
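    # Illustrative request body for POST above (values are hypothetical):
    #   POST /people/joe/lists.json
    #   {"name": "My list", "description": "", "tags": [],
    #    "seeds": ["/books/OL1M", "subject:fantasy"]}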
@staticmethod
def process_seeds(
        seeds: list[ThingReferenceDict | subjects.SubjectPseudoKey | ThingKey],
) -> list[Seed]:
return [ListRecord.normalize_input_seed(seed) for seed in seeds]
def get_content_type(self):
return self.content_type
def dumps(self, data):
return formats.dump(data, self.encoding)
def loads(self, text):
return formats.load(text, self.encoding)
class lists_yaml(lists_json):
encoding = "yml"
content_type = "text/yaml"
def get_list(key, raw=False):
if lst := web.ctx.site.get(key):
if raw:
return lst.dict()
return {
"links": {
"self": lst.key,
"seeds": lst.key + "/seeds",
"subjects": lst.key + "/subjects",
"editions": lst.key + "/editions",
},
"name": lst.name or None,
"type": {"key": lst.key},
"description": (lst.description and str(lst.description) or None),
"seed_count": lst.seed_count,
"meta": {
"revision": lst.revision,
"created": lst.created.isoformat(),
"last_modified": lst.last_modified.isoformat(),
},
}
class list_view_json(delegate.page):
path = r"((?:/people/[^/]+)?/lists/OL\d+L)"
encoding = "json"
content_type = "application/json"
def GET(self, key):
i = web.input()
raw = i.get("_raw") == "true"
lst = get_list(key, raw=raw)
if not lst or lst['type']['key'] == '/type/delete':
raise web.notfound()
web.header("Content-Type", self.content_type)
return delegate.RawText(formats.dump(lst, self.encoding))
class list_view_yaml(list_view_json):
encoding = "yml"
content_type = "text/yaml"
@public
def get_list_seeds(key):
if lst := web.ctx.site.get(key):
seeds = [seed.dict() for seed in lst.get_seeds()]
return {
"links": {"self": key + "/seeds", "list": key},
"size": len(seeds),
"entries": seeds,
}
class list_seeds(delegate.page):
path = r"((?:/people/[^/]+)?/lists/OL\d+L)/seeds"
encoding = "json"
content_type = "application/json"
def GET(self, key):
lst = get_list_seeds(key)
if not lst:
raise web.notfound()
return delegate.RawText(
formats.dump(lst, self.encoding), content_type=self.content_type
)
def POST(self, key):
site = web.ctx.site
lst = cast(List | None, site.get(key))
if not lst:
raise web.notfound()
if not site.can_write(key):
raise self.forbidden()
data = formats.load(web.data(), self.encoding)
data.setdefault("add", [])
data.setdefault("remove", [])
# support /subjects/foo and /books/OL1M along with subject:foo and {"key": "/books/OL1M"}.
for seed in lists_json.process_seeds(data["add"]):
lst.add_seed(seed)
for seed in lists_json.process_seeds(data["remove"]):
lst.remove_seed(seed)
seeds = []
for seed in data["add"] + data["remove"]:
if isinstance(seed, dict):
seeds.append(seed['key'])
else:
seeds.append(seed)
changeset_data = {
"list": {"key": key},
"seeds": seeds,
"add": data["add"],
"remove": data["remove"],
}
d = lst._save(comment="Updated list.", action="lists", data=changeset_data)
web.header("Content-Type", self.content_type)
return delegate.RawText(formats.dump(d, self.encoding))
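# Example body for POST .../lists/OL1L/seeds.json handled above (keys are
# hypothetical): {"add": [{"key": "/books/OL1M"}, "subject:history"],
# "remove": ["/works/OL2W"]}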
class list_seed_yaml(list_seeds):
encoding = "yml"
content_type = 'text/yaml; charset="utf-8"'
def get_list_editions(key, offset=0, limit=50, api=False):
if lst := cast(List | None, web.ctx.site.get(key)):
offset = offset or 0 # enforce sane int defaults
all_editions = list(lst.get_editions())
editions = all_editions[offset : offset + limit]
if api:
return make_collection(
size=len(all_editions),
entries=[e.dict() for e in editions],
limit=limit,
offset=offset,
key=key,
)
return editions
class list_editions_json(delegate.page):
path = r"((?:/people/[^/]+)?/lists/OL\d+L)/editions"
encoding = "json"
content_type = "application/json"
def GET(self, key):
i = web.input(limit=50, offset=0)
limit = h.safeint(i.limit, 50)
offset = h.safeint(i.offset, 0)
editions = get_list_editions(key, offset=offset, limit=limit, api=True)
if not editions:
raise web.notfound()
return delegate.RawText(
formats.dump(editions, self.encoding), content_type=self.content_type
)
class list_editions_yaml(list_editions_json):
encoding = "yml"
content_type = 'text/yaml; charset="utf-8"'
def make_collection(size, entries, limit, offset, key=None):
d = {
"size": size,
"start": offset,
"end": offset + limit,
"entries": entries,
"links": {
"self": web.changequery(),
},
}
if offset + len(entries) < size:
d['links']['next'] = web.changequery(limit=limit, offset=offset + limit)
if offset:
d['links']['prev'] = web.changequery(limit=limit, offset=max(0, offset - limit))
if key:
d['links']['list'] = key
return d
class list_subjects_json(delegate.page):
path = r"((?:/people/[^/]+)?/lists/OL\d+L)/subjects"
encoding = "json"
content_type = "application/json"
def GET(self, key):
lst = cast(List | None, web.ctx.site.get(key))
if not lst:
raise web.notfound()
i = web.input(limit=20)
limit = h.safeint(i.limit, 20)
data = self.get_subjects(lst, limit=limit)
data['links'] = {"self": key + "/subjects", "list": key}
text = formats.dump(data, self.encoding)
return delegate.RawText(text, content_type=self.content_type)
def get_subjects(self, lst, limit):
data = lst.get_subjects(limit=limit)
for key, subjects_ in data.items():
data[key] = [self._process_subject(s) for s in subjects_]
return dict(data)
def _process_subject(self, s):
key = s['key']
if key.startswith("subject:"):
key = "/subjects/" + web.lstrips(key, "subject:")
else:
key = "/subjects/" + key
return {"name": s['name'], "count": s['count'], "url": key}
class list_subjects_yaml(list_subjects_json):
encoding = "yml"
content_type = 'text/yaml; charset="utf-8"'
class lists_embed(delegate.page):
path = r"((?:/people/[^/]+)?/lists/OL\d+L)/embed"
def GET(self, key):
doc = web.ctx.site.get(key)
if doc is None or doc.type.key != '/type/list':
raise web.notfound()
return render_template("type/list/embed", doc)
class export(delegate.page):
path = r"((?:/people/[^/]+)?/lists/OL\d+L)/export"
def GET(self, key):
lst = cast(List | None, web.ctx.site.get(key))
if not lst:
raise web.notfound()
format = web.input(format="html").format
if format == "html":
data = self.get_exports(lst)
html = render_template(
"lists/export_as_html",
lst,
data["editions"],
data["works"],
data["authors"],
)
return delegate.RawText(html)
elif format == "bibtex":
data = self.get_exports(lst)
html = render_template(
"lists/export_as_bibtex",
lst,
data["editions"],
data["works"],
data["authors"],
)
return delegate.RawText(html)
elif format == "json":
data = self.get_exports(lst, raw=True)
web.header("Content-Type", "application/json")
return delegate.RawText(json.dumps(data))
elif format == "yaml":
data = self.get_exports(lst, raw=True)
web.header("Content-Type", "application/yaml")
return delegate.RawText(formats.dump_yaml(data))
else:
raise web.notfound()
def get_exports(self, lst: List, raw: bool = False) -> dict[str, list]:
export_data = lst.get_export_list()
if "editions" in export_data:
export_data["editions"] = sorted(
export_data["editions"],
key=lambda doc: doc['last_modified']['value'],
reverse=True,
)
if "works" in export_data:
export_data["works"] = sorted(
export_data["works"],
key=lambda doc: doc['last_modified']['value'],
reverse=True,
)
if "authors" in export_data:
export_data["authors"] = sorted(
export_data["authors"],
key=lambda doc: doc['last_modified']['value'],
reverse=True,
)
if not raw:
if "editions" in export_data:
export_data["editions"] = [
self.make_doc(e) for e in export_data["editions"]
]
lst.preload_authors(export_data["editions"])
else:
export_data["editions"] = []
if "works" in export_data:
export_data["works"] = [self.make_doc(e) for e in export_data["works"]]
lst.preload_authors(export_data["works"])
else:
export_data["works"] = []
if "authors" in export_data:
export_data["authors"] = [
self.make_doc(e) for e in export_data["authors"]
]
lst.preload_authors(export_data["authors"])
else:
export_data["authors"] = []
return export_data
def make_doc(self, rawdata):
data = web.ctx.site._process_dict(common.parse_query(rawdata))
doc = client.create_thing(web.ctx.site, data['key'], data)
return doc
class feeds(delegate.page):
path = r"((?:/people/[^/]+)?/lists/OL\d+L)/feeds/(updates).(atom)"
def GET(self, key, name, fmt):
lst = cast(List | None, web.ctx.site.get(key))
if lst is None:
raise web.notfound()
text = getattr(self, 'GET_' + name + '_' + fmt)(lst)
return delegate.RawText(text)
def GET_updates_atom(self, lst):
web.header("Content-Type", 'application/atom+xml; charset="utf-8"')
return render_template("lists/feed_updates.xml", lst)
def setup():
pass
def _get_recently_modified_lists(limit, offset=0):
"""Returns the most recently modified lists as list of dictionaries.
This function is memoized for better performance.
"""
    # this function is memoized with background=True option.
# web.ctx must be initialized as it won't be available to the background thread.
if 'env' not in web.ctx:
delegate.fakeload()
keys = web.ctx.site.things(
{
"type": "/type/list",
"sort": "-last_modified",
"limit": limit,
"offset": offset,
}
)
lists = web.ctx.site.get_many(keys)
return [lst.dict() for lst in lists]
def get_cached_recently_modified_lists(limit, offset=0):
f = cache.memcache_memoize(
_get_recently_modified_lists,
key_prefix="lists.get_recently_modified_lists",
timeout=0,
) # dateutil.HALF_HOUR_SECS)
return f(limit, offset=offset)
def _preload_lists(lists):
"""Preloads all referenced documents for each list.
    List can be either a dict or a model object.
"""
keys = set()
for xlist in lists:
if not isinstance(xlist, dict):
xlist = xlist.dict()
owner = xlist['key'].rsplit("/lists/", 1)[0]
if owner:
keys.add(owner)
for seed in xlist.get("seeds", []):
if isinstance(seed, dict) and "key" in seed:
keys.add(seed['key'])
web.ctx.site.get_many(list(keys))
def _get_active_lists_in_random(limit=20, preload=True):
if 'env' not in web.ctx:
delegate.fakeload()
lists = []
offset = 0
while len(lists) < limit:
result = get_cached_recently_modified_lists(limit * 5, offset=offset)
if not result:
break
offset += len(result)
        # ignore lists with 4 or fewer seeds
lists += [xlist for xlist in result if len(xlist.get("seeds", [])) > 4]
if len(lists) > limit:
lists = random.sample(lists, limit)
if preload:
_preload_lists(lists)
return lists
@public
def get_active_lists_in_random(limit=20, preload=True):
f = cache.memcache_memoize(
_get_active_lists_in_random,
key_prefix="lists.get_active_lists_in_random",
timeout=0,
)
lists = f(limit=limit, preload=preload)
# convert rawdata into models.
return [web.ctx.site.new(xlist['key'], xlist) for xlist in lists]
class lists_preview(delegate.page):
path = r"((?:/people/[^/]+)?/lists/OL\d+L)/preview.png"
def GET(self, lst_key):
image_bytes = render_list_preview_image(lst_key)
web.header("Content-Type", "image/png")
return delegate.RawText(image_bytes)
| 29,612 | Python | .py | 764 | 29.141361 | 98 | 0.569026 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
385 | design.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/design.py | import web
import logging
from infogami.utils import delegate
from infogami.utils.view import render_template, public
logger = logging.getLogger("openlibrary.design")
class home(delegate.page):
path = "/developers/design"
def GET(self):
return render_template("design")
def setup():
pass
| 315 | Python | .py | 11 | 25.181818 | 55 | 0.767677 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
386 | borrow_home.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/borrow_home.py | """
Controllers for /borrow pages.
These endpoints are largely deprecated, and only maintained for
backwards compatibility.
"""
import web
import datetime
import json
import eventer
from infogami.utils import delegate
from infogami.utils.view import render_template # used for its side effects
from openlibrary.core import statsdb
class borrow(delegate.page):
path = "/borrow"
def GET(self):
raise web.seeother('/subjects/in_library#ebooks=true')
class borrow_json(delegate.page):
path = "/borrow"
encoding = "json"
def GET(self):
raise web.seeother('/subjects/in_library.json' + web.ctx.query)
class read(delegate.page):
path = "/read"
def GET(self):
web.seeother('/subjects/accessible_book#ebooks=true')
class read_json(delegate.page):
path = "/read"
encoding = "json"
def GET(self):
web.seeother('/subjects/accessible_book.json' + web.ctx.query)
def on_loan_created_statsdb(loan):
"""Adds the loan info to the stats database."""
key = _get_loan_key(loan)
t_start = datetime.datetime.utcfromtimestamp(loan['loaned_at'])
d = {
"book": loan['book'],
"identifier": loan['ocaid'],
"resource_type": loan['resource_type'],
"t_start": t_start.isoformat(),
"status": "active",
}
d['library'] = "/libraries/internet_archive"
d['geoip_country'] = None # we removed geoip
statsdb.add_entry(key, d)
def on_loan_completed_statsdb(loan):
"""Marks the loan as completed in the stats database."""
key = _get_loan_key(loan)
t_start = datetime.datetime.utcfromtimestamp(loan['loaned_at'])
t_end = datetime.datetime.utcfromtimestamp(loan['returned_at'])
d = {
"book": loan['book'],
"identifier": loan['ocaid'],
"resource_type": loan['resource_type'],
"t_start": t_start.isoformat(),
"t_end": t_end.isoformat(),
"status": "completed",
}
if old := statsdb.get_entry(key):
olddata = json.loads(old.json)
d = dict(olddata, **d)
statsdb.update_entry(key, d)
def _get_loan_key(loan):
# The loan key is now changed from uuid to fixed key.
# Using _key as key for loan stats will result in overwriting previous loans.
# Using the unique uuid to create the loan key and falling back to _key
# when uuid is not available.
return "loans/" + loan.get("uuid") or loan["_key"]
def setup():
eventer.bind("loan-created", on_loan_created_statsdb)
eventer.bind("loan-completed", on_loan_completed_statsdb)
| 2,567 | Python | .py | 70 | 31.471429 | 81 | 0.669769 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
387 | support.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/support.py | import hashlib
import web
import logging
from infogami import config
from infogami.utils import delegate
from infogami.utils.view import render_template
from openlibrary import accounts
from openlibrary.core import stats
from openlibrary.core.cache import get_memcache
from openlibrary.plugins.upstream.addbook import get_recaptcha
from openlibrary.utils.dateutil import MINUTE_SECS
logger = logging.getLogger("openlibrary")
class contact(delegate.page):
def GET(self):
i = web.input(path=None)
user = accounts.get_current_user()
email = user and user.email
hashed_ip = hashlib.md5(web.ctx.ip.encode('utf-8')).hexdigest()
has_emailed_recently = get_memcache().get('contact-POST-%s' % hashed_ip)
recaptcha = has_emailed_recently and get_recaptcha()
return render_template("support", email=email, url=i.path, recaptcha=recaptcha)
def POST(self):
form = web.input()
patron_name = form.get("name", "")
email = form.get("email", "")
topic = form.get("topic", "")
subject_line = form.get('subject', '')
description = form.get("question", "")
url = form.get("url", "")
user = accounts.get_current_user()
useragent = web.ctx.env.get("HTTP_USER_AGENT", "")
if not all([email, description]):
return ""
hashed_ip = hashlib.md5(web.ctx.ip.encode('utf-8')).hexdigest()
has_emailed_recently = get_memcache().get('contact-POST-%s' % hashed_ip)
if has_emailed_recently:
recap = get_recaptcha()
if recap and not recap.validate():
return render_template(
"message.html",
'Recaptcha solution was incorrect',
(
'Please <a href="javascript:history.back()">go back</a> and try '
'again.'
),
)
default_assignees = config.get("support_default_assignees", {})
if (topic_key := str(topic.replace(" ", "_").lower())) in default_assignees:
assignee = default_assignees.get(topic_key)
else:
assignee = default_assignees.get("default", "[email protected]")
stats.increment("ol.support.all")
subject = "Support case *%s*" % self.prepare_subject_line(subject_line)
url = web.ctx.home + url
displayname = user and user.get_name() or ""
username = user and user.get_username() or ""
message = SUPPORT_EMAIL_TEMPLATE % locals()
sendmail(email, assignee, subject, message)
get_memcache().set(
'contact-POST-%s' % hashed_ip, "true", expires=15 * MINUTE_SECS
)
return render_template("email/case_created", assignee)
def prepare_subject_line(self, subject, max_length=60):
if not subject:
return '[no subject]'
if len(subject) <= max_length:
return subject
return subject[:max_length]
def sendmail(from_address, to_address, subject, message):
if config.get('dummy_sendmail'):
msg = (
f'To: {to_address}\n'
f'From:{from_address}\n'
f'Subject:{subject}\n'
f'\n{web.safestr(message)}'
)
logger.info("sending email:\n%s", msg)
else:
web.sendmail(from_address, to_address, subject, message)
SUPPORT_EMAIL_TEMPLATE = """
Description:\n
%(description)s
A new support case has been filed by %(displayname)s <%(email)s>.
Subject: %(subject_line)s
URL: %(url)s
User-Agent: %(useragent)s
OL-username: %(username)s
Patron-name: %(patron_name)s
"""
def setup():
pass
| 3,694 | Python | .py | 91 | 32.197802 | 89 | 0.617105 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
388 | stats.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/stats.py | """Hooks for collecting performance stats.
"""
import logging
import os
import re
import sys
import time
import traceback
from types import TracebackType
from typing import Any
from infogami.utils.app import find_page, find_view, find_mode
from openlibrary.core import stats as graphite_stats
import web
from infogami import config
from infogami.utils import delegate, stats
import openlibrary.plugins.openlibrary.filters as stats_filters
logger = logging.getLogger("openlibrary.stats")
TIME_BUCKETS = [10, 100, 1000, 5000, 10000, 20000] # in ms
filters: dict[str, Any] = {}
def evaluate_and_store_stat(name, stat, summary):
"""Evaluates whether the given statistic is to be recorded and if
so, records it."""
global filters
if not summary:
return
try:
f = filters[stat.filter]
except KeyError:
logger.warning("Filter %s not registered", stat.filter)
return
try:
if f(**stat):
if "time" in stat:
graphite_stats.put(name, summary[stat.time]["time"] * 100)
elif "count" in stat:
# print "Storing count for key %s"%stat.count
# XXX-Anand: where is the code to update counts?
pass
else:
logger.warning("No storage item specified for stat %s", name)
except Exception as k:
logger.warning("Error while storing stats (%s). Complete traceback follows" % k)
logger.warning(traceback.format_exc())
def update_all_stats(stats_summary):
"""
Run through the filters and record requested items in graphite
"""
for stat in config.get("stats", []):
evaluate_and_store_stat(stat, config.stats.get(stat), stats_summary)
def stats_hook():
"""web.py unload hook to add X-OL-Stats header.
Also, send stats to graphite using statsd
"""
stats_summary = stats.stats_summary()
update_all_stats(stats_summary)
try:
if "stats-header" in web.ctx.features:
web.header("X-OL-Stats", format_stats(stats_summary))
except Exception as e:
# don't let errors in stats collection break the app.
print(str(e), file=web.debug)
# This name is misleading. It gets incremented for more than just pages.
# E.g. *.json requests (even ajax), image requests. Although I can't
# see any *.js requests? So not sure exactly when we're called here.
graphite_stats.increment('ol.pageviews')
memcache_hits = 0
memcache_misses = 0
for s in web.ctx.get("stats", []):
if s.name == 'memcache.get':
if s.data['hit']:
memcache_hits += 1
else:
memcache_misses += 1
if memcache_hits:
graphite_stats.increment('ol.memcache.hits', memcache_hits, rate=0.025)
if memcache_misses:
graphite_stats.increment('ol.memcache.misses', memcache_misses, rate=0.025)
for name, value in stats_summary.items():
name = name.replace(".", "_")
time = value.get("time", 0.0) * 1000
key = 'ol.' + name
graphite_stats.put(key, time)
def format_stats(stats):
s = " ".join("%s %d %0.03f" % entry for entry in process_stats(stats))
return '"%s"' % s
labels = {
"total": "TT",
"memcache": "MC",
"infobase": "IB",
"solr": "SR",
"archive.org": "IA",
"couchdb": "CD",
}
def process_stats(stats):
"""Process stats and returns a list of (label, count, time) for each entry.
Entries like "memcache.get" and "memcache.set" will be collapsed into "memcache".
"""
d = {}
for name, value in stats.items():
name = name.split(".")[0]
label = labels.get(name, "OT")
count = value.get("count", 0)
time = value.get("time", 0.0)
xcount, xtime = d.get(label, [0, 0])
d[label] = xcount + count, xtime + time
return [(label, count, time) for label, (count, time) in sorted(d.items())]
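# The resulting X-OL-Stats header looks roughly like
# "IB 3 0.045 MC 5 0.012 TT 1 0.123" -- label, call count and cumulative time
# per backend, sorted by label (values above are made up).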
def register_filter(name, function):
global filters
filters[name] = function
def _encode_key_part(key_part: str) -> str:
return key_part.replace('.', '_')
def _get_path_page_name() -> str:
pageClass, _ = find_page()
if pageClass is None: # Check for view handlers
pageClass, _ = find_view()
if pageClass is None: # Check for mode handlers
pageClass, _ = find_mode()
result = pageClass.__name__
if hasattr(pageClass, 'encoding') and not result.endswith(pageClass.encoding):
result += '_' + pageClass.encoding
return result
def _get_top_level_path_for_metric(full_path: str) -> str:
"""
Normalize + shorten the string since it could be user-entered
:param str full_path:
"""
path_parts = full_path.strip('/').split('/')
path = path_parts[0] or 'home'
return path.replace('.', '_')[:50]
class GraphiteRequestStats:
def __init__(self):
self.start: float | None = None
self.end: float | None = None
self.state = None # oneof 'started', 'completed'
self.method = 'unknown'
self.path_page_name = 'unknown'
self.path_level_one = 'unknown'
self.response_code = 'unknown'
self.time_bucket = 'unknown'
self.user = 'not_logged_in'
self.duration = None
def request_loaded(self):
self.start = time.time()
self.state = 'started'
self._compute_fields()
def request_unloaded(self):
self.end = time.time()
self.state = 'completed'
self._compute_fields()
def _compute_fields(self):
if hasattr(web.ctx, 'method') and web.ctx.method:
self.method = web.ctx.method
if hasattr(web.ctx, 'path') and web.ctx.path:
self.path_page_name = _get_path_page_name()
# This can be entered by a user to be anything! We record 404s.
self.path_level_one = _get_top_level_path_for_metric(web.ctx.path)
if hasattr(web.ctx, 'status'):
self.response_code = web.ctx.status.split(' ')[0]
if self.end is not None:
self.duration = (self.end - self.start) * 1000
self.time_bucket = 'LONG'
for upper in TIME_BUCKETS:
if self.duration < upper:
self.time_bucket = '%dms' % upper
break
if stats_filters.loggedin():
self.user = 'logged_in'
def to_metric(self):
return '.'.join(
[
'ol',
'requests',
self.state,
self.method,
self.response_code,
self.user,
self.path_level_one,
'class_' + self.path_page_name,
self.time_bucket,
'count',
]
)
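# A metric emitted by to_metric() looks roughly like (all parts illustrative):
#   ol.requests.completed.GET.200.logged_in.books.class_work_editions_json.100ms.count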
def page_load_hook():
web.ctx.graphiteRequestStats = GraphiteRequestStats()
web.ctx.graphiteRequestStats.request_loaded()
graphite_stats.increment(web.ctx.graphiteRequestStats.to_metric())
def page_unload_hook():
web.ctx.graphiteRequestStats.request_unloaded()
graphite_stats.increment(web.ctx.graphiteRequestStats.to_metric())
def increment_error_count(key: str) -> None:
"""
    :param str key: e.g. ol.exceptions or ol.internal-errors-segmented
"""
top_url_path = 'none'
page_class = 'none'
if web.ctx and hasattr(web.ctx, 'path') and web.ctx.path:
top_url_path = _get_top_level_path_for_metric(web.ctx.path)
page_class = _get_path_page_name()
# Code that follows relies on these not being falsey, so alert ASAP if they are.
exception_type, exception_value, tback = sys.exc_info()
assert exception_type
assert exception_value
exception_type_name = exception_type.__name__
# Log exception file
top_path_in_tback = find_topmost_useful_file(exception_value, tback)
path = os.path.split(top_path_in_tback)
# log just filename, unless it's code.py (cause that's useless!)
ol_file = path[1]
if path[1] in ('code.py', 'index.html', 'edit.html', 'view.html'):
ol_file = os.path.split(path[0])[1] + '_' + _encode_key_part(path[1])
metric_parts = [
top_url_path,
'class_' + page_class,
ol_file,
exception_type_name,
'count',
]
metric = '.'.join([_encode_key_part(p) for p in metric_parts])
graphite_stats.increment(key + '.' + metric)
TEMPLATE_SYNTAX_ERROR_RE = re.compile(r"File '([^']+?)'")
def find_topmost_useful_file(
exception: BaseException, tback: TracebackType | None
) -> str:
"""
Find the topmost path in the traceback stack that's useful to report.
    :param BaseException exception: error from e.g. sys.exc_info()
    :param TracebackType tback: traceback from e.g. sys.exc_info()
:return: full path
"""
file_path = 'none'
while tback is not None:
cur_file = tback.tb_frame.f_code.co_filename
if '/openlibrary' in cur_file:
file_path = cur_file
tback = tback.tb_next
if file_path.endswith('template.py') and hasattr(exception, 'args'):
m = TEMPLATE_SYNTAX_ERROR_RE.search(exception.args[1])
if m:
file_path = m.group(1)
return file_path
def setup():
"""
This function is called from the main application startup
routine to set things up.
"""
# Initialise the stats filters
register_filter("all", stats_filters.all)
register_filter("url", stats_filters.url)
register_filter("loggedin", stats_filters.loggedin)
register_filter("not_loggedin", stats_filters.not_loggedin)
# Disabled temporarily (2020-04-07); they (the first two more specifically) looked
# like they were causing too much load on graphite servers.
# delegate.app.add_processor(web.loadhook(page_load_hook))
# delegate.app.add_processor(web.unloadhook(page_unload_hook))
# delegate.add_exception_hook(lambda: increment_error_count('ol.exceptions'))
| 9,963 | Python | .py | 255 | 31.843137 | 88 | 0.628216 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
389 | connection.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/connection.py | """Open Library extension to provide a new kind of client connection with caching support.
"""
from infogami import config
from infogami.infobase import client, lru
from infogami.utils import stats
import web
import json
import datetime
from openlibrary.core import ia
import logging
logger = logging.getLogger("openlibrary")
class ConnectionMiddleware:
response_type = "json"
def __init__(self, conn):
self.conn = conn
def get_auth_token(self):
return self.conn.get_auth_token()
def set_auth_token(self, token):
self.conn.set_auth_token(token)
def request(self, sitename, path, method='GET', data=None):
if path == '/get':
return self.get(sitename, data)
elif path == '/get_many':
return self.get_many(sitename, data)
elif path == '/versions':
return self.versions(sitename, data)
elif path == '/_recentchanges':
return self.recentchanges(sitename, data)
elif path == '/things':
return self.things(sitename, data)
elif path == '/write':
return self.write(sitename, data)
elif path.startswith('/save/'):
return self.save(sitename, path, data)
elif path == '/save_many':
return self.save_many(sitename, data)
elif path.startswith("/_store/") and not path.startswith("/_store/_"):
if method == 'GET':
return self.store_get(sitename, path)
elif method == 'PUT':
return self.store_put(sitename, path, data)
elif method == 'DELETE':
return self.store_delete(sitename, path, data)
elif path == "/_store/_save_many" and method == 'POST':
# save multiple things at once
return self.store_put_many(sitename, data)
elif path.startswith("/account"):
return self.account_request(sitename, path, method, data)
return self.conn.request(sitename, path, method, data)
def account_request(self, sitename, path, method="GET", data=None):
return self.conn.request(sitename, path, method, data)
def get(self, sitename, data):
return self.conn.request(sitename, '/get', 'GET', data)
def get_many(self, sitename, data):
return self.conn.request(sitename, '/get_many', 'GET', data)
def versions(self, sitename, data):
return self.conn.request(sitename, '/versions', 'GET', data)
def recentchanges(self, sitename, data):
return self.conn.request(sitename, '/_recentchanges', 'GET', data)
def things(self, sitename, data):
return self.conn.request(sitename, '/things', 'GET', data)
def write(self, sitename, data):
return self.conn.request(sitename, '/write', 'POST', data)
def save(self, sitename, path, data):
return self.conn.request(sitename, path, 'POST', data)
def save_many(self, sitename, data):
# Work-around for https://github.com/internetarchive/openlibrary/issues/4285
# Infogami seems to read encoded bytes as a string with a byte literal inside
# of it, which is invalid JSON and also can't be decode()'d.
if isinstance(data.get('query'), bytes):
data['query'] = data['query'].decode()
return self.conn.request(sitename, '/save_many', 'POST', data)
def store_get(self, sitename, path):
return self.conn.request(sitename, path, 'GET')
def store_put(self, sitename, path, data):
return self.conn.request(sitename, path, 'PUT', data)
def store_put_many(self, sitename, data):
return self.conn.request(sitename, "/_store/_save_many", 'POST', data)
def store_delete(self, sitename, path, data):
return self.conn.request(sitename, path, 'DELETE', data)
_memcache = None
class IAMiddleware(ConnectionMiddleware):
def _get_itemid(self, key):
"""Returns internet archive item id from the key.
If the key is of the form "/books/ia:.*", the part after "/books/ia:"
is returned, otherwise None is returned.
"""
if key and key.startswith("/books/ia:") and key.count("/") == 2:
return key[len("/books/ia:") :]
def get(self, sitename, data):
key = data.get('key')
if itemid := self._get_itemid(key):
edition_key = self._find_edition(sitename, itemid)
if edition_key:
                # Delete the store entry, indicating that this is no longer an item to be imported.
self._ensure_no_store_entry(sitename, itemid)
return self._make_redirect(itemid, edition_key)
else:
metadata = ia.get_metadata(itemid)
doc = ia.edition_from_item_metadata(itemid, metadata)
if doc is None:
# Delete store entry, if it exists.
# When an item is darked on archive.org, it should be
# automatically removed from OL. Removing entry from store
# will trigger the solr-updater to delete it from solr as well.
self._ensure_no_store_entry(sitename, itemid)
raise client.ClientException(
"404 Not Found",
"notfound",
json.dumps({"key": "/books/ia:" + itemid, "error": "notfound"}),
)
storedoc = self._ensure_store_entry(sitename, itemid)
                # Hack to add additional subjects to /books/ia: pages.
                # Adding subjects to the store doc adds those subjects to the book;
                # these subjects are used when indexing the books in solr.
if storedoc.get("subjects"):
doc.setdefault("subjects", []).extend(storedoc['subjects'])
return json.dumps(doc)
else:
return ConnectionMiddleware.get(self, sitename, data)
def _find_edition(self, sitename, itemid):
# match ocaid
q = {"type": "/type/edition", "ocaid": itemid}
keys_json = ConnectionMiddleware.things(
self, sitename, {"query": json.dumps(q)}
)
keys = json.loads(keys_json)
if keys:
return keys[0]
# Match source_records
        # When there are multiple scans for the same edition, only source_records is updated.
q = {"type": "/type/edition", "source_records": "ia:" + itemid}
keys_json = ConnectionMiddleware.things(
self, sitename, {"query": json.dumps(q)}
)
keys = json.loads(keys_json)
if keys:
return keys[0]
def _make_redirect(self, itemid, location):
timestamp = {"type": "/type/datetime", "value": "2010-01-01T00:00:00"}
d = {
"key": "/books/ia:" + itemid,
"type": {"key": "/type/redirect"},
"location": location,
"revision": 1,
"created": timestamp,
"last_modified": timestamp,
}
return json.dumps(d)
def _ensure_no_store_entry(self, sitename, identifier):
key = "ia-scan/" + identifier
store_key = "/_store/" + key
        # Delete the store entry, if one exists.
        try:
            self.store_get(sitename, store_key)
            self.store_delete(sitename, store_key, {"_rev": None})
        except client.ClientException:
            # nothing to do if the entry doesn't exist
            pass
def _ensure_store_entry(self, sitename, identifier):
key = "ia-scan/" + identifier
store_key = "/_store/" + key
# If the entry is not found, create an entry
try:
jsontext = self.store_get(sitename, store_key)
return json.loads(jsontext)
except client.ClientException as e:
logger.error("error", exc_info=True)
if e.status.startswith("404"):
doc = {
"_key": key,
"type": "ia-scan",
"identifier": identifier,
"created": datetime.datetime.utcnow().isoformat(),
}
self.store_put(sitename, store_key, json.dumps(doc))
return doc
        except Exception:
            logger.error("error", exc_info=True)
def versions(self, sitename, data):
        # handle queries of the form {"query": '{"key": "/books/ia:foo00bar", ...}'}
if 'query' in data:
q = json.loads(data['query'])
itemid = self._get_itemid(q.get('key'))
if itemid:
key = q['key']
return json.dumps([self.dummy_edit(key)])
# if not just go the default way
return ConnectionMiddleware.versions(self, sitename, data)
def recentchanges(self, sitename, data):
        # handle queries of the form {"query": '{"key": "/books/ia:foo00bar", ...}'}
if 'query' in data:
q = json.loads(data['query'])
itemid = self._get_itemid(q.get('key'))
if itemid:
key = q['key']
return json.dumps([self.dummy_recentchange(key)])
# if not just go the default way
return ConnectionMiddleware.recentchanges(self, sitename, data)
def dummy_edit(self, key):
return {
"comment": "",
"author": None,
"ip": "127.0.0.1",
"created": "2012-01-01T00:00:00",
"bot": False,
"key": key,
"action": "edit-book",
"changes": json.dumps({"key": key, "revision": 1}),
"revision": 1,
"kind": "update",
"id": "0",
"timestamp": "2010-01-01T00:00:00",
"data": {},
}
def dummy_recentchange(self, key):
return {
"comment": "",
"author": None,
"ip": "127.0.0.1",
"timestamp": "2012-01-01T00:00:00",
"data": {},
"changes": [{"key": key, "revision": 1}],
"kind": "update",
"id": "0",
}
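# Illustrative sketch (hedged): how a "/books/ia:" key flows through IAMiddleware.
# The item id and the wrapped connection are hypothetical; this helper is not
# called anywhere, it only documents the two possible response shapes.
def _ia_middleware_example(conn):
    mw = IAMiddleware(conn)
    doc = json.loads(mw.get("openlibrary.org", {"key": "/books/ia:foo00bar"}))
    # Either an edition synthesized from archive.org metadata:
    #   {"key": "/books/ia:foo00bar", "type": {"key": "/type/edition"}, ...}
    # or, when a matching edition already exists, the redirect stub built by
    # _make_redirect():
    #   {"key": "/books/ia:foo00bar", "type": {"key": "/type/redirect"},
    #    "location": "/books/OL1M", ...}
    return doc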
class MemcacheMiddleware(ConnectionMiddleware):
def __init__(self, conn, memcache_servers):
ConnectionMiddleware.__init__(self, conn)
self.memcache = self.get_memcache(memcache_servers)
def get_memcache(self, memcache_servers):
global _memcache
if _memcache is None:
from openlibrary.utils import olmemcache
_memcache = olmemcache.Client(memcache_servers)
return _memcache
def get(self, sitename, data):
key = data.get('key')
revision = data.get('revision')
if key.startswith("_"):
            # Don't cache keys that start with _ to avoid considering _store/foo as things.
            # The _store prefix is used for storing infobase store docs.
return ConnectionMiddleware.get(self, sitename, data)
if revision is None:
stats.begin("memcache.get", key=key)
result = self.memcache.get(key)
stats.end(hit=bool(result))
return result or ConnectionMiddleware.get(self, sitename, data)
else:
# cache get requests with revisions for a minute
mc_key = "%s@%d" % (key, revision)
result = self.mc_get(mc_key)
if result is None:
result = ConnectionMiddleware.get(self, sitename, data)
if result:
self.mc_set(mc_key, result, time=60) # cache for a minute
return result
def get_many(self, sitename, data):
keys = json.loads(data['keys'])
stats.begin("memcache.get_multi")
result = self.memcache.get_multi(keys)
stats.end(found=len(result))
keys2 = [k for k in keys if k not in result]
if keys2:
data['keys'] = json.dumps(keys2)
result2 = ConnectionMiddleware.get_many(self, sitename, data)
result2 = json.loads(result2)
# Memcache expects dict with (key, json) mapping and we have (key, doc) mapping.
# Converting the docs to json before passing to memcache.
self.mc_set_multi({key: json.dumps(doc) for key, doc in result2.items()})
result.update(result2)
# @@ too many JSON conversions
for k in result:
if isinstance(result[k], str):
result[k] = json.loads(result[k])
return json.dumps(result)
def mc_get(self, key):
stats.begin("memcache.get", key=key)
result = self.memcache.get(key)
stats.end(hit=bool(result))
return result
def mc_delete(self, key):
stats.begin("memcache.delete", key=key)
self.memcache.delete(key)
stats.end()
def mc_add(self, key, value, time=0):
stats.begin("memcache.add", key=key, time=time)
self.memcache.add(key, value)
stats.end()
def mc_set(self, key, value, time=0):
stats.begin("memcache.set", key=key)
self.memcache.add(key, value, time=time)
stats.end()
def mc_set_multi(self, mapping):
stats.begin("memcache.set_multi")
self.memcache.set_multi(mapping)
stats.end()
def mc_delete_multi(self, keys):
stats.begin("memcache.delete_multi")
self.memcache.delete_multi(keys)
stats.end()
def store_get(self, sitename, path):
# path will be "/_store/$key"
result = self.mc_get(path)
if result is None:
result = ConnectionMiddleware.store_get(self, sitename, path)
if result:
self.mc_set(path, result, 3600) # cache it only for one hour
return result
def store_put(self, sitename, path, data):
# path will be "/_store/$key"
# deleting before put to make sure the entry is deleted even if the
# process dies immediately after put.
# Still there is very very small chance of invalid cache if someone else
# updates memcache after stmt-1 and this process dies after stmt-2.
self.mc_delete(path)
result = ConnectionMiddleware.store_put(self, sitename, path, data)
self.mc_delete(path)
return result
def store_put_many(self, sitename, datajson):
data = json.loads(datajson)
mc_keys = ["/_store/" + doc['_key'] for doc in data]
self.mc_delete_multi(mc_keys)
result = ConnectionMiddleware.store_put_many(self, sitename, datajson)
self.mc_delete_multi(mc_keys)
return result
def store_delete(self, sitename, key, data):
# see comment in store_put
self.mc_delete(key)
result = ConnectionMiddleware.store_delete(self, sitename, key, data)
self.mc_delete(key)
return result
def account_request(self, sitename, path, method="GET", data=None):
# For post requests, remove the account entry from the cache.
if method == "POST" and isinstance(data, dict):
deletes = []
if 'username' in data:
deletes.append("/_store/account/" + data["username"])
# get the email from account doc and invalidate the email.
# required in cases of email change.
try:
docjson = self.store_get(
sitename, "/_store/account/" + data['username']
)
doc = json.loads(docjson)
deletes.append("/_store/account-email/" + doc["email"])
deletes.append("/_store/account-email/" + doc["email"].lower())
except client.ClientException:
# ignore
pass
if 'email' in data:
                # If an email is passed, that email doc is likely to change,
                # so remove it from the cache as well.
deletes.append("/_store/account-email/" + data["email"])
deletes.append("/_store/account-email/" + data["email"].lower())
self.mc_delete_multi(deletes)
result = ConnectionMiddleware.account_request(
self, sitename, path, method, data
)
self.mc_delete_multi(deletes)
else:
result = ConnectionMiddleware.account_request(
self, sitename, path, method, data
)
return result
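# Illustrative sketch (hedged) of the delete-write-delete invalidation used by
# store_put, store_put_many, store_delete and account_request above: deleting
# both before and after the write keeps the cache consistent even if the
# process dies half-way, at the cost of a small window in which a concurrent
# reader can re-cache a stale value. `cache` and `write_to_backend` are
# hypothetical stand-ins, not Open Library APIs.
def _invalidate_around_write(cache, write_to_backend, key, value):
    cache.delete(key)  # drop any cached copy first
    result = write_to_backend(key, value)
    cache.delete(key)  # drop again in case it was re-cached meanwhile
    return result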
class MigrationMiddleware(ConnectionMiddleware):
"""Temporary middleware to handle upstream to www migration."""
def _process_key(self, key):
mapping = (
"/l/",
"/languages/",
"/a/",
"/authors/",
"/b/",
"/books/",
"/user/",
"/people/",
)
if "/" in key and key.split("/")[1] in ['a', 'b', 'l', 'user']:
for old, new in web.group(mapping, 2):
if key.startswith(old):
return new + key[len(old) :]
return key
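    # Illustrative examples (hedged, for documentation only) of the rewriting
    # _process_key does above:
    #
    #   /b/OL1M      -> /books/OL1M
    #   /a/OL1A      -> /authors/OL1A
    #   /l/eng       -> /languages/eng
    #   /user/anand  -> /people/anand
    #   /books/OL1M  -> /books/OL1M   (already in the new style; unchanged)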
def exists(self, key):
try:
d = ConnectionMiddleware.get(self, "openlibrary.org", {"key": key})
return True
except client.ClientException as e:
return False
def _process(self, data):
if isinstance(data, list):
return [self._process(d) for d in data]
elif isinstance(data, dict):
if 'key' in data:
data['key'] = self._process_key(data['key'])
return {k: self._process(v) for k, v in data.items()}
else:
return data
def get(self, sitename, data):
if web.ctx.get('path') == "/api/get" and 'key' in data:
data['key'] = self._process_key(data['key'])
response = ConnectionMiddleware.get(self, sitename, data)
if response:
data = json.loads(response)
data = self._process(data)
data = data and self.fix_doc(data)
response = json.dumps(data)
return response
def fix_doc(self, doc):
type = doc.get("type", {}).get("key")
if type == "/type/work":
if doc.get("authors"):
                # Some records ended up with empty author entries because of an
                # error; this is a temporary hack to filter them out.
doc['authors'] = [
a for a in doc['authors'] if 'author' in a and 'key' in a['author']
]
elif type == "/type/edition" and 'title_prefix' in doc:
# get rid of title_prefix.
title = doc['title_prefix'].strip() + ' ' + doc.get('title', '')
doc['title'] = title.strip()
del doc['title_prefix']
return doc
    def fix_broken_redirect(self, key):
        """Some work/edition records reference redirected author records,
        which makes saving them fail.
        This is a hack to work around that issue.
        """
json_data = self.get("openlibrary.org", {"key": key})
        if json_data:
doc = json.loads(json_data)
if (
doc.get("type", {}).get("key") == "/type/redirect"
and doc.get('location') is not None
):
return doc['location']
return key
def get_many(self, sitename, data):
response = ConnectionMiddleware.get_many(self, sitename, data)
if response:
data = json.loads(response)
data = self._process(data)
data = {key: self.fix_doc(doc) for key, doc in data.items()}
response = json.dumps(data)
return response
class HybridConnection(client.Connection):
"""Infobase connection made of both local and remote connections.
The local connection is used for reads and the remote connection is used for writes.
    Some services in the OL infrastructure depend on the log written by the
    writer, so the remote connection, which takes care of writing that log, is
    used for writes. Using a local connection for reads improves performance by
    cutting out the HTTP overhead of remote connections.
"""
def __init__(self, reader, writer):
client.Connection.__init__(self)
self.reader = reader
self.writer = writer
def set_auth_token(self, token):
self.reader.set_auth_token(token)
self.writer.set_auth_token(token)
def get_auth_token(self):
return self.writer.get_auth_token()
def request(self, sitename, path, method="GET", data=None):
if method == "GET":
return self.reader.request(sitename, path, method, data=data)
else:
return self.writer.request(sitename, path, method, data=data)
@web.memoize
def _update_infobase_config():
    """Updates the infobase config the first time this function is called.
    Subsequent calls do nothing because the function is memoized.
"""
# update infobase configuration
from infogami.infobase import server
if not config.get("infobase"):
config.infobase = {}
# This sets web.config.db_parameters
server.update_config(config.infobase)
def create_local_connection():
_update_infobase_config()
return client.connect(type='local', **web.config.db_parameters)
def create_remote_connection():
return client.connect(type='remote', base_url=config.infobase_server)
def create_hybrid_connection():
local = create_local_connection()
remote = create_remote_connection()
return HybridConnection(local, remote)
def OLConnection():
"""Create a connection to Open Library infobase server."""
def create_connection():
if config.get("connection_type") == "hybrid":
return create_hybrid_connection()
elif config.get('infobase_server'):
return create_remote_connection()
elif config.get("infobase", {}).get('db_parameters'):
return create_local_connection()
else:
raise Exception("db_parameters are not specified in the configuration")
conn = create_connection()
if config.get('memcache_servers'):
conn = MemcacheMiddleware(conn, config.get('memcache_servers'))
if config.get('upstream_to_www_migration'):
conn = MigrationMiddleware(conn)
conn = IAMiddleware(conn)
return conn
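# Illustrative sketch (hedged): with memcache and the migration flag both
# configured, OLConnection() layers the middleware roughly like
#
#   IAMiddleware(
#       MigrationMiddleware(
#           MemcacheMiddleware(
#               <hybrid, remote or local connection>,
#               memcache_servers)))
#
# so a request is checked for "/books/ia:" handling first, then key migration,
# then the memcache layer, before reaching the real infobase connection.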
| 22,354 | Python | .py | 505 | 33.570297 | 100 | 0.584549 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
390 | schema.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/schema.py | """OpenLibrary schema."""
from openlibrary.core.schema import get_schema
if __name__ == "__main__":
print(get_schema().sql())
| 132 | Python | .py | 4 | 30.5 | 46 | 0.666667 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
391 | swagger.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/swagger.py | from infogami.utils import delegate
from infogami.utils.view import render_template
def setup():
pass
class swagger(delegate.page):
path = "/swagger/docs"
def GET(self):
return render_template("swagger/swaggerui.html")
| 244 | Python | .py | 8 | 26.375 | 56 | 0.748918 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
392 | dev_instance.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/dev_instance.py | """Special customizations for dev instance.
This module is imported only if dev_instance is set to True in openlibrary config.
"""
import web
import infogami
from infogami.utils import delegate
from openlibrary.core.task import oltask
def setup():
setup_solr_updater()
from openlibrary.catalog.utils import query
    # Monkey-patch query so the solr-updater works within the process instead of making HTTP requests.
query.query = ol_query
query.withKey = ol_get
infogami.config.middleware.append(CoverstoreMiddleware)
# Borrow code tries to find the loan-status by making a URL call
from openlibrary.plugins.upstream import borrow
borrow.get_loan_status = lambda resource_id: []
class CoverstoreMiddleware:
"""Middleware to delegate all /cover/* requests to coverstore.
This avoids starting a new service for coverstore.
Assumes that coverstore config is at conf/coverstore.yml
"""
def __init__(self, app):
self.app = app
from openlibrary.coverstore import code, server
server.load_config("conf/coverstore.yml")
self.coverstore_app = code.app.wsgifunc()
def __call__(self, environ, start_response):
root = "/covers"
if environ['PATH_INFO'].startswith(root):
environ['PATH_INFO'] = environ['PATH_INFO'][len(root) :]
environ['SCRIPT_NAME'] = environ['SCRIPT_NAME'] + root
return self.coverstore_app(environ, start_response)
else:
return self.app(environ, start_response)
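# Illustrative sketch (hedged): for a request to a hypothetical path such as
# /covers/b/id/123-S.jpg, the middleware above hands the coverstore app an
# environ rewritten to
#   PATH_INFO   = "/b/id/123-S.jpg"
#   SCRIPT_NAME = <original SCRIPT_NAME> + "/covers"
# while any other path falls through to the main Open Library app unchanged.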
def ol_query(q):
return web.ctx.site.things(q, details=True)
def ol_get(key):
d = web.ctx.site.get(key)
return d and d.dict()
def setup_solr_updater():
from infogami import config
# solr-updater reads configuration from openlibrary.config.runtime_config
from openlibrary import config as olconfig
olconfig.runtime_config = config.__dict__
    # The solr-updater makes an HTTP call to the website instead of using the
    # infobase API. It requires setting the host before it can be used.
from openlibrary.catalog.utils.query import set_query_host
dev_instance_url = config.get("dev_instance_url", "http://127.0.0.1:8080/")
host = web.lstrips(dev_instance_url, "http://").strip("/")
set_query_host(host)
class is_loaned_out(delegate.page):
path = "/is_loaned_out/.*"
def GET(self):
return delegate.RawText("[]", content_type="application/json")
class process_ebooks(delegate.page):
"""Hack to add ebooks to store so that books are visible in the returncart."""
path = "/_dev/process_ebooks"
def GET(self):
from openlibrary.plugins.worksearch.search import get_solr
result = get_solr().select(
query='borrowed_b:false', fields=['key', 'lending_edition_s'], limit=100
)
def make_doc(d):
# Makes a store doc from solr doc
return {
"_key": "ebooks/books/" + d['lending_edition_s'],
"_rev": None, # Don't worry about consistency
"type": "ebook",
"book_key": "/books/" + d['lending_edition_s'],
"borrowed": "false",
}
docs = [make_doc(d) for d in result['docs']]
docdict = {d['_key']: d for d in docs}
web.ctx.site.store.update(docdict)
return delegate.RawText("ok\n")
@oltask
def update_solr(changeset):
"""Updates solr on edit."""
from openlibrary.solr import update
keys = set()
docs = changeset['docs'] + changeset['old_docs']
docs = [doc for doc in docs if doc] # doc can be None if it is newly created.
for doc in docs:
if doc['type']['key'] == '/type/edition':
keys.update(w['key'] for w in doc.get('works', []))
elif doc['type']['key'] == '/type/work':
keys.add(doc['key'])
keys.update(
a['author']['key'] for a in doc.get('authors', []) if 'author' in a
)
elif doc['type']['key'] == '/type/author':
keys.add(doc['key'])
update.update_keys(list(keys))
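# Illustrative sketch (hedged) of the changeset shape update_solr() reads --
# only the fields used above are shown; real changesets carry more metadata:
#
#   {"docs": [{"key": "/books/OL1M", "type": {"key": "/type/edition"},
#              "works": [{"key": "/works/OL1W"}]}],
#    "old_docs": [None]}
#
# would queue /works/OL1W for re-indexing in solr.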
@infogami.install_hook
def add_ol_user():
"""Creates openlibrary user with admin privileges."""
# Create openlibrary user
if web.ctx.site.get("/people/openlibrary") is None:
web.ctx.site.register(
username="openlibrary",
email="[email protected]",
password="openlibrary",
displayname="Open Library",
)
web.ctx.site.activate_account(username="openlibrary")
if web.ctx.site.get("/usergroup/api") is None:
g_api = web.ctx.site.new(
"/usergroup/api",
{
"key": "/usergroup/api",
"type": "/type/usergroup",
"members": [{"key": "/people/openlibrary"}],
},
)
g_api._save(comment="Added openlibrary user to API usergroup.")
g_admin = web.ctx.site.get("/usergroup/admin").dict()
g_admin.setdefault('members', [])
members = [m['key'] for m in g_admin["members"]]
if 'openlibrary' not in members:
g_admin['members'].append({"key": "/people/openlibrary"})
web.ctx.site.save(g_admin, "Added openlibrary user to admin usergroup.")
@infogami.action
def load_sample_data():
"""Action to load sample data.
    This was an experiment to load sample data as part of the install, but it
    doesn't seem to work well on the Linux dev instance because of some weird
    supervisor log issues.
    It is unused for now.
"""
env = {}
with open("scripts/copydocs.py") as in_file:
exec(in_file.read(), env, env)
src = env['OpenLibrary']()
dest = web.ctx.site
comment = "Loaded sample data."
list_key = "/people/anand/lists/OL1815L"
env['copy_list'](src, dest, list_key, comment=comment)
| 5,866 | Python | .py | 138 | 34.985507 | 103 | 0.635915 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
393 | conftest.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/tests/conftest.py | collect_ignore = ['test_listapi.py', 'test_ratingsapi.py']
def pytest_addoption(parser):
parser.addoption("--server", default=None)
parser.addoption("--username")
parser.addoption("--password")
def pytest_configure(config):
print("pytest_configure", config.getvalue("server"))
if config.getvalue("server"):
collect_ignore[:] = []
| 363 | Python | .py | 9 | 35.666667 | 58 | 0.696275 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
394 | test_listapi.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/tests/test_listapi.py | # from py.test import config
import json
import http.cookiejar as cookielib
import urllib.parse
import urllib.request
def pytest_funcarg__config(request):
return request.config
class ListAPI:
def __init__(self, config):
self.server = config.getvalue('server')
self.username = config.getvalue("username")
self.password = config.getvalue("password")
self.cookiejar = cookielib.CookieJar()
self.opener = urllib.request.build_opener()
self.opener.add_handler(urllib.request.HTTPCookieProcessor(self.cookiejar))
def urlopen(self, path, data=None, method=None, headers=None):
        """urlopen with cookie support."""
        headers = headers or {}
if not method:
if data:
method = "POST"
else:
method = "GET"
req = urllib.request.Request(self.server + path, data=data, headers=headers)
req.get_method = lambda: method
return self.opener.open(req)
def login(self):
data = {'username': self.username, 'password': self.password}
self.urlopen("/account/login", data=urllib.parse.urlencode(data), method="POST")
print(self.cookiejar)
def create_list(self, data):
json_data = json.dumps(data)
headers = {"content-type": "application/json"}
response = self.urlopen(
"/people/" + self.username + "/lists", data=json_data, headers=headers
)
return json.loads(response.read())
def get_lists(self):
data = self.urlopen("/people/" + self.username + "/lists.json").read()
return json.loads(data)
def get_list(self, key):
data = self.urlopen(key + ".json").read()
return json.loads(data)
def get_seeds(self, key):
data = self.urlopen(key + "/seeds.json").read()
return json.loads(data)
def update_seeds(self, key, additions, removals):
data = {
"add": additions,
"remove": removals,
}
json_data = json.dumps(data)
response = self.urlopen(key + "/seeds.json", json_data)
return json.loads(response.read())
def test_create(config):
api = ListAPI(config)
api.login()
data = {
"name": "foo",
"description": "foo bar",
"tags": ["t1", "t2"],
"seeds": ["subject:cheese"],
}
result = api.create_list(data)
assert "key" in result
assert result['revision'] == 1
list_key = result['key']
# test get
list = api.get_list(list_key)
for k in ["created", "last_modified"]:
list.pop(k)
assert list == {
"key": result['key'],
"type": {"key": "/type/list"},
"revision": 1,
"latest_revision": 1,
"name": "foo",
"description": {"type": "/type/text", "value": "foo bar"},
"tags": ["t1", "t2"],
"seeds": ["subject:cheese"],
}
# test get seeds
assert api.get_seeds(list_key) == ["subject:cheese"]
def test_add_seeds(config):
api = ListAPI(config)
api.login()
data = {
"name": "foo",
"description": "foo bar",
"tags": ["t1", "t2"],
"seeds": ["subject:cheese"],
}
result = api.create_list(data)
key = result['key']
# remove cheese and add apple
api.update_seeds(key, ["subject:apple"], ["subject:cheese"])
assert api.get_seeds(key) == ["subject:apple"]
def test_lists(config):
api = ListAPI(config)
api.login()
count = api.get_lists()['list_count']
api.create_list({"name": "foo"})
new_count = api.get_lists()['list_count']
# counts are not accurate yet.
# assert new_count == count + 1
| 3,650 | Python | .py | 104 | 27.682692 | 88 | 0.588336 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
395 | test_ratingsapi.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/tests/test_ratingsapi.py | # from py.test import config
import json
import http.cookiejar as cookielib
import urllib.parse
import urllib.request
from openlibrary import accounts
from openlibrary.core import models
def pytest_funcarg__config(request):
return request.config
class RatingsAPI:
def __init__(self, config):
self.server = config.getvalue('server')
self.username = config.getvalue("username")
self.password = config.getvalue("password")
self.cookiejar = cookielib.CookieJar()
self.opener = urllib.request.build_opener()
self.opener.add_handler(urllib.request.HTTPCookieProcessor(self.cookiejar))
def urlopen(self, path, data=None, method=None, headers=None):
        """urlopen with cookie support."""
        headers = headers or {}
if not method:
if data:
method = "POST"
else:
method = "GET"
req = urllib.request.Request(self.server + path, data=data, headers=headers)
req.get_method = lambda: method
return self.opener.open(req)
def login(self):
data = {'username': self.username, 'password': self.password}
self.urlopen("/account/login", data=urllib.parse.urlencode(data), method="POST")
def rate_book(self, work_key, data):
url = '%s/ratings.json' % (work_key)
headers = {"content-type": "application/json"}
r = self.urlopen(url, data=json.dumps(data), headers=headers, method="POST")
return json.loads(r.read())
def test_rating(config, monkeypatch):
api = RatingsAPI(config)
api.login()
work_key = "/works/OL123W"
data = {"rating": "5"}
class FakeUser:
def __init__(self, key):
self.key = '/users/%s' % key
monkeypatch.setattr(accounts, "get_current_user", FakeUser('test'))
monkeypatch.setattr(models.Ratings, "remove", {})
monkeypatch.setattr(models.Ratings, "add", {})
result = api.rate_book(work_key, data)
assert 'success' in result
| 1,964 | Python | .py | 48 | 33.729167 | 88 | 0.650342 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
396 | test_lists.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/tests/test_lists.py | import json
from unittest.mock import patch
import pytest
from openlibrary.plugins.openlibrary.lists import ListRecord
class TestListRecord:
def test_from_input_no_data(self):
with (
patch('web.input') as mock_web_input,
patch('web.data') as mock_web_data,
):
mock_web_data.return_value = b''
mock_web_input.return_value = {
'key': None,
'name': 'foo',
'description': 'bar',
'seeds': [],
}
assert ListRecord.from_input() == ListRecord(
key=None,
name='foo',
description='bar',
seeds=[],
)
def test_from_input_with_data(self):
with (
patch('web.input') as mock_web_input,
patch('web.data') as mock_web_data,
):
mock_web_data.return_value = b'key=/lists/OL1L&name=foo+data&description=bar&seeds--0--key=/books/OL1M&seeds--1--key=/books/OL2M'
mock_web_input.return_value = {
'key': None,
'name': 'foo',
'description': 'bar',
'seeds': [],
}
assert ListRecord.from_input() == ListRecord(
key='/lists/OL1L',
name='foo data',
description='bar',
seeds=[{'key': '/books/OL1M'}, {'key': '/books/OL2M'}],
)
def test_from_input_with_json_data(self):
with (
patch('web.input') as mock_web_input,
patch('web.data') as mock_web_data,
patch('web.ctx') as mock_web_ctx,
):
mock_web_ctx.env = {'CONTENT_TYPE': 'application/json'}
mock_web_data.return_value = json.dumps(
{
'name': 'foo data',
'description': 'bar',
'seeds': [{'key': '/books/OL1M'}, {'key': '/books/OL2M'}],
}
).encode('utf-8')
mock_web_input.return_value = {
'key': None,
'name': 'foo',
'description': 'bar',
'seeds': [],
}
assert ListRecord.from_input() == ListRecord(
key=None,
name='foo data',
description='bar',
seeds=[{'key': '/books/OL1M'}, {'key': '/books/OL2M'}],
)
SEED_TESTS = [
([], []),
(['OL1M'], [{'key': '/books/OL1M'}]),
(['OL1M', 'OL2M'], [{'key': '/books/OL1M'}, {'key': '/books/OL2M'}]),
(['OL1M,OL2M'], [{'key': '/books/OL1M'}, {'key': '/books/OL2M'}]),
]
@pytest.mark.parametrize('seeds,expected', SEED_TESTS)
def test_from_input_seeds(self, seeds, expected):
with (
patch('web.input') as mock_web_input,
patch('web.data') as mock_web_data,
):
mock_web_data.return_value = b''
mock_web_input.return_value = {
'key': None,
'name': 'foo',
'description': 'bar',
'seeds': seeds,
}
assert ListRecord.from_input() == ListRecord(
key=None,
name='foo',
description='bar',
seeds=expected,
)
def test_normalize_input_seed(self):
f = ListRecord.normalize_input_seed
assert f("/books/OL1M") == {"key": "/books/OL1M"}
assert f({"key": "/books/OL1M"}) == {"key": "/books/OL1M"}
assert f("/subjects/love") == "subject:love"
assert f("subject:love") == "subject:love"
| 3,676 | Python | .py | 98 | 24.734694 | 141 | 0.461603 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
397 | test_stats.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/tests/test_stats.py | """
Tests the stats gathering systems.
"""
import calendar
import datetime
from .. import stats
from openlibrary.core.admin import Stats
class MockDoc(dict):
def __init__(self, _id, *largs, **kargs):
self.id = _id
kargs['_key'] = _id
super().__init__(*largs, **kargs)
def __repr__(self):
o = super().__repr__()
return f"<{self.id} - {o}>"
def test_format_stats_entry():
"Tests the stats performance entries"
ps = stats.process_stats
assert ps({"total": {"time": 0.1}}) == [("TT", 0, 0.1)]
# assert ps({"total": {"time": 0.1346}}) == [("TT", 0, 0.135)] # FIXME
assert ps({"memcache": {"count": 2, "time": 0.1}}) == [("MC", 2, 0.100)]
assert ps({"infobase": {"count": 2, "time": 0.1}}) == [("IB", 2, 0.100)]
assert ps({"couchdb": {"count": 2, "time": 0.1}}) == [("CD", 2, 0.100)]
assert ps({"solr": {"count": 2, "time": 0.1}}) == [("SR", 2, 0.100)]
# assert ps({"archive.org": {"count": 2, "time": 0.1}}) == [("IA", 2, 0.100)] # FIXME
assert ps({"something-else": {"count": 2, "time": 0.1}}) == [("OT", 2, 0.100)]
def test_format_stats():
    "Tests whether the performance stats are output properly in the X-OL-Stats header"
performance_stats = {"total": {"time": 0.2}, "infobase": {"count": 2, "time": 0.13}}
assert stats.format_stats(performance_stats) == '"IB 2 0.130 TT 0 0.200"'
def test_stats_container():
"Tests the Stats container used in the templates"
# Test basic API and null total count
ipdata = [{"foo": 1}] * 100
s = Stats(ipdata, "foo", "nothing")
expected_op = [(x, 1) for x in range(0, 140, 5)]
assert list(s.get_counts()) == expected_op
assert s.get_summary() == 28
assert s.total == ""
def test_status_total():
"Tests the total attribute of the stats container used in the templates"
ipdata = [{"foo": 1, "total": x * 2} for x in range(1, 100)]
s = Stats(ipdata, "foo", "total")
assert s.total == 198
# Test a total before the last
ipdata = [{"foo": 1, "total": x * 2} for x in range(1, 100)]
for i in range(90, 99):
del ipdata[i]["total"]
ipdata[90]["total"] = 2
s = Stats(ipdata, "foo", "total")
assert s.total == 2
def test_status_timerange():
"Tests the stats container with a time X-axis"
d = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
ipdata = []
expected_op = []
for i in range(10):
doc = MockDoc(_id=d.strftime("counts-%Y-%m-%d"), foo=1)
ipdata.append(doc)
expected_op.append([calendar.timegm(d.timetuple()) * 1000, 1])
d += datetime.timedelta(days=1)
s = Stats(ipdata, "foo", "nothing")
assert s.get_counts(10, True) == expected_op[:10]
| 2,760 | Python | .py | 63 | 38.68254 | 91 | 0.582991 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
398 | test_home.py | internetarchive_openlibrary/openlibrary/plugins/openlibrary/tests/test_home.py | import datetime
import web
from openlibrary.core.admin import Stats
from openlibrary.mocks.mock_infobase import MockSite
from bs4 import BeautifulSoup
from openlibrary.plugins.openlibrary import home
class MockDoc(dict):
def __init__(self, _id, *largs, **kargs):
self.id = _id
kargs['_key'] = _id
super().__init__(*largs, **kargs)
def __repr__(self):
o = super().__repr__()
return f"<{self.id} - {o}>"
class TestHomeTemplates:
def setup_monkeypatch(self, monkeypatch):
ctx = web.storage()
monkeypatch.setattr(web, "ctx", ctx)
monkeypatch.setattr(web.webapi, "ctx", web.ctx)
self._load_fake_context()
web.ctx.lang = 'en'
web.ctx.site = MockSite()
def _load_fake_context(self):
self.app = web.application()
self.env = {
"PATH_INFO": "/",
"HTTP_METHOD": "GET",
}
self.app.load(self.env)
def test_about_template(self, monkeypatch, render_template):
self.setup_monkeypatch(monkeypatch)
html = str(render_template("home/about"))
assert "About the Project" in html
blog = BeautifulSoup(html, "lxml").find("ul", {"id": "olBlog"})
assert blog is not None
assert len(blog.findAll("li")) == 0
posts = [
web.storage(
{
"title": "Blog-post-0",
"link": "https://blog.openlibrary.org/2011/01/01/blog-post-0",
"pubdate": datetime.datetime(2011, 1, 1),
}
)
]
html = str(render_template("home/about", blog_posts=posts))
assert "About the Project" in html
assert "Blog-post-0" in html
assert "https://blog.openlibrary.org/2011/01/01/blog-post-0" in html
blog = BeautifulSoup(html, "lxml").find("ul", {"id": "olBlog"})
assert blog is not None
assert len(blog.findAll("li")) == 1
def test_stats_template(self, render_template):
# Make sure that it works fine without any input (skipping section)
html = str(render_template("home/stats"))
assert html == ""
def test_home_template(self, render_template, mock_site, monkeypatch):
self.setup_monkeypatch(monkeypatch)
docs = [
MockDoc(
_id=datetime.datetime.now().strftime("counts-%Y-%m-%d"),
human_edits=1,
bot_edits=1,
lists=1,
visitors=1,
loans=1,
members=1,
works=1,
editions=1,
ebooks=1,
covers=1,
authors=1,
subjects=1,
)
] * 100
stats = {
'human_edits': Stats(docs, "human_edits", "human_edits"),
'bot_edits': Stats(docs, "bot_edits", "bot_edits"),
'lists': Stats(docs, "lists", "total_lists"),
'visitors': Stats(docs, "visitors", "visitors"),
'loans': Stats(docs, "loans", "loans"),
'members': Stats(docs, "members", "total_members"),
'works': Stats(docs, "works", "total_works"),
'editions': Stats(docs, "editions", "total_editions"),
'ebooks': Stats(docs, "ebooks", "total_ebooks"),
'covers': Stats(docs, "covers", "total_covers"),
'authors': Stats(docs, "authors", "total_authors"),
'subjects': Stats(docs, "subjects", "total_subjects"),
}
mock_site.quicksave("/people/foo/lists/OL1L", "/type/list")
def spoofed_generic_carousel(*args, **kwargs):
return [
{
"work": None,
"key": "/books/OL1M",
"url": "/books/OL1M",
"title": "The Great Book",
"authors": [
web.storage({"key": "/authors/OL1A", "name": "Some Author"})
],
"read_url": "http://archive.org/stream/foo",
"borrow_url": "/books/OL1M/foo/borrow",
"inlibrary_borrow_url": "/books/OL1M/foo/borrow",
"cover_url": "",
}
]
html = str(render_template("home/index", stats=stats, test=True))
headers = [
"Books We Love",
"Recently Returned",
"Kids",
"Thrillers",
"Romance",
"Textbooks",
]
for h in headers:
assert h in html
assert "Around the Library" in html
assert "About the Project" in html
class Test_format_book_data:
def test_all(self, mock_site, mock_ia):
book = mock_site.quicksave("/books/OL1M", "/type/edition", title="Foo")
work = mock_site.quicksave("/works/OL1W", "/type/work", title="Foo")
def test_authors(self, mock_site, mock_ia):
a1 = mock_site.quicksave("/authors/OL1A", "/type/author", name="A1")
a2 = mock_site.quicksave("/authors/OL2A", "/type/author", name="A2")
work = mock_site.quicksave(
"/works/OL1W",
"/type/work",
title="Foo",
authors=[{"author": {"key": "/authors/OL2A"}}],
)
book = mock_site.quicksave("/books/OL1M", "/type/edition", title="Foo")
assert home.format_book_data(book)['authors'] == []
        # when there is no work, the authors field must be picked from the book
book = mock_site.quicksave(
"/books/OL1M",
"/type/edition",
title="Foo",
authors=[{"key": "/authors/OL1A"}],
)
assert home.format_book_data(book)['authors'] == [
{"key": "/authors/OL1A", "name": "A1"}
]
# when there is work, the authors field must be picked from the work
book = mock_site.quicksave(
"/books/OL1M",
"/type/edition",
title="Foo",
authors=[{"key": "/authors/OL1A"}],
works=[{"key": "/works/OL1W"}],
)
assert home.format_book_data(book)['authors'] == [
{"key": "/authors/OL2A", "name": "A2"}
]
| 6,240 | Python | .py | 155 | 28.722581 | 91 | 0.519637 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
399 | import_rdf.py | internetarchive_openlibrary/openlibrary/plugins/importapi/import_rdf.py | """
OL Import API RDF parser
"""
from openlibrary.plugins.importapi import import_edition_builder
def parse_string(e, key):
return (key, e.text)
def parse_authors(e, key):
s = './/{http://www.w3.org/1999/02/22-rdf-syntax-ns#}value'
authors = [name.text for name in e.iterfind(s)]
return (key, authors)
# Note that RDF can have subject elements in both dc and dcterms namespaces
# dc:subject is simply parsed by parse_string()
def parse_subject(e, key):
member_of = e.find('.//{http://purl.org/dc/dcam/}memberOf')
resource_type = member_of.get(
'{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource'
)
val = e.find('.//{http://www.w3.org/1999/02/22-rdf-syntax-ns#}value')
if resource_type == 'http://purl.org/dc/terms/DDC':
new_key = 'dewey_decimal_class'
return (new_key, val.text)
elif resource_type == 'http://purl.org/dc/terms/LCC':
new_key = 'lc_classification'
return (new_key, val.text)
else:
return (None, None)
def parse_category(e, key):
return (key, e.get('label'))
def parse_identifier(e, key):
val = e.text
isbn_str = 'urn:ISBN:'
ia_str = 'http://www.archive.org/details/'
if val.startswith(isbn_str):
isbn = val[len(isbn_str) :]
if len(isbn) == 10:
return ('isbn_10', isbn)
elif len(isbn) == 13:
return ('isbn_13', isbn)
elif val.startswith(ia_str):
return ('ocaid', val[len(ia_str) :])
else:
return (None, None)
parser_map = {
'{http://purl.org/ontology/bibo/}authorList': ['author', parse_authors],
'{http://purl.org/dc/terms/}title': ['title', parse_string],
'{http://purl.org/dc/terms/}publisher': ['publisher', parse_string],
'{http://purl.org/dc/terms/}issued': ['publish_date', parse_string],
'{http://purl.org/dc/terms/}extent': ['pagination', parse_string],
'{http://purl.org/dc/elements/1.1/}subject': ['subject', parse_string],
'{http://purl.org/dc/terms/}subject': ['subject', parse_subject],
'{http://purl.org/dc/terms/}language': ['language', parse_string],
'{http://purl.org/ontology/bibo/}lccn': ['lccn', parse_string],
'{http://purl.org/ontology/bibo/}oclcnum': ['oclc_number', parse_string],
'{http://RDVocab.info/elements/}placeOfPublication': [
'publish_place',
parse_string,
],
}
# TODO: {http://purl.org/dc/terms/}identifier (could be ocaid)
# TODO: {http://www.w3.org/2005/Atom}link (could be cover image)
def parse(root):
edition_builder = import_edition_builder.import_edition_builder()
for e in root.iter():
if isinstance(e.tag, str) and e.tag in parser_map:
key = parser_map[e.tag][0]
(new_key, val) = parser_map[e.tag][1](e, key)
if new_key:
if isinstance(val, list):
for v in val:
edition_builder.add(new_key, v)
else:
edition_builder.add(new_key, val)
return edition_builder
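# Illustrative usage sketch (hedged): parse() takes the root Element of an
# RDF/XML record. The record below is a made-up minimal example exercising the
# dcterms:title and bibo:lccn entries of parser_map; it assumes
# import_edition_builder exposes get_dict().
def _example_usage():
    import xml.etree.ElementTree as ET

    rdf = """<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
                      xmlns:dcterms="http://purl.org/dc/terms/"
                      xmlns:bibo="http://purl.org/ontology/bibo/">
      <rdf:Description>
        <dcterms:title>An example title</dcterms:title>
        <bibo:lccn>2001012345</bibo:lccn>
      </rdf:Description>
    </rdf:RDF>"""
    builder = parse(ET.fromstring(rdf))
    return builder.get_dict()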
| 3,039 | Python | .py | 73 | 34.931507 | 77 | 0.612542 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |