id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5,100 | buildbot.tac | buildbot_buildbot/master/docker/buildbot.tac | import os
import sys

from twisted.application import service
from twisted.python.log import FileLogObserver
from twisted.python.log import ILogObserver

from buildbot.master import BuildMaster

# Buildmaster base directory: the BUILDBOT_BASEDIR environment variable wins
# (used by the docker image), falling back to the directory containing this
# .tac file.  (`os` is imported just above this chunk.)
basedir = os.environ.get("BUILDBOT_BASEDIR",
                         os.path.abspath(os.path.dirname(__file__)))
configfile = 'master.cfg'

# note: this line is matched against to check that this is a buildmaster
# directory; do not edit it.
application = service.Application('buildmaster')

# Route twistd logging to stdout (container-friendly) instead of a log file.
application.setComponent(ILogObserver, FileLogObserver(sys.stdout).emit)

# umask=None: inherit the process umask rather than forcing one.
m = BuildMaster(basedir, configfile, umask=None)
m.setServiceParent(application)
| 621 | Python | .tac | 15 | 39.8 | 72 | 0.823627 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,101 | test_openstack.py | buildbot_buildbot/master/buildbot/test/unit/worker/test_openstack.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright 2013 Cray Inc.
import hashlib
from unittest import mock
from twisted.internet import defer
from twisted.trial import unittest
import buildbot.test.fake.openstack as novaclient
from buildbot import config
from buildbot import interfaces
from buildbot.process.properties import Interpolate
from buildbot.process.properties import Properties
from buildbot.test.fake import fakemaster
from buildbot.test.reactor import TestReactorMixin
from buildbot.worker import openstack
class TestOpenStackWorker(TestReactorMixin, unittest.TestCase):
os_auth = {
"os_username": 'user',
"os_password": 'pass',
"os_tenant_name": 'tenant',
"os_auth_url": 'auth',
}
os_auth_custom = {"token": 'openstack-token', "auth_type": 'token', "auth_url": 'auth'}
bs_image_args = {"flavor": 1, "image": '28a65eb4-f354-4420-97dc-253b826547f7', **os_auth}
def setUp(self):
    """Install the fake novaclient/keystoneauth1 modules and test fixtures."""
    # auto_tear_down=False: tearDown() below tears the reactor down explicitly.
    self.setup_test_reactor(auto_tear_down=False)
    # The single fake module stands in for all three third-party modules the
    # worker imports (novaclient client, keystoneauth1 loading and session).
    self.patch(openstack, "client", novaclient)
    self.patch(openstack, "loading", novaclient)
    self.patch(openstack, "session", novaclient)
    self.patch(openstack, "NotFound", novaclient.NotFound)
    self.build = Properties(
        image=novaclient.TEST_UUIDS['image'],
        flavor=novaclient.TEST_UUIDS['flavor'],
        meta_value='value',
    )
    # Mirrors how the worker computes masterhash (sha1 of the master name,
    # first 6 hex chars); presumably b'fake:/master' matches the fake
    # master's name — confirm against fakemaster.make_master.
    self.masterhash = hashlib.sha1(b'fake:/master').hexdigest()[:6]
@defer.inlineCallbacks
def tearDown(self):
    # Matches setup_test_reactor(auto_tear_down=False) in setUp().
    yield self.tear_down_test_reactor()
@defer.inlineCallbacks
def setupWorker(self, *args, **kwargs):
    """Create an OpenStackLatentWorker attached to a started fake master.

    The master is stopped automatically through addCleanup when the test
    ends.  Returns the worker.
    """
    worker = openstack.OpenStackLatentWorker(*args, **kwargs)
    master = yield fakemaster.make_master(self, wantData=True)
    # NOTE(review): this sets the attribute on the fakemaster *module*, not
    # on self — confirm it is not meant to be `self.master = master`.
    fakemaster.master = master
    worker.setServiceParent(master)
    yield master.startService()
    self.addCleanup(master.stopService)
    return worker
@defer.inlineCallbacks
def test_constructor_nonova(self):
self.patch(openstack, "client", None)
with self.assertRaises(config.ConfigErrors):
yield self.setupWorker('bot', 'pass', **self.bs_image_args)
@defer.inlineCallbacks
def test_constructor_nokeystoneauth(self):
self.patch(openstack, "loading", None)
with self.assertRaises(config.ConfigErrors):
yield self.setupWorker('bot', 'pass', **self.bs_image_args)
@defer.inlineCallbacks
def test_constructor_minimal(self):
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
self.assertEqual(bs.workername, 'bot')
self.assertEqual(bs.password, 'pass')
self.assertEqual(bs.flavor, 1)
self.assertEqual(bs.image, '28a65eb4-f354-4420-97dc-253b826547f7')
self.assertEqual(bs.block_devices, None)
self.assertIsInstance(bs.novaclient, novaclient.Client)
@defer.inlineCallbacks
def test_builds_may_be_incompatible(self):
# Minimal set of parameters
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
self.assertEqual(bs.builds_may_be_incompatible, True)
@defer.inlineCallbacks
def test_constructor_minimal_keystone_v3(self):
bs = yield self.setupWorker(
'bot',
'pass',
os_user_domain='test_oud',
os_project_domain='test_opd',
**self.bs_image_args,
)
self.assertEqual(bs.workername, 'bot')
self.assertEqual(bs.password, 'pass')
self.assertEqual(bs.flavor, 1)
self.assertEqual(bs.image, '28a65eb4-f354-4420-97dc-253b826547f7')
self.assertEqual(bs.block_devices, None)
self.assertIsInstance(bs.novaclient, novaclient.Client)
self.assertEqual(bs.novaclient.session.auth.user_domain_name, 'test_oud')
self.assertEqual(bs.novaclient.session.auth.project_domain_name, 'test_opd')
@defer.inlineCallbacks
def test_constructor_token_keystone_v3(self):
bs = yield self.setupWorker(
'bot', 'pass', os_auth_args=self.os_auth_custom, **self.bs_image_args
)
self.assertEqual(bs.workername, 'bot')
self.assertEqual(bs.password, 'pass')
self.assertEqual(bs.flavor, 1)
self.assertEqual(bs.image, '28a65eb4-f354-4420-97dc-253b826547f7')
self.assertEqual(bs.block_devices, None)
self.assertIsInstance(bs.novaclient, novaclient.Client)
self.assertEqual(bs.novaclient.session.auth.user_domain_name, 'token')
self.assertEqual(bs.novaclient.session.auth.project_domain_name, 'token')
@defer.inlineCallbacks
def test_constructor_region(self):
bs = yield self.setupWorker('bot', 'pass', region="test-region", **self.bs_image_args)
self.assertEqual(bs.novaclient.client.region_name, "test-region")
@defer.inlineCallbacks
def test_constructor_block_devices_default(self):
block_devices = [{'uuid': 'uuid', 'volume_size': 10}]
bs = yield self.setupWorker(
'bot', 'pass', flavor=1, block_devices=block_devices, **self.os_auth
)
self.assertEqual(bs.image, None)
self.assertEqual(len(bs.block_devices), 1)
self.assertEqual(
bs.block_devices,
[
{
'boot_index': 0,
'delete_on_termination': True,
'destination_type': 'volume',
'device_name': 'vda',
'source_type': 'image',
'volume_size': 10,
'uuid': 'uuid',
}
],
)
@defer.inlineCallbacks
def test_constructor_block_devices_get_sizes(self):
block_devices = [
{'source_type': 'image', 'uuid': novaclient.TEST_UUIDS['image']},
{'source_type': 'image', 'uuid': novaclient.TEST_UUIDS['image'], 'volume_size': 4},
{'source_type': 'volume', 'uuid': novaclient.TEST_UUIDS['volume']},
{'source_type': 'snapshot', 'uuid': novaclient.TEST_UUIDS['snapshot']},
]
def check_volume_sizes(_images, _flavors, block_devices, nova_args, metas):
self.assertEqual(len(block_devices), 4)
self.assertEqual(block_devices[0]['volume_size'], 1)
self.assertIsInstance(
block_devices[0]['volume_size'], int, "Volume size is an integer."
)
self.assertEqual(block_devices[1]['volume_size'], 4)
self.assertEqual(block_devices[2]['volume_size'], 4)
self.assertEqual(block_devices[3]['volume_size'], 2)
lw = yield self.setupWorker(
'bot', 'pass', flavor=1, block_devices=block_devices, **self.os_auth
)
self.assertEqual(lw.image, None)
self.assertEqual(
lw.block_devices,
[
{
'boot_index': 0,
'delete_on_termination': True,
'destination_type': 'volume',
'device_name': 'vda',
'source_type': 'image',
'volume_size': None,
'uuid': novaclient.TEST_UUIDS['image'],
},
{
'boot_index': 0,
'delete_on_termination': True,
'destination_type': 'volume',
'device_name': 'vda',
'source_type': 'image',
'volume_size': 4,
'uuid': novaclient.TEST_UUIDS['image'],
},
{
'boot_index': 0,
'delete_on_termination': True,
'destination_type': 'volume',
'device_name': 'vda',
'source_type': 'volume',
'volume_size': None,
'uuid': novaclient.TEST_UUIDS['volume'],
},
{
'boot_index': 0,
'delete_on_termination': True,
'destination_type': 'volume',
'device_name': 'vda',
'source_type': 'snapshot',
'volume_size': None,
'uuid': novaclient.TEST_UUIDS['snapshot'],
},
],
)
self.patch(lw, "_start_instance", check_volume_sizes)
yield lw.start_instance(self.build)
@defer.inlineCallbacks
def test_constructor_block_devices_missing(self):
block_devices = [
{'source_type': 'image', 'uuid': 'image-uuid'},
]
lw = yield self.setupWorker(
'bot', 'pass', flavor=1, block_devices=block_devices, **self.os_auth
)
yield self.assertFailure(lw.start_instance(self.build), novaclient.NotFound)
@defer.inlineCallbacks
def test_constructor_no_image(self):
"""
Must have one of image or block_devices specified.
"""
with self.assertRaises(ValueError):
yield self.setupWorker('bot', 'pass', flavor=1, **self.os_auth)
@defer.inlineCallbacks
def test_getImage_string(self):
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
image_uuid = yield bs._getImage(self.build)
self.assertEqual('28a65eb4-f354-4420-97dc-253b826547f7', image_uuid)
@defer.inlineCallbacks
def test_getImage_renderable(self):
bs = yield self.setupWorker(
'bot', 'pass', flavor=1, image=Interpolate('%(prop:image)s'), **self.os_auth
)
image_uuid = yield bs._getImage(self.build)
self.assertEqual(novaclient.TEST_UUIDS['image'], image_uuid)
@defer.inlineCallbacks
def test_getImage_name(self):
bs = yield self.setupWorker('bot', 'pass', flavor=1, image='CirrOS 0.3.4', **self.os_auth)
image_uuid = yield bs._getImage(self.build)
self.assertEqual(novaclient.TEST_UUIDS['image'], image_uuid)
@defer.inlineCallbacks
def test_getFlavor_string(self):
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
flavor_uuid = yield bs._getFlavor(self.build)
self.assertEqual(1, flavor_uuid)
@defer.inlineCallbacks
def test_getFlavor_renderable(self):
bs = yield self.setupWorker(
'bot', 'pass', image="1", flavor=Interpolate('%(prop:flavor)s'), **self.os_auth
)
flavor_uuid = yield bs._getFlavor(self.build)
self.assertEqual(novaclient.TEST_UUIDS['flavor'], flavor_uuid)
@defer.inlineCallbacks
def test_getFlavor_name(self):
bs = yield self.setupWorker('bot', 'pass', image="1", flavor='m1.small', **self.os_auth)
flavor_uuid = yield bs._getFlavor(self.build)
self.assertEqual(novaclient.TEST_UUIDS['flavor'], flavor_uuid)
@defer.inlineCallbacks
def test_start_instance_already_exists(self):
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
bs.instance = mock.Mock()
yield self.assertFailure(bs.start_instance(self.build), ValueError)
@defer.inlineCallbacks
def test_start_instance_first_fetch_fail(self):
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
bs._poll_resolution = 0
self.patch(novaclient.Servers, 'fail_to_get', True)
self.patch(novaclient.Servers, 'gets_until_disappears', 0)
yield self.assertFailure(
bs.start_instance(self.build), interfaces.LatentWorkerFailedToSubstantiate
)
@defer.inlineCallbacks
def test_start_instance_fail_to_find(self):
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
bs._poll_resolution = 0
self.patch(novaclient.Servers, 'fail_to_get', True)
yield self.assertFailure(
bs.start_instance(self.build), interfaces.LatentWorkerFailedToSubstantiate
)
@defer.inlineCallbacks
def test_start_instance_fail_to_start(self):
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
bs._poll_resolution = 0
self.patch(novaclient.Servers, 'fail_to_start', True)
yield self.assertFailure(
bs.start_instance(self.build), interfaces.LatentWorkerFailedToSubstantiate
)
@defer.inlineCallbacks
def test_start_instance_success(self):
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
bs._poll_resolution = 0
uuid, image_uuid, time_waiting = yield bs.start_instance(self.build)
self.assertTrue(uuid)
self.assertEqual(image_uuid, '28a65eb4-f354-4420-97dc-253b826547f7')
self.assertTrue(time_waiting)
@defer.inlineCallbacks
def test_start_instance_check_meta(self):
meta_arg = {'some_key': 'some-value', 'BUILDBOT:instance': self.masterhash}
bs = yield self.setupWorker('bot', 'pass', meta=meta_arg, **self.bs_image_args)
bs._poll_resolution = 0
yield bs.start_instance(self.build)
self.assertIn('meta', bs.instance.boot_kwargs)
self.assertEqual(bs.instance.metadata, meta_arg)
@defer.inlineCallbacks
def test_start_instance_check_meta_renderable(self):
meta_arg = {'some_key': Interpolate('%(prop:meta_value)s')}
bs = yield self.setupWorker('bot', 'pass', meta=meta_arg, **self.bs_image_args)
bs._poll_resolution = 0
yield bs.start_instance(self.build)
self.assertIn('meta', bs.instance.boot_kwargs)
self.assertEqual(
bs.instance.metadata, {'some_key': 'value', 'BUILDBOT:instance': self.masterhash}
)
@defer.inlineCallbacks
def test_start_instance_check_nova_args(self):
nova_args = {'some-key': 'some-value'}
bs = yield self.setupWorker('bot', 'pass', nova_args=nova_args, **self.bs_image_args)
bs._poll_resolution = 0
yield bs.start_instance(self.build)
self.assertIn('meta', bs.instance.boot_kwargs)
self.assertEqual(bs.instance.boot_kwargs['some-key'], 'some-value')
@defer.inlineCallbacks
def test_start_instance_check_nova_args_renderable(self):
nova_args = {'some-key': Interpolate('%(prop:meta_value)s')}
bs = yield self.setupWorker('bot', 'pass', nova_args=nova_args, **self.bs_image_args)
bs._poll_resolution = 0
yield bs.start_instance(self.build)
self.assertIn('meta', bs.instance.boot_kwargs)
self.assertEqual(bs.instance.boot_kwargs['some-key'], 'value')
@defer.inlineCallbacks
def test_interpolate_renderables_for_new_build(self):
build1 = Properties(image=novaclient.TEST_UUIDS['image'], block_device="some-device")
build2 = Properties(image="build2-image")
block_devices = [{'uuid': Interpolate('%(prop:block_device)s'), 'volume_size': 10}]
bs = yield self.setupWorker(
'bot', 'pass', block_devices=block_devices, **self.bs_image_args
)
bs._poll_resolution = 0
yield bs.start_instance(build1)
yield bs.stop_instance(build1)
self.assertTrue((yield bs.isCompatibleWithBuild(build2)))
@defer.inlineCallbacks
def test_reject_incompatible_build_while_running(self):
build1 = Properties(image=novaclient.TEST_UUIDS['image'], block_device="some-device")
build2 = Properties(image="build2-image")
block_devices = [{'uuid': Interpolate('%(prop:block_device)s'), 'volume_size': 10}]
bs = yield self.setupWorker(
'bot', 'pass', block_devices=block_devices, **self.bs_image_args
)
bs._poll_resolution = 0
yield bs.start_instance(build1)
self.assertFalse((yield bs.isCompatibleWithBuild(build2)))
@defer.inlineCallbacks
def test_stop_instance_cleanup(self):
"""
Test cleaning up leftover instances before starting new.
"""
self.patch(novaclient.Servers, 'fail_to_get', False)
self.patch(novaclient.Servers, 'gets_until_disappears', 9)
novaclient.Servers().create(
['bot', novaclient.TEST_UUIDS['image'], novaclient.TEST_UUIDS['flavor']],
meta={'BUILDBOT:instance': self.masterhash},
)
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
bs._poll_resolution = 0
uuid, image_uuid, time_waiting = yield bs.start_instance(self.build)
self.assertTrue(uuid)
self.assertEqual(image_uuid, '28a65eb4-f354-4420-97dc-253b826547f7')
self.assertTrue(time_waiting)
@defer.inlineCallbacks
def test_stop_instance_not_set(self):
"""
Test stopping the instance but with no instance to stop.
"""
bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
bs.instance = None
stopped = yield bs.stop_instance()
self.assertEqual(stopped, None)
@defer.inlineCallbacks
def test_stop_instance_missing(self):
    """Stopping an instance the API no longer knows about must not raise."""
    bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
    instance = mock.Mock()
    instance.id = 'uuid'
    bs.instance = instance
    # TODO: Check log for instance not found.
    # Yield the Deferred so a failure in stop_instance propagates to the
    # test instead of becoming an unhandled Deferred error.
    yield bs.stop_instance()
@defer.inlineCallbacks
def test_stop_instance_fast(self):
    """A fast stop deletes the instance without waiting for it to go away."""
    bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
    # Make instance immediately active.
    self.patch(novaclient.Servers, 'gets_until_active', 0)
    s = novaclient.Servers()
    bs.instance = inst = s.create()
    self.assertIn(inst.id, s.instances)
    # Yield so errors from stop_instance fail the test rather than leaking
    # as an unhandled Deferred.
    yield bs.stop_instance(fast=True)
    self.assertNotIn(inst.id, s.instances)
@defer.inlineCallbacks
def test_stop_instance_notfast(self):
    """A non-fast stop also removes the instance from the fake API."""
    bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
    # Make instance immediately active.
    self.patch(novaclient.Servers, 'gets_until_active', 0)
    s = novaclient.Servers()
    bs.instance = inst = s.create()
    self.assertIn(inst.id, s.instances)
    # Yield so errors from stop_instance fail the test rather than leaking
    # as an unhandled Deferred.
    yield bs.stop_instance(fast=False)
    self.assertNotIn(inst.id, s.instances)
@defer.inlineCallbacks
def test_stop_instance_unknown(self):
    """An already-DELETED instance is not deleted again on shutdown."""
    bs = yield self.setupWorker('bot', 'pass', **self.bs_image_args)
    # Make instance immediately active.
    self.patch(novaclient.Servers, 'gets_until_active', 0)
    s = novaclient.Servers()
    bs.instance = inst = s.create()
    # Set status to DELETED. Instance should not be deleted when shutting
    # down as it already is.
    inst.status = novaclient.DELETED
    self.assertIn(inst.id, s.instances)
    # Yield so errors from stop_instance fail the test rather than leaking
    # as an unhandled Deferred.
    yield bs.stop_instance()
    self.assertIn(inst.id, s.instances)
| 19,338 | Python | .tac | 416 | 37.012019 | 98 | 0.631007 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,102 | openstack.py | buildbot_buildbot/master/buildbot/test/fake/openstack.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright 2013 Cray Inc.
from __future__ import annotations
import uuid
ACTIVE = 'ACTIVE'
BUILD = 'BUILD'
DELETED = 'DELETED'
ERROR = 'ERROR'
UNKNOWN = 'UNKNOWN'
TEST_UUIDS = {
'image': '28a65eb4-f354-4420-97dc-253b826547f7',
'volume': '65fbb9f1-c4d5-40a8-a233-ad47c52bb837',
'snapshot': 'ab89152d-3c26-4d30-9ae5-65b705f874b7',
'flavor': '853774a1-459f-4f1f-907e-c96f62472531',
}
class FakeNovaClient:
    """Fake of the low-level client hanging off novaclient's Client.

    Only the ``region_name`` attribute is used by the worker code.
    """

    region_name = ""
# Parts used from novaclient
class Client:
    """Fake of novaclient.client.Client.

    Exposes the manager attributes the worker code touches (glance, volumes,
    volume_snapshots, flavors, servers), each pre-populated with one
    well-known item keyed by TEST_UUIDS.
    """

    def __init__(self, version, session):
        self.glance = ItemManager()
        self.glance._add_items([Image(TEST_UUIDS['image'], 'CirrOS 0.3.4', 13287936)])
        self.volumes = ItemManager()
        self.volumes._add_items([Volume(TEST_UUIDS['volume'], 'CirrOS 0.3.4', 4)])
        self.volume_snapshots = ItemManager()
        self.volume_snapshots._add_items([Snapshot(TEST_UUIDS['snapshot'], 'CirrOS 0.3.4', 2)])
        self.flavors = ItemManager()
        self.flavors._add_items([Flavor(TEST_UUIDS['flavor'], 'm1.small', 0)])
        self.servers = Servers()
        # Tests inspect .session (auth options) and .client (region_name).
        self.session = session
        self.client = FakeNovaClient()
class ItemManager:
    """In-memory stand-in for a novaclient resource manager.

    Holds items keyed by their id and supports the subset of the manager
    API the worker uses: list(), get(), and glance's find_image().
    """

    def __init__(self):
        self._items = {}

    def _add_items(self, new_items):
        # Index every item by its id.
        self._items.update((entry.id, entry) for entry in new_items)

    def list(self):
        return self._items.values()

    def get(self, uuid):
        try:
            return self._items[uuid]
        except KeyError:
            raise NotFound from None

    def find_image(self, name):
        # Images may be looked up either by display name or by id.
        for candidate in self._items.values():
            if name in (candidate.name, candidate.id):
                return candidate
        raise NotFound
# This exists because Image needs an attribute that isn't supported by
# namedtuple. And once the base code is there might as well have Volume and
# Snapshot use it too.
class Item:
    """Minimal id/name/size record shared by the fake image, volume and
    snapshot types (a namedtuple would not allow the extra attribute that
    Image sets)."""

    def __init__(self, id, name, size):
        (self.id, self.name, self.size) = (id, name, size)
class Image(Item):
    """Fake glance image.

    The real API exposes the byte size under 'OS-EXT-IMG-SIZE:size'; that
    name is not a valid identifier, hence setattr().
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        setattr(self, 'OS-EXT-IMG-SIZE:size', self.size)
class Flavor(Item):
    """Fake nova flavor; inherits id/name/size from Item."""

    pass
class Volume(Item):
    """Fake cinder volume; ``size`` is in GiB."""

    pass
class Snapshot(Item):
    """Fake volume snapshot; ``size`` is in GiB."""

    pass
class Servers:
    """Fake of novaclient's servers manager.

    ``instances`` is deliberately a *class* attribute so every Servers()
    object (the worker's client and one a test creates directly) shares the
    same set of fake servers.  The ``fail_to_*`` / ``gets_until_*`` knobs
    are patched by tests to simulate slow boots and API failures.
    """

    fail_to_get = False
    fail_to_start = False
    gets_until_active = 3
    gets_until_disappears = 1
    instances: dict[uuid.UUID, Instance] = {}

    def create(self, *boot_args, **boot_kwargs):
        """Register a new Instance (status starts in BUILD) and return it."""
        instance_id = uuid.uuid4()
        instance = Instance(instance_id, self, boot_args, boot_kwargs)
        self.instances[instance_id] = instance
        return instance

    def get(self, instance_id):
        """Return the instance, advancing its fake boot state machine.

        Each call while the instance is building counts one poll (``gets``);
        after ``gets_until_active`` polls it becomes ACTIVE (or ERROR when
        ``fail_to_start`` is set).  With ``fail_to_get`` set, the instance
        "disappears" (NotFound) once polled ``gets_until_disappears`` times.
        """
        if instance_id not in self.instances:
            raise NotFound
        inst = self.instances[instance_id]
        if not self.fail_to_get or inst.gets < self.gets_until_disappears:
            if not inst.status.startswith('BUILD'):
                return inst
            inst.gets += 1
            if inst.gets >= self.gets_until_active:
                if not self.fail_to_start:
                    inst.status = ACTIVE
                else:
                    inst.status = ERROR
            return inst
        else:
            raise NotFound

    def delete(self, instance_id):
        """Forget the instance; deleting an unknown id is a silent no-op."""
        if instance_id in self.instances:
            del self.instances[instance_id]

    def findall(self, **kwargs):
        """Return every instance whose name matches kwargs['name'] (else [])."""
        name = kwargs.get('name', None)
        if name:
            return list(filter(lambda item: item.name == name, self.instances.values()))
        return []

    def find(self, **kwargs):
        """Return the single matching instance.

        Mirrors novaclient: NoUniqueMatch for multiple matches, NotFound for
        none.  (Bug fix: the previous ``len(result) > 0`` raised
        NoUniqueMatch for *any* non-empty result, so a unique match could
        never be returned.)
        """
        result = self.findall(**kwargs)
        if len(result) > 1:
            raise NoUniqueMatch
        if len(result) == 0:
            raise NotFound
        return result[0]
# This is returned by Servers.create().
class Instance:
    """Fake server object as returned by Servers.create()."""

    def __init__(self, id, servers, boot_args, boot_kwargs):
        self.id = id
        self.servers = servers
        self.boot_args = boot_args
        self.boot_kwargs = boot_kwargs
        # Poll counter driven by Servers.get() to simulate boot progress.
        self.gets = 0
        self.status = 'BUILD(networking)'
        self.metadata = boot_kwargs.get('meta', {})
        # First positional boot argument is the server name, when present.
        self.name = boot_args[0] if boot_args else 'name'

    def delete(self):
        # Deregister from the owning Servers manager.
        self.servers.delete(self.id)
# Parts used from novaclient.exceptions.
class NotFound(Exception):
    """Fake of novaclient.exceptions.NotFound: requested resource is absent."""

    pass
class NoUniqueMatch(Exception):
    """Fake of novaclient.exceptions.NoUniqueMatch: more than one match."""

    pass
# Parts used from keystoneauth1.
def get_plugin_loader(plugin_type):
    """Fake of keystoneauth1.loading.get_plugin_loader.

    Supports only the 'password' and 'token' plugin types; anything else is
    rejected with ValueError, as in the real library.
    """
    if plugin_type == 'token':
        return TokenLoader()
    if plugin_type == 'password':
        return PasswordLoader()
    raise ValueError(f"plugin_type '{plugin_type}' is not supported")
class PasswordLoader:
    """Fake loader for the 'password' keystoneauth plugin."""

    def load_from_options(self, **kwargs):
        return PasswordAuth(**kwargs)
class TokenLoader:
    """Fake loader for the 'token' keystoneauth plugin."""

    def load_from_options(self, **kwargs):
        return TokenAuth(**kwargs)
class PasswordAuth:
    """Fake password auth plugin: records every option for test inspection."""

    def __init__(
        self,
        auth_url,
        password,
        project_name,
        username,
        user_domain_name=None,
        project_domain_name=None,
    ):
        # Store each option verbatim under its keystoneauth attribute name.
        for attr, value in (
            ('auth_url', auth_url),
            ('password', password),
            ('project_name', project_name),
            ('username', username),
            ('user_domain_name', user_domain_name),
            ('project_domain_name', project_domain_name),
        ):
            setattr(self, attr, value)
class TokenAuth:
    """Fake token auth plugin.

    Besides the url/token pair, identity fields are hard-coded to the values
    the tests assert for token-based authentication.
    """

    def __init__(self, auth_url, token):
        self.auth_url = auth_url
        self.token = token
        # Fixed identity values for token auth.
        for attr, value in (
            ('project_name', 'tenant'),
            ('username', 'testuser'),
            ('user_domain_name', 'token'),
            ('project_domain_name', 'token'),
        ):
            setattr(self, attr, value)
class Session:
    """Fake keystoneauth1 session: just remembers the auth plugin."""

    def __init__(self, auth):
        self.auth = auth
| 6,386 | Python | .tac | 183 | 27.945355 | 95 | 0.636423 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,103 | openstack.py | buildbot_buildbot/master/buildbot/worker/openstack.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright 2013 Cray Inc.
import hashlib
import math
import time
from twisted.internet import defer
from twisted.internet import threads
from twisted.python import log
from buildbot import config
from buildbot.interfaces import LatentWorkerFailedToSubstantiate
from buildbot.util import unicode2bytes
from buildbot.util.latent import CompatibleLatentWorkerMixin
from buildbot.worker import AbstractLatentWorker
try:
from keystoneauth1 import loading
from keystoneauth1 import session
from novaclient import client
from novaclient.exceptions import NotFound
_hush_pyflakes = [client]
except ImportError:
NotFound = Exception
client = None
loading = None
session = None
ACTIVE = 'ACTIVE'
BUILD = 'BUILD'
DELETED = 'DELETED'
UNKNOWN = 'UNKNOWN'
class OpenStackLatentWorker(CompatibleLatentWorkerMixin, AbstractLatentWorker):
instance = None
_poll_resolution = 5 # hook point for tests
def checkConfig(
    self,
    name,
    password,
    flavor,
    os_username=None,
    os_password=None,
    os_tenant_name=None,
    os_auth_url=None,
    os_user_domain=None,
    os_project_domain=None,
    os_auth_args=None,
    block_devices=None,
    region=None,
    image=None,
    meta=None,
    # Have a nova_args parameter to allow passing things directly
    # to novaclient.
    nova_args=None,
    client_version='2',
    **kwargs,
):
    """Validate the worker configuration at config-load time.

    Requires the novaclient and keystoneauth1 modules, at least one boot
    source (``image`` or ``block_devices``), and a complete set of auth
    options — either the individual ``os_*`` arguments or an
    ``os_auth_args`` dict containing at least ``auth_url``.
    """
    if not client:
        config.error(
            "The python module 'novaclient' is needed "
            "to use a OpenStackLatentWorker. "
            "Please install 'python-novaclient' package."
        )
    if not loading or not session:
        config.error(
            "The python module 'keystoneauth1' is needed "
            "to use a OpenStackLatentWorker. "
            "Please install the 'keystoneauth1' package."
        )
    if block_devices is None and image is None:
        raise ValueError('One of block_devices or image must be given')
    if os_auth_args is None:
        if os_auth_url is None:
            config.error(
                "Missing os_auth_url OpenStackLatentWorker and os_auth_args not provided."
            )
        if os_username is None or os_password is None:
            config.error(
                "Missing os_username / os_password for OpenStackLatentWorker "
                "and os_auth_args not provided."
            )
    else:
        # ensure that at least auth_url is provided
        if os_auth_args.get('auth_url') is None:
            config.error("Missing 'auth_url' from os_auth_args for OpenStackLatentWorker")
    super().checkConfig(name, password, **kwargs)
@defer.inlineCallbacks
def reconfigService(
    self,
    name,
    password,
    flavor,
    os_username=None,
    os_password=None,
    os_tenant_name=None,
    os_auth_url=None,
    os_user_domain=None,
    os_project_domain=None,
    os_auth_args=None,
    block_devices=None,
    region=None,
    image=None,
    meta=None,
    # Have a nova_args parameter to allow passing things directly
    # to novaclient.
    nova_args=None,
    client_version='2',
    **kwargs,
):
    """Apply (re)configuration: build the nova client and store settings.

    When ``os_auth_args`` is not supplied, an equivalent dict is assembled
    from the individual ``os_*`` keyword arguments (password-style auth).
    """
    yield super().reconfigService(name, password, **kwargs)
    if os_auth_args is None:
        os_auth_args = {
            'auth_url': os_auth_url,
            'username': os_username,
            'password': os_password,
        }
        if os_tenant_name is not None:
            os_auth_args['project_name'] = os_tenant_name
        if os_user_domain is not None:
            os_auth_args['user_domain_name'] = os_user_domain
        if os_project_domain is not None:
            os_auth_args['project_domain_name'] = os_project_domain
    self.flavor = flavor
    self.client_version = client_version
    if client:
        # Auth values may be renderable secrets; resolve them before use.
        os_auth_args = yield self.renderSecrets(os_auth_args)
        self.novaclient = self._constructClient(client_version, os_auth_args)
        if region is not None:
            self.novaclient.client.region_name = region
    if block_devices is not None:
        self.block_devices = [self._parseBlockDevice(bd) for bd in block_devices]
    else:
        self.block_devices = None
    self.image = image
    self.meta = meta
    self.nova_args = nova_args if nova_args is not None else {}
    masterName = unicode2bytes(self.master.name)
    # Short, stable identifier for this master; stamped into instance
    # metadata ('BUILDBOT:instance') so leftovers can be recognized.
    self.masterhash = hashlib.sha1(masterName).hexdigest()[:6]
def _constructClient(self, client_version, auth_args):
    """Return a novaclient Client built from *auth_args*.

    ``auth_args`` is copied before use so that popping ``auth_type`` (which
    selects the keystoneauth plugin and must not be forwarded as a plugin
    option) no longer mutates the caller's dict.
    """
    auth_args = dict(auth_args)
    # Default to password auth when no explicit auth_type was configured.
    auth_plugin = auth_args.pop('auth_type', 'password')
    loader = loading.get_plugin_loader(auth_plugin)
    auth = loader.load_from_options(**auth_args)
    sess = session.Session(auth=auth)
    return client.Client(client_version, session=sess)
def _parseBlockDevice(self, block_device):
"""
Parse a higher-level view of the block device mapping into something
novaclient wants. This should be similar to how Horizon presents it.
Required keys:
device_name: The name of the device; e.g. vda or xda.
source_type: image, snapshot, volume, or blank/None.
destination_type: Destination of block device: volume or local.
delete_on_termination: True/False.
uuid: The image, snapshot, or volume id.
boot_index: Integer used for boot order.
volume_size: Size of the device in GiB.
"""
client_block_device = {}
client_block_device['device_name'] = block_device.get('device_name', 'vda')
client_block_device['source_type'] = block_device.get('source_type', 'image')
client_block_device['destination_type'] = block_device.get('destination_type', 'volume')
client_block_device['delete_on_termination'] = bool(
block_device.get('delete_on_termination', True)
)
client_block_device['uuid'] = block_device['uuid']
client_block_device['boot_index'] = int(block_device.get('boot_index', 0))
# Allow None here. It will be rendered later.
client_block_device['volume_size'] = block_device.get('volume_size')
return client_block_device
@defer.inlineCallbacks
def _renderBlockDevice(self, block_device, build):
    """Render all of the block device's values.

    A ``volume_size`` still None after rendering is filled in with the
    minimum size derived from the source image/volume/snapshot.
    """
    rendered_block_device = yield build.render(block_device)
    if rendered_block_device['volume_size'] is None:
        source_type = rendered_block_device['source_type']
        source_uuid = rendered_block_device['uuid']
        volume_size = self._determineVolumeSize(source_type, source_uuid)
        rendered_block_device['volume_size'] = volume_size
    return rendered_block_device
def _determineVolumeSize(self, source_type, source_uuid):
    """
    Determine the minimum size the volume needs to be for the source.
    Returns the size in GiB.
    """
    nova = self.novaclient
    if source_type == 'image':
        # The size returned for an image is in bytes. Round up to the next
        # integer GiB.
        image = nova.glance.get(source_uuid)
        if hasattr(image, 'OS-EXT-IMG-SIZE:size'):
            size = getattr(image, 'OS-EXT-IMG-SIZE:size')
            size_gb = int(math.ceil(size / 1024.0**3))
            return size_gb
    elif source_type == 'volume':
        # Volumes are easy because they are already in GiB.
        volume = nova.volumes.get(source_uuid)
        return volume.size
    elif source_type == 'snapshot':
        snap = nova.volume_snapshots.get(source_uuid)
        return snap.size
    else:
        unknown_source = (
            f"The source type '{source_type}' for UUID '{source_uuid}' is " "unknown"
        )
        raise ValueError(unknown_source)
    # NOTE(review): reached only for an image lacking the
    # 'OS-EXT-IMG-SIZE:size' attribute; the None volume_size then flows
    # through to nova unchanged — confirm that is intended.
    return None
@defer.inlineCallbacks
def _getImage(self, build):
    """Resolve the configured image (renderable, name, or id) to an image id.

    Returns None when no image is configured (the boot source then comes
    from block_devices).  Raises ValueError when the image cannot be found.
    """
    image_name = yield build.render(self.image)
    # There is images in block devices
    if image_name is None:
        return None
    # find_image() can find by id as well
    try:
        image = self.novaclient.glance.find_image(image_name)
    except NotFound as e:
        unknown_image = f"Cannot find OpenStack image {image_name}"
        raise ValueError(unknown_image) from e
    return image.id
@defer.inlineCallbacks
def _getFlavor(self, build):
    """Render the configured flavor and map a flavor *name* to its id.

    When the rendered value is already an id, no listed flavor name matches
    and the value is returned unchanged.
    """
    rendered = yield build.render(self.flavor)
    # Scan the full flavor list (no early break, matching the original
    # behavior) and substitute the id of any flavor whose name matches.
    for candidate in self.novaclient.flavors.list():
        if candidate.name == rendered:
            rendered = candidate.id
    return rendered
@defer.inlineCallbacks
def renderWorkerProps(self, build):
    """Render everything start_instance needs from the build's properties.

    Returns ``(image, flavor, block_devices, nova_args, meta)``.  The
    rendered meta is always augmented with a 'BUILDBOT:instance' marker so
    instances belonging to this master can be identified later.
    """
    image = yield self._getImage(build)
    flavor = yield self._getFlavor(build)
    nova_args = yield build.render(self.nova_args)
    meta = yield build.render(self.meta)
    worker_meta = {
        'BUILDBOT:instance': self.masterhash,
    }
    if meta is None:
        meta = worker_meta
    else:
        # Note: mutates the rendered meta dict in place.
        meta.update(worker_meta)
    if self.block_devices is not None:
        block_devices = []
        for bd in self.block_devices:
            rendered_block_device = yield self._renderBlockDevice(bd, build)
            block_devices.append(rendered_block_device)
    else:
        block_devices = None
    return (image, flavor, block_devices, nova_args, meta)
@defer.inlineCallbacks
def start_instance(self, build):
    """Boot a new instance for *build*; refuses when one is already active.

    The blocking nova interaction runs in a worker thread via
    deferToThread.
    """
    if self.instance is not None:
        raise ValueError('instance active')
    image, flavor, block_devices, nova_args, meta = yield self.renderWorkerPropsOnStart(build)
    res = yield threads.deferToThread(
        self._start_instance, image, flavor, block_devices, nova_args, meta
    )
    return res
def _start_instance(self, image_uuid, flavor_uuid, block_devices, nova_args, meta):
# ensure existing, potentially duplicated, workers are stopped
self._stop_instance(None, True)
# then try to start new one
boot_args = [self.workername, image_uuid, flavor_uuid]
boot_kwargs = {"meta": meta, "block_device_mapping_v2": block_devices, **nova_args}
instance = self.novaclient.servers.create(*boot_args, **boot_kwargs)
# There is an issue when using sessions that the status is not
# available on the first try. Trying again will work fine. Fetch the
# instance to avoid that.
try:
instance = self.novaclient.servers.get(instance.id)
except NotFound as e:
log.msg(
'{class_name} {name} instance {instance.id} ({instance.name}) never found',
class_name=self.__class__.__name__,
name=self.workername,
instance=instance,
)
raise LatentWorkerFailedToSubstantiate(instance.id, BUILD) from e
self.instance = instance
log.msg(
f'{self.__class__.__name__} {self.workername} starting instance {instance.id} '
f'(image {image_uuid})'
)
duration = 0
interval = self._poll_resolution
while instance.status.startswith(BUILD):
time.sleep(interval)
duration += interval
if duration % 60 == 0:
log.msg(
f'{self.__class__.__name__} {self.workername} has waited {duration // 60} '
f'minutes for instance {instance.id}'
)
try:
instance = self.novaclient.servers.get(instance.id)
except NotFound as e:
log.msg(
f'{self.__class__.__name__} {self.workername} instance {instance.id} '
f'({instance.name}) went missing'
)
raise LatentWorkerFailedToSubstantiate(instance.id, instance.status) from e
if instance.status == ACTIVE:
minutes = duration // 60
seconds = duration % 60
log.msg(
f'{self.__class__.__name__} {self.workername} instance {instance.id} '
f'({instance.name}) started in about {minutes} minutes {seconds} seconds'
)
return [
instance.id,
image_uuid,
f'{minutes // 60:02d}:{minutes % 60:02d}:{seconds:02d}',
]
else:
self.failed_to_start(instance.id, instance.status)
return None # This is just to silence warning, above line throws an exception
def stop_instance(self, fast=False):
instance = self.instance
self.instance = None
self.resetWorkerPropsOnStop()
self._stop_instance(instance, fast)
    def _stop_instance(self, instance_param, fast):
        """Delete an instance, or all leftovers tagged by this master.

        When instance_param is None, every server with this worker's name
        whose 'BUILDBOT:instance' metadata matches this master's hash is
        deleted (cleanup of duplicates); otherwise only the given instance
        is. The `fast` flag is not used by this implementation.
        """
        instances = []
        try:
            if instance_param is None:
                # Collect leftover instances created by this master,
                # identified by the metadata tag set at boot time.
                filter_f = (
                    lambda instance: instance.metadata.get("BUILDBOT:instance", "")
                    == self.masterhash
                )
                instances = list(filter(filter_f, self.novaclient.servers.findall(name=self.name)))
            else:
                instances = [self.novaclient.servers.get(instance_param.id)]
        except NotFound:
            # If can't find the instance, then it's already gone.
            # NOTE(review): this message assumes instance_param is not None;
            # if findall() could ever raise NotFound, instance_param.id
            # below would raise AttributeError — confirm.
            log.msg(
                f'{self.__class__.__name__} {self.workername} instance {instance_param.id} '
                f'({instance_param.name}) already terminated'
            )
        for instance in instances:
            # Skip servers that are already gone or in an unknown state.
            if instance.status not in (DELETED, UNKNOWN):
                instance.delete()
                log.msg(
                    f'{self.__class__.__name__} {self.workername} terminating instance '
                    f'{instance.id} ({instance.name})'
                )
| 15,037 | Python | .tac | 354 | 32.084746 | 99 | 0.606787 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
import os
from twisted.application import service
from buildbot.master import BuildMaster
{# A relocatable tac resolves basedir at runtime from the file's own
   location; otherwise the absolute path is baked in at creation time. #}
{% if relocatable -%}
basedir = '.'
{% else -%}
basedir = {{ basedir|repr }}
{%- endif %}
{# Log-rotation settings are only emitted when rotation is enabled. #}
{% if not no_logrotate -%}
rotateLength = {{ '%d' | format(log_size) }}
maxRotatedFiles = {{ ('%d' | format(log_count)) if log_count != None else 'None' }}
{%- endif %}
configfile = {{ config|repr }}
# Default umask for server
umask = None
# if this is a relocatable tac file, get the directory containing the TAC
if basedir == '.':
    basedir = os.path.abspath(os.path.dirname(__file__))
# note: this line is matched against to check that this is a buildmaster
# directory; do not edit it.
application = service.Application('buildmaster')
{# With rotation enabled, twistd logs to a size-rotated twistd.log. #}
{% if not no_logrotate -%}
from twisted.python.logfile import LogFile
from twisted.python.log import ILogObserver, FileLogObserver
logfile = LogFile.fromFullPath(os.path.join(basedir, "twistd.log"), rotateLength=rotateLength,
                               maxRotatedFiles=maxRotatedFiles)
application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
{%- endif %}
m = BuildMaster(basedir, configfile, umask)
m.setServiceParent(application)
{% if not no_logrotate -%}
m.log_rotation.rotateLength = rotateLength
m.log_rotation.maxRotatedFiles = maxRotatedFiles
{%- endif %}
| 1,309 | Python | .tac | 34 | 36.264706 | 94 | 0.736013 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
.. -*- rst -*-
.. bb:worker:: OpenStackLatentWorker
OpenStack
=========
.. @cindex OpenStackLatentWorker
.. py:class:: buildbot.worker.openstack.OpenStackLatentWorker
`OpenStack <http://openstack.org/>`_ is a series of interconnected components that facilitates managing compute, storage, and network resources in a data center.
It is available under the Apache License and has a REST interface along with a Python client.
This document will guide you through setup of an OpenStack latent worker:
.. contents::
:depth: 1
:local:
Install dependencies
--------------------
OpenStackLatentWorker requires python-novaclient to work; you can install it with ``pip install python-novaclient``.
Get an Account in an OpenStack cloud
------------------------------------
Setting up OpenStack is outside the domain of this document.
There are four account details necessary for the Buildbot master to interact with your OpenStack cloud: username, password, a tenant name, and the auth URL to use.
Create an Image
---------------
OpenStack supports a large number of image formats.
OpenStack maintains a short list of prebuilt images; if the desired image is not listed, the `OpenStack Compute Administration Manual <http://docs.openstack.org/trunk/openstack-compute/admin/content/index.html>`_ is a good resource for creating new images.
You need to configure the image with a buildbot worker to connect to the master on boot.
Configure the Master with an OpenStackLatentWorker
--------------------------------------------------
With the configured image in hand, it is time to configure the buildbot master to create OpenStack instances of it.
You will need the aforementioned account details.
These are the same details set in either environment variables or passed as options to an OpenStack client.
:class:`OpenStackLatentWorker` accepts the following arguments:
``name``
The worker name.
``password``
A password for the worker to login to the master with.
``flavor``
A string containing the flavor name or UUID to use for the instance.
``image``
A string containing the image name or UUID to use for the instance.
``os_username``
``os_password``
``os_tenant_name``
``os_user_domain``
``os_project_domain``
``os_auth_url``
The OpenStack authentication needed to create and delete instances.
These are the same as the environment variables with uppercase names of the arguments.
``os_auth_args``
Arguments passed directly to keystone.
If this is specified, other authentication parameters (see above) are ignored.
You can use ``auth_type`` to specify auth plugin to load.
    See the `OpenStack documentation <https://docs.openstack.org/python-keystoneclient/>`_ for more information.
Usually this should contain ``auth_url``, ``username``, ``password``, ``project_domain_name``
and ``user_domain_name``.
``block_devices``
A list of dictionaries.
Each dictionary specifies a block device to set up during instance creation.
The values support using properties from the build and will be rendered when the instance is started.
Supported keys
``uuid``
(required):
The image, snapshot, or volume UUID.
``volume_size``
(optional):
Size of the block device in GiB.
If not specified, the minimum size in GiB to contain the source will be calculated and used.
``device_name``
(optional): defaults to ``vda``.
The name of the device in the instance; e.g. vda or xda.
``source_type``
(optional): defaults to ``image``.
The origin of the block device.
Valid values are ``image``, ``snapshot``, or ``volume``.
``destination_type``
(optional): defaults to ``volume``.
Destination of block device: ``volume`` or ``local``.
``delete_on_termination``
(optional): defaults to ``True``.
Controls if the block device will be deleted when the instance terminates.
``boot_index``
(optional): defaults to ``0``.
Integer used for boot order.
``meta``
A dictionary of string key-value pairs to pass to the instance.
These will be available under the ``metadata`` key from the metadata service.
``nova_args``
(optional)
A dict that will be appended to the arguments when creating a VM.
Buildbot uses the OpenStack Nova version 2 API by default (see client_version).
``client_version``
(optional)
A string containing the Nova client version to use.
Defaults to ``2``.
Supports using ``2.X``, where X is a micro-version.
Use ``1.1`` for the previous, deprecated, version.
If using ``1.1``, note that an older version of novaclient will be needed so it won't switch to using ``2``.
``region``
(optional)
A string specifying region where to instantiate the worker.
Here is the simplest example of configuring an OpenStack latent worker.
.. code-block:: python
from buildbot.plugins import worker
c['workers'] = [
worker.OpenStackLatentWorker('bot2', 'sekrit',
flavor=1, image='8ac9d4a4-5e03-48b0-acde-77a0345a9ab1',
os_username='user', os_password='password',
os_tenant_name='tenant',
os_auth_url='http://127.0.0.1:35357/v2.0')
]
The ``image`` argument also supports being given a callable.
The callable will be passed the list of available images and must return the image to use.
The invocation happens in a separate thread to prevent blocking the build master when interacting with OpenStack.
.. code-block:: python
from buildbot.plugins import worker
def find_image(images):
# Sort oldest to newest.
def key_fn(x):
return x.created
candidate_images = sorted(images, key=key_fn)
# Return the oldest candidate image.
return candidate_images[0]
c['workers'] = [
worker.OpenStackLatentWorker('bot2', 'sekrit',
flavor=1, image=find_image,
os_username='user', os_password='password',
os_tenant_name='tenant',
os_auth_url='http://127.0.0.1:35357/v2.0')
]
The ``block_devices`` argument is minimally manipulated to provide some defaults and passed directly to novaclient.
The simplest example is an image that is converted to a volume and the instance boots from that volume.
When the instance is destroyed, the volume will be terminated as well.
.. code-block:: python
from buildbot.plugins import worker
c['workers'] = [
worker.OpenStackLatentWorker('bot2', 'sekrit',
flavor=1, image='8ac9d4a4-5e03-48b0-acde-77a0345a9ab1',
os_username='user', os_password='password',
os_tenant_name='tenant',
os_auth_url='http://127.0.0.1:35357/v2.0',
block_devices=[
{'uuid': '3f0b8868-67e7-4a5b-b685-2824709bd486',
'volume_size': 10}])
]
The ``nova_args`` can be used to specify additional arguments for the novaclient.
For example, network mappings, which are required if your OpenStack tenancy has more than one network and a default cannot be determined.
Please refer to your OpenStack manual whether it wants net-id or net-name.
Other useful parameters are ``availability_zone``, ``security_groups`` and ``config_drive``.
Refer to `Python bindings to the OpenStack Nova API <http://docs.openstack.org/developer/python-novaclient/>`_ for more information.
It is found on section Servers, method create.
.. code-block:: python
from buildbot.plugins import worker
c['workers'] = [
worker.OpenStackLatentWorker('bot2', 'sekrit',
flavor=1, image='8ac9d4a4-5e03-48b0-acde-77a0345a9ab1',
os_username='user', os_password='password',
os_tenant_name='tenant',
os_auth_url='http://127.0.0.1:35357/v2.0',
nova_args={
'nics': [
{'net-id':'uid-of-network'}
]})
]
:class:`OpenStackLatentWorker` supports all other configuration from the standard :class:`Worker`.
The ``missing_timeout`` and ``notify_on_missing`` specify how long to wait for an OpenStack instance to attach before considering the attempt to have failed and email addresses to alert, respectively.
``missing_timeout`` defaults to 20 minutes.
| 8,488 | Python | .tac | 164 | 44.914634 | 256 | 0.684115 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
import sys
from buildbot.master import BuildMaster
from twisted.application import service
from twisted.python.log import FileLogObserver, ILogObserver
# Master directory and config file are fixed inside the image.
basedir = '/usr/src/app'
configfile = 'master.cfg'
# note: this line is matched against to check that this is a buildmaster
# directory; do not edit it.
application = service.Application('buildmaster')
# Emit the twistd log on stdout instead of a twistd.log file.
application.setComponent(ILogObserver, FileLogObserver(sys.stdout).emit)
m = BuildMaster(basedir, configfile, umask=None)
m.setServiceParent(application)
| 512 | Python | .tac | 12 | 41.333333 | 72 | 0.832661 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
import fnmatch
import os
import sys
from twisted.application import service
from twisted.python.log import FileLogObserver
from twisted.python.log import ILogObserver
from buildslave.bot import BuildSlave
# setup worker
basedir = os.path.abspath(os.path.dirname(__file__))
application = service.Application('buildbot-worker')
# Emit the twistd log on stdout instead of a twistd.log file.
application.setComponent(ILogObserver, FileLogObserver(sys.stdout).emit)
# and worker on the same process!
# Connection parameters are taken from the environment, with defaults.
buildmaster_host = os.environ.get("BUILDMASTER", 'localhost')
port = int(os.environ.get("BUILDMASTER_PORT", 9989))
workername = os.environ.get("WORKERNAME", 'docker')
passwd = os.environ.get("WORKERPASS")
# delete the password from the environ so that it is not leaked in the log
blacklist = os.environ.get("WORKER_ENVIRONMENT_BLACKLIST", "WORKERPASS").split()
for name in list(os.environ.keys()):
    for toremove in blacklist:
        if fnmatch.fnmatch(name, toremove):
            del os.environ[name]
            # A variable may match several patterns; deleting it a second
            # time would raise KeyError, so stop at the first match.
            break
keepalive = 600
umask = None
maxdelay = 300
allow_shutdown = None
usepty = False
s = BuildSlave(buildmaster_host, port, workername, passwd, basedir,
               keepalive, usepty, umask=umask, maxdelay=maxdelay,
               allow_shutdown=allow_shutdown)
s.setServiceParent(application)
| 1,239 | Python | .tac | 31 | 36.935484 | 80 | 0.773144 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
import sys
from twisted.application import service
from twisted.python.log import FileLogObserver
from twisted.python.log import ILogObserver
from buildbot.master import BuildMaster
# Master directory and config file are fixed inside the image.
basedir = '/var/lib/buildbot'
configfile = 'master.cfg'
# note: this line is matched against to check that this is a buildmaster
# directory; do not edit it.
application = service.Application('buildmaster')
# Emit the twistd log on stdout instead of a twistd.log file.
application.setComponent(ILogObserver, FileLogObserver(sys.stdout).emit)
m = BuildMaster(basedir, configfile, umask=None)
m.setServiceParent(application)
| 548 | Python | .tac | 13 | 40.769231 | 72 | 0.837736 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
import fnmatch
import os
import sys
from twisted.application import service
from twisted.python.log import FileLogObserver
from twisted.python.log import ILogObserver
from buildbot_worker.bot import Worker
# setup worker
basedir = os.environ.get("BUILDBOT_BASEDIR",
                         os.path.abspath(os.path.dirname(__file__)))
application = service.Application('buildbot-worker')
# Emit the twistd log on stdout instead of a twistd.log file.
application.setComponent(ILogObserver, FileLogObserver(sys.stdout).emit)
# and worker on the same process!
# Connection parameters are taken from the environment, with defaults.
buildmaster_host = os.environ.get("BUILDMASTER", 'localhost')
port = int(os.environ.get("BUILDMASTER_PORT", 9989))
workername = os.environ.get("WORKERNAME", 'docker')
passwd = os.environ.get("WORKERPASS")
# delete the password from the environ so that it is not leaked in the log
blacklist = os.environ.get("WORKER_ENVIRONMENT_BLACKLIST", "WORKERPASS").split()
for name in list(os.environ.keys()):
    for toremove in blacklist:
        if fnmatch.fnmatch(name, toremove):
            del os.environ[name]
            # A variable may match several patterns; deleting it a second
            # time would raise KeyError, so stop at the first match.
            break
keepalive = 600
umask = None
maxdelay = 300
allow_shutdown = None
maxretries = 10
delete_leftover_dirs = False
s = Worker(buildmaster_host, port, workername, passwd, basedir,
           keepalive, umask=umask, maxdelay=maxdelay,
           allow_shutdown=allow_shutdown, maxRetries=maxretries,
           delete_leftover_dirs=delete_leftover_dirs)
s.setServiceParent(application)
| 1,368 | Python | .tac | 34 | 37.205882 | 80 | 0.772247 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
#!/usr/bin/env python
# setuptools honors install_requires; plain distutils silently ignores it,
# so prefer setuptools and fall back to distutils only when unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Data files shipped inside the cppman package itself.
_package_data = [
    'lib/index.db',
    'lib/pager.sh',
    'lib/cppman.vim'
]

# Files installed outside the package: docs, man page, shell completions.
_data_files = [
    ('share/doc/cppman', ['README.rst', 'AUTHORS', 'COPYING', 'ChangeLog']),
    ('share/man/man1', ['misc/cppman.1']),
    ('share/bash-completion/completions', ['misc/completions/cppman.bash']),
    ('share/zsh/vendor-completions/', ['misc/completions/zsh/_cppman']),
    ('share/fish/vendor_completions.d/', ['misc/completions/fish/cppman.fish'])
]

setup(
    name='cppman',
    version='0.5.7',
    description='C++ 98/11/14/17/20 manual pages for Linux/MacOS',
    author='Wei-Ning Huang (AZ)',
    author_email='[email protected]',
    url='https://github.com/aitjcize/cppman',
    license='GPL',
    packages=['cppman', 'cppman.formatter'],
    package_data={'cppman': _package_data},
    data_files=_data_files,
    scripts=['bin/cppman'],
    install_requires=['beautifulsoup4', 'html5lib'],
    classifiers=[
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Software Development :: Documentation',
    ],
)
| 1,446 | Python | .py | 36 | 31.722222 | 83 | 0.566856 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
#!/usr/bin/env python
import sys
import os
import os.path
# Make sure the in-tree cppman package (cwd) shadows any installed copy.
sys.path.insert(0, os.path.normpath(os.getcwd()))
from cppman.formatter import cplusplus, cppreference
# Run each formatter's built-in self-test.
cplusplus.func_test()
cppreference.func_test()
| 211 | Python | .py | 8 | 25 | 52 | 0.815 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
# -*- coding: utf-8 -*-
#
# util.py - Misc utilities
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <[email protected]>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import os
import shutil
import subprocess
import urllib.request
import bs4
from cppman import environ
# User-Agent header value to use with all requests
_USER_AGENT = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/111.0"
def update_mandb_path():
    """Add $XDG_CACHE_HOME/cppman/man to $HOME/.manpath.

    Rewrites the MANDATORY_MANPATH / MANDB_MAP entries for cppman's cache
    directory, re-adding them only when the UpdateManPath setting is on.
    """
    manpath_file = os.path.join(environ.HOME, ".manpath")
    man_dir = environ.cache_dir
    manindex_dir = environ.manindex_dir
    # Read the existing configuration; a missing ~/.manpath means there is
    # nothing to update.
    try:
        with open(manpath_file, 'r') as f:
            lines = f.readlines()
    except IOError:
        return
    # Drop any stale MANDATORY_MANPATH / MANDB_MAP entries for our man dir.
    lines = [line for line in lines if man_dir not in line]
    with open(manpath_file, 'w') as f:
        if environ.config.UpdateManPath:
            lines.append('MANDATORY_MANPATH\t%s\n' % man_dir)
            lines.append('MANDB_MAP\t\t\t%s\t%s\n' % (man_dir, manindex_dir))
        f.writelines(lines)
def update_man3_link():
    """Point the cache's man3 symlink at the configured source directory."""
    man3_path = os.path.join(environ.cache_dir, 'man3')
    if os.path.lexists(man3_path):
        if not os.path.islink(man3_path):
            # A real file/directory is in the way; refuse to clobber it.
            raise RuntimeError("Can't create link since `%s' already exists" %
                               man3_path)
        if os.readlink(man3_path) == environ.config.Source:
            # Already pointing at the right source; nothing to do.
            return
        os.unlink(man3_path)
    # Best-effort: the source directory may already exist.
    try:
        os.makedirs(os.path.join(environ.cache_dir, environ.config.Source))
    except Exception:
        pass
    os.symlink(environ.config.Source, man3_path)
def get_width():
    """Return the rendering width derived from the terminal size."""
    columns, _rows = shutil.get_terminal_size()
    # Use roughly 97.5% of the terminal, but always leave two columns free.
    return min(columns * 39 // 40, columns - 2)
def groff2man(data):
    """Render groff-formatted bytes into man-page text via groff(1)."""
    width = get_width()
    command = 'groff -t -Tascii -m man -rLL=%dn -rLT=%dn' % (width, width)
    proc = subprocess.Popen(
        command, shell=True, stdin=subprocess.PIPE,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    rendered, _stderr = proc.communicate(data)
    return rendered
def html2man(data, formatter):
    """Convert an HTML page to man-page text.

    *formatter* turns the HTML into groff markup, which is then rendered
    by groff2man().
    """
    return groff2man(formatter(data))
def fixupHTML(data):
    # Round-trip the markup through html5lib so malformed pages are
    # normalized into well-formed HTML before further processing.
    return str(bs4.BeautifulSoup(data, "html5lib"))
def urlopen(url, *args, **kwargs):
    """urllib.request.urlopen() wrapper that sets cppman's User-Agent."""
    request = url if isinstance(url, urllib.request.Request) \
        else urllib.request.Request(url)
    request.add_header('User-Agent', _USER_AGENT)
    return urllib.request.urlopen(request, *args, **kwargs)
def build_opener(*args, **kwargs):
    """urllib.request.build_opener() wrapper that sets cppman's User-Agent."""
    result = urllib.request.build_opener(*args, **kwargs)
    result.addheaders = [('User-Agent', _USER_AGENT)]
    return result
| 3,899 | Python | .py | 101 | 33.455446 | 94 | 0.678855 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
# -*- coding: utf-8 -*-
#
# config.py
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <[email protected]>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import configparser
import os
class Config(object):
    """cppman configuration backed by an INI file.

    Settings are exposed as attributes: reading a missing option falls
    back to DEFAULTS, and every public attribute assignment is written
    back to the config file.
    """
    PAGERS = ['vim', 'nvim', 'less', 'system']
    SOURCES = ['cplusplus.com', 'cppreference.com']
    DEFAULTS = {
        'Source': 'cppreference.com',
        'UpdateManPath': 'false',
        'Pager': 'vim'
    }
    def __init__(self, configfile):
        self._configfile = configfile
        if not os.path.exists(configfile):
            self.set_default()
        else:
            self._config = configparser.RawConfigParser()
            self._config.read(self._configfile)
    def __getattr__(self, name):
        # Look the option up in the file, falling back to DEFAULTS.
        # NOTE(review): an unknown option raises KeyError (from DEFAULTS)
        # rather than AttributeError, which breaks hasattr() — kept as-is
        # since callers may rely on the exception type.
        try:
            value = self._config.get('Settings', name)
        except configparser.NoOptionError:
            value = self.DEFAULTS[name]
        # Cache the value (this also persists it via __setattr__).
        setattr(self, name, value)
        self._config.read(self._configfile)
        return self.parse_bool(value)
    def __setattr__(self, name, value):
        # Public attributes are mirrored into the config file; private
        # (underscore-prefixed) attributes are plain instance state.
        if not name.startswith('_'):
            self._config.set('Settings', name, value)
            self.save()
        self.__dict__[name] = self.parse_bool(value)
    def set_default(self):
        """Set config to default."""
        try:
            os.makedirs(os.path.dirname(self._configfile))
        except OSError:
            # Directory already exists (or cannot be created); let the
            # subsequent open() surface real errors.
            pass
        self._config = configparser.RawConfigParser()
        self._config.add_section('Settings')
        for key, val in self.DEFAULTS.items():
            self._config.set('Settings', key, val)
        with open(self._configfile, 'w') as f:
            self._config.write(f)
    def save(self):
        """Store config back to file."""
        try:
            os.makedirs(os.path.dirname(self._configfile))
        except OSError:
            pass
        with open(self._configfile, 'w') as f:
            self._config.write(f)
    def parse_bool(self, val):
        """Map the strings 'true'/'false' (any case) to real booleans."""
        if isinstance(val, str):
            if val.lower() == 'true':
                return True
            if val.lower() == 'false':
                return False
        return val
| 2,853 | Python | .py | 80 | 28.4625 | 73 | 0.618789 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
# -*- coding: utf-8 -*-
#
# crawler.py
#
# Copyright (C) 2010 - 2016 Wei-Ning Huang (AZ) <[email protected]>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import print_function
import re
import sys
import time
from threading import Lock, Thread
from urllib.parse import urljoin, urlparse, urlunparse
import urllib.request
import urllib.error
import http.client
import cppman.util
from bs4 import BeautifulSoup
# See https://tools.ietf.org/html/rfc3986#section-3.3
_CONTAINS_DISALLOWED_URL_PCHAR_RE = re.compile('[\x00-\x20\x7f]')
class NoRedirection(urllib.request.HTTPErrorProcessor):
    """Opener handler that passes redirect responses through untouched.

    Responses whose status is in Crawler.F_REDIRECT_CODES are returned
    as-is so the caller can inspect the Location header itself; all other
    responses go through the normal HTTPErrorProcessor machinery.
    """
    def http_response(self, request, response):
        is_redirect = response.code in Crawler.F_REDIRECT_CODES
        if not is_redirect:
            return super().http_response(request, response)
        return response
    https_response = http_response
class Crawler(object):
    """Multi-threaded web crawler with failure retry.

    Subclasses override process_document(); returning True makes the
    crawler follow the links found in that document.
    """
    # Follow modes: any link / same host only / same host and path prefix.
    F_ANY, F_SAME_HOST, F_SAME_PATH = list(range(3))
    # HTTP statuses treated as redirects (handled manually, see NoRedirection).
    F_REDIRECT_CODES = (301, 302)
    def __init__(self):
        # URLs ever queued (for de-duplication) and the pending work set,
        # stored as (depth, url) tuples.
        self.queued = set()
        self.targets = set()
        self.failed_targets = set()
        self.max_failed_retries = 3
        self.failed_retry = 0
        self.downloaded = False
        self.threads = []
        self.concurrency = 0
        self.max_outstanding = 16
        self.max_depth = 0
        self.follow_mode = self.F_SAME_HOST
        self.content_type_filter = '(text/html)'
        self.url_filters = []
        self.prefix_filter = '^(#|javascript:|mailto:)'
        # targets_lock guards queued/targets/failed_targets/downloaded;
        # concurrency_lock guards threads/concurrency.
        self.targets_lock = Lock()
        self.concurrency_lock = Lock()
    def set_content_type_filter(self, cf):
        """Only process responses whose Content-Type matches one of *cf*."""
        self.content_type_filter = '(%s)' % ('|'.join(cf))
    def add_url_filter(self, uf):
        """Register an additional URL filter pattern."""
        self.url_filters.append(uf)
    def set_follow_mode(self, mode):
        """Select which links to follow (F_ANY/F_SAME_HOST/F_SAME_PATH)."""
        if mode > 2:
            raise RuntimeError('invalid follow mode %s.' % mode)
        self.follow_mode = mode
    def set_concurrency_level(self, level):
        """Cap the number of concurrent worker threads."""
        self.max_outstanding = level
    def set_max_depth(self, max_depth):
        """Limit crawl depth; 0 means unlimited."""
        self.max_depth = max_depth
    def link_parser(self, url, content):
        """Extract and absolutize all href targets found in *content*."""
        links = re.findall(r'''href\s*=\s*['"]\s*([^'"]+)['"]''', content)
        links = [self._fix_link(url, link) for link in links]
        return links
    def crawl(self, url, path=None):
        """Crawl starting from *url*, retrying failed URLs a few times."""
        self.url = urlparse(url)
        if path:
            self.url = self.url._replace(path=path)
        self.url = self.url._replace(fragment="")
        self.failed_targets = set()
        self.downloaded = True
        self.failed_retry = self.max_failed_retries
        self._add_target(url, 1)
        while True:
            self._spawn_new_worker()
            # Wait for all worker threads to drain the current target set.
            while True:
                with self.concurrency_lock:
                    threads = list(self.threads)
                if not threads:
                    break
                try:
                    for t in threads:
                        t.join(1)
                        if not t.is_alive():
                            with self.concurrency_lock:
                                self.threads.remove(t)
                except KeyboardInterrupt:
                    sys.exit(1)
            n_failed = len(self.failed_targets)
            if n_failed == 0:
                break
            if self.downloaded:  # at least one URL succeeded
                self.failed_retry = self.max_failed_retries
            else:
                self.failed_retry -= 1
                if self.failed_retry <= 0:
                    print("No retries are left to download failed URLs")
                    break
            print("Some URLs failed to download ({}). Retrying ({})...".format(
                n_failed, self.failed_retry))
            # Re-queue the failures as the next round's targets.
            self.targets = self.failed_targets
            self.failed_targets = set()
            self.downloaded = False
            time.sleep(2)
        if self.failed_targets:
            print("=== Failed URLs ({}):".format(len(self.failed_targets)))
            for depth, url in self.failed_targets:
                print("{} (depth {})".format(url, depth))
        print("=== Done {}".format(url))
    def process_document(self, url, content, depth):
        """callback to insert index"""
        # Should be implemented by a derived class. Make pylint happy
        return True
    def _fix_link(self, root, link):
        """Percent-encode bad characters, drop fragments, make absolute."""
        # Encode invalid characters
        link = re.sub(_CONTAINS_DISALLOWED_URL_PCHAR_RE,
                      lambda m: '%{:02X}'.format(ord(m.group())), link.strip())
        link = urlparse(link)
        if (link.fragment != ""):
            link = link._replace(fragment="")
        return urljoin(root, urlunparse(link))
    def _valid_link(self, link):
        """Return True when *link* should be followed under follow_mode."""
        if not link:
            return False
        link = urlparse(link)
        if self.follow_mode == self.F_ANY:
            return True
        elif self.follow_mode == self.F_SAME_HOST:
            return self.url.hostname == link.hostname
        elif self.follow_mode == self.F_SAME_PATH:
            return self.url.hostname == link.hostname and \
                link.path.startswith(self.url.path)
        return False
    def _add_target(self, url, depth):
        """Queue *url* at *depth* unless filtered, too deep, or seen before."""
        if not self._valid_link(url):
            return
        if self.max_depth and depth > self.max_depth:
            return
        with self.targets_lock:
            if url in self.queued:
                return
            self.queued.add(url)
            self.targets.add((depth, url))
    def _target_failed(self, url, depth):
        # Remember the failure so crawl() can retry it in a later round.
        with self.targets_lock:
            self.failed_targets.add((depth, url))
    def _spawn_new_worker(self):
        """Start another worker thread when below the concurrency cap."""
        with self.concurrency_lock:
            if self.concurrency < self.max_outstanding:
                self.concurrency += 1
                t = Thread(target=self._worker, args=(self.concurrency,))
                t.daemon = True
                self.threads.append(t)
                t.start()
    def _worker(self, sid):
        """Worker loop: fetch queued targets until the target set is empty."""
        while True:
            with self.targets_lock:
                if not self.targets:
                    break
                # Pick the smallest (depth, url) pair first.
                depth, url = sorted(self.targets)[0]
                self.targets.remove((depth, url))
            opener = cppman.util.build_opener(NoRedirection)
            request_error = None
            try:
                res = opener.open(url, timeout=10)
                with self.targets_lock:
                    self.downloaded = True
            except urllib.error.HTTPError as err:
                if err.code == 404:
                    # Permanently missing; do not record it for retry.
                    continue
                request_error = err
            except Exception as err:
                request_error = err
            if request_error is not None:
                print("URL failed ({}): {}".format(url, request_error))
                self._target_failed(url, depth)
                continue
            if res.status in self.F_REDIRECT_CODES:
                # Redirects are not followed automatically (NoRedirection);
                # queue the Location target one level deeper instead.
                target = self._fix_link(url, res.getheader('location'))
                self._add_target(target, depth+1)
                continue
            # Check content type
            try:
                if not re.search(
                        self.content_type_filter,
                        res.getheader('Content-Type')):
                    continue
            except TypeError: # getheader result is None
                print("Getting Content-Type failed ({})".format(url))
                continue
            try:
                content = res.read().decode()
            except http.client.HTTPException as err:
                print("Content read() failed ({}): {}".format(url, err))
                self._target_failed(url, depth)
                continue
            if self.process_document(url, content, depth):
                # Find links in document
                links = self.link_parser(url, content)
                for link in links:
                    self._add_target(link, depth+1)
            self._spawn_new_worker()
        with self.concurrency_lock:
            self.concurrency -= 1
| 8,733 | Python | .py | 219 | 29.059361 | 79 | 0.570434 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
# -*- coding: utf-8 -*-
#
# __init__.py
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <[email protected]>
# All Rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import os
package_dir = os.path.dirname(__file__)
def get_lib_path(filename):
return os.path.join(package_dir, 'lib', filename)
| 979 | Python | .py | 25 | 37.84 | 73 | 0.754737 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,116 | environ.py | aitjcize_cppman/cppman/environ.py | # -*- coding: utf-8 -*-
#
# environ.py
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <[email protected]>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import os
from cppman import get_lib_path
from cppman.config import Config
# XDG base-directory resolution; raises KeyError if HOME is unset.
HOME = os.environ["HOME"]
XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", os.path.join(HOME, ".cache"))
XDG_CONFIG_HOME = os.getenv("XDG_CONFIG_HOME", os.path.join(HOME, ".config"))
# Cache layout: <cache>/cppman with a 'manindex' subdirectory.
cache_dir = os.path.join(XDG_CACHE_HOME, 'cppman')
manindex_dir = os.path.join(cache_dir, 'manindex')
# Configuration file: <config>/cppman/cppman.cfg
config_dir = os.path.join(XDG_CONFIG_HOME, 'cppman')
config_file = os.path.join(config_dir, 'cppman.cfg')
config = Config(config_file)
try:
    os.makedirs(cache_dir)
    os.makedirs(manindex_dir)
    os.makedirs(config_dir)
    # NOTE(review): update_man3_link is not defined in this module; the call
    # presumably referenced a helper that no longer exists and is silently
    # swallowed by the bare except below -- confirm intent.
    update_man3_link()
except:
    # Best-effort setup: the directories may already exist.  The bare except
    # also hides the NameError from the call above.
    pass
# Prefer a user-built index database over the one shipped with the package.
index_db_re = os.path.join(cache_dir, 'index.db')
index_db = index_db_re if os.path.exists(index_db_re) \
    else get_lib_path('index.db')
pager = config.Pager
pager_config = get_lib_path('cppman.vim')
pager_script = get_lib_path('pager.sh')
# Fall back to the first known source when the configured one is invalid,
# and persist the correction back into the config file.
source = config.Source
if source not in config.SOURCES:
    source = config.SOURCES[0]
    config.Source = source
| 1,864 | Python | .py | 51 | 34.705882 | 77 | 0.743063 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,117 | main.py | aitjcize_cppman/cppman/main.py | # -*- coding: utf-8 -*-
#
# main.py
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <[email protected]>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import collections
import gzip
import html
import importlib
import os
import os.path
import re
import shutil
import sqlite3
import subprocess
import sys
from bs4 import BeautifulSoup
from cppman import environ, util
from cppman.crawler import Crawler
from urllib.parse import urlparse, unquote
def _sort_crawl(entry):
""" Sorting entries for putting '(1)' indexes behind keyword
1. keywords that have 'std::' in them have highest priority
2. priority if 'std::' is inside their name
3. sorting by keyword
4. sorting by name
"""
id, title, keyword, count = entry
hasStd1 = keyword.find("std::")
if hasStd1 == -1:
hasStd1 = 1
else:
hasStd1 = 0
hasStd2 = title.find("std::")
if hasStd2 == -1:
hasStd2 = 1
else:
hasStd2 = 0
return (hasStd1, hasStd2, keyword, title)
def _sort_search(entry, pattern):
""" Sort results
0. exact match goes first
1. sort by 'std::' (an entry with `std::` goes before an entry without)
2. sort by which position the keyword appears
"""
title, keyword, url = entry
if keyword == pattern:
# Exact match - lowest key value
return (-1, -1, 0, keyword)
hasStd1 = keyword.find("std::")
if hasStd1 == -1:
hasStd1 = 1
else:
hasStd1 = 0
hasStd2 = title.find("std::")
if hasStd2 == -1:
hasStd2 = 1
else:
hasStd2 = 0
return (hasStd1, hasStd2, keyword.find(pattern), keyword)
# Return the longest prefix of all list elements.
def _commonprefix(s1, s2):
"""" Given two strings, returns the longest common leading prefix """
if len(s1) > len(s2):
s1, s2 = s2, s1;
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
def _removeprefix(string, prefix):
if prefix and string.startswith(prefix):
return string[len(prefix):]
return string
def _removesuffix(string, suffix):
if suffix and string.endswith(suffix):
return string[:-len(suffix)]
return string
class Cppman(Crawler):
    """ Manage cpp man pages, indexes. """
    def __init__(self, forced=False, force_columns=-1):
        # forced: re-fetch/re-format pages even when a cached copy exists.
        # force_columns: fixed output width for the pager; -1 = autodetect.
        Crawler.__init__(self)
        self.forced = forced
        self.success_count = None
        self.failure_count = None
        self.force_columns = force_columns
    def rebuild_index(self):
        """ Rebuild index database from cplusplus.com and cppreference.com. """
        self.db_conn = sqlite3.connect(environ.index_db_re)
        self.db_cursor = self.db_conn.cursor()
        try:
            # Skip static assets while crawling; only follow same-path links.
            self.add_url_filter(r'\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
            self.set_follow_mode(Crawler.F_SAME_PATH)
            sources = [('cplusplus.com', 'https://cplusplus.com/reference/', None),
                       ('cppreference.com', 'https://en.cppreference.com/w/cpp', '/w/cpp')]
            # One table pair per source: "<source>" maps titles to URLs,
            # "<source>_keywords" maps search keywords onto those rows.
            for table, url, path in sources:
                """ Drop and recreate tables. """
                self.db_cursor.execute(
                    'DROP TABLE IF EXISTS "%s"'
                    % table)
                self.db_cursor.execute(
                    'DROP TABLE IF EXISTS "%s_keywords"'
                    % table)
                self.db_cursor.execute(
                    'CREATE TABLE "%s" ('
                    'id INTEGER NOT NULL PRIMARY KEY, '
                    'title VARCHAR(255) NOT NULL UNIQUE, '
                    'url VARCHAR(255) NOT NULL UNIQUE'
                    ')' % table)
                self.db_cursor.execute(
                    'CREATE TABLE "%s_keywords" ('
                    'id INTEGER NOT NULL, '
                    'keyword VARCHAR(255), '
                    'FOREIGN KEY(id) REFERENCES "%s"(id)'
                    ')' % (table, table))
                """ Crawl and insert all entries. """
                self.results = collections.defaultdict(list)
                self.crawl(url)
                results = self._results_with_unique_title()
                for title in results:
                    """ 1. insert title """
                    self.db_cursor.execute(
                        'INSERT INTO "%s" (title, url) VALUES (?, ?)'
                        % table, (title, results[title]["url"]))
                    lastRow = self.db_cursor.execute(
                        'SELECT last_insert_rowid()').fetchall()[0][0]
                    """ 2. insert all keywords """
                    for k in results[title]["keywords"]:
                        self.db_cursor.execute(
                            'INSERT INTO "%s_keywords" (id, keyword) '
                            'VALUES (?, ?)'
                            % table, (lastRow, k))
                """ 3. add all aliases """
                for title in results:
                    for (k, a) in results[title]["aliases"]:
                        """ search for combinations of words
                            e.g. std::basic_string::append
                        """
                        sql_results = self.db_cursor.execute(
                            'SELECT id, keyword FROM "%s_keywords" '
                            'WHERE keyword LIKE "%%::%s::%%" '
                            'OR keyword LIKE "%s::%%" '
                            'OR keyword LIKE "%s" '
                            'OR keyword LIKE "%s %%" '
                            'OR keyword LIKE "%s)%%" '
                            'OR keyword LIKE "%s,%%"'
                            % (table, k, k, k, k, k, k)).fetchall()
                        # Clone every matching keyword with the alias text
                        # substituted for the original name.
                        for id, keyword in sql_results:
                            keyword = keyword.replace("%s" % k, "%s" % a)
                            self.db_cursor.execute(
                                'INSERT INTO "%s_keywords" (id, keyword) '
                                'VALUES (?, ?)'
                                % table, (id, keyword))
                self.db_conn.commit()
                """ remove duplicate keywords that link the same page """
                self.db_cursor.execute(
                    'DELETE FROM "%s_keywords" WHERE rowid NOT IN ('
                    'SELECT min(rowid) FROM "%s_keywords" '
                    'GROUP BY id, keyword '
                    ')' % (table, table)).fetchall()
                """ give duplicate keywords with different links entry numbers """
                results = self.db_cursor.execute(
                    'SELECT t3.id, t3.title, t2.keyword, t1.count '
                    'FROM ('
                    ' SELECT keyword, COUNT(*) AS count FROM "%s_keywords" '
                    ' GROUP BY keyword HAVING count > 1) AS t1 '
                    'JOIN "%s_keywords" AS t2 '
                    'JOIN "%s" AS t3 '
                    'WHERE t1.keyword = t2.keyword AND t3.id = t2.id '
                    'ORDER BY t2.keyword, t3.title'
                    % (table, table, table)).fetchall()
                keywords = {}
                results = sorted(results, key=_sort_crawl)
                # Append ' (1)', ' (2)', ... to keywords shared by pages.
                for id, title, keyword, count in results:
                    if not keyword in keywords:
                        keywords[keyword] = 0
                    keywords[keyword] += 1
                    new_keyword = "%s (%s)" % (keyword, keywords[keyword])
                    self.db_cursor.execute(
                        'UPDATE "%s_keywords" SET keyword=? WHERE '
                        'id=? AND keyword=?'
                        % table, (new_keyword, id, keyword))
            self.db_conn.commit()
        except KeyboardInterrupt:
            # A partially-built index is useless; remove it before
            # propagating the interrupt.
            os.remove(environ.index_db_re)
            raise KeyboardInterrupt
        finally:
            self.db_conn.close()
    def process_document(self, url, content, depth):
        """callback to insert index"""
        # Invoked by Crawler for every fetched page; collects the page
        # title, its keywords and alias pairs into self.results.
        print("Indexing '%s' (depth %s)..." % (url, depth))
        name = self._extract_name(content).replace('\n', '')
        keywords = self._extract_keywords(content)
        entry = {'url': url, 'keywords': set(), 'aliases': set()}
        self.results[name].append(entry)
        for n in self._parse_title(name):
            """ add as keyword """
            entry["keywords"].add(n)
            """ add as keyword without std:: """
            if n.find("std::") != -1:
                entry["keywords"].add(n.replace('std::', ''))
            """ add with all keywords variations """
            for k in keywords:
                """ add std:: to typedef if original type is in std namespace """
                if n.find("std::") != -1 and k.find("std::") == -1:
                    k = "std::" + k;
                entry["aliases"].add((n, k))
                prefix = _commonprefix(n, k)
                if len(prefix) > 2 and prefix[-2:] == "::":
                    """ Create names and keyword without prefixes """
                    new_name = n[len(prefix):]
                    new_key = k[len(prefix):]
                    entry["aliases"].add((new_name, new_key))
                if k.find("std::") != -1:
                    entry["aliases"].add(
                        (n, k.replace('std::', '')))
        return True
    def _results_with_unique_title(self):
        """process crawling results and return title -> entry dictionary;
           add part of the path to entries having the same title
        """
        results = dict()
        for title, entries in self.results.items():
            if len(entries) == 1:
                results[title] = entries[0]
            else:
                # Several pages share one title: disambiguate them with the
                # part of the URL path that actually differs.
                paths = [_removesuffix(urlparse(entry['url'])[2], '/') for entry in entries]
                prefix = os.path.commonpath(paths)
                if prefix:
                    prefix += '/'
                suffix = '/' + os.path.basename(paths[0])
                for path in paths:
                    if not path.endswith(suffix):
                        suffix = ''
                        break
                for index, entry in enumerate(entries):
                    path = _removeprefix(paths[index], prefix)
                    path = _removesuffix(path, suffix)
                    results["{} ({})".format(title, unquote(path))] = entry
        return results
    def _extract_name(self, data):
        """Extract man page name from web page."""
        name = re.search('<[hH]1[^>]*>(.+?)</[hH]1>', data, re.DOTALL).group(1)
        name = re.sub(r'<([^>]+)>', r'', name)
        # NOTE(review): the two substitutions below are no-ops ('>' -> '>',
        # '<' -> '<'); they look like entity-decoded '&gt;'/'&lt;' patterns.
        # html.unescape below decodes entities anyway -- confirm and clean up.
        name = re.sub(r'>', r'>', name)
        name = re.sub(r'<', r'<', name)
        return html.unescape(name)
    def _parse_expression(self, expr):
        """
        split expression into prefix and expression
        tested with
        ```
        operator==
        !=
        std::rel_ops::operator!=
        std::atomic::operator=
        std::array::operator[]
        std::function::operator()
        std::vector::at
        std::relational operators
        std::vector::begin
        std::abs(float)
        std::fabs()
        ```
        """
        m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr)
        prefix = m.group(1)
        tail = m.group(2)
        return [prefix, tail]
    def _parse_title(self, title):
        """
        split of the last parenthesis operator==,!=,<,<=(std::vector)
        tested with
        ```
        operator==,!=,<,<=,>,>=(std::vector)
        operator==,!=,<,<=,>,>=(std::vector)
        operator==,!=,<,<=,>,>=
        operator==,!=,<,<=,>,>=
        std::rel_ops::operator!=,>,<=,>=
        std::atomic::operator=
        std::array::operator[]
        std::function::operator()
        std::vector::at
        std::relational operators (vector)
        std::vector::begin, std::vector::cbegin
        std::abs(float), std::fabs
        std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
        ```
        """
        """ remove all template stuff """
        title = re.sub(r" ?<[^>]+>", "", title)
        m = re.match(
            r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
        postfix = m.group(2)
        t_names = m.group(1).split(',')
        t_names = [n.strip() for n in t_names]
        # Every comma-separated name inherits the prefix of the first one
        # (e.g. 'std::vector::begin, cbegin' -> 'std::vector::cbegin').
        prefix = self._parse_expression(t_names[0])[0]
        names = []
        for n in t_names:
            r = self._parse_expression(n)
            if prefix == r[0]:
                names.append(n + postfix)
            else:
                names.append(prefix + r[1] + postfix)
        return names
    def _extract_keywords(self, text):
        """
        extract aliases like std::string, template specializations like std::atomic_bool
        and helper functions like std::is_same_v
        """
        soup = BeautifulSoup(text, "lxml")
        names = []
        # search for typedef list
        for x in soup.find_all('table'):
            # just searching for "Type" is not enough, see std::is_same
            p = x.find_previous_sibling('h3')
            if p:
                if p.get_text().strip() == "Member types":
                    continue
            typedefTable = False
            for tr in x.find_all('tr'):
                tds = tr.find_all('td')
                if len(tds) == 2:
                    if re.match(r"\s*Type\s*", tds[0].get_text()):
                        typedefTable = True
                    elif typedefTable:
                        res = re.search(r'^\s*(\S*)\s+.*$', tds[0].get_text())
                        if res and res.group(1):
                            names.append(res.group(1))
                    elif not typedefTable:
                        break
            if typedefTable:
                break
        # search for "Helper variable template" list
        for x in soup.find_all('h3'):
            variableTemplateHeader = False
            if x.find('span', id="Helper_variable_template"):
                e = x.find_next_sibling()
                while e.name == "":
                    e = e.find_next_sibling()
                if e.name == "table":
                    for tr in e.find_all('tr'):
                        text = re.sub('\n', ' ', tr.get_text())
                        res = re.search(r'^.* (\S+)\s*=.*$', text)
                        if res:
                            names.append(res.group(1))
        # search for "Helper types" list
        for x in soup.find_all('h3'):
            variableTemplateHeader = False
            if x.find('span', id="Helper_types"):
                e = x.find_next_sibling()
                while e.name == "":
                    e = e.find_next_sibling()
                if e.name == "table":
                    for tr in e.find_all('tr'):
                        text = re.sub('\n', ' ', tr.get_text())
                        res = re.search(r'^.* (\S+)\s*=.*$', text)
                        if res:
                            names.append(res.group(1))
        return [html.unescape(n) for n in names]
    def cache_all(self):
        """Cache all available man pages"""
        respond = input(
            'By default, cppman fetches pages on-the-fly if corresponding '
            'page is not found in the cache. The "cache-all" option is only '
            'useful if you want to view man pages offline. '
            'Caching all contents will take several minutes, '
            'do you want to continue [y/N]? ')
        if not (respond and 'yes'.startswith(respond.lower())):
            raise KeyboardInterrupt
        try:
            os.makedirs(environ.cache_dir)
        except:
            pass
        self.success_count = 0
        self.failure_count = 0
        if not os.path.exists(environ.index_db):
            raise RuntimeError("can't find index.db")
        conn = sqlite3.connect(environ.index_db)
        cursor = conn.cursor()
        source = environ.config.source
        print('Caching manpages from %s ...' % source)
        data = cursor.execute('SELECT title, url FROM "%s"' % source).fetchall()
        for name, url in data:
            print('Caching %s ...' % name)
            retries = 3
            # Retry each page up to three times; the while/else branch runs
            # only when every retry failed.
            while retries > 0:
                try:
                    self.cache_man_page(source, url, name)
                except Exception:
                    print('Retrying ...')
                    retries -= 1
                else:
                    self.success_count += 1
                    break
            else:
                print('Error caching %s ...' % name)
                self.failure_count += 1
        conn.close()
        print('\n%d manual pages cached successfully.' % self.success_count)
        print('%d manual pages failed to cache.' % self.failure_count)
        self.update_mandb(False)
    def cache_man_page(self, source, url, name):
        """callback to cache new man page"""
        # Skip if already exists, override if forced flag is true
        outname = self.get_page_path(source, name)
        if os.path.exists(outname) and not self.forced:
            return
        try:
            os.makedirs(os.path.join(environ.cache_dir, source))
        except OSError:
            pass
        # There are often some errors in the HTML, for example: missing closing
        # tag. We use fixupHTML to fix this.
        data = util.fixupHTML(util.urlopen(url).read())
        # source[:-4] strips '.com': 'cppreference.com' ->
        # cppman.formatter.cppreference, which provides html2groff().
        formatter = importlib.import_module(
            'cppman.formatter.%s' % source[:-4])
        groff_text = formatter.html2groff(data, name)
        with gzip.open(outname, 'w') as f:
            f.write(groff_text.encode('utf-8'))
    def clear_cache(self):
        """Clear all cache in man"""
        shutil.rmtree(environ.cache_dir)
    def _fetch_page_by_keyword(self, keyword):
        """ fetches result for a keyword """
        # keyword is embedded into a LIKE pattern; callers pass the '%'
        # wildcards as part of the keyword string.
        return self.cursor.execute(
            'SELECT t1.title, t2.keyword, t1.url '
            'FROM "%s" AS t1 '
            'JOIN "%s_keywords" AS t2 '
            'WHERE t1.id = t2.id AND t2.keyword '
            'LIKE ? ORDER BY t2.keyword'
            % (self.source, self.source), ['%%%s%%' % keyword]).fetchall()
    def _search_keyword(self, pattern):
        """ multiple fetches for each pattern """
        if not os.path.exists(environ.index_db):
            raise RuntimeError("can't find index.db")
        conn = sqlite3.connect(environ.index_db)
        self.cursor = conn.cursor()
        self.source = environ.source
        self.cursor.execute('PRAGMA case_sensitive_like=ON')
        # Query from more to less specific (exact, word-boundary, prefix);
        # only fall back to a bare substring search when nothing matched.
        results = self._fetch_page_by_keyword("%s" % pattern)
        results.extend(self._fetch_page_by_keyword("%s %%" % pattern))
        results.extend(self._fetch_page_by_keyword("%% %s" % pattern))
        results.extend(self._fetch_page_by_keyword("%% %s %%" % pattern))
        results.extend(self._fetch_page_by_keyword("%s%%" % pattern))
        if len(results) == 0:
            results = self._fetch_page_by_keyword("%%%s%%" % pattern)
        conn.close()
        return sorted(list(set(results)), key=lambda e: _sort_search(e, pattern))
    def man(self, pattern):
        """Call viewer.sh to view man page"""
        results = self._search_keyword(pattern)
        if len(results) == 0:
            raise RuntimeError('No manual entry for %s ' % pattern)
        page_name, keyword, url = results[0]
        try:
            avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
        except OSError:
            avail = []
        page_filename = self.get_normalized_page_name(page_name)
        # Fetch/format the page on demand unless a cached copy exists.
        if self.forced or page_filename + '.3.gz' not in avail:
            self.cache_man_page(environ.source, url, page_name)
        pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
        # Call viewer
        columns = (util.get_width() if self.force_columns == -1 else
                   self.force_columns)
        # Child process replaces itself with the pager script; the parent
        # returns the child's pid so the caller can wait on it.
        pid = os.fork()
        if pid == 0:
            os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
                     self.get_page_path(environ.source, page_name),
                     str(columns), environ.pager_config, pattern)
        return pid
    def find(self, pattern):
        """Find pages in database."""
        results = self._search_keyword(pattern)
        pat = re.compile(r'(.*?)(%s)(.*?)( \(.*\))?$' %
                         re.escape(pattern), re.I)
        if results:
            for name, keyword, url in results:
                # Highlight the match with ANSI colors only on a terminal.
                if os.isatty(sys.stdout.fileno()):
                    keyword = pat.sub(
                        r'\1\033[1;31m\2\033[0m\3\033[1;33m\4\033[0m', keyword)
                print("%s - %s" % (keyword, name))
        else:
            raise RuntimeError('%s: nothing appropriate.' % pattern)
    def update_mandb(self, quiet=True):
        """Update mandb."""
        # Only touch the system man database if the user enabled it.
        if not environ.config.UpdateManPath:
            return
        print('\nrunning mandb...')
        cmd = 'mandb %s' % (' -q' if quiet else '')
        subprocess.Popen(cmd, shell=True).wait()
    def get_normalized_page_name(self, name):
        # '/' in a page name would create subdirectories on disk.
        return name.replace('/', '_')
    def get_page_path(self, source, name):
        # Cached pages live at <cache>/<source>/<name>.3.gz
        name = self.get_normalized_page_name(name)
        return os.path.join(environ.cache_dir, source, name + '.3.gz')
| 22,210 | Python | .py | 514 | 30.287938 | 92 | 0.50838 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,118 | tableparser.py | aitjcize_cppman/cppman/formatter/tableparser.py | # -*- coding: utf-8 -*-
#
# tableparser.py - format html from cplusplus.com to groff syntax
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <[email protected]>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import io
import platform
import re
# Matches one HTML element with a matching close tag and captures
# (tag name, raw attribute string, inner body); re.S lets bodies span lines.
NODE = re.compile(r'<\s*([^/]\w*)\s?(.*?)>(.*?)<\s*/\1.*?>', re.S)
# Matches a single key='value' / key="value" attribute pair, honouring
# backslash escapes inside the quoted value.
ATTR = re.compile(r'\s*(\w+?)\s*=\s*([\'"])((?:\\.|(?!\2).)*)\2')
class Node(object):
    """A minimal HTML element tree used to translate <table> markup into
    groff tbl(1) input.  Cells (<th>/<td>) are leaves holding their
    tag-stripped text; every other element holds child Nodes parsed
    recursively from its body.
    """
    def __init__(self, parent, name, attr_list, body):
        # parent: enclosing Node (None for the synthetic root)
        # name: tag name; attr_list: raw attribute string; body: inner HTML
        self.parent = parent
        self.name = name
        self.body = body
        self.attr = dict((x[0], x[2]) for x in ATTR.findall(attr_list))
        if self.name in ['th', 'td']:
            # Table cells are leaves: keep only their visible text.
            self.text = self.strip_tags(self.body)
            self.children = []
        else:
            self.text = ''
            self.children = [Node(self, *g) for g in NODE.findall(self.body)]
    def __repr__(self):
        return "<Node('%s')>" % self.name
    def strip_tags(self, html):
        # Used both directly and as a re.sub callback, hence the type check
        # (the callback receives a match object, not a string).
        if type(html) != str:
            html = html.group(3)
        return NODE.sub(self.strip_tags, html)
    def traverse(self, depth=0):
        # Debug helper: print the tree, indenting two spaces per level.
        print('%s%s: %s %s' % (' ' * depth, self.name, self.attr, self.text))
        for c in self.children:
            c.traverse(depth + 2)
    def get_row_width(self):
        # Number of columns a <tr> spans, honouring colspan attributes.
        total = 0
        assert self.name == 'tr'
        for c in self.children:
            if 'colspan' in c.attr:
                total += int(c.attr['colspan'])
            else:
                total += 1
        return total
    def scan_format(self, index=0, width=0, rowspan=None):
        # Build the tbl(1) format section for this subtree.
        # rowspan maps column index -> remaining rows covered by a cell above.
        if rowspan is None:
            rowspan = {}
        format_str = ''
        # NOTE(review): the 'x' (expand-column) modifier is skipped on
        # Darwin -- presumably macOS ships a tbl without that extension.
        expand_char = 'x' if platform.system() != 'Darwin' else ''
        if self.name in ['th', 'td']:
            # Let one column expand to fill the line width.
            extend = ((width == 3 and index == 1) or
                      (width != 3 and width < 5 and index == width - 1))
            if self.name == 'th':
                format_str += 'c%s ' % (expand_char if extend else '')
            else:
                format_str += 'l%s ' % (expand_char if extend else '')
            if 'colspan' in self.attr:
                # 's' marks columns spanned horizontally by this cell.
                for i in range(int(self.attr['colspan']) - 1):
                    format_str += 's '
            if 'rowspan' in self.attr and int(self.attr['rowspan']) > 1:
                rowspan[index] = int(self.attr['rowspan']) - 1
        if self.name == 'tr' and len(rowspan) > 0:
            ci = 0
            for i in range(width):
                if i in rowspan:
                    # '^' marks a column continued from the row above.
                    format_str += '^ '
                    if rowspan[i] == 1:
                        del rowspan[i]
                    else:
                        rowspan[i] -= 1
                else:
                    # There is a row span, but the current number of column is
                    # not enough. Pad empty node when this happens.
                    if ci >= len(self.children):
                        self.children.append(Node(self, 'td', '', ''))
                    format_str += self.children[ci].scan_format(i, width,
                                                                rowspan)
                    ci += 1
        else:
            if self.children and self.children[0].name == 'tr':
                width = self.children[0].get_row_width()
            for i, c in enumerate(self.children):
                format_str += c.scan_format(i, width, rowspan)
        if self.name == 'table':
            # '.' terminates the tbl format section.
            format_str += '.\n'
        elif self.name == 'tr':
            format_str += '\n'
        return format_str
    def gen(self, fd, index=0, last=False, rowspan=None):
        # Emit the tbl(1) data section for this subtree into fd.
        if rowspan is None:
            rowspan = {}
        if self.name == 'table':
            fd.write('.TS\n')
            fd.write('allbox tab(|);\n')
            fd.write(self.scan_format())
        elif self.name in ['th', 'td']:
            # T{ ... T} wraps cell text that may contain special characters.
            fd.write('T{\n%s' % self.text)
            if 'rowspan' in self.attr and int(self.attr['rowspan']) > 1:
                rowspan[index] = int(self.attr['rowspan']) - 1
        else:
            fd.write(self.text)
        if self.name == 'tr' and len(rowspan) > 0:
            total = len(rowspan) + len(self.children)
            ci = 0
            for i in range(total):
                if i in rowspan:
                    # '\^' repeats the cell from the row above.
                    fd.write(r'\^%s' % ('|' if i < total - 1 else ''))
                    if rowspan[i] == 1:
                        del rowspan[i]
                    else:
                        rowspan[i] -= 1
                else:
                    # There is a row span, but the current number of column is
                    # not enough. Pad empty node when this happens.
                    if ci >= len(self.children):
                        self.children.append(Node(self, 'td', '', ''))
                    self.children[ci].gen(fd, i, i == total - 1, rowspan)
                    ci += 1
        else:
            for i, c in enumerate(self.children):
                c.gen(fd, i, i == len(self.children) - 1, rowspan)
        if self.name == 'table':
            fd.write('.TE\n')
            fd.write('.sp\n.sp\n')
        elif self.name == 'tr':
            fd.write('\n')
        elif self.name in ['th', 'td']:
            fd.write('\nT}%s' % ('|' if not last else ''))
def parse_table(html):
    """Convert an HTML <table> fragment into groff tbl(1) markup."""
    buf = io.StringIO()
    tree = Node(None, 'root', '', html)
    tree.gen(buf)
    return buf.getvalue()
| 6,099 | Python | .py | 148 | 29.72973 | 78 | 0.504222 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,119 | cppreference.py | aitjcize_cppman/cppman/formatter/cppreference.py | # -*- coding: utf-8 -*-
#
# formatter.py - format html from cplusplus.com to groff syntax
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <[email protected]>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import datetime
import re
import string
from functools import partial
from cppman.formatter.tableparser import parse_table
from cppman.util import fixupHTML, html2man, urlopen
def member_table_def(g):
    """Render a member row whose second column embeds a table.

    *g* is a regex match: group 1 is the member name, group 2 the leading
    description text and group 3 the inner HTML of the embedded table.
    """
    inner = str(g.group(3))
    tbl = parse_table('<table>%s</table>' % inner)
    # Escape column with '.' as prefix
    dot_column = re.compile(r'T{\n(\..*?)\nT}', re.S)
    tbl = dot_column.sub(r'T{\n\\E \1\nT}', tbl)
    return '\n.IP "%s"\n%s\n%s\n' % (g.group(1), g.group(2), tbl)
def member_type_function(g):
    """Format one member-function row as a groff .IP entry.

    *g* is a regex match: group 1 holds the linked member name(s) and
    group 2 the description.  Rows without a hyperlink are dropped.
    """
    if "<a href=" not in g.group(1):
        return ""
    head = re.sub(r'<.*?>', '', g.group(1)).strip()
    tail = ''
    # Move a trailing [static] / [virtual] marker out of the name.
    spectag = re.search(r'^(.*?)(\[(?:static|virtual)\])(.*)$', head)
    if spectag:
        head = spectag.group(1).strip() + ' ' + spectag.group(3).strip()
        tail = ' ' + spectag.group(2)
    # Likewise for trailing [since C++NN] / [until C++NN] markers.
    cppvertag = re.search(
        r'^(.*?)(\[(?:(?:since|until) )?C\+\+\d+\]\s*(,\s*)?)+$', head)
    if cppvertag:
        head = cppvertag.group(1).strip()
        tail = ' ' + cppvertag.group(2)
    # Append the man-section suffix to every comma-separated name.
    if ',' in head:
        head = ', '.join(x.strip() + ' (3)' for x in head.split(','))
    else:
        head = head.strip() + ' (3)'
    entry = (head + tail).replace('"', '\\(dq')
    return '\n.IP "%s"\n%s\n' % (entry, g.group(2))
NAV_BAR_END = '<div class="t-navbar-sep">.?</div></div>'

# Format replacement RE list, applied in order by html2groff().
# Each entry is (pattern, replacement, flags).
# The '.SE' pseudo macro is described in the function: html2groff
rps = [
    # Workaround: remove <p> in t-dcl
    (r'<tr class="t-dcl">(.*?)</tr>',
     lambda g: re.sub('<p/?>', '', g.group(1)), re.S),
    # Header, Name
    (r'<h1.*?>(.*?)</h1>',
     r'\n.TH "{{name}}" 3 "%s" "cppreference.com" "C++ Programmer\'s Manual"\n'
     r'\n.SH "NAME"\n{{name}} {{shortdesc}}\n.SE\n' % datetime.date.today(),
     re.S),
    # Defined in header
    (r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END + r'.*?'
     r'Defined in header <code>(.*?)</code>(.*?)<tr class="t-dcl-sep">',
     r'\n.SH "SYNOPSIS"\n#include \1\n.sp\n'
     r'.nf\n\2\n.fi\n.SE\n'
     r'\n.SH "DESCRIPTION"\n', re.S),
    (r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END +
     r'(.*?)<tr class="t-dcl-sep">',
     r'\n.SH "SYNOPSIS"\n.nf\n\1\n.fi\n.SE\n'
     r'\n.SH "DESCRIPTION"\n', re.S),
    # <unordered_map>
    (r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END +
     r'(.*?)<table class="t-dsc-begin">',
     r'\n.SH "DESCRIPTION"\n\1\n', re.S),
    # access specifiers
    (r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END +
     r'(.*?)<h3',
     r'\n.SH "DESCRIPTION"\n\1\n<h3', re.S),
    (r'<td>\s*\([0-9]+\)\s*</td>', r'', 0),
    # Section headers
    (r'<div class="t-inherited">.*?<h2>.*?Inherited from\s*(.*?)\s*</h2>',
     r'\n.SE\n.IEND\n.IBEGIN \1\n', re.S),
    # Remove tags
    (r'<span class="edit.*?">.*?</span> ?', r'', re.S),
    # FIX: this pattern had been entity-decoded to r'[edit]' -- a character
    # class that deleted every 'e', 'd', 'i' and 't' from the page.  The
    # intended pattern is the numeric-entity form of the "[edit]" link.
    (r'&#91;edit&#93;', r'', re.S),
    (r'\[edit\]', r'', re.S),
    (r'<div id="siteSub">.*?</div>', r'', 0),
    (r'<div id="contentSub">.*?</div>', r'', 0),
    (r'<table class="toc" id="toc"[^>]*>.*?</table>', r'', re.S),
    (r'<h2[^>]*>.*?</h2>', r'', re.S),
    (r'<div class="coliru-btn coliru-btn-run-init">.*?</div>', r'', re.S),
    (r'<tr class="t-dsc-hitem">.*?</tr>', r'', re.S),
    # C++11/14/17/20
    (r'\(((?:since|until) C\+\+\d+)\)', r' [\1]', re.S),
    (r'\((C\+\+\d+)\)', r' [\1]', re.S),
    # Subsections
    (r'<h5[^>]*>\s*(.*)</h5>', r'\n.SS "\1"\n', 0),
    # Group t-lines
    (r'<span></span>', r'', re.S),
    (r'<span class="t-lines">(?:<span>.+?</span>.*)+</span>',
     lambda x: re.sub(r'\s*</span><span>\s*', r', ', x.group(0)), re.S),
    # Member type & function second col is group see basic_fstream for example
    (r'<tr class="t-dsc">\s*?<td>((?:(?!</td>).)*?)</td>\s*?'
     r'<td>((?:(?!</td>).)*?)<table[^>]*>((?:(?!</table>).)*?)</table>'
     r'(?:(?!</td>).)*?</td>\s*?</tr>',
     member_table_def, re.S),
    # Section headers
    (r'.*<h3>(.+?)</h3>', r'\n.SE\n.SH "\1"\n', 0),
    # Member type & function
    (r'<tr class="t-dsc">\n?<td>\s*(.*?)\n?</td>.*?<td>\s*(.*?)</td>.*?</tr>',
     member_type_function, re.S),
    # Parameters
    (r'<tr class="t-par">.*?<td>\s*(.*?)\n?</td>.*?<td>.*?</td>.*?'
     r'<td>\s*(.*?)</td>.*?</tr>',
     r'\n.IP "\1"\n\2\n', re.S),
    # 'ul' tag
    (r'<ul>', r'\n.RS 2\n', 0),
    (r'</ul>', r'\n.RE\n.sp\n', 0),
    # 'li' tag
    (r'<li>\s*(.+?)</li>', r'\n.IP \[bu] 3\n\1\n', re.S),
    # 'pre' tag
    (r'<pre[^>]*>(.+?)</pre\s*>', r'\n.in +2n\n.nf\n\1\n.fi\n.in\n', re.S),
    # Footer
    (r'<div class="printfooter">',
     r'\n.SE\n.IEND\n.SH "REFERENCE"\n'
     r'cppreference.com, 2015 - All rights reserved.', re.S),
    # C++ version tag
    (r'<div title="(C\+\+..)"[^>]*>', r'.sp\n\1\n', 0),
    # Output
    (r'<p>Output:\n?</p>', r'\n.sp\nOutput:\n', re.S),
    # Paragraph
    (r'<p>(.*?)</p>', r'\n\1\n.sp\n', re.S),
    (r'<div class="t-li1">(.*?)</div>', r'\n\1\n.sp\n', re.S),
    (r'<div class="t-li2">(.*?)</div>',
     r'\n.RS\n\1\n.RE\n.sp\n', re.S),
    # 'br' tag
    (r'<br/>', r'\n.br\n', 0),
    (r'\n.br\n.br\n', r'\n.sp\n', 0),
    # 'dd' 'dt' tag
    (r'<dt>(.+?)</dt>\s*<dd>(.+?)</dd>', r'\n.IP "\1"\n\2\n', re.S),
    # Bold
    (r'<strong>(.+?)</strong>', r'\n.B \1\n', 0),
    # Any other tags
    (r'<script[^>]*>[^<]*</script>', r'', 0),
    (r'<.*?>', r'', re.S),
    # Escape
    (r'^#', r'\#', 0),
    # FIX: restored the numeric-entity pattern, which had been decoded to a
    # literal character and no longer matched the '&#160;' text in the HTML.
    (r'&#160;', ' ', 0),
    (r'&#(\d+);', lambda g: chr(int(g.group(1))), 0),
    # Misc
    # FIX: the five entity translations below had been HTML-entity-decoded
    # into no-ops (e.g. '<' -> '<'); restore the entity forms so &lt;, &gt;,
    # &quot;, &amp; and &nbsp; are actually translated.
    (r'&lt;', r'<', 0),
    (r'&gt;', r'>', 0),
    (r'&quot;', r'"', 0),
    (r'&amp;', r'&', 0),
    (r'&nbsp;', r' ', 0),
    (r'\\([^\^nE])', r'\\\\\1', 0),
    (r'>/">', r'', 0),
    (r'/">', r'', 0),
    # Remove empty sections
    (r'\n.SH (.+?)\n+.SE', r'', 0),
    # Remove empty lines
    (r'\n\s*\n+', r'\n', 0),
    (r'\n\n+', r'\n', 0),
    # Preserve \n" in EXAMPLE
    (r'\\n', r'\\en', 0),
    # Remove leading whitespace
    (r'^\s+', r'', re.S),
    # Trailing white-spaces
    (r'\s+\n', r'\n', re.S),
    # Remove extra whitespace and newline in .SH/SS/IP section
    (r'.(SH|SS|IP) "\s*(.*?)\s*\n?"', r'.\1 "\2"', 0),
    # Remove extra whitespace before .IP bullet
    (r'(.IP \\\\\[bu\] 3)\n\s*(.*?)\n', r'\1\n\2\n', 0),
    # Remove extra '\n' before C++ version Tag (don't do it in table)
    (r'(?<!T{)\n\s*(\[(:?since|until) C\+\+\d+\])', r' \1', re.S)
]
def html2groff(data, name):
    """Convert HTML text from cppreference.com to Groff-formatted text.

    data: raw HTML of a cppreference.com page.
    name: the man-page name (e.g. 'std::vector'); used in the .TH/.SH
          headers and as the class prefix for member-function entries.
    Returns the groff source for the page.
    """
    # Remove header and footer
    try:
        data = data[data.index('<div id="cpp-content-base">'):]
        data = data[:data.index('<div class="printfooter">') + 25]
    except ValueError:
        pass
    # Remove non-printable characters
    data = ''.join([x for x in data if x in string.printable])
    # Convert wiki/description tables to tbl(1) markup first, so the
    # generic tag replacements below do not tear them apart.
    for table in re.findall(
            r'<table class="(?:wikitable|dsctable)"[^>]*>.*?</table>',
            data, re.S):
        tbl = parse_table(table)
        # Escape column with '.' as prefix
        tbl = re.compile(r'T{\n(\..*?)\nT}', re.S).sub(r'T{\n\\E \1\nT}', tbl)
        data = data.replace(table, tbl)
    # Pre replace all
    for rp in rps:
        data = re.compile(rp[0], rp[2]).sub(rp[1], data)
    # Remove non-printable characters
    data = ''.join([x for x in data if x in string.printable])
    # Upper case all section headers
    for st in re.findall(r'.SH .*\n', data):
        data = data.replace(st, st.upper())
    # Add tags to member/inherited member functions
    # e.g. insert -> vector::insert
    #
    # .SE is a pseudo macro I created which means 'SECTION END'
    # The reason I use it is because I need a marker to know where section
    # ends.
    # re.findall find patterns which does not overlap, which means if I do
    # this: secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SH', data, re.S)
    # re.findall will skip the later .SH tag and thus skip the later section.
    # To fix this, '.SE' is used to mark the end of the section so the next
    # '.SH' can be find by re.findall
    try:
        idx = data.index('.IEND')
    except ValueError:
        idx = None

    def add_header_multi(prefix, g):
        # Prefix every comma-separated member name with 'Class::'.
        if ',' in g.group(1):
            res = ', '.join(['%s::%s' % (prefix, x.strip())
                             for x in g.group(1).split(',')])
        else:
            res = '%s::%s' % (prefix, g.group(1))
        return '\n.IP "%s"' % res

    if idx:
        class_name = name
        if class_name.startswith('std::'):
            normalized_class_name = class_name[len('std::'):]
        else:
            normalized_class_name = class_name
        # Everything before '.IEND' belongs to the class itself; sections
        # after it were inherited and are tagged separately below.
        class_member_content = data[:idx]
        secs = re.findall(r'\.SH "(.+?)"(.+?)\.SE', class_member_content, re.S)
        for sec, content in secs:
            # Member functions
            if (('MEMBER' in sec and
                 'NON-MEMBER' not in sec and
                 'INHERITED' not in sec and
                 'MEMBER TYPES' != sec) or
                'CONSTANTS' == sec):
                content2 = re.sub(r'\n\.IP "([^:]+?)"',
                                  partial(add_header_multi, class_name),
                                  content)
                # Replace (constructor) (destructor)
                content2 = re.sub(r'\(constructor\)', r'%s' %
                                  normalized_class_name, content2)
                content2 = re.sub(r'\(destructor\)', r'~%s' %
                                  normalized_class_name, content2)
                data = data.replace(content, content2)
    blocks = re.findall(r'\.IBEGIN\s*(.+?)\s*\n(.+?)\.IEND', data, re.S)
    for inherited_class, content in blocks:
        content2 = re.sub(r'\.SH "(.+?)"', r'\n.SH "\1 INHERITED FROM %s"'
                          % inherited_class.upper(), content)
        data = data.replace(content, content2)
        secs = re.findall(r'\.SH "(.+?)"(.+?)\.SE', content, re.S)
        for sec, content in secs:
            # Inherited member functions
            if 'MEMBER' in sec and \
               sec != 'MEMBER TYPES':
                content2 = re.sub(r'\n\.IP "(.+)"',
                                  partial(add_header_multi, inherited_class),
                                  content)
                data = data.replace(content, content2)
    # Remove unneeded pseudo macro
    data = re.sub('(?:\n.SE|.IBEGIN.*?\n|\n.IEND)', '', data)
    # Replace all macros
    desc_re = re.search(r'.SH "DESCRIPTION"\n.*?([^\n\s].*?)\n', data)
    shortdesc = ''
    # not empty description
    if desc_re and not desc_re.group(1).startswith('.SH'):
        shortdesc = '- ' + desc_re.group(1)

    def dereference(g):
        d = dict(name=name, shortdesc=shortdesc)
        if g.group(1) in d:
            return d[g.group(1)]
        # FIX: previously this fell through and returned None for an unknown
        # placeholder, which makes re.sub raise TypeError ("expected string
        # or bytes-like object").  Keep unknown {{...}} tokens verbatim.
        return g.group(0)

    data = re.sub('{{(.*?)}}', dereference, data)
    return data
def func_test():
    """Smoke test: detect major format changes on cppreference.com.

    Requires network access; raises AssertionError when the expected
    man-page sections are missing from the converted output.
    """
    ifs = urlopen('http://en.cppreference.com/w/cpp/container/vector')
    result = html2groff(fixupHTML(ifs.read()), 'std::vector')
    assert '.SH "NAME"' in result
    assert '.SH "SYNOPSIS"' in result
    assert '.SH "DESCRIPTION"' in result
def test():
    """Manual test: print the Groff output for std::vector (needs network)."""
    ifs = urlopen('http://en.cppreference.com/w/cpp/container/vector')
    print(html2groff(fixupHTML(ifs.read()), 'std::vector'), end=' ')
    # with open('test.html') as ifs:
    #     data = fixupHTML(ifs.read())
    #     print html2groff(data, 'std::vector'),
# Allow a quick manual check by executing this formatter module directly.
if __name__ == '__main__':
    test()
| 12,313 | Python | .py | 297 | 34.754209 | 79 | 0.517287 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,120 | cplusplus.py | aitjcize_cppman/cppman/formatter/cplusplus.py | # -*- coding: utf-8 -*-
#
# formatter.py - format html from cplusplus.com to groff syntax
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <[email protected]>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import datetime
import re
from cppman.formatter.tableparser import parse_table
from cppman.util import fixupHTML, html2man, urlopen
# Format replacement RE list
# The '.SE' pseudo macro is described in the function: html2groff
# Each entry is a (pattern, replacement, flags) triple, applied in order by
# html2groff via re.compile(pattern, flags).sub(replacement, data).
pre_rps = [
    # Snippet, ugly hack: we don't want to treat code listing as table
    (r'<table class="snippet">(.*?)</table>',
     r'\n.in +2n\n\1\n.in\n.sp\n', re.S),
]
# Main rule set, applied after table conversion; ordering matters because
# later rules clean up the output of earlier ones.
rps = [
    # Header, Name
    (r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*'
     r'<div id="I_file"[^>]*>(.*?)</div>\s*'
     r'<h1>(.*?)</h1>\s*<div class="C_prototype"[^>]*>'
     r'(.*?)</div>\s*<div id="I_description"[^>]*>(.*?)</div>',
     r'.TH "\3" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
     r'\n.SH "NAME"\n\3 - \5\n'
     r'\n.SE\n.SH "TYPE"\n\1\n'
     r'\n.SE\n.SH "SYNOPSIS"\n#include \2\n.sp\n\4\n'
     r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
    (r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*'
     r'<div id="I_file"[^>]*>(.*?)</div>\s*'
     r'<h1>(.*?)</h1>\s*'
     r'<div id="I_description"[^>]*>(.*?)</div>',
     r'.TH "\3" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
     r'\n.SH "NAME"\n\3 - \4\n'
     r'\n.SE\n.SH "TYPE"\n\1\n'
     r'\n.SE\n.SH "SYNOPSIS"\n#include \2\n.sp\n'
     r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
    (r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*<h1>(.*?)</h1>\s*'
     r'<div id="I_description"[^>]*>(.*?)</div>',
     r'.TH "\2" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
     r'\n.SH "NAME"\n\2 - \3\n'
     r'\n.SE\n.SH "TYPE"\n\1\n'
     r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
    (r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*<h1>(.*?)</h1>\s*'
     r'<div id="I_file"[^>]*>(.*?)</div>\s*<div id="I_description"[^>]*>'
     '(.*?)</div>',
     r'.TH "\2" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
     r'\n.SH "NAME"\n\2 - \4\n'
     r'\n.SE\n.SH "TYPE"\n\1\n'
     r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
    (r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*<h1>(.*?)</h1>\s*'
     r'<div class="C_prototype"[^>]*>(.*?)</div>\s*'
     r'<div id="I_description"[^>]*>(.*?)</div>',
     r'.TH "\2" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
     r'\n.SH "NAME"\n\2 - \4\n'
     r'\n.SE\n.SH "TYPE"\n\1\n'
     r'\n.SE\n.SH "SYNOPSIS"\n\3\n'
     r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
    (r'<span alt="[^"]*?" class="C_ico cpp11warning"[^>]*>',
     r' [C++11]', re.S),
    # Remove empty #include
    (r'#include \n.sp\n', r'', 0),
    # Remove empty sections
    (r'\n.SH (.+?)\n+.SE', r'', 0),
    # Section headers
    (r'.*<h3>(.+?)</h3>', r'\n.SE\n.SH "\1"\n', 0),
    # 'ul' tag
    (r'<ul>', r'\n.RS 2\n', 0),
    (r'</ul>', r'\n.RE\n.sp\n', 0),
    # 'li' tag
    (r'<li>\s*(.+?)</li>', r'\n.IP \[bu] 3\n\1\n', re.S),
    # 'pre' tag
    (r'<pre[^>]*>(.+?)</pre\s*>', r'\n.nf\n\1\n.fi\n', re.S),
    # Subsections
    (r'<b>(.+?)</b>:<br/>', r'.SS \1\n', 0),
    # Member functions / See Also table
    # Without C++11 tag
    (r'<dl class="links"><dt><a href="[^"]*"><b>([^ ]+?)</b></a></dt><dd>'
     r'([^<]*?)<span class="typ">\s*\(([^<]*?)\n?\)</span></dd></dl>',
     r'\n.IP "\1 (3)"\n\2 (\3)\n', re.S),
    # With C++11 tag
    (r'<dl class="links"><dt><a href="[^"]*"><b>([^ ]+?) <b class="C_cpp11" '
     r'title="(.+?)"></b></b></a></dt><dd>'
     r'([^<]*?)<span class="typ">\s*\((.*?)\n?\)</span></dd></dl>',
     r'\n.IP "\1 (3) [\2]"\n\3 (\4)\n', re.S),
    # Footer
    (r'<div id="CH_bb">.*$',
     r'\n.SE\n.SH "REFERENCE"\n'
     r'cplusplus.com, 2000-2015 - All rights reserved.', re.S),
    # C++ version tag
    (r'<div.+?title="(C\+\+..)"[^>]*>', r'.sp\n\1\n', 0),
    # 'br' tag
    (r'<br/>', r'\n.br\n', 0),
    (r'\n.br\n.br\n', r'\n.sp\n', 0),
    # 'dd' 'dt' tag
    (r'<dt>(.+?)</dt>\s*<dd>(.+?)</dd>', r'.IP "\1"\n\2\n', re.S),
    # Bold
    (r'<strong>(.+?)</strong>', r'\n.B \1\n', 0),
    # Remove row number in EXAMPLE
    (r'<td class="rownum">.*?</td>', r'', re.S),
    # Any other tags
    (r'<script[^>]*>[^<]*</script>', r'', 0),
    (r'<.*?>', r'', re.S),
    # Misc
    (r'<', r'<', 0),
    (r'>', r'>', 0),
    (r'"', r'"', 0),
    (r'&', r'&', 0),
    (r' ', r' ', 0),
    (r'\\([^\^nE])', r'\\\\\1', 0),
    (r'>/">', r'', 0),
    (r'/">', r'', 0),
    # Remove empty lines
    (r'\n\s*\n+', r'\n', 0),
    (r'\n\n+', r'\n', 0),
    # Preserve \n" in EXAMPLE
    (r'\\n', r'\\en', 0),
]
def escape_pre_section(table):
    """Rewrite every <pre> section inside *table*.

    The <pre>/</pre> tags themselves are dropped and each newline within
    the section becomes a Groff line break ('.br'), so code snippets keep
    their line structure when the table is converted.
    """
    def _as_groff_breaks(match):
        body = match.group(1)
        return body.replace('\n', '\n.br\n')

    pre_re = re.compile('<pre.*?>(.*?)</pre>', re.S)
    return pre_re.sub(_as_groff_breaks, table)
def html2groff(data, name):
    """Convert HTML text from cplusplus.com to Groff-formatted text.

    :param data: raw HTML of a cplusplus.com reference page
    :param name: symbol name being formatted (currently unused here;
                 the page's own NAME section is used instead)
    :return: Groff (man page) source as a single string
    """
    # Remove sidebar
    try:
        data = data[data.index('<div class="C_doc">'):]
    except ValueError:
        pass
    # Pre replace all
    for rp in pre_rps:
        data = re.compile(rp[0], rp[2]).sub(rp[1], data)
    # Convert every HTML table to tbl(1) markup before the generic rules run.
    for table in re.findall(r'<table.*?>.*?</table>', data, re.S):
        tbl = parse_table(escape_pre_section(table))
        # Escape column with '.' as prefix
        tbl = re.compile(r'T{\n(\..*?)\nT}', re.S).sub(r'T{\n\\E \1\nT}', tbl)
        data = data.replace(table, tbl)
    # Replace all
    for rp in rps:
        data = re.compile(rp[0], rp[2]).sub(rp[1], data)
    # Upper case all section headers
    for st in re.findall(r'.SH .*\n', data):
        data = data.replace(st, st.upper())
    # Add tags to member/inherited member functions
    # e.g. insert -> vector::insert
    #
    # .SE is a pseudo macro I created which means 'SECTION END'
    # The reason I use it is because I need a marker to know where section
    # ends.
    # re.findall find patterns which does not overlap, which means if I do
    # this: secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SH', data, re.S)
    # re.findall will skip the later .SH tag and thus skip the later section.
    # To fix this, '.SE' is used to mark the end of the section so the next
    # '.SH' can be find by re.findall
    page_type = re.search(r'\n\.SH "TYPE"\n(.+?)\n', data)
    if page_type and 'class' in page_type.group(1):
        # Class page: extract the unqualified class name from the NAME line.
        class_name = re.search(
            r'\n\.SH "NAME"\n(?:.*::)?(.+?) ', data).group(1)
        secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SE', data, re.S)
        for sec, content in secs:
            # Member functions
            if ('MEMBER' in sec and
                'NON-MEMBER' not in sec and
                'INHERITED' not in sec and
                sec != 'MEMBER TYPES'):
                content2 = re.sub(r'\n\.IP "([^:]+?)"', r'\n.IP "%s::\1"'
                                  % class_name, content)
                # Replace (constructor) (destructor)
                content2 = re.sub(r'\(constructor\)', r'%s' % class_name,
                                  content2)
                content2 = re.sub(r'\(destructor\)', r'~%s' % class_name,
                                  content2)
                data = data.replace(content, content2)
            # Inherited member functions
            elif 'MEMBER' in sec and 'INHERITED' in sec:
                inherit = re.search(r'.+?INHERITED FROM (.+)',
                                    sec).group(1).lower()
                content2 = re.sub(r'\n\.IP "(.+)"', r'\n.IP "%s::\1"'
                                  % inherit, content)
                data = data.replace(content, content2)
    # Remove pseudo macro '.SE'
    data = data.replace('\n.SE', '')
    return data
def func_test():
    """Smoke test: detect major format changes on cplusplus.com.

    Requires network access; raises AssertionError when the expected
    man-page sections are missing from the converted output.
    """
    ifs = urlopen('http://www.cplusplus.com/printf')
    result = html2groff(fixupHTML(ifs.read()), 'printf')
    assert '.SH "NAME"' in result
    assert '.SH "TYPE"' in result
    assert '.SH "DESCRIPTION"' in result
def test():
    """Manual test: print the Groff output for std::vector (needs network)."""
    ifs = urlopen('http://www.cplusplus.com/vector')
    print(html2groff(fixupHTML(ifs.read()), 'std::vector'), end=' ')
    # with open('test.html') as ifs:
    #     print html2groff(fixupHTML(ifs.read()), 'std::vector'),
# Allow a quick manual check by executing this formatter module directly.
if __name__ == '__main__':
    test()
| 9,129 | Python | .py | 216 | 35.893519 | 78 | 0.519802 | aitjcize/cppman | 1,286 | 81 | 25 | GPL-3.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,121 | rebuildmo.py | cinemagoer_cinemagoer/rebuildmo.py | # Copyright 2022 H. Turgut Uyar <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This script builds the .mo files, from the .po files.
"""
import glob
import os
import os.path
import sys
from subprocess import check_call
def rebuildmo():
    """Compile every imdb/locale/imdbpy-<lang>.po file into its .mo catalog.

    A catalog is rebuilt only when it is missing or older than its .po
    source.  Returns the list of language codes that were (re)built.
    """
    locale_dir = os.path.join("imdb", "locale")
    pattern = os.path.join(locale_dir, "imdbpy-*.po")
    rebuilt = []
    for po_path in sorted(glob.glob(pattern)):
        # File names look like 'imdbpy-<lang>.po'; strip prefix and suffix.
        lang = os.path.basename(po_path)[len("imdbpy-"):-len(".po")]
        mo_dir = os.path.join(locale_dir, lang, "LC_MESSAGES")
        mo_path = os.path.join(mo_dir, "imdbpy.mo")
        up_to_date = (
            os.path.exists(mo_path)
            and os.stat(po_path).st_mtime < os.stat(mo_path).st_mtime
        )
        if up_to_date:
            continue
        if not os.path.exists(mo_dir):
            os.makedirs(mo_dir)
        check_call([sys.executable, "msgfmt.py", "-o", mo_path, po_path])
        rebuilt.append(lang)
    return rebuilt
if __name__ == '__main__':
    languages = rebuildmo()
    # Only print a summary when at least one catalog was rebuilt.
    if len(languages) > 0:
        print('Created locale for: %s.' % ' '.join(languages))
| 1,745 | Python | .py | 43 | 36.55814 | 95 | 0.689858 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,122 | setup.py | cinemagoer_cinemagoer/setup.py | import glob
import sys
from subprocess import CalledProcessError, check_call
import setuptools
# version of the software from imdb/version.py
# (exec'd so __version__ becomes available here without importing the
# not-yet-installed imdb package)
exec(compile(open('imdb/version.py').read(), 'imdb/version.py', 'exec'))
# Human-readable metadata, assembled below into the setup() arguments.
home_page = 'https://cinemagoer.github.io/'
long_desc = """Cinemagoer is a Python package useful to retrieve and
manage the data of the IMDb movie database about movies, people,
characters and companies.
Cinemagoer and its authors are not affiliated in any way to
Internet Movie Database Inc.; see the DISCLAIMER.txt file for
details about data licenses.
Platform-independent and written in Python 3
Cinemagoer package can be very easily used by programmers and developers
to provide access to the IMDb's data to their programs.
Some simple example scripts - useful for the end users - are included
in this package; other Cinemagoer-based programs are available at the
home page: %s
""" % home_page
dwnl_url = 'https://cinemagoer.github.io/downloads/'
# PyPI trove classifiers, one per line; empty lines are filtered out below.
classifiers = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Environment :: Web Environment
Intended Audience :: Developers
Intended Audience :: End Users/Desktop
License :: OSI Approved :: GNU General Public License (GPL)
Natural Language :: English
Natural Language :: Italian
Natural Language :: Turkish
Programming Language :: Python
Programming Language :: Python :: 3.11
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.5
Programming Language :: Python :: 2.7
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Operating System :: OS Independent
Topic :: Database :: Front-Ends
Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries
Topic :: Software Development :: Libraries :: Python Modules
"""
keywords = ['imdb', 'movie', 'people', 'database', 'cinema', 'film', 'person',
            'cast', 'actor', 'actress', 'director', 'sql', 'character',
            'company', 'package', 'plain text data files',
            'keywords', 'top250', 'bottom100', 'xml']
# Every script in bin/ is installed as a command-line example.
scripts = glob.glob('./bin/*.py')
params = {
# Meta-information.
'name': 'cinemagoer',
'version': __version__,
'description': 'Python package to access the IMDb\'s database',
'long_description': long_desc,
'author': 'Davide Alberani',
'author_email': '[email protected]',
'maintainer': 'Davide Alberani',
'maintainer_email': '[email protected]',
'license': 'GPL',
'platforms': 'any',
'keywords': keywords,
'classifiers': [_f for _f in classifiers.split("\n") if _f],
'url': home_page,
'project_urls': {
'Source': 'https://github.com/cinemagoer/cinemagoer',
},
'download_url': dwnl_url,
'scripts': scripts,
'package_data': {
# Here, the "*" represents any possible language ID.
'imdb.locale': [
'imdbpy.pot',
'imdbpy-*.po',
'*/LC_MESSAGES/imdbpy.mo',
],
},
'install_requires': ['SQLAlchemy', 'lxml'],
'extras_require': {
'dev': [
'flake8',
'flake8-isort',
'pytest',
'pytest-cov',
'tox',
],
'doc': [
'sphinx',
'sphinx_rtd_theme'
]
},
'packages': setuptools.find_packages(),
'entry_points': """
[console_scripts]
imdbpy=imdb.cli:main
"""
}
# Message displayed when dependency fetching/installation fails during setup.
ERR_MSG = """
====================================================================
ERROR
=====
Aaargh! An error! An error!
Curse my metal body, I wasn't fast enough. It's all my fault!
Anyway, if you were trying to build a package or install Cinemagoer to your
system, looks like we're unable to fetch or install some dependencies.
The best solution is to resolve these dependencies (maybe you're
not connected to Internet?)
The caught exception, is re-raise below:
"""
def runRebuildmo():
    """Rebuild the .mo locale catalogs by running rebuildmo.py.

    A failure is reported but never aborts the build: translations are
    considered optional.
    """
    command = [sys.executable, "rebuildmo.py"]
    try:
        check_call(command)
    except CalledProcessError as e:
        print('ERROR: unable to rebuild .mo files; caught exception %s' % e)
def hasCommand():
    """Return true if at least one command is found on the command line."""
    args = sys.argv[1:]
    # Help requests and 'clean' must not trigger the locale rebuild.
    if any(stop in args for stop in ('--help', '-h', 'clean')):
        return False
    # A real command is any non-empty argument that is not an option.
    return any(arg and not arg.startswith('-') for arg in args)
# Rebuild the translation catalogs only when an actual command was given
# (not for --help, -h or clean).
try:
    if hasCommand():
        runRebuildmo()
except SystemExit:
    # A failing sub-step may call sys.exit(); report it and keep going.
    print(ERR_MSG)
setuptools.setup(**params)
| 4,791 | Python | .py | 137 | 30.321168 | 78 | 0.6586 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,123 | msgfmt.py | cinemagoer_cinemagoer/msgfmt.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Written by Martin v. Löwis <[email protected]>
"""Generate binary message catalog from textual translation description.
This program converts a textual Uniforum-style message catalog (.po file) into
a binary GNU catalog (.mo file). This is essentially the same function as the
GNU msgfmt program, however, it is a simpler implementation.
Usage: msgfmt.py [OPTIONS] filename.po
Options:
-o file
--output-file=file
Specify the output file to write to. If omitted, output will go to a
file named filename.mo (based off the input file name).
-h
--help
Print this message and exit.
-V
--version
Display version information and exit.
"""
import os
import sys
import ast
import getopt
import struct
import array
from email.parser import HeaderParser
# Version reported by the -V/--version option.
__version__ = "1.1"
# Catalog of non-fuzzy translations collected from the current .po file;
# reset by make() for every input file and serialized by generate().
MESSAGES = {}
def usage(code, msg=''):
    """Print the module docstring (plus an optional message) to stderr
    and exit the process with the given status code."""
    sys.stderr.write(__doc__)
    if msg:
        sys.stderr.write(msg)
    sys.exit(code)
def add(id, str, fuzzy):
    "Add a non-fuzzy translation to the dictionary."
    # NOTE(review): the parameter names 'id' and 'str' shadow builtins; kept
    # unchanged for backward compatibility with the original GNU msgfmt.py.
    # Fuzzy entries and empty translations are silently dropped.
    global MESSAGES
    if not fuzzy and str:
        MESSAGES[id] = str
def generate():
    """Serialize MESSAGES into the binary GNU .mo catalog format.

    Returns the catalog as bytes: a 7-word header, the key and value
    index tables, then the NUL-terminated keys followed by the
    NUL-terminated translations.
    """
    global MESSAGES
    # the keys are sorted in the .mo file
    keys = sorted(MESSAGES.keys())
    offsets = []
    ids = strs = b''
    for id in keys:
        # For each string, we need size and file offset. Each string is NUL
        # terminated; the NUL does not count into the size.
        offsets.append((len(ids), len(id), len(strs), len(MESSAGES[id])))
        ids += id + b'\0'
        strs += MESSAGES[id] + b'\0'
    # The header is 7 32-bit unsigned integers. We don't use hash tables, so
    # the keys start right after the index tables.
    keystart = 7*4+16*len(keys)
    # and the values start after the keys
    valuestart = keystart + len(ids)
    koffsets = []
    voffsets = []
    # The string table first has the list of keys, then the list of values.
    # Each entry has first the size of the string, then the file offset.
    for o1, l1, o2, l2 in offsets:
        koffsets += [l1, o1+keystart]
        voffsets += [l2, o2+valuestart]
    offsets = koffsets + voffsets
    # Fix: dropped the dead 'output = %r' string assignment that was
    # immediately overwritten by the bytes value below.
    output = struct.pack("Iiiiiii",
                         0x950412de,       # Magic
                         0,                # Version
                         len(keys),        # # of entries
                         7*4,              # start of key index
                         7*4+len(keys)*8,  # start of value index
                         0, 0)             # size and offset of hash table
    # array.tostring() was removed in Python 3.9; tobytes() is preferred.
    if sys.version_info < (3, 2):
        output += array.array("i", offsets).tostring()
    else:
        output += array.array("i", offsets).tobytes()
    output += ids
    output += strs
    return output
def make(filename, outfile):
    """Parse the .po catalog *filename* and write the binary .mo catalog.

    If *outfile* is None, the output name is derived from the input name
    by replacing the extension with '.mo'.  Exits the process on parse
    errors or unreadable input.
    """
    # Clear "MESSAGES" to prevent translations to be mixed up
    # when calling this function on multiple ".po" files.
    global MESSAGES
    MESSAGES = {}
    # Parser section markers: inside a msgid or inside a msgstr.
    ID = 1
    STR = 2
    # Compute .mo name from .po name and arguments
    if filename.endswith('.po'):
        infile = filename
    else:
        infile = filename + '.po'
    if outfile is None:
        outfile = os.path.splitext(infile)[0] + '.mo'
    try:
        lines = open(infile, 'rb').readlines()
    except IOError as msg:
        print(msg)
        sys.exit(1)
    section = None
    fuzzy = 0
    # Start off assuming utf-8, so everything decodes without failure,
    # until we know the exact encoding
    encoding = 'utf-8'
    # Parse the catalog
    lno = 0
    for l in lines:
        l = l.decode(encoding)
        lno += 1
        # If we get a comment line after a msgstr, this is a new entry
        if l[0] == '#' and section == STR:
            add(msgid, msgstr, fuzzy)
            section = None
            fuzzy = 0
        # Record a fuzzy mark
        if l[:2] == '#,' and 'fuzzy' in l:
            fuzzy = 1
        # Skip comments
        if l[0] == '#':
            continue
        # Now we are in a msgid section, output previous section
        if l.startswith('msgid') and not l.startswith('msgid_plural'):
            if section == STR:
                add(msgid, msgstr, fuzzy)
            if not msgid:
                # See whether there is an encoding declaration
                p = HeaderParser()
                charset = p.parsestr(msgstr.decode(encoding)).get_content_charset()
                if charset:
                    encoding = charset
            section = ID
            # Strip the leading 'msgid' keyword.
            l = l[5:]
            msgid = msgstr = b''
            is_plural = False
        # This is a message with plural forms
        elif l.startswith('msgid_plural'):
            if section != ID:
                print('msgid_plural not preceded by msgid on %s:%d' % (infile, lno))
                sys.exit(1)
            # Strip the leading 'msgid_plural' keyword.
            l = l[12:]
            msgid += b'\0'  # separator of singular and plural
            is_plural = True
        # Now we are in a msgstr section
        elif l.startswith('msgstr'):
            section = STR
            if l.startswith('msgstr['):
                if not is_plural:
                    print('plural without msgid_plural on %s:%d' % (infile, lno))
                    sys.exit(1)
                l = l.split(']', 1)[1]
                if msgstr:
                    msgstr += b'\0'  # Separator of the various plural forms
            else:
                if is_plural:
                    print('indexed msgstr required for plural on %s:%d' % (infile, lno))
                    sys.exit(1)
                # Strip the leading 'msgstr' keyword.
                l = l[6:]
        # Skip empty lines
        l = l.strip()
        if not l:
            continue
        # Evaluate the quoted string literal (handles escapes like \n).
        l = ast.literal_eval(l)
        if section == ID:
            msgid += l.encode(encoding)
        elif section == STR:
            msgstr += l.encode(encoding) if sys.version_info.major >= 3 else l
        else:
            print('Syntax error on %s:%d' % (infile, lno), \
                  'before:')
            print(l)
            sys.exit(1)
    # Add last entry
    if section == STR:
        add(msgid, msgstr, fuzzy)
    # Compute output
    output = generate()
    try:
        open(outfile,"wb").write(output)
    except IOError as msg:
        print(msg)
def main():
    """Parse command-line options and convert every input file."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hVo:',
                                   ['help', 'version', 'output-file='])
    except getopt.error as err:
        usage(1, err)
    outfile = None
    # Walk the parsed options; the last -o/--output-file wins.
    for option, value in opts:
        if option in ('-h', '--help'):
            usage(0)
        if option in ('-V', '--version'):
            print("msgfmt.py", __version__)
            sys.exit(0)
        if option in ('-o', '--output-file'):
            outfile = value
    if not args:
        print('No input file given')
        print("Try `msgfmt --help' for more information.")
        return
    for path in args:
        make(path, outfile)
# Standard script entry point.
if __name__ == '__main__':
    main()
| 7,102 | Python | .py | 206 | 25.563107 | 89 | 0.550182 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,124 | generatepot.py | cinemagoer_cinemagoer/generatepot.py | # Copyright 2009 H. Turgut Uyar <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This script generates the imdbpy.pot file, from the DTD.
"""
import re
import sys
from datetime import datetime as dt
# Optional hand-written default translations, keyed by DTD element name.
DEFAULT_MESSAGES = {}
# Matches '<!ELEMENT name' declarations in the DTD and captures the name.
ELEMENT_PATTERN = r"""<!ELEMENT\s+([^\s]+)"""
re_element = re.compile(ELEMENT_PATTERN)
# Skeleton of the .pot file; '%(now)s' is filled with the creation time.
POT_HEADER_TEMPLATE = r"""# Gettext message file for imdbpy
msgid ""
msgstr ""
"Project-Id-Version: imdbpy\n"
"POT-Creation-Date: %(now)s\n"
"PO-Revision-Date: YYYY-MM-DD HH:MM+0000\n"
"Last-Translator: YOUR NAME <YOUR@EMAIL>\n"
"Language-Team: TEAM NAME <TEAM@EMAIL>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Language-Code: en\n"
"Language-Name: English\n"
"Preferred-Encodings: utf-8\n"
"Domain: imdbpy\n"
"""
# Exactly one argument is expected: the path of the DTD file to scan.
if len(sys.argv) != 2:
    print("Usage: %s dtd_file" % sys.argv[0])
    sys.exit()
dtdfilename = sys.argv[1]
dtd = open(dtdfilename).read()
# Collect the unique element names declared in the DTD.
elements = re_element.findall(dtd)
uniq = set(elements)
elements = list(uniq)
print(POT_HEADER_TEMPLATE % {
    'now': dt.strftime(dt.now(), "%Y-%m-%d %H:%M+0000")
})
# Emit one msgid entry (with a default-message hint) per element name.
for element in sorted(elements):
    if element in DEFAULT_MESSAGES:
        print('# Default: %s' % DEFAULT_MESSAGES[element])
    else:
        print('# Default: %s' % element.replace('-', ' ').capitalize())
    print('msgid "%s"' % element)
    print('msgstr ""')
    # use this part instead of the line above to generate the po file for English
    # if element in DEFAULT_MESSAGES:
    #     print 'msgstr "%s"' % DEFAULT_MESSAGES[element]
    # else:
    #     print 'msgstr "%s"' % element.replace('-', ' ').capitalize()
print()
| 2,354 | Python | .py | 65 | 34.046154 | 81 | 0.709776 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,125 | get_top_bottom_movies.py | cinemagoer_cinemagoer/bin/get_top_bottom_movies.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
get_top_bottom_movies.py
Usage: get_top_bottom_movies
Return top and bottom 10 movies, by ratings.
"""
import sys
# Import the Cinemagoer package.
try:
    import imdb
except ImportError:
    print('You need to install the Cinemagoer package!')
    sys.exit(1)
# This script takes no command-line arguments.
if len(sys.argv) != 1:
    print('No arguments are required.')
    sys.exit(2)
i = imdb.IMDb()
# Fetch both charts (network access required).
top250 = i.get_top250_movies()
bottom100 = i.get_bottom100_movies()
# Show only the first ten entries of each chart.
for label, ml in [('top 10', top250[:10]), ('bottom 10', bottom100[:10])]:
    print('')
    print('%s movies' % label)
    print('rating\tvotes\ttitle')
    for movie in ml:
        outl = '%s\t%s\t%s' % (movie.get('rating'), movie.get('votes'),
                               movie['long imdb title'])
        print(outl)
| 809 | Python | .py | 28 | 24.571429 | 74 | 0.63342 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,126 | get_first_company.py | cinemagoer_cinemagoer/bin/get_first_company.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
get_first_company.py
Usage: get_first_company "company name"
Search for the given name and print the best matching result.
"""
import sys
# Import the Cinemagoer package.
try:
    import imdb
except ImportError:
    print('You need to install the Cinemagoer package!')
    sys.exit(1)
# Exactly one argument is expected: the company name to search for.
if len(sys.argv) != 2:
    print('Only one argument is required:')
    print(' %s "company name"' % sys.argv[0])
    sys.exit(2)
name = sys.argv[1]
i = imdb.IMDb()
try:
    # Do the search, and get the results (a list of company objects).
    results = i.search_company(name)
except imdb.IMDbError as e:
    print("Probably you're not connected to Internet. Complete error report:")
    print(e)
    sys.exit(3)
if not results:
    print('No matches for "%s", sorry.' % name)
    sys.exit(0)
# Print only the first result.
print(' Best match for "%s"' % name)
# This is a company instance.
company = results[0]
# So far the company object only contains basic information like the
# name; retrieve main information:
i.update(company)
print(company.summary())
| 1,114 | Python | .py | 38 | 26.552632 | 79 | 0.707823 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,127 | search_company.py | cinemagoer_cinemagoer/bin/search_company.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
search_company.py
Usage: search_company "company name"
Search for the given name and print the results.
"""
import sys
# Import the Cinemagoer package.
try:
    import imdb
except ImportError:
    # Fix: message made consistent with the other example scripts
    # (it used to start with "You bad boy!").
    print('You need to install the Cinemagoer package!')
    sys.exit(1)
# Exactly one argument is expected: the company name to search for.
if len(sys.argv) != 2:
    print('Only one argument is required:')
    print(' %s "company name"' % sys.argv[0])
    sys.exit(2)
name = sys.argv[1]
i = imdb.IMDb()
# Fix: removed the unused 'out_encoding' variable.
try:
    # Do the search, and get the results (a list of company objects).
    results = i.search_company(name)
except imdb.IMDbError as e:
    print("Probably you're not connected to Internet. Complete error report:")
    print(e)
    sys.exit(3)
# Print the results.
print('    %s result%s for "%s":' % (len(results),
                                     ('', 's')[len(results) != 1],
                                     name))
print('companyID\t: imdbID : name')
# Print the long imdb name for every company.
for company in results:
    outp = '%s\t\t: %s : %s' % (company.companyID, i.get_imdbID(company),
                                company['long imdb name'])
    print(outp)
| 1,260 | Python | .py | 38 | 27.631579 | 79 | 0.626964 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,128 | get_company.py | cinemagoer_cinemagoer/bin/get_company.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
get_company.py
Usage: get_company "company_id"
Show some info about the company with the given company_id (e.g. '0071509'
for "Columbia Pictures [us]", using 'http' or 'mobile').
Notice that company_id, using 'sql', are not the same IDs used on the web.
"""
import sys
# Import the Cinemagoer package.
try:
    import imdb
except ImportError:
    print('You need to install the Cinemagoer package!')
    sys.exit(1)
# Exactly one argument is expected: the companyID to look up.
if len(sys.argv) != 2:
    print('Only one argument is required:')
    print(' %s "company_id"' % sys.argv[0])
    sys.exit(2)
company_id = sys.argv[1]
i = imdb.IMDb()
try:
    # Get a company object with the data about the company identified by
    # the given company_id.
    company = i.get_company(company_id)
except imdb.IMDbError as e:
    print("Probably you're not connected to Internet. Complete error report:")
    print(e)
    sys.exit(3)
if not company:
    print('It seems that there\'s no company with company_id "%s"' % company_id)
    sys.exit(4)
# XXX: this is the easier way to print the main info about a company;
# calling the summary() method of a company object will returns a string
# with the main information about the company.
# Obviously it's not really meaningful if you want to know how
# to access the data stored in a company object, so look below; the
# commented lines show some ways to retrieve information from a
# company object.
print(company.summary())
| 1,472 | Python | .py | 41 | 33.243902 | 80 | 0.723749 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,129 | get_movie.py | cinemagoer_cinemagoer/bin/get_movie.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
get_movie.py
Usage: get_movie "movie_id"
Show some info about the movie with the given movie_id (e.g. '0133093'
for "The Matrix", using 'http' or 'mobile').
Notice that movie_id, using 'sql', are not the same IDs used on the web.
"""
import sys
# Import the Cinemagoer package.
try:
    import imdb
except ImportError:
    print('You need to install the Cinemagoer package!')
    sys.exit(1)
# Exactly one argument is expected: the movieID to look up
# (e.g. '0133093' for "The Matrix").
if len(sys.argv) != 2:
    print('Only one argument is required:')
    print(' %s "movie_id"' % sys.argv[0])
    sys.exit(2)
movie_id = sys.argv[1]
i = imdb.IMDb()
try:
    # Get a Movie object with the data about the movie identified by
    # the given movie_id.
    movie = i.get_movie(movie_id)
except imdb.IMDbError as e:
    print("Probably you're not connected to Internet. Complete error report:")
    print(e)
    sys.exit(3)
if not movie:
    print('It seems that there\'s no movie with movie_id "%s"' % movie_id)
    sys.exit(4)
# XXX: this is the easier way to print the main info about a movie;
# calling the summary() method of a Movie object will returns a string
# with the main information about the movie.
# Obviously it's not really meaningful if you want to know how
# to access the data stored in a Movie object, so look below; the
# commented lines show some ways to retrieve information from a
# Movie object.
print(movie.summary())
# Show some info about the movie.
# This is only a short example; you can get a longer summary using
# 'print movie.summary()' and the complete set of information looking for
# the output of the movie.keys() method.
#
# print '==== "%s" / movie_id: %s ====' % (movie['title'], movie_id)
# XXX: use the IMDb instance to get the IMDb web URL for the movie.
# imdbURL = i.get_imdbURL(movie)
# if imdbURL:
# print 'IMDb URL: %s' % imdbURL
#
# XXX: many keys return a list of values, like "genres".
# genres = movie.get('genres')
# if genres:
# print 'Genres: %s' % ' '.join(genres)
#
# XXX: even when only one value is present (e.g.: movie with only one
# director), fields that can be multiple are ALWAYS a list.
# Note that the 'name' variable is a Person object, but since its
# __str__() method returns a string with the name, we can use it
# directly, instead of name['name']
# director = movie.get('director')
# if director:
# print 'Director(s): ',
# for name in director:
# sys.stdout.write('%s ' % name)
# print ''
#
# XXX: notice that every name in the cast is a Person object, with a
# currentRole instance variable, which is a string for the played role.
# cast = movie.get('cast')
# if cast:
# print 'Cast: '
# cast = cast[:5]
# for name in cast:
# print ' %s (%s)' % (name['name'], name.currentRole)
# XXX: some pieces of information are not lists of strings or Person objects,
# but simple strings, like 'rating'.
# rating = movie.get('rating')
# if rating:
# print 'Rating: %s' % rating
# XXX: an example of how to use information sets; retrieve the "trivia"
# info set; check if it contains some data, select and print a
# random entry.
# import random
# i.update(movie, info=['trivia'])
# trivia = movie.get('trivia')
# if trivia:
# rand_trivia = trivia[random.randrange(len(trivia))]
# print 'Random trivia: %s' % rand_trivia
| 3,336 | Python | .py | 91 | 34.901099 | 79 | 0.685334 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,130 | search_person.py | cinemagoer_cinemagoer/bin/search_person.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
search_person.py
Usage: search_person "person name"
Search for the given name and print the results.
"""
import sys
# Import the Cinemagoer package.
try:
import imdb
except ImportError:
print('You bad boy! You need to install the Cinemagoer package!')
sys.exit(1)
if len(sys.argv) != 2:
print('Only one argument is required:')
print(' %s "person name"' % sys.argv[0])
sys.exit(2)
name = sys.argv[1]
i = imdb.IMDb()
out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
try:
# Do the search, and get the results (a list of Person objects).
results = i.search_person(name)
except imdb.IMDbError as e:
print("Probably you're not connected to Internet. Complete error report:")
print(e)
sys.exit(3)
# Print the results.
print(' %s result%s for "%s":' % (len(results),
('', 's')[len(results) != 1],
name))
print('personID\t: imdbID : name')
# Print the long imdb name for every person.
for person in results:
outp = '%s\t: %s : %s' % (person.personID, i.get_imdbID(person),
person['long imdb name'])
print(outp)
| 1,243 | Python | .py | 38 | 27.236842 | 79 | 0.624161 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,131 | get_first_movie.py | cinemagoer_cinemagoer/bin/get_first_movie.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
get_first_movie.py

Usage: get_first_movie "movie title"

Search for the given title and print only the best matching result.
"""
import sys
# The Cinemagoer package is required; bail out early if it's missing.
try:
    import imdb
except ImportError:
    print('You need to install the Cinemagoer package!')
    sys.exit(1)
# Exactly one command line argument (the movie title) is expected.
if len(sys.argv) != 2:
    print('Only one argument is required:')
    print('  %s "movie title"' % sys.argv[0])
    sys.exit(2)
title = sys.argv[1]
i = imdb.IMDb()
# Run the title search; network trouble surfaces as IMDbError.
try:
    # Do the search, and get the results (a list of Movie objects).
    results = i.search_movie(title)
except imdb.IMDbError as e:
    print("Probably you're not connected to Internet. Complete error report:")
    print(e)
    sys.exit(3)
if not results:
    print('No matches for "%s", sorry.' % title)
    sys.exit(0)
# Keep only the best match (a Movie instance with just basic info so
# far: title, year, ...).
print('    Best match for "%s"' % title)
movie = results[0]
# Retrieve the main information set before printing the summary.
i.update(movie)
print(movie.summary())
| 1,113 | Python | .py | 38 | 26.526316 | 79 | 0.704717 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,132 | imdbpy2sql.py | cinemagoer_cinemagoer/bin/imdbpy2sql.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
imdbpy2sql.py script.
This script puts the data of the plain text data files into a
SQL database.
Copyright 2005-2020 Davide Alberani <[email protected]>
2006 Giuseppe "Cowo" Corbelli <cowo --> lugbs.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import os
import sys
import getopt
import time
import re
import warnings
import operator
import dbm
from itertools import islice, chain
try:
from hashlib import md5
except ImportError:
from md5 import md5
from gzip import GzipFile
from imdb.parser.sql.dbschema import DB_SCHEMA, dropTables, createTables, createIndexes
from imdb.parser.sql import soundex
from imdb.utils import analyze_title, analyze_name, date_and_notes, \
build_name, build_title, normalizeName, normalizeTitle, _articles, \
build_company_name, analyze_company_name, canonicalTitle
from imdb._exceptions import IMDbParserError, IMDbError
from imdb.parser.sql.alchemyadapter import getDBTables, setConnection
HELP = """imdbpy2sql.py usage:
%s -d /directory/with/PlainTextDataFiles/ -u URI [-c /directory/for/CSV_files] [-i table,dbm] [--CSV-OPTIONS] [--COMPATIBILITY-OPTIONS]
# NOTE: URI is something along the line:
scheme://[user[:password]@]host[:port]/database[?parameters]
Examples:
mysql://user:password@host/database
postgres://user:password@host/database
sqlite:/tmp/imdb.db
sqlite:/C|/full/path/to/database
# NOTE: CSV mode (-c path):
A directory is used to store CSV files; on supported
database servers it should be really fast.
# NOTE: imdbIDs store/restore (-i method):
Valid options are 'table' (imdbIDs stored in a temporary
table of the database) or 'dbm' (imdbIDs stored on a dbm
file - this is the default if CSV is used).
# NOTE: --CSV-OPTIONS can be:
--csv-ext STRING files extension (.csv)
--csv-only-write exit after the CSV files are written.
--csv-only-load load an existing set of CSV files.
# NOTE: --COMPATIBILITY-OPTIONS can be one of:
--mysql-innodb insert data into a MySQL MyISAM db,
and then convert it to InnoDB.
--mysql-force-myisam force the creation of MyISAM tables.
--ms-sqlserver compatibility mode for Microsoft SQL Server
and SQL Express.
--sqlite-transactions uses transactions, to speed-up SQLite.
See README.sqldb for more information.
""" % sys.argv[0]
# Directory containing the IMDb's Plain Text Data Files.
IMDB_PTDF_DIR = None
# URI used to connect to the database.
URI = None
# List of tables of the database.
DB_TABLES = []
# Max allowed recursion, inserting data.
MAX_RECURSION = 10
# Method used to (re)store imdbIDs.
IMDBIDS_METHOD = None
# If set, this directory is used to output CSV files.
CSV_DIR = None
# Shared CSVCursor instance (set only when CSV_DIR is given).
CSV_CURS = None
CSV_ONLY_WRITE = False
CSV_ONLY_LOAD = False
# CSV formatting parameters (extension, line terminator, quoting).
CSV_EXT = '.csv'
CSV_EOL = '\n'
CSV_DELIMITER = ','
CSV_QUOTE = '"'
CSV_ESCAPE = '"'
CSV_NULL = 'NULL'
CSV_QUOTEINT = False
# Per-server bulk-load statement templates; the %(file)s, %(table)s,
# %(delimiter)s, ... placeholders are filled in loadCSVFiles().
CSV_LOAD_SQL = None
CSV_MYSQL = "LOAD DATA LOCAL INFILE '%(file)s' INTO TABLE `%(table)s` FIELDS TERMINATED BY '%(delimiter)s' ENCLOSED BY '%(quote)s' ESCAPED BY '%(escape)s' LINES TERMINATED BY '%(eol)s'"
CSV_PGSQL = "COPY %(table)s FROM '%(file)s' WITH DELIMITER AS '%(delimiter)s' NULL AS '%(null)s' QUOTE AS '%(quote)s' ESCAPE AS '%(escape)s' CSV"
CSV_DB2 = "CALL SYSPROC.ADMIN_CMD('LOAD FROM %(file)s OF del MODIFIED BY lobsinfile INSERT INTO %(table)s')"
# Temporary fix for old style titles.
# FIX_OLD_STYLE_TITLES = True
# Store custom queries specified on the command line
# (mapping: time label -> list of SQL statements, filled by -e/--execute).
CUSTOM_QUERIES = {}
# Allowed time specification, for custom queries.
ALLOWED_TIMES = ('BEGIN', 'BEFORE_DROP', 'BEFORE_CREATE', 'AFTER_CREATE',
                 'BEFORE_MOVIES', 'BEFORE_COMPANIES', 'BEFORE_CAST',
                 'BEFORE_RESTORE', 'BEFORE_INDEXES', 'END', 'BEFORE_MOVIES_TODB',
                 'AFTER_MOVIES_TODB', 'BEFORE_PERSONS_TODB',
                 'AFTER_PERSONS_TODB', 'BEFORE_SQLDATA_TODB',
                 'AFTER_SQLDATA_TODB', 'BEFORE_AKAMOVIES_TODB',
                 'AFTER_AKAMOVIES_TODB', 'BEFORE_CHARACTERS_TODB',
                 'AFTER_CHARACTERS_TODB', 'BEFORE_COMPANIES_TODB',
                 'AFTER_COMPANIES_TODB', 'BEFORE_EVERY_TODB',
                 'AFTER_EVERY_TODB', 'BEFORE_CSV_LOAD', 'BEFORE_CSV_TODB',
                 'AFTER_CSV_TODB')
# Shortcuts for some compatibility options: each expands to a series of
# -e/--execute custom queries appended to sys.argv below.
MYSQLFORCEMYISAM_OPTS = ['-e',
                         'AFTER_CREATE:FOR_EVERY_TABLE:ALTER TABLE %(table)s ENGINE=MyISAM;']
MYSQLINNODB_OPTS = ['-e',
                    'AFTER_CREATE:FOR_EVERY_TABLE:ALTER TABLE %(table)s ENGINE=MyISAM;',
                    '-e',
                    'BEFORE_INDEXES:FOR_EVERY_TABLE:ALTER TABLE %(table)s ENGINE=InnoDB;']
# MS SQL Server: identity columns must be explicitly enabled/disabled
# around every bulk insert of rows that carry their own IDs.
SQLSERVER_OPTS = ['-e', 'BEFORE_MOVIES_TODB:SET IDENTITY_INSERT %(table)s ON;',
                  '-e', 'AFTER_MOVIES_TODB:SET IDENTITY_INSERT %(table)s OFF;',
                  '-e', 'BEFORE_PERSONS_TODB:SET IDENTITY_INSERT %(table)s ON;',
                  '-e', 'AFTER_PERSONS_TODB:SET IDENTITY_INSERT %(table)s OFF;',
                  '-e', 'BEFORE_COMPANIES_TODB:SET IDENTITY_INSERT %(table)s ON;',
                  '-e', 'AFTER_COMPANIES_TODB:SET IDENTITY_INSERT %(table)s OFF;',
                  '-e', 'BEFORE_CHARACTERS_TODB:SET IDENTITY_INSERT %(table)s ON;',
                  '-e', 'AFTER_CHARACTERS_TODB:SET IDENTITY_INSERT %(table)s OFF;',
                  '-e', 'BEFORE_AKAMOVIES_TODB:SET IDENTITY_INSERT %(table)s ON;',
                  '-e', 'AFTER_AKAMOVIES_TODB:SET IDENTITY_INSERT %(table)s OFF;']
# SQLite: disable synchronous writes and wrap the heavy phases in
# explicit transactions.
# BUGFIX: the last flag was 'e' instead of '-e', so the final
# 'END:COMMIT;' was silently treated as a positional argument and the
# closing COMMIT custom query was never registered.
SQLITE_OPTS = ['-e', 'BEGIN:PRAGMA synchronous = OFF;',
               '-e', 'BEFORE_EVERY_TODB:BEGIN TRANSACTION;',
               '-e', 'AFTER_EVERY_TODB:COMMIT;',
               '-e', 'BEFORE_INDEXES:BEGIN TRANSACTION;',
               '-e', 'END:COMMIT;']
# Expand the compatibility shortcuts into their -e/--execute equivalents
# by appending them to sys.argv, so getopt picks them up below.
if '--mysql-innodb' in sys.argv[1:]:
    sys.argv += MYSQLINNODB_OPTS
if '--mysql-force-myisam' in sys.argv[1:]:
    sys.argv += MYSQLFORCEMYISAM_OPTS
if '--ms-sqlserver' in sys.argv[1:]:
    sys.argv += SQLSERVER_OPTS
if '--sqlite-transactions' in sys.argv[1:]:
    sys.argv += SQLITE_OPTS
# Manage arguments list.
# FIX: the caught getopt.error is now actually printed; previously it
# was bound to `e` and silently discarded, hiding which argument was bad.
try:
    optlist, args = getopt.getopt(sys.argv[1:], 'u:d:e:c:i:h',
                                  ['uri=', 'data=', 'execute=',
                                   'mysql-innodb', 'ms-sqlserver',
                                   'sqlite-transactions',
                                   'fix-old-style-titles',
                                   'mysql-force-myisam',
                                   'csv-only-write',
                                   'csv-only-load',
                                   'csv=', 'csv-ext=',
                                   'imdbids=', 'help'])
except getopt.error as e:
    print('Troubles with arguments.')
    print(e)
    print(HELP)
    sys.exit(2)
# Walk the parsed options and fill in the module-level configuration.
for opt in optlist:
    if opt[0] in ('-d', '--data'):
        IMDB_PTDF_DIR = opt[1]
    elif opt[0] in ('-u', '--uri'):
        URI = opt[1]
    elif opt[0] in ('-c', '--csv'):
        CSV_DIR = opt[1]
    elif opt[0] == '--csv-ext':
        CSV_EXT = opt[1]
    elif opt[0] in ('-i', '--imdbids'):
        IMDBIDS_METHOD = opt[1]
    elif opt[0] in ('-e', '--execute'):
        # Custom queries have the form "WHEN:SQL"; reject malformed ones.
        if opt[1].find(':') == -1:
            print('WARNING: wrong command syntax: "%s"' % opt[1])
            continue
        when, cmd = opt[1].split(':', 1)
        if when not in ALLOWED_TIMES:
            print('WARNING: unknown time: "%s"' % when)
            continue
        # The *_EVERY_TODB pseudo-times fan out to every per-table time.
        if when == 'BEFORE_EVERY_TODB':
            for nw in ('BEFORE_MOVIES_TODB', 'BEFORE_PERSONS_TODB',
                       'BEFORE_SQLDATA_TODB', 'BEFORE_AKAMOVIES_TODB',
                       'BEFORE_CHARACTERS_TODB', 'BEFORE_COMPANIES_TODB'):
                CUSTOM_QUERIES.setdefault(nw, []).append(cmd)
        elif when == 'AFTER_EVERY_TODB':
            for nw in ('AFTER_MOVIES_TODB', 'AFTER_PERSONS_TODB',
                       'AFTER_SQLDATA_TODB', 'AFTER_AKAMOVIES_TODB',
                       'AFTER_CHARACTERS_TODB', 'AFTER_COMPANIES_TODB'):
                CUSTOM_QUERIES.setdefault(nw, []).append(cmd)
        else:
            CUSTOM_QUERIES.setdefault(when, []).append(cmd)
    elif opt[0] == '--fix-old-style-titles':
        warnings.warn('The --fix-old-style-titles argument is obsolete.')
    elif opt[0] == '--csv-only-write':
        CSV_ONLY_WRITE = True
    elif opt[0] == '--csv-only-load':
        CSV_ONLY_LOAD = True
    elif opt[0] in ('-h', '--help'):
        print(HELP)
        sys.exit(0)
# Validate the mandatory/consistent combinations of options; every
# failure prints the help text and exits with a non-zero status.
if IMDB_PTDF_DIR is None:
    print('You must supply the directory with the plain text data files')
    print(HELP)
    sys.exit(2)
if URI is None:
    print('You must supply the URI for the database connection')
    print(HELP)
    sys.exit(2)
if IMDBIDS_METHOD not in (None, 'dbm', 'table'):
    print('the method to (re)store imdbIDs must be one of "dbm" or "table"')
    print(HELP)
    sys.exit(2)
if (CSV_ONLY_WRITE or CSV_ONLY_LOAD) and not CSV_DIR:
    print('You must specify the CSV directory with the -c argument')
    print(HELP)
    sys.exit(3)
# Some warnings and notices: purely informational output matching the
# database scheme in the URI against the compatibility options given.
URIlower = URI.lower()
if URIlower.startswith('mysql'):
    if '--mysql-force-myisam' in sys.argv[1:] and \
            '--mysql-innodb' in sys.argv[1:]:
        print('\nWARNING: there is no sense in mixing the --mysql-innodb and\n'
              '--mysql-force-myisam command line options!\n')
    elif '--mysql-innodb' in sys.argv[1:]:
        print("\nNOTICE: you've specified the --mysql-innodb command line\n"
              "option; you should do this ONLY IF your system uses InnoDB\n"
              "tables or you really want to use InnoDB; if you're running\n"
              "a MyISAM-based database, please omit any option; if you\n"
              "want to force MyISAM usage on a InnoDB-based database,\n"
              "try the --mysql-force-myisam command line option, instead.\n")
    elif '--mysql-force-myisam' in sys.argv[1:]:
        print("\nNOTICE: you've specified the --mysql-force-myisam command\n"
              "line option; you should do this ONLY IF your system uses\n"
              "InnoDB tables and you want to use MyISAM tables, instead.\n")
    else:
        print("\nNOTICE: IF you're using InnoDB tables, data insertion can\n"
              "be very slow; you can switch to MyISAM tables - forcing it\n"
              "with the --mysql-force-myisam option - OR use the\n"
              "--mysql-innodb command line option, but DON'T USE these if\n"
              "you're already working on MyISAM tables, because it will\n"
              "force MySQL to use InnoDB, and performances will be poor.\n")
elif URIlower.startswith('mssql') and \
        '--ms-sqlserver' not in sys.argv[1:]:
    print("\nWARNING: you're using MS SQLServer without the --ms-sqlserver\n"
          "command line option: if something goes wrong, try using it.\n")
elif URIlower.startswith('sqlite') and \
        '--sqlite-transactions' not in sys.argv[1:]:
    print("\nWARNING: you're using SQLite without the --sqlite-transactions\n"
          "command line option: you'll have very poor performances! Try\n"
          "using it.\n")
# Warn when a compatibility option doesn't match the URI scheme.
if ('--mysql-force-myisam' in sys.argv[1:] and
        not URIlower.startswith('mysql')) or ('--mysql-innodb' in
        sys.argv[1:] and not URIlower.startswith('mysql')) or ('--ms-sqlserver'
        in sys.argv[1:] and not URIlower.startswith('mssql')) or \
        ('--sqlite-transactions' in sys.argv[1:] and
        not URIlower.startswith('sqlite')):
    print("\nWARNING: you've specified command line options that don't\n"
          "belong to the database server you're using: proceed at your\n"
          "own risk!\n")
# Pick the bulk-load statement template matching the database server;
# CSV import is only supported for MySQL, PostgreSQL and DB2.
if CSV_DIR:
    if URIlower.startswith('mysql'):
        CSV_LOAD_SQL = CSV_MYSQL
    elif URIlower.startswith('postgres'):
        CSV_LOAD_SQL = CSV_PGSQL
    elif URIlower.startswith('ibm'):
        CSV_LOAD_SQL = CSV_DB2
        # DB2 uses an empty string (not the literal 'NULL') for NULLs.
        CSV_NULL = ''
    else:
        print("\nERROR: importing CSV files is not supported for this database")
        # Writing-only mode can still proceed: files are written but
        # never loaded by this script.
        if not CSV_ONLY_WRITE:
            sys.exit(3)
DB_TABLES = getDBTables(URI)
# Inject every table object into the module namespace under its imdbpy
# name, so the rest of the script can refer to tables by bare name.
for t in DB_TABLES:
    globals()[t._imdbpyName] = t
#-----------------------
# CSV Handling.
class CSVCursor(object):
    """Emulate a cursor object, but instead it writes data to a set
    of CSV files.

    One CSV file per table is kept open in self._fdPool; tables that
    need a synthetic auto-increment ID get a per-table counter in
    self._counters.  For DB2, person_info's long text goes into a
    separate LOB file (self._lobFDPool).
    """
    def __init__(self, csvDir, csvExt=CSV_EXT, csvEOL=CSV_EOL,
                 delimeter=CSV_DELIMITER, quote=CSV_QUOTE, escape=CSV_ESCAPE,
                 null=CSV_NULL, quoteInteger=CSV_QUOTEINT):
        """Initialize a CSVCursor object; csvDir is the directory where the
        CSV files will be stored."""
        # NOTE: 'delimeter' (sic) is kept misspelled because it is part
        # of the public constructor signature.
        self.csvDir = csvDir
        self.csvExt = csvExt
        self.csvEOL = csvEOL
        self.delimeter = delimeter
        self.quote = quote
        self.escape = escape
        # Pre-computed escaped-quote sequence used by buildLine.
        self.escaped = '%s%s' % (escape, quote)
        self.null = null
        self.quoteInteger = quoteInteger
        self._fdPool = {}
        self._lobFDPool = {}
        self._counters = {}
    def buildLine(self, items, tableToAddID=False, rawValues=(),
                  lobFD=None, lobFN=None):
        """Build a single text line for a set of information."""
        # FIXME: there are too many special cases to handle, and that
        # affects performances: management of LOB files, at least,
        # must be moved away from here.
        quote = self.quote
        null = self.null
        escaped = self.escaped
        quoteInteger = self.quoteInteger
        # When tableToAddID is set, prepend (and bump) the per-table
        # auto-increment counter as the first column.
        if not tableToAddID:
            r = []
        else:
            _counters = self._counters
            r = [_counters[tableToAddID]]
            _counters[tableToAddID] += 1
        r += list(items)
        # Render every value: NULLs, unquoted integers, and quoted
        # strings with embedded quotes escaped.
        for idx, val in enumerate(r):
            if val is None:
                r[idx] = null
                continue
            if (not quoteInteger) and isinstance(val, int):
                r[idx] = str(val)
                continue
            # Column 3 is handled by the LOB logic below, if active.
            if lobFD and idx == 3:
                continue
            val = str(val)
            if quote:
                val = '%s%s%s' % (quote, val.replace(quote, escaped), quote)
            r[idx] = val
        # Add RawValue(s), if present.
        rinsert = r.insert
        if tableToAddID:
            shift = 1
        else:
            shift = 0
        for idx, item in rawValues:
            rinsert(idx + shift, item)
        if lobFD:
            # XXX: totally tailored to suit person_info.info column!
            # The CSV cell is replaced by a "file.offset.length/" locator
            # and the actual text is appended to the LOB file.
            val3 = r[3]
            val3len = len(val3 or '') or -1
            if val3len == -1:
                val3off = 0
            else:
                val3off = lobFD.tell()
            r[3] = '%s.%d.%d/' % (lobFN, val3off, val3len)
            # NOTE(review): if r[3] was None, lobFD.write(None) would
            # raise; presumably person_info.info is never None - verify.
            lobFD.write(val3)
        # Build the line and add the end-of-line.
        ret = '%s%s' % (self.delimeter.join(r), self.csvEOL)
        ret = ret.encode('latin1', 'ignore')
        return ret
    def executemany(self, sqlstr, items):
        """Emulate the executemany method of a cursor, but writes the
        data in a set of CSV files."""
        # XXX: find a safer way to get the table/file name!
        # (assumes statements of the form "INSERT INTO <table> ...",
        # as built by createSQLstr.)
        tName = sqlstr.split()[2]
        lobFD = None
        lobFN = None
        doLOB = False
        # XXX: ugly special case, to create the LOB file.
        if URIlower.startswith('ibm') and tName == 'person_info':
            doLOB = True
        # Open the file descriptor or get it from the pool.
        if tName in self._fdPool:
            tFD = self._fdPool[tName]
            lobFD = self._lobFDPool.get(tName)
            lobFN = getattr(lobFD, 'name', None)
            if lobFN:
                lobFN = os.path.basename(lobFN)
        else:
            tFD = open(os.path.join(CSV_DIR, tName + self.csvExt), 'wb')
            self._fdPool[tName] = tFD
            if doLOB:
                lobFN = '%s.lob' % tName
                lobFD = open(os.path.join(CSV_DIR, lobFN), 'wb')
                self._lobFDPool[tName] = lobFD
        buildLine = self.buildLine
        # These tables have no explicit ID column in the INSERT, so a
        # synthetic auto-increment value is prepended to every row.
        tableToAddID = False
        if tName in ('cast_info', 'movie_info', 'person_info',
                     'movie_companies', 'movie_link', 'aka_name',
                     'complete_cast', 'movie_info_idx', 'movie_keyword'):
            tableToAddID = tName
            if tName not in self._counters:
                self._counters[tName] = 1
        # Identify if there are RawValue in the VALUES (...) portion of
        # the query.
        parIdx = sqlstr.rfind('(')
        rawValues = []
        # NOTE(review): this first assignment is redundant - it is
        # repeated unchanged inside the "if" below.
        vals = sqlstr[parIdx + 1:-1]
        if parIdx != 0:
            vals = sqlstr[parIdx + 1:-1]
            # Anything that is not a DBAPI placeholder (%s, ?, :name)
            # is a raw value to re-insert verbatim at that position.
            for idx, item in enumerate(vals.split(', ')):
                if item[0] in ('%', '?', ':'):
                    continue
                rawValues.append((idx, item))
        # Write these lines.
        tFD.writelines(buildLine(i, tableToAddID=tableToAddID,
                       rawValues=rawValues, lobFD=lobFD, lobFN=lobFN)
                       for i in items)
        # Flush to disk, so that no truncated entries are ever left.
        # XXX: is this a good idea?
        tFD.flush()
    def fileNames(self):
        """Return the list of file names."""
        return [fd.name for fd in list(self._fdPool.values())]
    def buildFakeFileNames(self):
        """Populate the self._fdPool dictionary with fake objects
        taking file names from the content of the self.csvDir directory."""
        # Used in --csv-only-load mode, where the files already exist
        # and only their names (not open descriptors) are needed.
        class _FakeFD(object):
            pass
        for fname in os.listdir(self.csvDir):
            if not fname.endswith(CSV_EXT):
                continue
            fpath = os.path.join(self.csvDir, fname)
            if not os.path.isfile(fpath):
                continue
            fd = _FakeFD()
            fd.name = fname
            self._fdPool[fname[:-len(CSV_EXT)]] = fd
    def close(self, tName):
        """Close a given table/file."""
        if tName in self._fdPool:
            self._fdPool[tName].close()
    def closeAll(self):
        """Close all open file descriptors."""
        for fd in list(self._fdPool.values()):
            fd.close()
        for fd in list(self._lobFDPool.values()):
            fd.close()
def loadCSVFiles():
    """Load every CSV file written by CSV_CURS into the database.

    For each table/file pair, the server-specific bulk-load statement is
    built from CSV_LOAD_SQL (set according to the URI scheme) and
    executed; a failure on one file is reported and that file skipped.
    Custom BEFORE_CSV_TODB/AFTER_CSV_TODB queries run around each load.
    """
    CSV_REPL = {'quote': CSV_QUOTE, 'delimiter': CSV_DELIMITER,
                'escape': CSV_ESCAPE, 'null': CSV_NULL, 'eol': CSV_EOL}
    for fName in CSV_CURS.fileNames():
        connectObject.commit()
        tName = os.path.basename(fName[:-len(CSV_EXT)])
        cfName = os.path.join(CSV_DIR, fName)
        CSV_REPL['file'] = cfName
        CSV_REPL['table'] = tName
        sqlStr = CSV_LOAD_SQL % CSV_REPL
        print('  * LOADING CSV FILE %s...' % cfName)
        sys.stdout.flush()
        executeCustomQueries('BEFORE_CSV_TODB')
        try:
            CURS.execute(sqlStr)
            try:
                res = CURS.fetchall()
                if res:
                    print('LOADING OUTPUT:', res)
            except Exception:
                # Some drivers raise when there is nothing to fetch; the
                # load itself already succeeded, so this is best-effort.
                # FIX: was a bare "except:", which would also swallow
                # KeyboardInterrupt/SystemExit.
                pass
        except Exception as e:
            print('ERROR: unable to import CSV file %s: %s' % (cfName, str(e)))
            continue
        connectObject.commit()
        executeCustomQueries('AFTER_CSV_TODB')
#-----------------------
# Open the database connection through the alchemy adapter.
conn = setConnection(URI, DB_TABLES)
if CSV_DIR:
    # Go for a CSV ride...
    CSV_CURS = CSVCursor(CSV_DIR)
# Extract exceptions to trap; fall back to Exception when the driver
# module doesn't expose them (which would over-catch - hence the warn).
try:
    OperationalError = conn.module.OperationalError
except AttributeError as e:
    warnings.warn('Unable to import OperationalError; report this as a bug, '
                  'since it will mask important exceptions: %s' % e)
    OperationalError = Exception
try:
    IntegrityError = conn.module.IntegrityError
except AttributeError as e:
    warnings.warn('Unable to import IntegrityError')
    IntegrityError = Exception
connectObject = conn.getConnection()
# Cursor object.
CURS = connectObject.cursor()
# Name of the database and style of the parameters (PEP 249 paramstyle).
DB_NAME = conn.dbName
PARAM_STYLE = conn.paramstyle
def _get_imdbids_method():
    """Return the method used to (re)store imdbIDs: 'dbm' or 'table'.

    An explicit -i/--imdbids choice always wins; otherwise 'dbm' is the
    default in CSV mode and 'table' everywhere else.
    """
    if IMDBIDS_METHOD:
        return IMDBIDS_METHOD
    return 'dbm' if CSV_DIR else 'table'
def tableName(table):
    """Return the name of *table* in the current database."""
    meta = table.sqlmeta
    return meta.table
def colName(table, column):
    """Return the database-level name of *column* in *table*.

    The synthetic 'id' column maps to the table's idName; any other
    column is resolved through the sqlmeta columns mapping.
    """
    meta = table.sqlmeta
    if column == 'id':
        return meta.idName
    return meta.columns[column].dbName
class RawValue(object):
    """A raw SQL fragment to embed verbatim in a query.

    Unlike ordinary values, a RawValue is never replaced with a
    positional/named parameter placeholder; str() yields the fragment.
    """
    def __init__(self, s, v):
        self.string = s
        self.value = v
    def __str__(self):
        return self.string
def _makeConvNamed(cols):
"""Return a function to be used to convert a list of parameters
from positional style to named style (convert from a list of
tuples to a list of dictionaries."""
nrCols = len(cols)
def _converter(params):
for paramIndex, paramSet in enumerate(params):
d = {}
for i in range(nrCols):
d[cols[i]] = paramSet[i]
params[paramIndex] = d
return params
return _converter
def createSQLstr(table, cols, command='INSERT'):
    """Given a table and a list of columns returns a sql statement
    useful to insert a set of data in the database.
    Along with the string, also a function useful to convert parameters
    from positional to named style is returned."""
    sqlstr = '%s INTO %s ' % (command, tableName(table))
    colNames = []
    values = []
    convCols = []
    count = 1
    def _valStr(s, index):
        # Render one value placeholder according to the current DBAPI
        # paramstyle (PEP 249); mysql/postgres always use '%s'.
        if DB_NAME in ('mysql', 'postgres'):
            return '%s'
        elif PARAM_STYLE == 'format':
            return '%s'
        elif PARAM_STYLE == 'qmark':
            return '?'
        elif PARAM_STYLE == 'numeric':
            return ':%s' % index
        elif PARAM_STYLE == 'named':
            return ':%s' % s
        elif PARAM_STYLE == 'pyformat':
            return '%(' + s + ')s'
        return '%s'
    for col in cols:
        # RawValue entries are embedded verbatim and never converted;
        # everything else becomes a placeholder and is tracked in
        # convCols for the positional->named converter.
        if isinstance(col, RawValue):
            colNames.append(colName(table, col.string))
            values.append(str(col.value))
        elif col == 'id':
            colNames.append(table.sqlmeta.idName)
            values.append(_valStr('id', count))
            convCols.append(col)
            count += 1
        else:
            colNames.append(colName(table, col))
            values.append(_valStr(col, count))
            convCols.append(col)
            count += 1
    sqlstr += '(%s) ' % ', '.join(colNames)
    sqlstr += 'VALUES (%s)' % ', '.join(values)
    # Only named/pyformat drivers need a real parameter conversion.
    if DB_NAME not in ('mysql', 'postgres') and \
            PARAM_STYLE in ('named', 'pyformat'):
        converter = _makeConvNamed(convCols)
    else:
        # Return the list itself.
        converter = lambda x: x
    return sqlstr, converter
def _(s, truncateAt=None):
"""Nicely print a string to sys.stdout, optionally
truncating it a the given char."""
if truncateAt is not None:
s = s[:truncateAt]
return s
# On platforms whose os module lacks times(), install a stub returning
# five zeroed values, so the timing code below keeps working.
if not hasattr(os, 'times'):
    def times():
        """Fake times() function."""
        return 0.0, 0.0, 0.0, 0.0, 0.0
    os.times = times
# Show time consumed by the single function call.
# CTIME/CTIMES track the last checkpoint; BEGIN_* keep the start of the
# whole run (used by t(..., sinceBegin=True)).
CTIME = int(time.time())
BEGIN_TIME = CTIME
CTIMES = os.times()
BEGIN_TIMES = CTIMES
def _minSec(*t):
"""Return a tuple of (mins, secs, ...) - two for every item passed."""
l = []
for i in t:
l.extend(divmod(int(i), 60))
return tuple(l)
def t(s, sinceBegin=False):
    """Pretty-print wall/user/system time elapsed for phase *s*.

    Times are measured since the previous call (and the counters are
    then reset), or since the start of the run when *sinceBegin* is
    true (in which case nothing is reset).
    """
    global CTIME, CTIMES
    now = int(time.time())
    nowTimes = os.times()
    if sinceBegin:
        refTime, refTimes = BEGIN_TIME, BEGIN_TIMES
    else:
        refTime, refTimes = CTIME, CTIMES
    print('# TIME', s,
          ': %dmin, %dsec (wall) %dmin, %dsec (user) %dmin, %dsec (system)'
          % _minSec(now - refTime, nowTimes[0] - refTimes[0],
                    nowTimes[1] - refTimes[1]))
    if not sinceBegin:
        CTIME = now
        CTIMES = nowTimes
def title_soundex(title):
    """Return the soundex code for the given title, with any trailing
    article pruned.

    The input is expected to be a bare title string (no year/imdbIndex
    or kind markers), as found in analyze_title(title)['title'].
    """
    if not title:
        return None
    # Work on the canonical form ("Title, The").
    canonical = canonicalTitle(title)
    parts = canonical.split(', ')
    # Strip the ending article, if any.
    if parts[-1].lower() in _articles:
        canonical = ', '.join(parts[:-1])
    return soundex(canonical)
def name_soundexes(name, character=False):
    """Return three soundex codes for the given name.

    The name is expected in 'surname, name' format (no imdbIndex), as
    found in analyze_name(name)['name'].  The codes are:
      1. soundex of the canonical form;
      2. soundex of the normalized form, or None if equal to #1;
      3. soundex of the surname (or of the last word, for characters),
         or None if equal to #1 or #2.
    """
    if not name:
        return None, None, None
    s1 = soundex(name)
    s2 = soundex(normalizeName(name))
    if s2 == s1:
        s2 = None
    if character:
        s3 = soundex(name.split(' ')[-1])
    else:
        s3 = soundex(name.split(', ')[0])
    if s3 and s3 in (s1, s2):
        s3 = None
    return s1, s2, s3
# Tags to identify where the meaningful data begin/end in files.
MOVIES = 'movies.list.gz'
MOVIES_START = ('MOVIES LIST', '===========', '')
MOVIES_STOP = '--------------------------------------------------'
CAST_START = ('Name', '----')
CAST_STOP = '-----------------------------'
RAT_START = ('MOVIE RATINGS REPORT', '',
'New Distribution Votes Rank Title')
RAT_STOP = '\n'
RAT_TOP250_START = ('note: for this top 250', '', 'New Distribution')
RAT_BOT10_START = ('BOTTOM 10 MOVIES', '', 'New Distribution')
TOPBOT_STOP = '\n'
AKAT_START = ('AKA TITLES LIST', '=============', '', '', '')
AKAT_IT_START = ('AKA TITLES LIST ITALIAN', '=======================', '', '')
AKAT_DE_START = ('AKA TITLES LIST GERMAN', '======================', '')
AKAT_ISO_START = ('AKA TITLES LIST ISO', '===================', '')
AKAT_HU_START = ('AKA TITLES LIST HUNGARIAN', '=========================', '')
AKAT_NO_START = ('AKA TITLES LIST NORWEGIAN', '=========================', '')
AKAN_START = ('AKA NAMES LIST', '=============', '')
AV_START = ('ALTERNATE VERSIONS LIST', '=======================', '', '')
MINHASH_STOP = '-------------------------'
GOOFS_START = ('GOOFS LIST', '==========', '')
QUOTES_START = ('QUOTES LIST', '=============')
CC_START = ('CRAZY CREDITS', '=============')
BIO_START = ('BIOGRAPHY LIST', '==============')
BUS_START = ('BUSINESS LIST', '=============', '')
BUS_STOP = ' ====='
CER_START = ('CERTIFICATES LIST', '=================')
COL_START = ('COLOR INFO LIST', '===============')
COU_START = ('COUNTRIES LIST', '==============')
DIS_START = ('DISTRIBUTORS LIST', '=================', '')
GEN_START = ('8: THE GENRES LIST', '==================', '')
KEY_START = ('8: THE KEYWORDS LIST', '====================', '')
LAN_START = ('LANGUAGE LIST', '=============')
LOC_START = ('LOCATIONS LIST', '==============', '')
MIS_START = ('MISCELLANEOUS COMPANY LIST', '============================')
MIS_STOP = '--------------------------------------------------------------------------------'
PRO_START = ('PRODUCTION COMPANIES LIST', '=========================', '')
RUN_START = ('RUNNING TIMES LIST', '==================')
SOU_START = ('SOUND-MIX LIST', '==============')
SFX_START = ('SFXCO COMPANIES LIST', '====================', '')
TCN_START = ('TECHNICAL LIST', '==============', '', '')
LSD_START = ('LASERDISC LIST', '==============', '------------------------')
LIT_START = ('LITERATURE LIST', '===============', '')
LIT_STOP = 'COPYING POLICY'
LINK_START = ('MOVIE LINKS LIST', '================', '')
MPAA_START = ('MPAA RATINGS REASONS LIST', '=========================')
PLOT_START = ('PLOT SUMMARIES LIST', '===================', '')
RELDATE_START = ('RELEASE DATES LIST', '==================')
SNDT_START = ('SOUNDTRACKS', '=============', '', '', '')
TAGL_START = ('TAG LINES LIST', '==============', '', '')
TAGL_STOP = '-----------------------------------------'
TRIV_START = ('FILM TRIVIA', '===========', '')
COMPCAST_START = ('CAST COVERAGE TRACKING LIST', '===========================')
COMPCREW_START = ('CREW COVERAGE TRACKING LIST', '===========================')
COMP_STOP = '---------------'
GzipFileRL = GzipFile.readline
class SourceFile(GzipFile):
    """Instances of this class are used to read gzipped files,
    starting from a defined line to a (optionally) given end.

    The constructor skips the header lines listed in *start*; when a
    *stop* marker is given, readline() is swapped for a variant that
    returns '' (EOF) as soon as a line begins with the marker.
    """
    def __init__(self, filename=None, mode=None, start=(), stop=None,
                 pwarning=1, *args, **kwds):
        filename = os.path.join(IMDB_PTDF_DIR, filename)
        try:
            GzipFile.__init__(self, filename, mode, *args, **kwds)
        except IOError as e:
            if not pwarning:
                raise
            print('WARNING WARNING WARNING')
            print('WARNING unable to read the "%s" file.' % filename)
            print('WARNING The file will be skipped, and the contained')
            print('WARNING information will NOT be stored in the database.')
            print('WARNING Complete error: ', e)
            # re-raise the exception.
            raise
        self.start = start
        # Consume header lines until each start marker has been seen.
        for item in start:
            itemlen = len(item)
            for line in self:
                line = line.decode('latin1')
                if line[:itemlen] == item:
                    break
        self.set_stop(stop)
    def set_stop(self, stop):
        # Swap the readline implementation depending on whether an end
        # marker must be watched for.
        if stop is not None:
            self.stop = stop
            self.stoplen = len(self.stop)
            self.readline = self.readline_checkEnd
        else:
            self.readline = self.readline_NOcheckEnd
    def readline_NOcheckEnd(self, size=-1):
        # Decode each raw line as latin-1, ignoring undecodable bytes.
        line = GzipFile.readline(self, size)
        return str(line, 'latin_1', 'ignore')
    def readline_checkEnd(self, size=-1):
        line = GzipFile.readline(self, size)
        # NOTE(review): GzipFile.readline returns bytes while self.stop
        # is a str, so this comparison looks always-False on Python 3 -
        # verify that stop markers actually trigger.
        if self.stop is not None and line[:self.stoplen] == self.stop:
            return ''
        return str(line, 'latin_1', 'ignore')
    def getByHashSections(self):
        return getSectionHash(self)
    def getByNMMVSections(self):
        return getSectionNMMV(self)
def getSectionHash(fp):
    """Yield (title, text) sections separated by lines starting with '#'.

    The title is everything after the '# ' prefix; the text is the
    concatenation of the following non-header lines.
    """
    section_lines = []
    section_title = ''
    for line in fp:
        if line and line[0] == '#':
            # A new '# ...' header: emit the section collected so far.
            # Lines seen before the first header (title still empty) are
            # kept and attached to the first real section, exactly as the
            # historical behavior.
            if section_lines and section_title:
                yield section_title, ''.join(section_lines)
                section_lines = []
            section_title = line[2:]
        else:
            section_lines.append(line)
    # Emit the trailing section, if any.
    if section_lines and section_title:
        yield section_title, ''.join(section_lines)
# Line prefixes that open a new section in files parsed by
# getSectionNMMV(); only key membership is tested, values are None.
# dict.fromkeys() replaces the old dict([(x, None) for x in ...]) idiom.
NMMVSections = dict.fromkeys(('MV: ', 'NM: ', 'OT: ', 'MOVI'))
def getSectionNMMV(fp):
    """Return sections separated by lines starting with 'NM: ', 'MV: ',
    'OT: ' or 'MOVI'."""
    # Prefixes opening a new section (same set as the module-level
    # NMMVSections mapping, inlined here as a plain tuple).
    section_markers = ('MV: ', 'NM: ', 'OT: ', 'MOVI')
    body = []
    tag = ''
    for line in fp:
        if line[:4] in section_markers:
            # New section header: emit the one collected so far.
            if body and tag:
                yield tag, ''.join(body)
                body = []
            # 'MOVIE' lines carry their value after 6 chars, the
            # two-letter tags after 4.
            tag = line[6:] if line[:4] == 'MOVI' else line[4:]
        elif not (line and line[0] == '-'):
            # Separator lines ('----') are dropped, everything else is body.
            body.append(line)
    # Emit the trailing section, if any.
    if body and tag:
        yield tag, ''.join(body)
def counter(initValue=1):
    """Yield consecutive integers forever, starting at initValue."""
    value = initValue
    while True:
        yield value
        value += 1
class _BaseCache(dict):
    """Base class for Movie and Person basic information.

    Maps a key (e.g. a long-imdb-title string) to a progressive numeric
    ID; new entries are buffered in _tmpDict and periodically flushed to
    the SQL database.
    """
    def __init__(self, d=None, flushEvery=100000):
        dict.__init__(self)
        # Flush data into the SQL database every flushEvery entries.
        self.flushEvery = flushEvery
        # Entries accumulated since the last flush.
        self._tmpDict = {}
        # Non-zero while a flush is in progress.
        self._flushing = 0
        # Entries set while a flush is running; written at the end of it.
        self._deferredData = {}
        self._recursionLevel = 0
        # Subclasses set these; they drive the custom-queries hooks.
        self._table_name = ''
        self._id_for_custom_q = ''
        if d is not None:
            for k, v in d.items():
                self[k] = v
    def __setitem__(self, key, counter):
        """Every time a key is set, its value is the counter;
        every flushEvery, the temporary dictionary is
        flushed to the database, and then zeroed."""
        if counter % self.flushEvery == 0:
            self.flush()
        dict.__setitem__(self, key, counter)
        if not self._flushing:
            self._tmpDict[key] = counter
        else:
            # Don't touch _tmpDict while it is being written out.
            self._deferredData[key] = counter
    def flush(self, quiet=0, _recursionLevel=0):
        """Flush to the database.

        On OperationalError (dataset too large) the pending data is split
        in two halves and each half is flushed recursively, up to
        MAX_RECURSION levels.
        """
        if self._flushing:
            return
        self._flushing = 1
        if _recursionLevel >= MAX_RECURSION:
            print('WARNING recursion level exceded trying to flush data')
            print('WARNING this batch of data is lost (%s).' % self.className)
            self._tmpDict.clear()
            return
        if self._tmpDict:
            # Horrible hack to know if AFTER_%s_TODB has run.
            _after_has_run = False
            keys = {'table': self._table_name}
            try:
                executeCustomQueries('BEFORE_%s_TODB' % self._id_for_custom_q,
                                     _keys=keys, _timeit=False)
                self._toDB(quiet)
                executeCustomQueries('AFTER_%s_TODB' % self._id_for_custom_q,
                                     _keys=keys, _timeit=False)
                _after_has_run = True
                self._tmpDict.clear()
            except OperationalError as e:
                # XXX: I'm not sure this is the right thing (and way)
                # to proceed.
                if not _after_has_run:
                    executeCustomQueries('AFTER_%s_TODB' % self._id_for_custom_q,
                                         _keys=keys, _timeit=False)
                # Dataset too large; split it in two and retry.
                # XXX: new code!
                # the same class instance (self) is used, instead of
                # creating two separated objects.
                _recursionLevel += 1
                self._flushing = 0
                firstHalf = {}
                poptmpd = self._tmpDict.popitem
                originalLength = len(self._tmpDict)
                for x in range(1, 1 + originalLength // 2):
                    k, v = poptmpd()
                    firstHalf[k] = v
                self._secondHalf = self._tmpDict
                self._tmpDict = firstHalf
                print(' * TOO MANY DATA (%s items in %s), recursion: %s' %
                      (originalLength,
                       self.className,
                       _recursionLevel))
                print(' * SPLITTING (run 1 of 2), recursion: %s' %
                      _recursionLevel)
                self.flush(quiet=quiet, _recursionLevel=_recursionLevel)
                self._tmpDict = self._secondHalf
                print(' * SPLITTING (run 2 of 2), recursion: %s' %
                      _recursionLevel)
                self.flush(quiet=quiet, _recursionLevel=_recursionLevel)
                self._tmpDict.clear()
            except Exception as e:
                if isinstance(e, KeyboardInterrupt):
                    raise
                # Deliberate best-effort: losing a batch is preferred to
                # aborting the whole import run.
                print('WARNING: %s; unknown exception caught committing the data' % self.className)
                print('WARNING: to the database; report this as a bug, since')
                print('WARNING: many data (%d items) were lost: %s' %
                      (len(self._tmpDict), e))
        self._flushing = 0
        try:
            connectObject.commit()
        except:
            pass
        # Flush also deferred data.
        if self._deferredData:
            self._tmpDict = self._deferredData
            self.flush(quiet=1)
            self._deferredData = {}
        connectObject.commit()
    def populate(self):
        """Populate the dictionary from the database."""
        raise NotImplementedError
    def _toDB(self, quiet=0):
        """Write the dictionary to the database."""
        raise NotImplementedError
    def add(self, key, miscData=None):
        """Insert a new key and return its value.

        miscData is an optional list of (attribute_name, value) pairs,
        stored as attribute_name[new_id] = value on this instance.
        """
        c = next(self.counter)
        if miscData is not None:
            for d_name, data in miscData:
                getattr(self, d_name)[c] = data
        self[key] = c
        return c
    def addUnique(self, key, miscData=None):
        """Insert a new key and return its value; if the key is already
        in the dictionary, its previous value is returned."""
        if key in self:
            return self[key]
        else:
            return self.add(key, miscData)
def fetchsome(curs, size=20000):
    """Generator draining a DB-API cursor in chunks of 'size' rows.

    (Yes, I've read the Python Cookbook! :-)
    """
    while True:
        rows = curs.fetchmany(size)
        if not rows:
            return
        yield from rows
class MoviesCache(_BaseCache):
    """Manage the movies list.

    Maps long-imdb-title strings to progressive movieIDs and writes the
    corresponding rows into the Title table.
    """
    className = 'MoviesCache'
    counter = counter()
    def __init__(self, *args, **kwds):
        _BaseCache.__init__(self, *args, **kwds)
        # movieID -> 'year' column text, collected while scanning
        # movies.list (used for episode years and tv "series years").
        self.movieYear = {}
        self._table_name = tableName(Title)
        self._id_for_custom_q = 'MOVIES'
        self.sqlstr, self.converter = createSQLstr(Title, ('id', 'title',
                                     'imdbIndex', 'kindID', 'productionYear',
                                     'imdbID', 'phoneticCode', 'episodeOfID',
                                     'seasonNr', 'episodeNr', 'seriesYears',
                                     'md5sum'))
    def populate(self):
        # Rebuild the long-imdb-title string of every title already in
        # the database, so that known titles map to their existing IDs.
        print(' * POPULATING %s...' % self.className)
        titleTbl = tableName(Title)
        movieidCol = colName(Title, 'id')
        titleCol = colName(Title, 'title')
        kindidCol = colName(Title, 'kindID')
        yearCol = colName(Title, 'productionYear')
        imdbindexCol = colName(Title, 'imdbIndex')
        episodeofidCol = colName(Title, 'episodeOfID')
        seasonNrCol = colName(Title, 'seasonNr')
        episodeNrCol = colName(Title, 'episodeNr')
        sqlPop = 'SELECT %s, %s, %s, %s, %s, %s, %s, %s FROM %s;' % \
                 (movieidCol, titleCol, kindidCol, yearCol, imdbindexCol,
                  episodeofidCol, seasonNrCol, episodeNrCol, titleTbl)
        CURS.execute(sqlPop)
        _oldcacheValues = Title.sqlmeta.cacheValues
        Title.sqlmeta.cacheValues = False
        for x in fetchsome(CURS, self.flushEvery):
            mdict = {'title': x[1], 'kind': KIND_STRS[x[2]],
                     'year': x[3], 'imdbIndex': x[4]}
            if mdict['imdbIndex'] is None:
                del mdict['imdbIndex']
            if mdict['year'] is None:
                del mdict['year']
            else:
                mdict['year'] = str(mdict['year'])
            episodeOfID = x[5]
            if episodeOfID is not None:
                # Episodes need the series data to rebuild the full title.
                s = Title.get(episodeOfID)
                series_d = {'title': s.title,
                            'kind': str(KIND_STRS[s.kindID]),
                            'year': s.productionYear, 'imdbIndex': s.imdbIndex}
                if series_d['imdbIndex'] is None:
                    del series_d['imdbIndex']
                if series_d['year'] is None:
                    del series_d['year']
                else:
                    series_d['year'] = str(series_d['year'])
                mdict['episode of'] = series_d
            title = build_title(mdict, ptdf=True)
            dict.__setitem__(self, title, x[0])
        # Restart the ID counter after the highest existing row.
        self.counter = counter(Title.select().count() + 1)
        Title.sqlmeta.cacheValues = _oldcacheValues
    def _toDB(self, quiet=0):
        # Parse each pending title string and emit one Title row per entry.
        if not quiet:
            print(' * FLUSHING %s...' % self.className)
            sys.stdout.flush()
        l = []
        lapp = l.append
        for k, v in self._tmpDict.items():
            try:
                t = analyze_title(k)
            except IMDbParserError:
                if k and k.strip():
                    print('WARNING %s._toDB() invalid title:' % self.className, end=' ')
                    print(_(k))
                continue
            tget = t.get
            episodeOf = None
            kind = tget('kind')
            if kind == 'episode':
                # Series title.
                stitle = build_title(tget('episode of'), ptdf=True)
                episodeOf = self.addUnique(stitle)
                del t['episode of']
                year = self.movieYear.get(v)
                if year is not None and year != '????':
                    try:
                        t['year'] = int(year)
                    except ValueError:
                        pass
            elif kind in ('tv series', 'tv mini series'):
                t['series years'] = self.movieYear.get(v)
            title = tget('title')
            soundex = title_soundex(title)
            lapp((v, title, tget('imdbIndex'), KIND_IDS[kind],
                  tget('year'), None, soundex, episodeOf,
                  tget('season'), tget('episode'), tget('series years'),
                  md5(k.encode('latin1')).hexdigest()))
        self._runCommand(l)
    def _runCommand(self, dataList):
        # Write rows to the database directly or to the CSV staging files.
        if not CSV_DIR:
            CURS.executemany(self.sqlstr, self.converter(dataList))
        else:
            CSV_CURS.executemany(self.sqlstr, dataList)
    def addUnique(self, key, miscData=None):
        """Insert a new key and return its value; if the key is already
        in the dictionary, its previous value is returned.

        Suspended titles are never stored; None is returned for them.
        """
        if key.endswith('{{SUSPENDED}}'):
            return None
        if key in self:
            return self[key]
        else:
            return self.add(key, miscData)
class PersonsCache(_BaseCache):
    """Manage the persons list.

    Maps long-imdb-name strings to progressive personIDs and writes the
    corresponding rows into the Name table.
    """
    className = 'PersonsCache'
    counter = counter()
    def __init__(self, *args, **kwds):
        _BaseCache.__init__(self, *args, **kwds)
        # personID -> 'm'/'f', collected while scanning the actors/
        # actresses lists.
        self.personGender = {}
        self._table_name = tableName(Name)
        self._id_for_custom_q = 'PERSONS'
        self.sqlstr, self.converter = createSQLstr(Name, ['id', 'name',
                                     'imdbIndex', 'imdbID', 'gender', 'namePcodeCf',
                                     'namePcodeNf', 'surnamePcode', 'md5sum'])
    def populate(self):
        # Rebuild the long-imdb-name of every person already in the
        # database, so that known names map to their existing IDs.
        print(' * POPULATING PersonsCache...')
        nameTbl = tableName(Name)
        personidCol = colName(Name, 'id')
        nameCol = colName(Name, 'name')
        imdbindexCol = colName(Name, 'imdbIndex')
        CURS.execute('SELECT %s, %s, %s FROM %s;' % (personidCol, nameCol,
                                                     imdbindexCol, nameTbl))
        _oldcacheValues = Name.sqlmeta.cacheValues
        Name.sqlmeta.cacheValues = False
        for x in fetchsome(CURS, self.flushEvery):
            nd = {'name': x[1]}
            if x[2]:
                nd['imdbIndex'] = x[2]
            name = build_name(nd)
            dict.__setitem__(self, name, x[0])
        # Restart the ID counter after the highest existing row.
        self.counter = counter(Name.select().count() + 1)
        Name.sqlmeta.cacheValues = _oldcacheValues
    def _toDB(self, quiet=0):
        # Parse each pending name string and emit one Name row per entry.
        if not quiet:
            print(' * FLUSHING PersonsCache...')
            sys.stdout.flush()
        l = []
        lapp = l.append
        for k, v in self._tmpDict.items():
            try:
                t = analyze_name(k)
            except IMDbParserError:
                if k and k.strip():
                    print('WARNING PersonsCache._toDB() invalid name:', _(k))
                continue
            tget = t.get
            name = tget('name')
            namePcodeCf, namePcodeNf, surnamePcode = name_soundexes(name)
            gender = self.personGender.get(v)
            lapp((v, name, tget('imdbIndex'), None, gender,
                  namePcodeCf, namePcodeNf, surnamePcode,
                  md5(k.encode('latin1')).hexdigest()))
        if not CSV_DIR:
            CURS.executemany(self.sqlstr, self.converter(l))
        else:
            CSV_CURS.executemany(self.sqlstr, l)
class CharactersCache(_BaseCache):
    """Manage the characters list.

    Maps character-name strings to progressive IDs and writes the
    corresponding rows into the CharName table.
    """
    counter = counter()
    className = 'CharactersCache'
    def __init__(self, *args, **kwds):
        _BaseCache.__init__(self, *args, **kwds)
        self._table_name = tableName(CharName)
        self._id_for_custom_q = 'CHARACTERS'
        self.sqlstr, self.converter = createSQLstr(CharName, ['id', 'name',
                                     'imdbIndex', 'imdbID', 'namePcodeNf',
                                     'surnamePcode', 'md5sum'])
    def populate(self):
        # Rebuild the name of every character already in the database,
        # so that known characters map to their existing IDs.
        print(' * POPULATING CharactersCache...')
        nameTbl = tableName(CharName)
        personidCol = colName(CharName, 'id')
        nameCol = colName(CharName, 'name')
        imdbindexCol = colName(CharName, 'imdbIndex')
        CURS.execute('SELECT %s, %s, %s FROM %s;' % (personidCol, nameCol,
                                                     imdbindexCol, nameTbl))
        _oldcacheValues = CharName.sqlmeta.cacheValues
        CharName.sqlmeta.cacheValues = False
        for x in fetchsome(CURS, self.flushEvery):
            nd = {'name': x[1]}
            if x[2]:
                nd['imdbIndex'] = x[2]
            name = build_name(nd)
            dict.__setitem__(self, name, x[0])
        # Restart the ID counter after the highest existing row.
        self.counter = counter(CharName.select().count() + 1)
        CharName.sqlmeta.cacheValues = _oldcacheValues
    def _toDB(self, quiet=0):
        # Parse each pending character name and emit one CharName row.
        if not quiet:
            print(' * FLUSHING CharactersCache...')
            sys.stdout.flush()
        l = []
        lapp = l.append
        for k, v in self._tmpDict.items():
            try:
                t = analyze_name(k)
            except IMDbParserError:
                if k and k.strip():
                    print('WARNING CharactersCache._toDB() invalid name:', _(k))
                continue
            tget = t.get
            name = tget('name')
            namePcodeCf, namePcodeNf, surnamePcode = name_soundexes(name,
                                                                    character=True)
            # NOTE(review): namePcodeCf is stored in the table's
            # namePcodeNf column (and namePcodeNf is discarded); with
            # character=True this may be intentional — confirm against
            # name_soundexes().
            lapp((v, name, tget('imdbIndex'), None,
                  namePcodeCf, surnamePcode, md5(k.encode('latin1')).hexdigest()))
        if not CSV_DIR:
            CURS.executemany(self.sqlstr, self.converter(l))
        else:
            CSV_CURS.executemany(self.sqlstr, l)
class CompaniesCache(_BaseCache):
    """Manage the companies list.

    Maps long-imdb-company-name strings to progressive IDs and writes the
    corresponding rows into the CompanyName table.
    """
    counter = counter()
    className = 'CompaniesCache'
    def __init__(self, *args, **kwds):
        _BaseCache.__init__(self, *args, **kwds)
        self._table_name = tableName(CompanyName)
        self._id_for_custom_q = 'COMPANIES'
        self.sqlstr, self.converter = createSQLstr(CompanyName, ['id', 'name',
                                     'countryCode', 'imdbID', 'namePcodeNf',
                                     'namePcodeSf', 'md5sum'])
    def populate(self):
        # Rebuild the long-imdb-name of every company already in the
        # database, so that known companies map to their existing IDs.
        # FIX: this message used to say 'CharactersCache' (copy/paste bug).
        print(' * POPULATING CompaniesCache...')
        nameTbl = tableName(CompanyName)
        companyidCol = colName(CompanyName, 'id')
        nameCol = colName(CompanyName, 'name')
        countryCodeCol = colName(CompanyName, 'countryCode')
        CURS.execute('SELECT %s, %s, %s FROM %s;' % (companyidCol, nameCol,
                                                     countryCodeCol, nameTbl))
        _oldcacheValues = CompanyName.sqlmeta.cacheValues
        CompanyName.sqlmeta.cacheValues = False
        for x in fetchsome(CURS, self.flushEvery):
            nd = {'name': x[1]}
            if x[2]:
                nd['country'] = x[2]
            name = build_company_name(nd)
            dict.__setitem__(self, name, x[0])
        # Restart the ID counter after the highest existing row.
        self.counter = counter(CompanyName.select().count() + 1)
        CompanyName.sqlmeta.cacheValues = _oldcacheValues
    def _toDB(self, quiet=0):
        # Parse each pending company name and emit one CompanyName row.
        if not quiet:
            print(' * FLUSHING CompaniesCache...')
            sys.stdout.flush()
        l = []
        lapp = l.append
        for k, v in self._tmpDict.items():
            try:
                t = analyze_company_name(k)
            except IMDbParserError:
                if k and k.strip():
                    print('WARNING CompaniesCache._toDB() invalid name:', _(k))
                continue
            tget = t.get
            name = tget('name')
            namePcodeNf = soundex(name)
            namePcodeSf = None
            country = tget('country')
            # A second soundex for the full "name [country]" form, when
            # it differs from the bare name.
            if k != name:
                namePcodeSf = soundex(k)
            lapp((v, name, country, None, namePcodeNf, namePcodeSf,
                  md5(k.encode('latin1')).hexdigest()))
        if not CSV_DIR:
            CURS.executemany(self.sqlstr, self.converter(l))
        else:
            CSV_CURS.executemany(self.sqlstr, l)
class KeywordsCache(_BaseCache):
    """Manage the list of keywords.

    Maps keyword strings to progressive IDs and writes the corresponding
    rows into the Keyword table.
    """
    counter = counter()
    className = 'KeywordsCache'
    def __init__(self, *args, **kwds):
        _BaseCache.__init__(self, *args, **kwds)
        # FIX: the table backing this cache is Keyword, not CompanyName
        # (copy/paste bug from CompaniesCache); _table_name is only used
        # to tag the custom-queries hooks.
        self._table_name = tableName(Keyword)
        self._id_for_custom_q = 'KEYWORDS'
        self.flushEvery = 10000
        self.sqlstr, self.converter = createSQLstr(Keyword, ['id', 'keyword',
                                     'phoneticCode'])
    def populate(self):
        # Map every keyword already in the database to its existing ID.
        print(' * POPULATING KeywordsCache...')
        # FIX: read from the Keyword table and its 'keyword' column; the
        # original queried tableName(CompanyName) with
        # colName(Keyword, 'name') — both copy/paste bugs (the Keyword
        # column list above shows the real column is 'keyword').
        keywordTbl = tableName(Keyword)
        keywordidCol = colName(Keyword, 'id')
        keyCol = colName(Keyword, 'keyword')
        CURS.execute('SELECT %s, %s FROM %s;' % (keywordidCol, keyCol,
                                                 keywordTbl))
        _oldcacheValues = Keyword.sqlmeta.cacheValues
        Keyword.sqlmeta.cacheValues = False
        for x in fetchsome(CURS, self.flushEvery):
            dict.__setitem__(self, x[1], x[0])
        # Restart the ID counter after the highest existing row.
        self.counter = counter(Keyword.select().count() + 1)
        Keyword.sqlmeta.cacheValues = _oldcacheValues
    def _toDB(self, quiet=0):
        # Emit one (id, keyword, soundex) row per pending keyword.
        if not quiet:
            print(' * FLUSHING KeywordsCache...')
            sys.stdout.flush()
        rows = []
        for k, v in self._tmpDict.items():
            keySoundex = soundex(k)
            rows.append((v, k, keySoundex))
        if not CSV_DIR:
            CURS.executemany(self.sqlstr, self.converter(rows))
        else:
            CSV_CURS.executemany(self.sqlstr, rows)
class SQLData(dict):
    """Variable set of information, to be stored from time to time
    to the SQL database.

    Rows are accumulated under progressive integer keys; flush() writes
    them all with executemany and, on OperationalError, retries with the
    data split in halves.
    """
    def __init__(self, table=None, cols=None, sqlString='', converter=None,
                 d={}, flushEvery=20000, counterInit=1):
        # NOTE: 'd' has a mutable default, but it is only read here, so
        # the shared default object is harmless.
        if not sqlString:
            if not (table and cols):
                raise TypeError('"table" or "cols" unspecified')
            sqlString, converter = createSQLstr(table, cols)
        elif converter is None:
            raise TypeError('"sqlString" or "converter" unspecified')
        dict.__init__(self)
        self.counterInit = counterInit
        self.counter = counterInit
        self.flushEvery = flushEvery
        self.sqlString = sqlString
        self.converter = converter
        self._recursionLevel = 1
        self._table = table
        self._table_name = tableName(table)
        for k, v in list(d.items()):
            self[k] = v
    def __setitem__(self, key, value):
        """The value is discarded, the counter is used as the 'real' key
        and the user's 'key' is used as its values."""
        counter = self.counter
        if counter % self.flushEvery == 0:
            self.flush()
        dict.__setitem__(self, counter, key)
        self.counter += 1
    def add(self, key):
        # Convenience wrapper: store 'key' (a tuple of column values).
        self[key] = None
    def flush(self, _resetRecursion=1):
        if not self:
            return
        # XXX: it's safer to flush MoviesCache and PersonsCache, to preserve consistency
        CACHE_MID.flush(quiet=1)
        CACHE_PID.flush(quiet=1)
        if _resetRecursion:
            self._recursionLevel = 1
        if self._recursionLevel >= MAX_RECURSION:
            print('WARNING recursion level exceded trying to flush data')
            print('WARNING this batch of data is lost.')
            self.clear()
            self.counter = self.counterInit
            return
        keys = {'table': self._table_name}
        _after_has_run = False
        try:
            executeCustomQueries('BEFORE_SQLDATA_TODB', _keys=keys,
                                 _timeit=False)
            self._toDB()
            executeCustomQueries('AFTER_SQLDATA_TODB', _keys=keys,
                                 _timeit=False)
            _after_has_run = True
            self.clear()
            self.counter = self.counterInit
        except OperationalError as e:
            if not _after_has_run:
                executeCustomQueries('AFTER_SQLDATA_TODB', _keys=keys,
                                     _timeit=False)
            # Dataset too large: move half of the rows to a fresh SQLData
            # instance and flush the two halves separately.
            print(' * TOO MANY DATA (%s items), SPLITTING (run #%d)...' %
                  (len(self), self._recursionLevel))
            self._recursionLevel += 1
            newdata = self.__class__(table=self._table,
                                     sqlString=self.sqlString,
                                     converter=self.converter)
            newdata._recursionLevel = self._recursionLevel
            newflushEvery = self.flushEvery // 2
            if newflushEvery < 1:
                print('WARNING recursion level exceded trying to flush data')
                print('WARNING this batch of data is lost.')
                self.clear()
                self.counter = self.counterInit
                return
            self.flushEvery = newflushEvery
            newdata.flushEvery = newflushEvery
            popitem = self.popitem
            dsi = dict.__setitem__
            for x in range(len(self) // 2):
                k, v = popitem()
                dsi(newdata, k, v)
            newdata.flush(_resetRecursion=0)
            del newdata
            self.flush(_resetRecursion=0)
            self.clear()
            self.counter = self.counterInit
        except Exception as e:
            if isinstance(e, KeyboardInterrupt):
                raise
            # Deliberate best-effort: losing a batch is preferred to
            # aborting the whole import run.
            print('WARNING: SQLData; unknown exception caught committing the data')
            print('WARNING: to the database; report this as a bug, since')
            print('WARNING: many data (%d items) were lost: %s' %
                  (len(self), e))
        connectObject.commit()
    def _toDB(self):
        # Write every accumulated row to the database or CSV staging files.
        print(' * FLUSHING SQLData...')
        if not CSV_DIR:
            CURS.executemany(self.sqlString, self.converter(list(self.values())))
        else:
            CSV_CURS.executemany(self.sqlString, list(self.values()))
# Miscellaneous functions.
def unpack(line, headers, sep='\t'):
    """Split 'line' at 'sep' and return a dictionary whose keys come
    from the 'headers' sequence; fields beyond the headers are stored
    under 'itemN' keys.  Empty fields are dropped, values are stripped.

    E.g.:
        line = '0000000124\t8805\t8.4\tIncredibles, The (2004)'
        headers = ('votes distribution', 'votes', 'rating', 'title')
    returns: {'votes distribution': '0000000124', 'votes': '8805',
              'rating': '8.4', 'title': 'Incredibles, The (2004)'}
    """
    result = {}
    fields = [field for field in line.split(sep) if field]
    for idx, field in enumerate(fields):
        if idx < len(headers):
            key = headers[idx]
        else:
            key = 'item%s' % idx
        result[key] = field.strip()
    return result
def _parseMinusList(fdata):
"""Parse a list of lines starting with '- '."""
rlist = []
tmplist = []
for line in fdata:
if line and line[:2] == '- ':
if tmplist:
rlist.append(' '.join(tmplist))
l = line[2:].strip()
if l:
tmplist[:] = [l]
else:
tmplist[:] = []
else:
l = line.strip()
if l:
tmplist.append(l)
if tmplist:
rlist.append(' '.join(tmplist))
return rlist
def _parseColonList(lines, replaceKeys):
"""Parser for lists with "TAG: value" strings."""
out = {}
for line in lines:
line = line.strip()
if not line:
continue
cols = line.split(':', 1)
if len(cols) < 2:
continue
k = cols[0]
k = replaceKeys.get(k, k)
v = ' '.join(cols[1:]).strip()
if k not in out:
out[k] = []
out[k].append(v)
return out
# Functions used to manage data files.
def readMovieList():
    """Read the movies.list.gz file, storing every title (and its
    'series years'/episode year data) in CACHE_MID."""
    try:
        mdbf = SourceFile(MOVIES, start=MOVIES_START, stop=MOVIES_STOP)
    except IOError:
        # Missing data file: nothing to import.
        return
    count = 0
    for line in mdbf:
        line_d = unpack(line, ('title', 'year'))
        title = line_d['title']
        yearData = None
        # Collect 'year' column for tv "series years" and episodes' year.
        if title[0] == '"':
            yearData = [('movieYear', line_d['year'])]
        mid = CACHE_MID.addUnique(title, yearData)
        if mid is None:
            # Suspended title: skipped by MoviesCache.addUnique().
            continue
        if count % 10000 == 0:
            print('SCANNING movies:', _(title), end=' ')
            print('(movieID: %s)' % mid)
        count += 1
    CACHE_MID.flush()
    CACHE_MID.movieYear.clear()
    mdbf.close()
def doCast(fp, roleid, rolename):
    """Populate the cast table from one cast file.

    Each non-indented line starts a new person ("name<TAB>title ...");
    indented lines add more titles for the same person.  After the title,
    a line may carry '[role]', '(note)' and '<billing order>' items.
    """
    pid = None
    count = 0
    name = ''
    roleidVal = RawValue('roleID', roleid)
    sqldata = SQLData(table=CastInfo, cols=['personID', 'movieID',
                      'personRoleID', 'note', 'nrOrder', roleidVal])
    if rolename == 'miscellaneous crew':
        sqldata.flushEvery = 10000
    for line in fp:
        if line and line[0] != '\t':
            # A new person: the line is "name<TAB>first-title...".
            if line[0] == '\n':
                continue
            sl = [_f for _f in line.split('\t') if _f]
            if len(sl) != 2:
                continue
            name, line = sl
            miscData = None
            # Gender is known from which list the person appears in.
            if rolename == 'actor':
                miscData = [('personGender', 'm')]
            elif rolename == 'actress':
                miscData = [('personGender', 'f')]
            pid = CACHE_PID.addUnique(name.strip(), miscData)
        line = line.strip()
        ll = line.split('  ')
        title = ll[0]
        note = None
        role = None
        order = None
        for item in ll[1:]:
            if not item:
                continue
            if item[0] == '[':
                # Quite inefficient, but there are some very strange
                # cases of garbage in the plain text data files to handle...
                role = item[1:]
                if role[-1:] == ']':
                    role = role[:-1]
                if role[-1:] == ')':
                    # A '(note)' glued to the end of the role.
                    nidx = role.find('(')
                    if nidx != -1:
                        note = role[nidx:]
                        role = role[:nidx].rstrip()
                        if not role:
                            role = None
            elif item[0] == '(':
                if note is None:
                    note = item
                else:
                    note = '%s %s' % (note, item)
            elif item[0] == '<':
                # Billing position; either a plain int or 'x,y,z'.
                textor = item[1:-1]
                try:
                    order = int(textor)
                except ValueError:
                    os = textor.split(',')
                    if len(os) == 3:
                        try:
                            order = ((int(os[2]) - 1) * 1000) + \
                                    ((int(os[1]) - 1) * 100) + (int(os[0]) - 1)
                        except ValueError:
                            pass
        movieid = CACHE_MID.addUnique(title)
        if movieid is None:
            continue
        if role is not None:
            # Multiple characters can be separated by '/'.
            roles = [_f for _f in [x.strip() for x in role.split('/')] if _f]
            for role in roles:
                cid = CACHE_CID.addUnique(role)
                sqldata.add((pid, movieid, cid, note, order))
        else:
            sqldata.add((pid, movieid, None, note, order))
        if count % 10000 == 0:
            print('SCANNING %s:' % rolename, end=' ')
            print(_(name))
        count += 1
    sqldata.flush()
    CACHE_PID.flush()
    CACHE_PID.personGender.clear()
    CACHE_CID.flush()
    print('CLOSING %s...' % rolename)
def castLists():
    """Read files listed in the 'role' column of the 'roletypes' table,
    calling doCast() for each; the characters cache is flushed (and, when
    not in CSV mode, cleared) after the 'actress' list."""
    rt = [(x.id, x.role) for x in RoleType.select()]
    for roleid, rolename in rt:
        if rolename == 'guest':
            # There is no separate data file for guests.
            continue
        # Derive the data file name from the role name.
        fname = rolename
        fname = fname.replace(' ', '-')
        if fname == 'actress':
            fname = 'actresses.list.gz'
        elif fname == 'miscellaneous-crew':
            fname = 'miscellaneous.list.gz'
        else:
            fname = fname + 's.list.gz'
        print('DOING', fname)
        try:
            f = SourceFile(fname, start=CAST_START, stop=CAST_STOP)
        except IOError:
            if rolename == 'actress':
                CACHE_CID.flush()
                if not CSV_DIR:
                    CACHE_CID.clear()
            continue
        doCast(f, roleid, rolename)
        f.close()
        if rolename == 'actress':
            # Characters come only from the actors/actresses lists.
            CACHE_CID.flush()
            if not CSV_DIR:
                CACHE_CID.clear()
        t('castLists(%s)' % rolename)
def doAkaNames():
    """People's akas: non-indented lines carry the official name,
    indented '(aka ...)' lines carry the aliases stored in AkaName."""
    pid = None
    count = 0
    try:
        fp = SourceFile('aka-names.list.gz', start=AKAN_START)
    except IOError:
        return
    sqldata = SQLData(table=AkaName, cols=['personID', 'name', 'imdbIndex',
                      'namePcodeCf', 'namePcodeNf', 'surnamePcode',
                      'md5sum'])
    for line in fp:
        if line and line[0] != ' ':
            # Official name: look up (or create) its personID.
            if line[0] == '\n':
                continue
            pid = CACHE_PID.addUnique(line.strip())
        else:
            # Alias line: strip the '(aka ...)' wrapper.
            line = line.strip()
            if line[:5] == '(aka ':
                line = line[5:]
            if line[-1:] == ')':
                line = line[:-1]
            try:
                name_dict = analyze_name(line)
            except IMDbParserError:
                if line:
                    print('WARNING doAkaNames wrong name:', _(line))
                continue
            name = name_dict.get('name')
            namePcodeCf, namePcodeNf, surnamePcode = name_soundexes(name)
            sqldata.add((pid, name, name_dict.get('imdbIndex'),
                         namePcodeCf, namePcodeNf, surnamePcode,
                         md5(line.encode('latin1')).hexdigest()))
            if count % 10000 == 0:
                print('SCANNING akanames:', _(line))
            count += 1
    sqldata.flush()
    fp.close()
class AkasMoviesCache(MoviesCache):
    """A MoviesCache-like class used to populate the AkaTitle table."""
    className = 'AkasMoviesCache'
    counter = counter()
    def __init__(self, *args, **kdws):
        MoviesCache.__init__(self, *args, **kdws)
        self.flushEvery = 50000
        self._mapsIDsToTitles = True
        # Side data keyed by the aka's own id: its note text and the id
        # of the referred (original) title.
        self.notes = {}
        self.ids = {}
        self._table_name = tableName(AkaTitle)
        self._id_for_custom_q = 'AKAMOVIES'
        self.sqlstr, self.converter = createSQLstr(AkaTitle, ('id', 'movieID',
                                     'title', 'imdbIndex', 'kindID', 'productionYear',
                                     'phoneticCode', 'episodeOfID', 'seasonNr',
                                     'episodeNr', 'note', 'md5sum'))
    def flush(self, *args, **kwds):
        # Flush the referred titles first, to keep the movieID foreign
        # keys consistent.
        CACHE_MID.flush(quiet=1)
        super(AkasMoviesCache, self).flush(*args, **kwds)
    def _runCommand(self, dataList):
        # Reshape the rows produced by MoviesCache._toDB() into the
        # AkaTitle column order (movieID + note replace imdbID).
        new_dataList = []
        new_dataListapp = new_dataList.append
        while dataList:
            item = list(dataList.pop())
            # Remove the imdbID.
            del item[5]
            # id used to store this entry.
            the_id = item[0]
            # id of the referred title.
            original_title_id = self.ids.get(the_id) or 0
            new_item = [the_id, original_title_id]
            md5sum = item[-1]
            new_item += item[1:-2]
            new_item.append(self.notes.get(the_id))
            new_item.append(md5sum)
            new_dataListapp(tuple(new_item))
        # dataList was drained back-to-front: restore the original order.
        new_dataList.reverse()
        if not CSV_DIR:
            CURS.executemany(self.sqlstr, self.converter(new_dataList))
        else:
            CSV_CURS.executemany(self.sqlstr, new_dataList)
# Singleton cache used by doAkaTitles() to accumulate aka-title rows.
CACHE_MID_AKAS = AkasMoviesCache()
def doAkaTitles():
    """Movies' akas.

    Parses aka-titles.list.gz plus a number of localized/contrib files
    (all of which, except the main one, are considered obsolete: their
    akas are stored only for titles already known to CACHE_MID).
    Non-indented lines carry the official title, indented lines an
    '(aka ...)' alias with an optional note.
    """
    mid = None
    count = 0
    for fname, start in (('aka-titles.list.gz', AKAT_START),
                         ('italian-aka-titles.list.gz', AKAT_IT_START),
                         ('german-aka-titles.list.gz', AKAT_DE_START),
                         ('iso-aka-titles.list.gz', AKAT_ISO_START),
                         (os.path.join('contrib', 'hungarian-aka-titles.list.gz'),
                          AKAT_HU_START),
                         (os.path.join('contrib', 'norwegian-aka-titles.list.gz'),
                          AKAT_NO_START)):
        incontrib = 0
        pwarning = 1
        # Looks like that the only up-to-date AKA file is aka-titles.
        obsolete = False
        if fname != 'aka-titles.list.gz':
            obsolete = True
        if start in (AKAT_HU_START, AKAT_NO_START):
            # contrib files are optional: don't warn if missing.
            pwarning = 0
            incontrib = 1
        try:
            fp = SourceFile(fname, start=start,
                            stop='---------------------------',
                            pwarning=pwarning)
        except IOError:
            continue
        isEpisode = False
        seriesID = None
        doNotAdd = False
        for line in fp:
            if line and line[0] != ' ':
                # Reading the official title.
                doNotAdd = False
                if line[0] == '\n':
                    continue
                line = line.strip()
                if obsolete:
                    try:
                        tonD = analyze_title(line)
                    except IMDbParserError:
                        if line:
                            print('WARNING doAkaTitles(obsol O) invalid title:', end=' ')
                            print(_(line))
                        continue
                    tonD['title'] = normalizeTitle(tonD['title'])
                    line = build_title(tonD, ptdf=True)
                    # Aka information for titles in obsolete files are
                    # added only if the movie already exists in the cache.
                    if line not in CACHE_MID:
                        doNotAdd = True
                        continue
                mid = CACHE_MID.addUnique(line)
                if mid is None:
                    continue
                if line[0] == '"':
                    # A tv title: remember the series it is an episode of
                    # (if any), so episode akas can be linked to it.
                    try:
                        titleDict = analyze_title(line)
                    except IMDbParserError:
                        if line:
                            print('WARNING doAkaTitles (O) invalid title:', end=' ')
                            print(_(line))
                        continue
                    if 'episode of' in titleDict:
                        if obsolete:
                            titleDict['episode of']['title'] = \
                                normalizeTitle(titleDict['episode of']['title'])
                        series = build_title(titleDict['episode of'],
                                             ptdf=True)
                        seriesID = CACHE_MID.addUnique(series)
                        if seriesID is None:
                            continue
                        isEpisode = True
                    else:
                        seriesID = None
                        isEpisode = False
                else:
                    seriesID = None
                    isEpisode = False
            else:
                # Reading an aka title.
                if obsolete and doNotAdd:
                    continue
                res = unpack(line.strip(), ('title', 'note'))
                note = res.get('note')
                if incontrib:
                    # contrib files lack a country note: synthesize one.
                    if res.get('note'):
                        note += ' '
                    else:
                        note = ''
                    if start == AKAT_HU_START:
                        note += '(Hungary)'
                    elif start == AKAT_NO_START:
                        note += '(Norway)'
                akat = res.get('title', '')
                # Strip the '(aka ...)' wrapper around the alias.
                if akat[:5] == '(aka ':
                    akat = akat[5:]
                if akat[-2:] in ('))', '})'):
                    akat = akat[:-1]
                akat = akat.strip()
                if not akat:
                    continue
                if obsolete:
                    try:
                        akatD = analyze_title(akat)
                    except IMDbParserError:
                        if line:
                            print('WARNING doAkaTitles(obsol) invalid title:', end=' ')
                            print(_(akat))
                        continue
                    akatD['title'] = normalizeTitle(akatD['title'])
                    akat = build_title(akatD, ptdf=True)
                if count % 10000 == 0:
                    print('SCANNING %s:' % fname[:-8].replace('-', ' '), end=' ')
                    print(_(akat))
                if isEpisode and seriesID is not None:
                    # Handle series for which only single episodes have
                    # aliases.
                    try:
                        akaDict = analyze_title(akat)
                    except IMDbParserError:
                        if line:
                            print('WARNING doAkaTitles (epis) invalid title:', end=' ')
                            print(_(akat))
                        continue
                    if 'episode of' in akaDict:
                        if obsolete:
                            akaDict['episode of']['title'] = normalizeTitle(
                                akaDict['episode of']['title'])
                        akaSeries = build_title(akaDict['episode of'], ptdf=True)
                        CACHE_MID_AKAS.add(akaSeries, [('ids', seriesID)])
                append_data = [('ids', mid)]
                if note is not None:
                    append_data.append(('notes', note))
                CACHE_MID_AKAS.add(akat, append_data)
                count += 1
        fp.close()
    CACHE_MID_AKAS.flush()
    CACHE_MID_AKAS.clear()
    CACHE_MID_AKAS.notes.clear()
    CACHE_MID_AKAS.ids.clear()
def doMovieLinks():
    """Connections between movies: non-indented lines carry the title,
    indented '(link-type title)' lines a connection to another title."""
    mid = None
    count = 0
    sqldata = SQLData(table=MovieLink,
                      cols=['movieID', 'linkedMovieID', 'linkTypeID'],
                      flushEvery=10000)
    try:
        fp = SourceFile('movie-links.list.gz', start=LINK_START)
    except IOError:
        return
    for line in fp:
        if line and line[0] != ' ':
            # The "from" title.
            if line[0] == '\n':
                continue
            title = line.strip()
            mid = CACHE_MID.addUnique(title)
            if mid is None:
                continue
            if count % 10000 == 0:
                print('SCANNING movielinks:', _(title))
        else:
            if mid is None:
                # Suspended "from" title: skip its links.
                continue
            link_txt = line = line.strip()
            theid = None
            # MOVIELINK_IDS items are (link text, len(link text)+1, id).
            for k, lenkp1, v in MOVIELINK_IDS:
                if link_txt and link_txt[0] == '(' \
                        and link_txt[1:lenkp1 + 1] == k:
                    theid = v
                    break
            if theid is None:
                continue
            # The "to" title follows the link text, inside the parens.
            totitle = line[lenkp1 + 2:-1].strip()
            totitleid = CACHE_MID.addUnique(totitle)
            if totitleid is None:
                continue
            sqldata.add((mid, totitleid, theid))
        count += 1
    sqldata.flush()
    fp.close()
def minusHashFiles(fp, funct, defaultid, descr):
    """A file with lines starting with '# ' and '- '.

    For every '# title' section, 'funct' parses the section text into a
    list of strings, each stored as a MovieInfo row with info type
    'defaultid'; 'descr' is only used for progress messages and to tune
    the flush interval.
    """
    sqldata = SQLData(table=MovieInfo,
                      cols=['movieID', 'infoTypeID', 'info', 'note'])
    sqldata.flushEvery = 2500
    if descr == 'quotes':
        sqldata.flushEvery = 4000
    elif descr == 'soundtracks':
        sqldata.flushEvery = 3000
    elif descr == 'trivia':
        sqldata.flushEvery = 3000
    count = 0
    for title, text in fp.getByHashSections():
        title = title.strip()
        d = funct(text.split('\n'))
        if not d:
            print('WARNING skipping empty information about title:', end=' ')
            print(_(title))
            continue
        if not title:
            print('WARNING skipping information associated to empty title:', end=' ')
            print(_(d[0], truncateAt=40))
            continue
        mid = CACHE_MID.addUnique(title)
        if mid is None:
            continue
        if count % 5000 == 0:
            print('SCANNING %s:' % descr, end=' ')
            print(_(title))
        for data in d:
            sqldata.add((mid, defaultid, data, None))
        count += 1
    sqldata.flush()
def doMinusHashFiles():
    """Files with lines starting with '# ' and '- ': dispatch each data
    file to minusHashFiles() with the right parser and info type."""
    for fname, start in [('alternate versions', AV_START),
                         ('goofs', GOOFS_START), ('crazy credits', CC_START),
                         ('quotes', QUOTES_START),
                         ('soundtracks', SNDT_START),
                         ('trivia', TRIV_START)]:
        try:
            fp = SourceFile(fname.replace(' ', '-') + '.list.gz', start=start,
                            stop=MINHASH_STOP)
        except IOError:
            continue
        funct = _parseMinusList
        if fname == 'quotes':
            # Quotes have their own multi-speaker format.
            funct = getQuotes
        index = fname
        if index == 'soundtracks':
            # The info type is singular.
            index = 'soundtrack'
        minusHashFiles(fp, funct, INFO_TYPES[index], fname)
        fp.close()
def getTaglines():
    """Movie's taglines: each '# title' section carries one tagline per
    line, stored as MovieInfo rows."""
    try:
        fp = SourceFile('taglines.list.gz', start=TAGL_START, stop=TAGL_STOP)
    except IOError:
        return
    sqldata = SQLData(table=MovieInfo,
                      cols=['movieID', 'infoTypeID', 'info', 'note'],
                      flushEvery=10000)
    count = 0
    for title, text in fp.getByHashSections():
        title = title.strip()
        mid = CACHE_MID.addUnique(title)
        if mid is None:
            continue
        for tag in text.split('\n'):
            tag = tag.strip()
            if not tag:
                continue
            if count % 10000 == 0:
                print('SCANNING taglines:', _(title))
            sqldata.add((mid, INFO_TYPES['taglines'], tag, None))
        count += 1
    sqldata.flush()
    fp.close()
def getQuotes(lines):
    """Movie's quotes.

    Each quote is a group of speaker lines separated from the next quote
    by a blank line; two-space-indented lines continue the previous
    speaker's text.  Speaker lines are joined with '::'.
    """
    quotes = []
    current = []
    for line in lines:
        if (line and line[:2] == '  ' and current and current[-1]
                and not current[-1].endswith('::')):
            # Continuation of the previous speaker's line.
            text = line.lstrip()
            if text:
                current[-1] = '%s %s' % (current[-1], text)
        elif not line.strip():
            # Blank line: the current quote is complete.
            if current:
                quotes.append('::'.join(current))
                current = []
        else:
            # A new speaker line.
            text = line.lstrip()
            if text:
                current.append(text)
    if current:
        quotes.append('::'.join(current))
    return quotes
# Mapping from the two-letter tags of the business.list file to the
# human-readable info keys used in the database.
_bus = {'BT': 'budget',
        'WG': 'weekend gross',
        'GR': 'gross',
        'OW': 'opening weekend',
        'RT': 'rentals',
        'AD': 'admissions',
        'SD': 'filming dates',
        'PD': 'production dates',
        'ST': 'studios',
        'CP': 'copyright holder'
        }
# Currency symbols substituted for the textual codes found in the data.
_usd = '$'
_gbp = chr(0x00a3)  # pound sign
_eur = chr(0x20ac)  # euro sign
def getBusiness(lines):
    """Parse a business section, normalizing currency codes to symbols."""
    info = _parseColonList(lines, _bus)
    for key, values in info.items():
        # Replace the textual currency codes with the real symbols.
        info[key] = [v.replace('USD ', _usd).replace('GBP ', _gbp).replace('EUR', _eur)
                     for v in values]
    return info
# Maps the two-letter tags found in laserdisc.list.gz to info keys.
_ldk = {'OT': 'original title',
        'PC': 'production country',
        'YR': 'year',
        'CF': 'certification',
        'CA': 'category',
        'GR': 'group genre',
        'LA': 'language',
        'SU': 'subtitles',
        'LE': 'length',
        'RD': 'release date',
        'ST': 'status of availablility',
        'PR': 'official retail price',
        'RC': 'release country',
        'VS': 'video standard',
        'CO': 'color information',
        'SE': 'sound encoding',
        'DS': 'digital sound',
        'AL': 'analog left',
        'AR': 'analog right',
        'MF': 'master format',
        'PP': 'pressing plant',
        'SZ': 'disc size',
        'SI': 'number of sides',
        'DF': 'disc format',
        'PF': 'picture format',
        'AS': 'aspect ratio',
        'CC': 'close captions-teletext-ld-g',
        'CS': 'number of chapter stops',
        'QP': 'quality program',
        'IN': 'additional information',
        'SL': 'supplement',
        'RV': 'review',
        'V1': 'quality of source',
        'V2': 'contrast',
        'V3': 'color rendition',
        'V4': 'sharpness',
        'V5': 'video noise',
        'V6': 'video artifacts',
        'VQ': 'video quality',
        'A1': 'frequency response',
        'A2': 'dynamic range',
        'A3': 'spaciality',
        'A4': 'audio noise',
        'A5': 'dialogue intellegibility',
        'AQ': 'audio quality',
        'LN': 'number',
        'LB': 'label',
        'CN': 'catalog number',
        'LT': 'laserdisc title'
        }
# Handle laserdisc keys.
# Prefix every value with 'LD ' so the laserdisc info keys don't clash
# with the generic movie info keys in INFO_TYPES.
for key, value in list(_ldk.items()):
    _ldk[key] = 'LD %s' % value
def getLaserDisc(lines):
    """Parse a laserdisc section; every value is flattened to one string."""
    parsed = _parseColonList(lines, _ldk)
    return {key: ' '.join(values) for key, values in parsed.items()}
# Maps the four-letter tags found in literature.list.gz to info keys.
_lit = {'SCRP': 'screenplay-teleplay',
        'NOVL': 'novel',
        'ADPT': 'adaption',
        'BOOK': 'book',
        'PROT': 'production process protocol',
        'IVIW': 'interviews',
        'CRIT': 'printed media reviews',
        'ESSY': 'essays',
        'OTHR': 'other literature'
        }
def getLiterature(lines):
    """Movie's literature information.

    :param lines: the lines of a literature section
    :returns: dictionary mapping literature keys to lists of values
    """
    return _parseColonList(lines, _lit)
_mpaa = {'RE': 'mpaa'}


def getMPAA(lines):
    """Parse an mpaa-ratings-reasons section; flatten values to strings."""
    parsed = _parseColonList(lines, _mpaa)
    for key in list(parsed.keys()):
        parsed[key] = ' '.join(parsed[key])
    return parsed
# Matches a roman-numeral imdbIndex, like "(I)", inside a name.
re_nameImdbIndex = re.compile(r'\(([IVXLCDM]+)\)')
def nmmvFiles(fp, funct, fname):
    """Files with sections separated by 'MV: ' or 'NM: '.

    :param fp: the open SourceFile to read sections from
    :param funct: parser applied to the text of every section
    :param fname: name of the list file; selects person vs movie handling
    """
    count = 0
    sqlsP = (PersonInfo, ['personID', 'infoTypeID', 'info', 'note'])
    sqlsM = (MovieInfo, ['movieID', 'infoTypeID', 'info', 'note'])
    if fname == 'biographies.list.gz':
        datakind = 'person'
        sqls = sqlsP
        # Guest appearances go into the cast table with the 'guest' role.
        guestid = RoleType.select(RoleType.q.role == 'guest')[0].id
        roleid = str(guestid)
        guestdata = SQLData(table=CastInfo,
                            cols=['personID', 'movieID', 'personRoleID', 'note',
                                  RawValue('roleID', roleid)], flushEvery=10000)
        akanamesdata = SQLData(table=AkaName, cols=['personID', 'name',
                               'imdbIndex', 'namePcodeCf', 'namePcodeNf', 'surnamePcode',
                               'md5sum'])
    else:
        datakind = 'movie'
        sqls = sqlsM
        guestdata = None
        akanamesdata = None
    sqldata = SQLData(table=sqls[0], cols=sqls[1])
    # Tune the flush frequency to the typical row size of each file.
    if fname == 'plot.list.gz':
        sqldata.flushEvery = 1100
    elif fname == 'literature.list.gz':
        sqldata.flushEvery = 5000
    elif fname == 'business.list.gz':
        sqldata.flushEvery = 10000
    elif fname == 'biographies.list.gz':
        sqldata.flushEvery = 5000
    islaserdisc = False
    if fname == 'laserdisc.list.gz':
        islaserdisc = True
    _ltype = type([])
    for ton, text in fp.getByNMMVSections():
        ton = ton.strip()
        if not ton:
            continue
        note = None
        if datakind == 'movie':
            if islaserdisc:
                # Laserdisc titles are stored in a slightly different
                # format: normalize them before the cache lookup.
                tonD = analyze_title(ton)
                tonD['title'] = normalizeTitle(tonD['title'])
                ton = build_title(tonD, ptdf=True)
                # Skips movies that are not already in the cache, since
                # laserdisc.list.gz is an obsolete file.
                if ton not in CACHE_MID:
                    continue
            mopid = CACHE_MID.addUnique(ton)
            if mopid is None:
                continue
        else:
            mopid = CACHE_PID.addUnique(ton)
        if count % 6000 == 0:
            print('SCANNING %s:' % fname[:-8].replace('-', ' '), end=' ')
            print(_(ton))
        d = funct(text.split('\n'))
        for k, v in d.items():
            if k != 'notable tv guest appearances':
                theid = INFO_TYPES.get(k)
                if theid is None:
                    print('WARNING key "%s" of ToN' % k, end=' ')
                    print(_(ton), end=' ')
                    print('not in INFO_TYPES')
                    continue
            if type(v) is _ltype:
                for i in v:
                    if k == 'notable tv guest appearances':
                        # Put "guest" information in the cast table; these
                        # are a list of Movie object (yes, imdb.Movie.Movie)
                        # FIXME: no more used?
                        title = i.get('long imdb canonical title')
                        if not title:
                            continue
                        movieid = CACHE_MID.addUnique(title)
                        if movieid is None:
                            continue
                        crole = i.currentRole
                        if isinstance(crole, list):
                            crole = ' / '.join([x.get('long imdb name', '')
                                                for x in crole])
                        if not crole:
                            crole = None
                        guestdata.add((mopid, movieid, crole,
                                      i.notes or None))
                        continue
                    if k in ('plot', 'mini biography'):
                        # 'data::note' pairs: split the optional note off.
                        s = i.split('::')
                        if len(s) == 2:
                            note = s[1]
                            i = s[0]
                    if i:
                        sqldata.add((mopid, theid, i, note))
                    note = None
            else:
                if v:
                    sqldata.add((mopid, theid, v, note))
            if k in ('nick names', 'birth name') and v:
                # Put also the birth name/nick names in the list of aliases.
                if k == 'birth name':
                    realnames = [v]
                else:
                    realnames = v
                for realname in realnames:
                    imdbIndex = re_nameImdbIndex.findall(realname) or None
                    if imdbIndex:
                        imdbIndex = imdbIndex[0]
                        realname = re_nameImdbIndex.sub('', realname)
                    if realname:
                        # XXX: check for duplicates?
                        # if k == 'birth name':
                        # realname = canonicalName(realname)
                        # else:
                        # realname = normalizeName(realname)
                        namePcodeCf, namePcodeNf, surnamePcode = \
                            name_soundexes(realname)
                        akanamesdata.add((mopid, realname, imdbIndex,
                                         namePcodeCf, namePcodeNf, surnamePcode,
                                         md5(realname.encode('latin1')).hexdigest()))
        count += 1
    if guestdata is not None:
        guestdata.flush()
    if akanamesdata is not None:
        akanamesdata.flush()
    sqldata.flush()
# ============
# Code from the old 'local' data access system.
def _parseList(l, prefix, mline=1):
"""Given a list of lines l, strips prefix and join consecutive lines
with the same prefix; if mline is True, there can be multiple info with
the same prefix, and the first line starts with 'prefix: * '."""
resl = []
reslapp = resl.append
ltmp = []
ltmpapp = ltmp.append
fistl = '%s: * ' % prefix
otherl = '%s: ' % prefix
if not mline:
fistl = fistl[:-2]
otherl = otherl[:-2]
firstlen = len(fistl)
otherlen = len(otherl)
parsing = 0
joiner = ' '.join
for line in l:
if line[:firstlen] == fistl:
parsing = 1
if ltmp:
reslapp(joiner(ltmp))
ltmp[:] = []
data = line[firstlen:].strip()
if data:
ltmpapp(data)
elif mline and line[:otherlen] == otherl:
data = line[otherlen:].strip()
if data:
ltmpapp(data)
else:
if ltmp:
reslapp(joiner(ltmp))
ltmp[:] = []
if parsing:
if ltmp:
reslapp(joiner(ltmp))
break
return resl
def _parseBioBy(l):
"""Return a list of biographies."""
bios = []
biosappend = bios.append
tmpbio = []
tmpbioappend = tmpbio.append
joiner = ' '.join
for line in l:
if line[:4] == 'BG: ':
tmpbioappend(line[4:].strip())
elif line[:4] == 'BY: ':
if tmpbio:
biosappend(joiner(tmpbio) + '::' + line[4:].strip())
tmpbio[:] = []
# Cut mini biographies up to 2**16-1 chars, to prevent errors with
# some MySQL versions - when used by the imdbpy2sql.py script.
bios[:] = [bio[:65535] for bio in bios]
return bios
def _parseBiography(biol):
    """Parse the lines of a single biographies.list.gz section.

    :param biol: the lines of a person's biography section
    :type biol: list
    :returns: dictionary mapping biography keys ('mini biography',
              'birth date', 'trivia', ...) to the parsed values
    :rtype: dict
    """
    res = {}
    # Mini biographies with their authors ('text::author').
    # NOTE: a dead 'bio = ' '.join(_parseList(biol, 'BG', mline=0))'
    # assignment (immediately overwritten) was removed here.
    bio = _parseBioBy(biol)
    if bio:
        res['mini biography'] = bio
    for x in biol:
        x4 = x[:4]
        x6 = x[:6]
        if x4 == 'DB: ':
            date, notes = date_and_notes(x[4:])
            if date:
                res['birth date'] = date
            if notes:
                res['birth notes'] = notes
        elif x4 == 'DD: ':
            date, notes = date_and_notes(x[4:])
            if date:
                res['death date'] = date
            if notes:
                res['death notes'] = notes
        elif x6 == 'SP: * ':
            res.setdefault('spouse', []).append(x[6:].strip())
        elif x4 == 'RN: ':
            n = x[4:].strip()
            if not n:
                continue
            try:
                rn = build_name(analyze_name(n, canonical=1), canonical=1)
                res['birth name'] = rn
            except IMDbParserError:
                # BUG FIX: the original guarded this warning with
                # 'if line:', but no 'line' name exists in this scope,
                # so a bad name raised NameError; warn unconditionally.
                print('WARNING _parseBiography wrong name:', _(n))
                continue
        elif x6 == 'AT: * ':
            res.setdefault('article', []).append(x[6:].strip())
        elif x4 == 'HT: ':
            res['height'] = x[4:].strip()
        elif x6 == 'PT: * ':
            res.setdefault('pictorial', []).append(x[6:].strip())
        elif x6 == 'CV: * ':
            res.setdefault('magazine cover photo', []).append(x[6:].strip())
        elif x4 == 'NK: ':
            res.setdefault('nick names', []).append(normalizeName(x[4:]))
        elif x6 == 'PI: * ':
            res.setdefault('portrayed in', []).append(x[6:].strip())
        elif x6 == 'SA: * ':
            sal = x[6:].strip().replace(' -> ', '::')
            res.setdefault('salary history', []).append(sal)
    # Multi-line sections: (tag, result key, keep only the first value).
    for tag, key, first_only in (
            ('TR', 'trivia', False),
            ('QU', 'quotes', False),
            ('OW', 'other works', False),
            ('BO', 'books', False),
            ('AG', 'agent address', False),
            ('WN', 'where now', True),
            ('BT', 'biographical movies', False),
            ('TM', 'trade mark', False),
            ('IT', 'interviews', False)):
        values = _parseList(biol, tag)
        if values:
            res[key] = values[0] if first_only else values
    return res
# ============
def doNMMVFiles():
    """Process the files with large, per-title or per-name sections."""
    jobs = (
        ('biographies.list.gz', BIO_START, _parseBiography, None),
        ('business.list.gz', BUS_START, getBusiness, BUS_STOP),
        ('laserdisc.list.gz', LSD_START, getLaserDisc, None),
        ('literature.list.gz', LIT_START, getLiterature, LIT_STOP),
        ('mpaa-ratings-reasons.list.gz', MPAA_START, getMPAA, None),
        ('plot.list.gz', PLOT_START, getPlot, None))
    for fname, start, funct, stop in jobs:
        try:
            fp = SourceFile(fname, start=start)
        except IOError:
            continue
        if stop is not None:
            # Some files have trailing sections we don't want to read.
            fp.set_stop(stop)
        nmmvFiles(fp, funct, fname)
        fp.close()
        t('doNMMVFiles(%s)' % fname[:-8].replace('-', ' '))
def doMovieCompaniesInfo():
    """Single-line files linking movies to companies."""
    sqldata = SQLData(table=MovieCompanies,
                      cols=['movieID', 'companyID', 'companyTypeID', 'note'])
    files = (('distributors.list.gz', DIS_START),
             ('miscellaneous-companies.list.gz', MIS_START),
             ('production-companies.list.gz', PRO_START),
             ('special-effects-companies.list.gz', SFX_START))
    for fname, start in files:
        try:
            fp = SourceFile(fname, start=start)
        except IOError:
            continue
        descr = fname[:-8].replace('-', ' ')
        infoid = COMP_TYPES[descr]
        count = 0
        for line in fp:
            data = unpack(line.strip(), ('title', 'company', 'note'))
            if 'title' not in data or 'company' not in data:
                continue
            mid = CACHE_MID.addUnique(data['title'])
            if mid is None:
                continue
            cid = CACHE_COMPID.addUnique(data['company'])
            note = data.get('note')
            if count % 10000 == 0:
                print('SCANNING %s:' % descr, end=' ')
                print(_(data['title']))
            sqldata.add((mid, cid, infoid, note))
            count += 1
        sqldata.flush()
        CACHE_COMPID.flush()
        fp.close()
        t('doMovieCompaniesInfo(%s)' % descr)
def doMiscMovieInfo():
    """Files with information on a single line about movies.

    Every entry goes into the MovieInfo table, except keywords, which
    have their own MovieKeyword table and a dedicated id cache.
    """
    for dataf in (('certificates.list.gz', CER_START),
                  ('color-info.list.gz', COL_START),
                  ('countries.list.gz', COU_START),
                  ('genres.list.gz', GEN_START),
                  ('keywords.list.gz', KEY_START),
                  ('language.list.gz', LAN_START),
                  ('locations.list.gz', LOC_START),
                  ('running-times.list.gz', RUN_START),
                  ('sound-mix.list.gz', SOU_START),
                  ('technical.list.gz', TCN_START),
                  ('release-dates.list.gz', RELDATE_START)):
        try:
            fp = SourceFile(dataf[0], start=dataf[1])
        except IOError:
            # Missing data file: skip this kind of information.
            continue
        typeindex = dataf[0][:-8].replace('-', ' ')
        # Some file names don't match the corresponding info-type keys.
        if typeindex == 'running times':
            typeindex = 'runtimes'
        elif typeindex == 'technical':
            typeindex = 'tech info'
        elif typeindex == 'language':
            typeindex = 'languages'
        if typeindex != 'keywords':
            sqldata = SQLData(table=MovieInfo,
                              cols=['movieID', 'infoTypeID', 'info', 'note'])
        else:
            sqldata = SQLData(table=MovieKeyword,
                              cols=['movieID', 'keywordID'])
        infoid = INFO_TYPES[typeindex]
        count = 0
        if dataf[0] == 'locations.list.gz':
            sqldata.flushEvery = 10000
        else:
            sqldata.flushEvery = 20000
        for line in fp:
            data = unpack(line.strip(), ('title', 'info', 'note'))
            if 'title' not in data:
                continue
            if 'info' not in data:
                continue
            title = data['title']
            mid = CACHE_MID.addUnique(title)
            if mid is None:
                # Title rejected by the cache: skip the entry.
                continue
            note = None
            if 'note' in data:
                note = data['note']
            if count % 10000 == 0:
                print('SCANNING %s:' % dataf[0][:-8].replace('-', ' '), end=' ')
                print(_(data['title']))
            info = data['info']
            if typeindex == 'keywords':
                keywordID = CACHE_KWRDID.addUnique(info)
                sqldata.add((mid, keywordID))
            else:
                sqldata.add((mid, infoid, info, note))
            count += 1
        sqldata.flush()
        if typeindex == 'keywords':
            # The keyword cache is only needed for this one file.
            CACHE_KWRDID.flush()
            CACHE_KWRDID.clear()
        fp.close()
        t('doMiscMovieInfo(%s)' % dataf[0][:-8].replace('-', ' '))
def getRating():
    """Import movie ratings, votes and vote distributions."""
    try:
        fp = SourceFile('ratings.list.gz', start=RAT_START, stop=RAT_STOP)
    except IOError:
        return
    sqldata = SQLData(table=MovieInfo, cols=['movieID', 'infoTypeID',
                                             'info', 'note'])
    count = 0
    for line in fp:
        data = unpack(line, ('votes distribution', 'votes', 'rating', 'title'),
                      sep='  ')
        if 'title' not in data:
            continue
        title = data['title'].strip()
        mid = CACHE_MID.addUnique(title)
        if mid is None:
            continue
        if count % 10000 == 0:
            print('SCANNING rating:', _(title))
        # One MovieInfo row per rating-related key.
        for key in ('votes distribution', 'votes', 'rating'):
            sqldata.add((mid, INFO_TYPES[key], data.get(key), None))
        count += 1
    sqldata.flush()
    fp.close()
def getTopBottomRating():
    """Import top 250 and bottom 10 rating ranks."""
    for what, start in (('top 250 rank', RAT_TOP250_START),
                        ('bottom 10 rank', RAT_BOT10_START)):
        try:
            fp = SourceFile('ratings.list.gz', start=start, stop=TOPBOT_STOP)
        except IOError:
            break
        sqldata = SQLData(table=MovieInfo,
                          cols=['movieID',
                                RawValue('infoTypeID', INFO_TYPES[what]),
                                'info', 'note'])
        count = 1
        print('SCANNING %s...' % what)
        for line in fp:
            data = unpack(line, ('votes distribution', 'votes', 'rank',
                                 'title'), sep='  ')
            if 'title' not in data:
                continue
            mid = CACHE_MID.addUnique(data['title'].strip())
            if mid is None:
                continue
            # Entries appear in rank order; the bottom-10 list is
            # printed from rank 10 down to rank 1.
            rank = count if what == 'top 250 rank' else 11 - count
            sqldata.add((mid, str(rank), None))
            count += 1
        sqldata.flush()
        fp.close()
def getPlot(lines):
    """Parse a plot section into {'plot': ['summary::author', ...]}.

    'PL: ' lines accumulate the plot text; a 'BY: ' line closes the
    current summary with its author.
    """
    plots = []
    current = []
    for line in lines:
        tag = line[:4]
        if tag == 'PL: ':
            current.append(line[4:])
        elif tag == 'BY: ':
            plots.append('%s::%s' % (' '.join(current), line[4:].strip()))
            del current[:]
    return {'plot': plots}
def completeCast():
    """Import complete cast/crew status information."""
    cc_kinds = dict((x.kind, x.id) for x in CompCastType.select())
    for fname, start in (('complete-cast.list.gz', COMPCAST_START),
                         ('complete-crew.list.gz', COMPCREW_START)):
        try:
            fp = SourceFile(fname, start=start, stop=COMP_STOP)
        except IOError:
            continue
        obj = 'cast' if fname == 'complete-cast.list.gz' else 'crew'
        sqldata = SQLData(table=CompleteCast,
                          cols=['movieID',
                                RawValue('subjectID', str(cc_kinds[obj])),
                                'statusID'])
        count = 0
        for line in fp:
            fields = [x for x in line.split('\t') if x]
            if len(fields) != 2:
                continue
            mid = CACHE_MID.addUnique(fields[0])
            if mid is None:
                continue
            if count % 10000 == 0:
                print('SCANNING %s:' % fname[:-8].replace('-', ' '), end=' ')
                print(_(fields[0]))
            sqldata.add((mid, cc_kinds[fields[1].lower().strip()]))
            count += 1
        fp.close()
        sqldata.flush()
# global instances
# Caches mapping textual keys (titles, names, ...) to their numeric IDs.
CACHE_MID = MoviesCache()
CACHE_PID = PersonsCache()
CACHE_CID = CharactersCache()
CACHE_CID.className = 'CharactersCache'
CACHE_COMPID = CompaniesCache()
CACHE_KWRDID = KeywordsCache()
# Lookup tables filled by readConstants() from the database.
INFO_TYPES = {}
MOVIELINK_IDS = []
KIND_IDS = {}
KIND_STRS = {}
CCAST_TYPES = {}
COMP_TYPES = {}
def readConstants():
    """Populate the global lookup tables from the database.

    Fills INFO_TYPES, MOVIELINK_IDS, KIND_IDS, KIND_STRS, CCAST_TYPES
    and COMP_TYPES from their respective tables.
    """
    global INFO_TYPES, MOVIELINK_IDS, KIND_IDS, KIND_STRS, \
        CCAST_TYPES, COMP_TYPES
    for x in InfoType.select():
        INFO_TYPES[x.info] = x.id
    for x in LinkType.select():
        MOVIELINK_IDS.append((x.link, len(x.link), x.id))
    # Longest links first; the tuple already carries the link length in
    # position 1 (the original sorted on operator.length_hint(x[0]),
    # which is just an obscure way to spell len(x[0])).
    MOVIELINK_IDS.sort(key=lambda x: x[1], reverse=True)
    for x in KindType.select():
        KIND_IDS[x.kind] = x.id
        KIND_STRS[x.id] = x.kind
    for x in CompCastType.select():
        CCAST_TYPES[x.kind] = x.id
    for x in CompanyType.select():
        COMP_TYPES[x.kind] = x.id
def _imdbIDsFileName(fname):
    """Return fname, prefixed with the CSV_DIR directory when it is set."""
    return os.path.join(*[part for part in (CSV_DIR, fname) if part])
def _countRows(tableName):
    """Return the number of rows in a table, or 0 on any failure."""
    query = 'SELECT COUNT(*) FROM %s' % tableName
    try:
        CURS.execute(query)
        row = CURS.fetchone()
    except Exception as err:
        print('WARNING: unable to count rows of table %s: %s' % (tableName, err))
        return 0
    return row[0] if row else 0
def storeNotNULLimdbIDs(cls):
    """Store in a temporary table or in a dbm database a mapping between
    md5sum (of title or name) and imdbID, when the latter
    is present in the database.

    :param cls: one of the Title, Name, CompanyName or CharName classes
    """
    if cls is Title:
        cname = 'movies'
    elif cls is Name:
        cname = 'people'
    elif cls is CompanyName:
        cname = 'companies'
    else:
        cname = 'characters'
    table_name = tableName(cls)
    md5sum_col = colName(cls, 'md5sum')
    imdbID_col = colName(cls, 'imdbID')
    print('SAVING imdbID values for %s...' % cname, end=' ')
    sys.stdout.flush()
    if _get_imdbids_method() == 'table':
        # Preferred method: keep the mapping in a temporary SQL table.
        try:
            try:
                # Best-effort drop of a stale extract table.
                CURS.execute('DROP TABLE %s_extract' % table_name)
            except:
                pass
            try:
                CURS.execute('SELECT * FROM %s LIMIT 1' % table_name)
            except Exception as e:
                print('missing "%s" table (ok if this is the first run)' % table_name)
                return
            query = 'CREATE TEMPORARY TABLE %s_extract AS SELECT %s, %s FROM %s WHERE %s IS NOT NULL' % \
                (table_name, md5sum_col, imdbID_col,
                 table_name, imdbID_col)
            CURS.execute(query)
            CURS.execute('CREATE INDEX %s_md5sum_idx ON %s_extract (%s)' % (table_name, table_name, md5sum_col))
            CURS.execute('CREATE INDEX %s_imdbid_idx ON %s_extract (%s)' % (table_name, table_name, imdbID_col))
            rows = _countRows('%s_extract' % table_name)
            print('DONE! (%d entries using a temporary table)' % rows)
            return
        except Exception as e:
            print('WARNING: unable to store imdbIDs in a temporary table (falling back to dbm): %s' % e)
    # Fallback method: store the mapping in a dbm database on disk.
    try:
        db = dbm.open(_imdbIDsFileName('%s_imdbIDs.db' % cname), 'c')
    except Exception as e:
        print('WARNING: unable to store imdbIDs: %s' % str(e))
        return
    try:
        CURS.execute('SELECT %s, %s FROM %s WHERE %s IS NOT NULL' %
                     (md5sum_col, imdbID_col, table_name, imdbID_col))
        res = CURS.fetchmany(10000)
        while res:
            db.update(dict((str(x[0]), str(x[1])) for x in res))
            res = CURS.fetchmany(10000)
    except Exception as e:
        print('SKIPPING: unable to retrieve data: %s' % e)
        return
    print('DONE! (%d entries)' % len(db))
    db.close()
    return
def iterbatch(iterable, size):
    """Yield an iterable 'size' items at a time.

    :param iterable: any iterable
    :param size: maximum number of items per batch
    :returns: generator of iterators, each yielding up to `size` items

    BUG FIX: the original called next() on an exhausted islice inside
    the generator; since PEP 479 (Python 3.7+) the resulting
    StopIteration is turned into a RuntimeError, crashing the caller at
    the end of the input.  Catch it and terminate cleanly instead.
    """
    sourceiter = iter(iterable)
    while True:
        batchiter = islice(sourceiter, size)
        try:
            first = next(batchiter)
        except StopIteration:
            return
        yield chain([first], batchiter)
def restoreImdbIDs(cls):
    """Restore imdbIDs for movies, people, companies and characters.

    Reads back the md5sum -> imdbID mapping saved by
    storeNotNULLimdbIDs() (from a temporary table or a dbm database)
    and writes the imdbIDs into the freshly rebuilt table.

    :param cls: one of the Title, Name, CompanyName or CharName classes
    """
    if cls is Title:
        cname = 'movies'
    elif cls is Name:
        cname = 'people'
    elif cls is CompanyName:
        cname = 'companies'
    else:
        cname = 'characters'
    print('RESTORING imdbIDs values for %s...' % cname, end=' ')
    sys.stdout.flush()
    table_name = tableName(cls)
    md5sum_col = colName(cls, 'md5sum')
    imdbID_col = colName(cls, 'imdbID')
    if _get_imdbids_method() == 'table':
        # Preferred method: a single UPDATE joining the extract table.
        try:
            try:
                CURS.execute('SELECT * FROM %s_extract LIMIT 1' % table_name)
            except Exception as e:
                raise Exception('missing "%s_extract" table (ok if this is the first run)' % table_name)
            # MySQL uses a different multi-table UPDATE syntax.
            if DB_NAME == 'mysql':
                query = 'UPDATE %s INNER JOIN %s_extract USING (%s) SET %s.%s = %s_extract.%s' % \
                    (table_name, table_name, md5sum_col,
                     table_name, imdbID_col, table_name, imdbID_col)
            else:
                query = 'UPDATE %s SET %s = %s_extract.%s FROM %s_extract WHERE %s.%s = %s_extract.%s' % \
                    (table_name, imdbID_col, table_name,
                     imdbID_col, table_name, table_name,
                     md5sum_col, table_name, md5sum_col)
            CURS.execute(query)
            affected_rows = 'an unknown number of'
            try:
                CURS.execute('SELECT COUNT(*) FROM %s WHERE %s IS NOT NULL' %
                             (table_name, imdbID_col))
                affected_rows = (CURS.fetchone() or [0])[0]
            except Exception as e:
                pass
            rows = _countRows('%s_extract' % table_name)
            print('DONE! (restored %s entries out of %d)' % (affected_rows, rows))
            t('restore %s' % cname)
            try:
                CURS.execute('DROP TABLE %s_extract' % table_name)
            except:
                pass
            return
        except Exception as e:
            print('INFO: unable to restore imdbIDs using the temporary table (falling back to dbm): %s' % e)
    # Fallback method: batched CASE/WHEN updates from the dbm database.
    try:
        db = dbm.open(_imdbIDsFileName('%s_imdbIDs.db' % cname), 'r')
    except Exception as e:
        print('INFO: unable to restore imdbIDs (ok if this is the first run)')
        return
    count = 0
    sql = "UPDATE " + table_name + " SET " + imdbID_col + \
        " = CASE " + md5sum_col + " %s END WHERE " + \
        md5sum_col + " IN (%s)"
    def _restore(query, batch):
        """Execute a query to restore a batch of imdbIDs"""
        items = list(batch)
        case_clause = ' '.join("WHEN '%s' THEN %s" % (k, v) for k, v in items)
        where_clause = ', '.join("'%s'" % x[0] for x in items)
        success = _executeQuery(query % (case_clause, where_clause))
        if success:
            return len(items)
        return 0
    for batch in iterbatch(iter(db.items()), 10000):
        count += _restore(sql, batch)
    print('DONE! (restored %d entries out of %d)' % (count, len(db)))
    t('restore %s' % cname)
    db.close()
    return
def restoreAll_imdbIDs():
    """Restore imdbIDs for movies, persons, companies and characters."""
    # Restoring imdbIDs for movies and persons (moved after the
    # built of indexes, so that it can take advantage of them).
    targets = (('movies', Title), ('people', Name),
               ('characters', CharName), ('companies', CompanyName))
    for label, cls in targets:
        runSafely(restoreImdbIDs, 'failed to restore imdbIDs for %s' % label,
                  None, cls)
def runSafely(funct, fmsg, default, *args, **kwds):
    """Call funct(*args, **kwds), swallowing every exception.

    On failure, fmsg and the exception message are printed and
    `default` is returned; on success the function's return value is
    returned.
    """
    try:
        return funct(*args, **kwds)
    except Exception as err:
        print('WARNING: %s: %s' % (fmsg, err))
    return default
def _executeQuery(query):
    """Run a query through CURS; return True on success, False on error."""
    # Truncate very long queries in the progress message.
    shown = query if len(query) <= 60 else query[:60] + '...'
    print('EXECUTING "%s"...' % shown, end=' ')
    sys.stdout.flush()
    try:
        CURS.execute(query)
    except Exception as err:
        print('FAILED (%s)!' % err)
        return False
    print('DONE!')
    return True
def executeCustomQueries(when, _keys=None, _timeit=True):
    """Run the user-supplied queries registered for the phase `when`.

    Queries prefixed with 'FOR_EVERY_TABLE:' are expanded once per
    database table, with %(table)s available for substitution (values
    in _keys take precedence over the table name).
    """
    if _keys is None:
        _keys = {}
    for query in CUSTOM_QUERIES.get(when, []):
        print('EXECUTING "%s:%s"...' % (when, query))
        sys.stdout.flush()
        if not query.startswith('FOR_EVERY_TABLE:'):
            try:
                _executeQuery(query % _keys)
            except Exception as err:
                print('FAILED (%s)!' % err)
                continue
            if _timeit:
                t('%s command' % when)
            continue
        template = query[len('FOR_EVERY_TABLE:'):]
        CURS.execute('SHOW TABLES;')
        tables = [row[0] for row in CURS.fetchall()]
        for table in tables:
            try:
                keys = {'table': table}
                keys.update(_keys)
                _executeQuery(template % keys)
                if _timeit:
                    t('%s command' % when)
            except Exception as err:
                print('FAILED (%s)!' % err)
                continue
def buildIndexesAndFK():
    """Build the database indexes."""
    executeCustomQueries('BEFORE_INDEXES')
    print('building database indexes (this may take a while)')
    sys.stdout.flush()
    # Build database indexes; report (but don't abort on) failures.
    for idx_error in createIndexes(DB_TABLES):
        print('ERROR caught exception creating an index: %s' % idx_error)
    t('createIndexes()')
    sys.stdout.flush()
def restoreCSV():
    """Only restore data from a set of CSV files.

    Loads previously written CSV files into the database, then builds
    the indexes and restores the saved imdbIDs.
    """
    CSV_CURS.buildFakeFileNames()
    print('loading CSV files into the database')
    executeCustomQueries('BEFORE_CSV_LOAD')
    loadCSVFiles()
    t('loadCSVFiles()')
    executeCustomQueries('BEFORE_RESTORE')
    t('TOTAL TIME TO LOAD CSV FILES', sinceBegin=True)
    buildIndexesAndFK()
    restoreAll_imdbIDs()
    executeCustomQueries('END')
    t('FINAL', sinceBegin=True)
# begin the iterations...
def run():
    """Run the whole import: rebuild the tables, then process every
    list file in order.  The ordering matters: the movie cache must be
    populated (readMovieList) before any file referencing titles."""
    print('RUNNING imdbpy2sql.py')
    executeCustomQueries('BEGIN')
    # Storing imdbIDs for movies and persons.
    runSafely(storeNotNULLimdbIDs, 'failed to read imdbIDs for movies',
              None, Title)
    runSafely(storeNotNULLimdbIDs, 'failed to read imdbIDs for people',
              None, Name)
    runSafely(storeNotNULLimdbIDs, 'failed to read imdbIDs for characters',
              None, CharName)
    runSafely(storeNotNULLimdbIDs, 'failed to read imdbIDs for companies',
              None, CompanyName)
    # Truncate the current database.
    print('DROPPING current database...', end=' ')
    sys.stdout.flush()
    dropTables(DB_TABLES)
    print('DONE!')
    executeCustomQueries('BEFORE_CREATE')
    # Rebuild the database structure.
    print('CREATING new tables...', end=' ')
    sys.stdout.flush()
    createTables(DB_TABLES)
    print('DONE!')
    t('dropping and recreating the database')
    executeCustomQueries('AFTER_CREATE')
    # Read the constants.
    readConstants()
    # Populate the CACHE_MID instance.
    readMovieList()
    # Comment readMovieList() and uncomment the following two lines
    # to keep the current info in the name and title tables.
    # CACHE_MID.populate()
    t('readMovieList()')
    executeCustomQueries('BEFORE_COMPANIES')
    # distributors, miscellaneous-companies, production-companies,
    # special-effects-companies.
    # CACHE_COMPID.populate()
    doMovieCompaniesInfo()
    # Do this now, and free some memory.
    CACHE_COMPID.flush()
    CACHE_COMPID.clear()
    executeCustomQueries('BEFORE_CAST')
    # actors, actresses, producers, writers, cinematographers, composers,
    # costume-designers, directors, editors, miscellaneous,
    # production-designers.
    castLists()
    # CACHE_PID.populate()
    # CACHE_CID.populate()
    # Aka names and titles.
    doAkaNames()
    t('doAkaNames()')
    doAkaTitles()
    t('doAkaTitles()')
    # alternate-versions, goofs, crazy-credits, quotes, soundtracks, trivia.
    doMinusHashFiles()
    t('doMinusHashFiles()')
    # biographies, business, laserdisc, literature, mpaa-ratings-reasons, plot.
    doNMMVFiles()
    # certificates, color-info, countries, genres, keywords, language,
    # locations, running-times, sound-mix, technical, release-dates.
    doMiscMovieInfo()
    # movie-links.
    doMovieLinks()
    t('doMovieLinks()')
    # ratings.
    getRating()
    t('getRating()')
    # taglines.
    getTaglines()
    t('getTaglines()')
    # ratings (top 250 and bottom 10 movies).
    getTopBottomRating()
    t('getTopBottomRating()')
    # complete-cast, complete-crew.
    completeCast()
    t('completeCast()')
    if CSV_DIR:
        CSV_CURS.closeAll()
    # Flush caches.
    CACHE_MID.flush()
    CACHE_PID.flush()
    CACHE_CID.flush()
    CACHE_MID.clear()
    CACHE_PID.clear()
    CACHE_CID.clear()
    t('fushing caches...')
    if CSV_ONLY_WRITE:
        # CSV-only mode: stop after writing the CSV files.
        t('TOTAL TIME TO WRITE CSV FILES', sinceBegin=True)
        executeCustomQueries('END')
        t('FINAL', sinceBegin=True)
        return
    if CSV_DIR:
        print('loading CSV files into the database')
        executeCustomQueries('BEFORE_CSV_LOAD')
        loadCSVFiles()
        t('loadCSVFiles()')
        executeCustomQueries('BEFORE_RESTORE')
    t('TOTAL TIME TO INSERT/WRITE DATA', sinceBegin=True)
    buildIndexesAndFK()
    restoreAll_imdbIDs()
    executeCustomQueries('END')
    t('FINAL', sinceBegin=True)
_HEARD = 0


def _kdb_handler(signum, frame):
    """SIGINT handler: flush the caches, then exit gracefully."""
    global _HEARD
    if _HEARD:
        print("EHI! DON'T PUSH ME! I'VE HEARD YOU THE FIRST TIME! :-)")
        return
    print('INTERRUPT REQUEST RECEIVED FROM USER. FLUSHING CACHES...')
    _HEARD = 1
    # XXX: trap _every_ error?
    for cache in (CACHE_MID, CACHE_PID, CACHE_CID, CACHE_COMPID):
        try:
            cache.flush()
        except IntegrityError:
            pass
    print('DONE! (in %d minutes, %d seconds)' %
          divmod(int(time.time()) - BEGIN_TIME, 60))
    sys.exit()
if __name__ == '__main__':
    import signal
    # On CTRL-C, flush the caches instead of losing the pending data.
    signal.signal(signal.SIGINT, _kdb_handler)
    if CSV_ONLY_LOAD:
        # Only load previously generated CSV files into the database.
        restoreCSV()
    else:
        run()
| 115,020 | Python | .py | 2,883 | 28.776968 | 185 | 0.529548 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,133 | s32cinemagoer.py | cinemagoer_cinemagoer/bin/s32cinemagoer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
s32cinemagoer.py script.
This script imports the s3 dataset distributed by IMDb into a SQL database.
Copyright 2017-2018 Davide Alberani <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import os
import glob
import gzip
import logging
import argparse
import sqlalchemy
try:
from tqdm import tqdm
HAS_TQDM = True
except ImportError:
HAS_TQDM = False
from imdb.parser.s3.utils import DB_TRANSFORM, title_soundex, name_soundexes
# Extension of the files in the IMDb s3 dataset.
TSV_EXT = '.tsv.gz'
# how many entries to write to the database at a time.
BLOCK_SIZE = 10000
# Module-level logger; --verbose raises the level to DEBUG.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Shared metadata: build_table() registers every created table here.
metadata = sqlalchemy.MetaData()
def generate_content(fd, headers, table):
    """Generate blocks of rows to be written to the database.

    :param fd: a file descriptor for the .tsv.gz file
    :type fd: :class:`_io.TextIOWrapper`
    :param headers: headers in the file
    :type headers: list
    :param table: the table that will be populated
    :type table: :class:`sqlalchemy.Table`
    :returns: block of data to insert
    :rtype: list
    """
    data = []
    headers_len = len(headers)
    data_transf = {}
    table_name = table.name
    # Collect the per-column transformation functions, if any.
    for column, conf in DB_TRANSFORM.get(table_name, {}).items():
        if 'transform' in conf:
            data_transf[column] = conf['transform']
    for line in fd:
        s_line = line.decode('utf-8').strip().split('\t')
        if len(s_line) != headers_len:
            # Skip malformed lines.
            continue
        # r'\N' is the dataset's marker for NULL values.
        info = dict(zip(headers, [x if x != r'\N' else None for x in s_line]))
        for key, tranf in data_transf.items():
            if key not in info:
                continue
            info[key] = tranf(info[key])
        # Soundex columns are derived here; they're not in the source data.
        if table_name == 'title_basics':
            info['t_soundex'] = title_soundex(info['primaryTitle'])
        elif table_name == 'title_akas':
            info['t_soundex'] = title_soundex(info['title'])
        elif table_name == 'name_basics':
            info['ns_soundex'], info['sn_soundex'], info['s_soundex'] = name_soundexes(info['primaryName'])
        data.append(info)
        if len(data) >= BLOCK_SIZE:
            yield data
            data = []
    if data:
        yield data
    # NOTE: a dead trailing 'data = []' after the final yield was removed.
def build_table(fn, headers):
    """Build a Table object from a .tsv.gz file.

    :param fn: the .tsv.gz file name
    :type fn: str
    :param headers: headers in the file
    :type headers: list
    """
    logging.debug('building table for file %s' % fn)
    table_name = fn.replace(TSV_EXT, '').replace('.', '_')
    table_map = DB_TRANSFORM.get(table_name) or {}
    # Consider both the headers found in the file and any extra
    # (derived) columns described in the transformation map.
    all_headers = set(headers) | set(table_map.keys())
    columns = []
    for header in all_headers:
        col_info = table_map.get(header) or {}
        col_type = col_info.get('type') or sqlalchemy.UnicodeText
        if 'length' in col_info and col_type is sqlalchemy.String:
            col_type = sqlalchemy.String(length=col_info['length'])
        columns.append(sqlalchemy.Column(name=header,
                                         type_=col_type,
                                         index=col_info.get('index', False)))
    return sqlalchemy.Table(table_name, metadata, *columns)
def import_file(fn, engine):
    """Import data from a .tsv.gz file.

    :param fn: the .tsv.gz file
    :type fn: str
    :param engine: SQLAlchemy engine
    :type engine: :class:`sqlalchemy.engine.base.Engine`
    """
    logging.info('begin processing file %s' % fn)
    connection = engine.connect()
    count = 0
    nr_of_lines = 0
    fn_basename = os.path.basename(fn)
    # First pass: count the data lines so tqdm can show real progress.
    with gzip.GzipFile(fn, 'rb') as gz_file:
        gz_file.readline()
        for line in gz_file:
            nr_of_lines += 1
    with gzip.GzipFile(fn, 'rb') as gz_file:
        headers = gz_file.readline().decode('utf-8').strip().split('\t')
        logging.debug('headers of file %s: %s' % (fn, ','.join(headers)))
        table = build_table(fn_basename, headers)
        try:
            table.drop()
            logging.debug('table %s dropped' % table.name)
        except Exception:
            # The table may not exist yet; was a bare 'except:' (which
            # also swallowed KeyboardInterrupt), narrowed to Exception.
            pass
        insert = table.insert()
        metadata.create_all(tables=[table])
        if HAS_TQDM and logger.isEnabledFor(logging.DEBUG):
            tqdm_ = tqdm
        else:
            tqdm_ = lambda it, **kwargs: it
        try:
            for block in generate_content(tqdm_(gz_file, total=nr_of_lines), headers, table):
                try:
                    connection.execute(insert, block)
                except Exception as e:
                    logging.error('error processing data: %d entries lost: %s' % (len(block), e))
                    continue
                count += len(block)
                # NOTE: an unused 'percent' computation was removed here.
        except Exception as e:
            logging.error('error processing data on table %s: %s' % (table.name, e))
    logging.info('processed file %s: %d entries' % (fn, count))
def import_dir(dir_name, engine, cleanup=False):
    """Import data from a series of .tsv.gz files.

    :param dir_name: directory containing the .tsv.gz files
    :type dir_name: str
    :param engine: SQLAlchemy engine
    :type engine: :class:`sqlalchemy.engine.base.Engine`
    :param cleanup: if True, remove each file once it has been imported
    :type cleanup: bool
    """
    for fn in glob.glob(os.path.join(dir_name, '*%s' % TSV_EXT)):
        if not os.path.isfile(fn):
            logging.debug('skipping file %s', fn)
            continue
        import_file(fn, engine)
        if cleanup:
            logging.debug('Removing file %s', fn)
            os.remove(fn)
if __name__ == '__main__':
    # Command line: the TSV directory, the database URI and two optional
    # flags for verbosity and post-import cleanup.
    cli = argparse.ArgumentParser()
    cli.add_argument('tsv_files_dir')
    cli.add_argument('db_uri')
    cli.add_argument('--verbose', help='increase verbosity and show progress', action='store_true')
    cli.add_argument('--cleanup', help="Remove files after they're imported", action='store_true')
    options = cli.parse_args()
    if options.verbose:
        logger.setLevel(logging.DEBUG)
    # Bind the shared metadata to the engine so tables can be created
    # and dropped without passing the engine around explicitly.
    engine = sqlalchemy.create_engine(options.db_uri, encoding='utf-8', echo=False)
    metadata.bind = engine
    import_dir(options.tsv_files_dir, engine, options.cleanup)
| 6,879 | Python | .py | 175 | 32.422857 | 107 | 0.635669 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,134 | get_person.py | cinemagoer_cinemagoer/bin/get_person.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
get_person.py

Usage: get_person "person_id"

Print the main information about the person identified by the given
person_id (e.g. '0000210' for "Julia Roberts").

Notice that person_id, using 'sql', are not the same IDs used on the web.
"""

import sys

# The Cinemagoer package is mandatory for this script.
try:
    import imdb
except ImportError:
    print('You need to install the Cinemagoer package!')
    sys.exit(1)

# Exactly one command line argument (the person_id) is expected.
if len(sys.argv) != 2:
    print('Only one argument is required:')
    print('  %s "person_id"' % sys.argv[0])
    sys.exit(2)

person_id = sys.argv[1]

imdb_access = imdb.IMDb()

try:
    # Fetch a Person object holding the data about the person
    # identified by the given person_id.
    person = imdb_access.get_person(person_id)
except imdb.IMDbError as err:
    print("Probably you're not connected to Internet. Complete error report:")
    print(err)
    sys.exit(3)

if not person:
    print('It seems that there\'s no person with person_id "%s"' % person_id)
    sys.exit(4)

# summary() returns a printable string with the person's main
# information.  For finer-grained access, inspect person.keys() and
# index the object directly, e.g.:
#   person.get('birth date'), person.get('birth notes')
#   person.get('actor') or person.get('actress')  # played roles
# Additional information sets can be fetched on demand with
# imdb_access.update(person, info=['awards']).
print(person.summary())
| 2,782 | Python | .py | 74 | 35.662162 | 79 | 0.678293 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,135 | search_keyword.py | cinemagoer_cinemagoer/bin/search_keyword.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
search_keyword.py

Usage: search_keyword "keyword"

Search for keywords similar to the given one and print the results.
"""

import sys

# Import the Cinemagoer package.
try:
    import imdb
except ImportError:
    print('You bad boy! You need to install the Cinemagoer package!')
    sys.exit(1)

if len(sys.argv) != 2:
    print('Only one argument is required:')
    print('  %s "keyword name"' % sys.argv[0])
    sys.exit(2)

name = sys.argv[1]

i = imdb.IMDb()

# NOTE: the unused "out_encoding" assignment (a Python 2 era leftover,
# when output had to be encoded explicitly) was removed; on Python 3
# print() handles the output encoding by itself.

try:
    # Do the search, and get the results (a list of keyword strings).
    results = i.search_keyword(name, results=20)
except imdb.IMDbError as e:
    print("Probably you're not connected to Internet. Complete error report:")
    print(e)
    sys.exit(3)

# Print the results.
print('    %s result%s for "%s":' % (len(results),
                                     ('', 's')[len(results) != 1],
                                     name))
print(' : keyword')

# Print every keyword.
for idx, keyword in enumerate(results):
    outp = '%d: %s' % (idx+1, keyword)
    print(outp)
| 1,167 | Python | .py | 37 | 26.783784 | 79 | 0.640107 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,136 | get_keyword.py | cinemagoer_cinemagoer/bin/get_keyword.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
get_keyword.py

Usage: get_keyword "keyword"

search for movies tagged with the given keyword and print the results.
"""

import sys

# Bail out early if the Cinemagoer package is not installed.
try:
    import imdb
except ImportError:
    print('You need to install the Cinemagoer package!')
    sys.exit(1)

# Exactly one command line argument (the keyword) is expected.
if len(sys.argv) != 2:
    print('Only one argument is required:')
    print('  %s "keyword"' % sys.argv[0])
    sys.exit(2)

keyword = sys.argv[1]

ia = imdb.IMDb()

try:
    # Retrieve up to 20 movies tagged with the given keyword.
    matches = ia.get_keyword(keyword, results=20)
except imdb.IMDbError as err:
    print("Probably you're not connected to Internet. Complete error report:")
    print(err)
    sys.exit(3)

# Print a header followed by the long imdb title of every movie.
plural = 's' if len(matches) != 1 else ''
print('    %s result%s for "%s":' % (len(matches), plural, keyword))
print(' : movie title')
for rank, movie in enumerate(matches, 1):
    print('%d: %s' % (rank, movie['long imdb title']))
| 1,111 | Python | .py | 36 | 26.027778 | 79 | 0.627469 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,137 | get_first_person.py | cinemagoer_cinemagoer/bin/get_first_person.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
get_first_person.py

Usage: get_first_person "person name"

Search for the given name and print the best matching result.
"""

import sys

# Cinemagoer is required; give a friendly hint if it is missing.
try:
    import imdb
except ImportError:
    print('You need to install the Cinemagoer package!')
    sys.exit(1)

# Exactly one command line argument (the person name) is expected.
if len(sys.argv) != 2:
    print('Only one argument is required:')
    print('  %s "person name"' % sys.argv[0])
    sys.exit(2)

name = sys.argv[1]

ia = imdb.IMDb()

try:
    # Run the search; matches is a list of Person objects.
    matches = ia.search_person(name)
except imdb.IMDbError as err:
    print("Probably you're not connected to Internet. Complete error report:")
    print(err)
    sys.exit(3)

if not matches:
    print('No matches for "%s", sorry.' % name)
    sys.exit(0)

# Consider only the best match.
print('    Best match for "%s"' % name)
best_match = matches[0]
# Search results only carry basic data (like the name); fetch the main
# information before printing the summary.
ia.update(best_match)
print(best_match.summary())
| 1,103 | Python | .py | 38 | 26.263158 | 79 | 0.704762 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,138 | get_movie_list.py | cinemagoer_cinemagoer/bin/get_movie_list.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
get_movie_list.py

Usage: get_movie_list.py ls091843609

Parse the list and print the results.
"""

import sys

# Import the Cinemagoer package.
try:
    import imdb
except ImportError:
    print('You need to install the Cinemagoer package!')
    sys.exit(1)

if len(sys.argv) != 2:
    print('Only one argument is required:')
    print('  %s "movie list id"' % sys.argv[0])
    sys.exit(2)

listId = sys.argv[1]

i = imdb.IMDb()

# NOTE: the unused "out_encoding" assignment (a Python 2 era leftover)
# was removed; print() handles the output encoding on Python 3.

try:
    # Do the search, and get the results (a list of Movie objects).
    results = i.get_movie_list(list_=listId)
except imdb.IMDbError as e:
    print("Probably you're not connected to Internet. Complete error report:")
    print(e)
    sys.exit(3)

# Print the rank and the long imdb title for every movie.
for movie in results:
    outp = '%s\t: %s' % (movie['rank'], movie['long imdb title'])
    print(outp)
| 952 | Python | .py | 32 | 26.78125 | 79 | 0.69527 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,139 | search_movie.py | cinemagoer_cinemagoer/bin/search_movie.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
search_movie.py

Usage: search_movie "movie title"

Search for the given title and print the results.
"""

import sys

# Import the Cinemagoer package.
try:
    import imdb
except ImportError:
    print('You bad boy! You need to install the Cinemagoer package!')
    sys.exit(1)

if len(sys.argv) != 2:
    print('Only one argument is required:')
    print('  %s "movie title"' % sys.argv[0])
    sys.exit(2)

title = sys.argv[1]

i = imdb.IMDb()

# NOTE: the unused "out_encoding" assignment (a Python 2 era leftover)
# was removed; print() handles the output encoding on Python 3.

try:
    # Do the search, and get the results (a list of Movie objects).
    results = i.search_movie(title)
except imdb.IMDbError as e:
    print("Probably you're not connected to Internet. Complete error report:")
    print(e)
    sys.exit(3)

# Print the results.
print('    %s result%s for "%s":' % (len(results),
                                     ('', 's')[len(results) != 1],
                                     title))
print('movieID\t: imdbID : title')

# Print the movieID, the imdbID and the long imdb title for every movie.
for movie in results:
    outp = '%s\t: %s : %s' % (movie.movieID, i.get_imdbID(movie),
                              movie['long imdb title'])
    print(outp)
| 1,239 | Python | .py | 38 | 27.131579 | 79 | 0.622896 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,140 | conf.py | cinemagoer_cinemagoer/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = 'Cinemagoer'
copyright = '2018, Davide Alberani, H. Turgut Uyar'
author = 'Davide Alberani, H. Turgut Uyar'

# The short X.Y version
# NOTE(review): deliberately empty?  Sphinx will render an empty version
# string while `release` below is '6.8' — confirm before changing.
version = ''
# The full version, including alpha/beta/rc tags
release = '6.8'

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns about language = None and expects e.g.
# 'en'; kept as-is here to avoid changing build behavior.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['imdb.']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'Cinemagoerdoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Cinemagoer.tex', 'Cinemagoer Documentation',
     'Davide Alberani, H. Turgut Uyar', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'cinemagoer', 'Cinemagoer Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Cinemagoer', 'Cinemagoer Documentation',
     author, 'Cinemagoer', 'One line description of project.',
     'Miscellaneous'),
]

# -- Extension configuration -------------------------------------------------
| 5,011 | Python | .py | 121 | 39.38843 | 79 | 0.66701 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,141 | download_applydiffs.py | cinemagoer_cinemagoer/docs/goodies/download_applydiffs.py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# This script downloads and applies any and all imdb diff files which
# have not already been applied to the lists in the ImdbListsPath folder
#
# NOTE: this is especially useful in Windows environment; you have
# to modify the paths in the 'Script configuration' section below,
# accordingly with your needs.
#
# The script will check the imdb list files (compressed or incompressed)
# in ImdbListsPath and assume that the imdb lists were most recently downloaded
# or updated based on the most recently modified list file in that folder.
#
# In order to run correctly, the configuration section below needs to be
# set to the location of the imdb list files and the commands required to
# unGzip, UnTar, patch and Gzip files.
#
# Optional configuration settings are to set the imdb diff files download and/or
# backup folders. If you do not want to keep or backup the downloaded imdb diff
# files then set keepDiffFiles to False and diffFilesBackupFolder to None.
#
# If RunAfterSuccessfulUpdate is set to a value other than None then the program
# specified will be run after the imdb list files have been successfully updated.
# This enables, for example, the script to automatically run imdbPy to rebuild
# the database once the imdb list files have been updated.
#
# If a specific downloaded imdb diff file cannot be applied correctly then this script
# will fail as gracefully as possible.
#
# Copyright 2013 (C) Roy Stead
# Released under the terms of the GPL license.
#
import datetime
import logging
import os
import re
import shutil
import subprocess

# NOTE: the rest of this script uses "datetime" as the *class* — e.g.
# datetime(...), datetime.now(), datetime.fromtimestamp() — so the
# from-import below deliberately shadows the module name with the class;
# without it those calls raise TypeError/AttributeError.
from datetime import datetime
from datetime import timedelta
from ftplib import FTP
from random import choice
#############################################
#             Script configuration          #
#############################################

# The local folders where imdb list and diffs files are stored
#
# If ImdbDiffsPath is set to None then a working folder, "diffs" will be created as a sub-folder of ImdbListsPath
# and will be cleaned up afterwards if you also set keepDiffFiles to False
ImdbListsPath = "Z:\\MovieDB\\data\\lists"
ImdbDiffsPath = None

# The path to the logfile, if desired
logfile = 'Z:\\MovieDB\\data\\logs\\update.log'

# Define the system commands to unZip, unTar, Patch and Gzip a file
# Values are substituted into these template strings at runtime, in the order indicated
#
# Note that this script REQUIRES that the program used to apply patches MUST return 0 on success and non-zero on failure
#
unGzip="\"C:/Program Files/7-Zip/7z.exe\" e %s -o%s"            # params = archive, destination folder
unTar=unGzip                                                    # params = archive, destination folder
applyPatch="\"Z:/MovieDB/Scripts/patch.exe\" --binary --force --silent %s %s"   # params = listfile, diffsfile
progGZip="\"Z:/MovieDB/Scripts/gzip.exe\" %s"                   # param = file to Gzip

# Specify a program to be run after a successful update of the imdb lists,
# such as a command line to execute imdbPy to rebuild the db from the updated imdb list files
#
# Set to None if no such program should be run
RunAfterSuccessfulUpdate="\"Z:\\MovieDB\\Scripts\\Update db from imdb lists.bat\""

# Folder to copy downloaded imdb diff files to once they have been successfully applied
# Note that ONLY diff files which are successfully applied will be backed up.
#
# Set to None if no such folder
diffFilesBackupFolder=None

# Set keepDiffFiles to false if the script is to delete ImdbDiffsPath and all its files when it's finished
#
# If set to False and diffFilesBackupFolder is not None then diff files will be backed up before being deleted
# (and will not be deleted if there's any problem with backing up the diff files)
keepDiffFiles=True

# Possible FTP servers for downloading imdb diff files and the path to the diff files on each server
ImdbDiffsFtpServers = [ \
    {'url': "ftp.fu-berlin.de", 'path': "/pub/misc/movies/database/diffs"}, \
#    {'url': "ftp.sunet.se", 'path': "/pub/tv+movies/imdb/diffs"}, \        # Swedish server isn't kept up to date
    {'url': "ftp.funet.fi", 'path': "/pub/mirrors/ftp.imdb.com/pub/diffs"} ]    # Finish server tends to be updated first

#############################################
#                Script Code                #
#############################################

# Module-level logger; initLogging() must assign it before applyDiffs() runs.
logger = None
def previousFriday(day):
    """Return the most recent Friday on or before *day*.

    The returned datetime contains ONLY date information; all time
    fields are zero.  If *day* itself falls on a Friday, that same
    date is returned.

    :param day: reference date (a datetime, or anything exposing
        year/month/day and weekday())
    :return: datetime of the most recent Friday, at 00:00:00
    """
    friday = datetime(day.year, day.month, day.day) - timedelta(days=day.weekday()) + timedelta(days=4)
    # For Monday..Thursday the expression above lands on the *upcoming*
    # Friday, so step back one week.  A Friday must map to itself: the
    # original used "<=", which wrongly pushed a Friday back a whole
    # week, contradicting its own documented behavior.
    if day.weekday() < 4:
        friday -= timedelta(weeks=1)
    return friday
def deleteFolder(folder):
    """Delete all files and subfolders in the specified folder, as well
    as the folder itself.  A non-existent folder is silently ignored;
    errors raised by shutil.rmtree propagate to the caller.
    """
    if os.path.isdir(folder):
        # rmtree() removes the folder itself too; the original's extra
        # isdir()/os.rmdir() pass afterwards was dead code.
        shutil.rmtree(folder)
def mktree(path):
    """Create *path* and as many parent folders as needed.

    :param path: directory path to create
    :return: 0 on success, -1 on failure
    """
    import os.path as os_path
    paths_to_create = []
    # Walk upwards collecting every missing ancestor, deepest last.
    while not os_path.lexists(path):
        paths_to_create.insert(0, path)
        head, tail = os_path.split(path)
        if len(tail.strip()) == 0:
            # Just in case path ends with a / or \: strip and re-split.
            path = head
            head, tail = os_path.split(path)
        path = head
    # Create the missing folders top-down.
    for path in paths_to_create:
        try:
            os.mkdir(path)
        except Exception:
            # The original used "%p", an invalid printf conversion that
            # would itself raise while trying to report the error.
            logger.exception("Error trying to create %s", path)
            return -1
    return 0
# Downloads and applies all imdb diff files which have not yet been applied to the current imdb lists
def applyDiffs():
    """Download and apply every imdb diff file not yet applied to the
    lists in ImdbListsPath.

    The last-update date of the lists is inferred from the most recently
    modified *.list(.gz) file.  All missing weekly diff archives are
    downloaded first (from a randomly chosen FTP mirror); the lists are
    then uncompressed into a temporary folder, patched week by week with
    the external patch tool, re-compressed and moved back.  Relies on
    the module-level configuration above and on initLogging() having
    set the module-level `logger`.
    """
    global keepDiffFiles, ImdbListsPath, ImdbDiffsPath, diffFilesBackupFolder
    global unGzip, unTar, applyPatch, progGZip, RunAfterSuccessfulUpdate, ImdbDiffsFtpServers

    if not os.path.exists(ImdbListsPath):
        logger.critical("Please edit this script file and set ImdbListsPath to the current location of your imdb list files")
        return

    # If no ImdbDiffsPath is specified, create a working folder for the diffs file as a sub-folder of the imdb lists repository
    if ImdbDiffsPath is None:
        ImdbDiffsPath = os.path.join(ImdbListsPath,"diffs")

    # Get the date of the most recent Friday (i.e. the most recently released imdb diffs)
    # Note Saturday and Sunday are a special case since Python's day of the week numbering starts at Monday = 0
    # NOTE(review): `datetime` is used as the class here (datetime.now()),
    # which requires `from datetime import datetime` to be in effect.
    day = datetime.now()
    mostrecentfriday = previousFriday(day)

    # Now get the date when the imdb list files in ImdbListsPath were most recently updated.
    #
    # At the end of this loop, day will contain the most recent date that a list file was
    # modified (Note: modified, not created, since Windows changes the creation date on file copies)
    #
    # This approach assumes that since the imdb list files were last downloaded or updated nobody has
    # unzipped a compressed list file and then re-zipped it again without updating all of the imdb
    # list files at that time (and also that nobody has manualy changed the file last modified dates).
    # Which seem like reasonable assumptions.
    #
    # An even more robust approach would be to look inside each zipfile and read the date/time stamp
    # from the first line of the imdb list file itself but that seems like overkill to me.
    day = None
    for f in os.listdir(ImdbListsPath):
        if re.match(r".*\.list\.gz",f) or re.match(r".*\.list",f):
            try:
                t = os.path.getmtime(os.path.join(ImdbListsPath,f))
                d = datetime.fromtimestamp(t)
                if day is None:
                    day = d
                elif d > day:
                    day = d
            except Exception as e:
                logger.exception("Unable to read last modified date for file %s" % f)
    if day is None:
        # No diff files found and unable to read imdb list files
        logger.critical("Problem: Unable to check imdb lists in folder %s" % ImdbListsPath)
        logger.critical("Solutions: Download imdb lists, change ImdbListsPath value in this script or change access settings for that folder.")
        return

    # Last update date for imdb list files is the Friday before they were downloaded
    imdbListsDate = previousFriday(day)
    logger.debug("imdb lists updated up to %s" % imdbListsDate)
    if imdbListsDate >= mostrecentfriday:
        logger.info("imdb database is already up to date")
        return

    # Create diffs file folder if it does not already exist
    if not os.path.isdir(ImdbDiffsPath):
        try:
            os.mkdir(ImdbDiffsPath)
        except Exception as e:
            logger.exception("Unable to create folder for imdb diff files (%s)" % ImdbDiffsPath)
            return

    # Next we check for the imdb diff files and download any which we need to apply but which are not already downloaded
    diffFileDate = imdbListsDate
    haveFTPConnection = False
    while 1:
        if diffFileDate >= mostrecentfriday:
            break
        diff = "diffs-%s.tar.gz" % diffFileDate.strftime("%y%m%d")
        diffFilePath = os.path.join(ImdbDiffsPath, diff)
        logger.debug("Need diff file %s" % diff)
        if not os.path.isfile(diffFilePath):
            # diff file is missing so we need to download it so first make sure we have an FTP connection
            if not haveFTPConnection:
                try:
                    # Choose a random ftp server from which to download the imdb diff file(s)
                    ImdbDiffsFtpServer = choice(ImdbDiffsFtpServers)
                    ImdbDiffsFtp = ImdbDiffsFtpServer['url']
                    ImdbDiffsFtpPath = ImdbDiffsFtpServer['path']

                    # Connect to chosen imdb FTP server
                    ftp = FTP(ImdbDiffsFtp)
                    ftp.login()

                    # Change to the diffs folder on the imdb files server
                    ftp.cwd(ImdbDiffsFtpPath)
                    haveFTPConnection = True
                except Exception as e:
                    logger.exception("Unable to connect to FTP server %s" % ImdbDiffsFtp)
                    return

            # Now download the diffs file
            logger.info("Downloading ftp://%s%s/%s" % ( ImdbDiffsFtp, ImdbDiffsFtpPath, diff ))
            diffFile = open(diffFilePath, 'wb')
            try:
                ftp.retrbinary("RETR " + diff, diffFile.write)
                diffFile.close()
            except Exception as e:
                # Unable to download diff file. This may be because it's not yet available but is due for release today
                # NOTE(review): Exception.message does not exist on Python 3,
                # so this line itself raises AttributeError; parsing str(e)
                # would be needed instead — confirm and fix separately.
                code, message = e.message.split(' ', 1)
                if code == '550' and diffFileDate == imdbListsDate:
                    logger.info("Diff file %s not yet available on the imdb diffs server: try again later" % diff)
                else:
                    logger.exception("Unable to download %s" % diff)

                # Delete the diffs file placeholder since the file did not download
                diffFile.close()
                os.remove(diffFilePath)
                if os.path.isdir(ImdbDiffsPath) and not keepDiffFiles:
                    os.rmdir(ImdbDiffsPath)
                return
            logger.info("Successfully downloaded %s" % diffFilePath)

        # Check for the following week's diff file
        diffFileDate += timedelta(weeks=1)

    # Close FTP connection if we used one
    if haveFTPConnection:
        ftp.close()

    # At this point, we know we need to apply one or more diff files and we
    # also know that we have all of the diff files which need to be applied
    # so next step is to uncompress our existing list files to a folder so
    # we can apply diffs to them.
    #
    # Note that the script will ONLY apply diffs if ALL of the diff files
    # needed to bring the imdb lists up to date are available. It will, however,
    # partially-update the imdb list files if one of the later files could not
    # be applied for any reason but earlier ones were applied ok (see below).
    tmpListsPath = os.path.join(ImdbDiffsPath,"lists")
    deleteFolder(tmpListsPath)
    try:
        os.mkdir(tmpListsPath)
    except Exception as e:
        logger.exception("Unable to create temporary folder for imdb lists")
        return
    logger.info("Uncompressing imdb list files")

    # Uncompress list files in ImdbListsPath to our temporary folder tmpListsPath
    numListFiles = 0
    for f in os.listdir(ImdbListsPath):
        if re.match(r".*\.list\.gz",f):
            try:
                cmdUnGzip = unGzip % (os.path.join(ImdbListsPath,f), tmpListsPath)
                subprocess.call(cmdUnGzip , shell=True)
            except Exception as e:
                logger.exception("Unable to uncompress imdb list file using: %s" % cmdUnGzip)
            numListFiles += 1
    if numListFiles == 0:
        # Somebody has deleted or moved the list files since we checked their datetime stamps earlier(!)
        logger.critical("No imdb list files found in %s." % ImdbListsPath)
        return

    # Now we loop through the diff files and apply each one in turn to the uncompressed list files
    patchedOKWith = None
    while 1:
        if imdbListsDate >= mostrecentfriday:
            break
        diff = "diffs-%s.tar.gz" % imdbListsDate.strftime("%y%m%d")
        diffFilePath = os.path.join(ImdbDiffsPath, diff)
        logger.info("Applying imdb diff file %s" % diff)

        # First uncompress the diffs file to a subdirectory.
        #
        # If that subdirectory already exists, delete any files from it
        # in case they are stale and replace them with files from the
        # newly-downloaded imdb diff file
        tmpDiffsPath = os.path.join(ImdbDiffsPath,"diffs")
        deleteFolder(tmpDiffsPath)
        os.mkdir(tmpDiffsPath)

        # unZip the diffs file to create a file diffs.tar
        try:
            cmdUnGzip = unGzip % (diffFilePath, tmpDiffsPath)
            subprocess.call(cmdUnGzip, shell=True)
        except Exception as e:
            logger.exception("Unable to unzip imdb diffs file using: %s" % cmdUnGzip)
            return

        # unTar the file diffs.tar
        tarFile = os.path.join(tmpDiffsPath,"diffs.tar")
        patchStatus = 0
        if os.path.isfile(tarFile):
            try:
                cmdUnTar = unTar % (tarFile, tmpDiffsPath)
                subprocess.call(cmdUnTar, shell=True)
            except Exception as e:
                logger.exception("Unable to untar imdb diffs file using: %s" % cmdUnTar)
                return

            # Clean up tar file and the sub-folder which 7z may have (weirdly) created while unTarring it
            os.remove(tarFile)
            if os.path.exists(os.path.join(tmpDiffsPath,"diffs")):
                os.rmdir(os.path.join(tmpDiffsPath,"diffs"))

            # Apply all the patch files to the list files in tmpListsPath
            isFirstPatchFile = True
            for f in os.listdir(tmpDiffsPath):
                if re.match(r".*\.list",f):
                    logger.info("Patching imdb list file %s" % f)
                    try:
                        cmdApplyPatch = applyPatch % (os.path.join(tmpListsPath,f), os.path.join(tmpDiffsPath,f))
                        patchStatus = subprocess.call(cmdApplyPatch, shell=True)
                    except Exception as e:
                        logger.exception("Unable to patch imdb list file using: %s" % cmdApplyPatch)
                        patchStatus=-1
                    if patchStatus != 0:
                        # Patch failed so...
                        logger.critical("Patch status %s: Wrong diff file for these imdb lists (%s)" % (patchStatus, diff))

                        # Delete the erroneous imdb diff file
                        os.remove(diffFilePath)

                        # Clean up temporary diff files
                        deleteFolder(tmpDiffsPath)

                        if patchedOKWith is not None and isFirstPatchFile:
                            # The previous imdb diffs file succeeded and the current diffs file failed with the
                            # first attempted patch, so we can keep our updated list files up to this point
                            logger.warning("Patched OK up to and including imdb diff file %s ONLY" % patchedOKWith)
                            break
                        else:
                            # We've not managed to successfully apply any imdb diff files and this was not the
                            # first patch attempt from a diff file from this imdb diffs file so we cannot rely
                            # on the updated imdb lists being accurate, in which case delete them and abandon
                            logger.critical("Abandoning update: original imdb lists are unchanged")
                            deleteFolder(tmpListsPath)
                            return

                    # Reset isFirstPatchFile flag since we have successfully
                    # applied at least one patch file from this imdb diffs file
                    isFirstPatchFile = False

        # Clean up the imdb diff files and their temporary folder
        deleteFolder(tmpDiffsPath)

        # Note the imdb patch file which was successfully applied, if any
        if patchStatus == 0:
            patchedOKWith = diff

            # Backup successfully-applied diff file if required
            if diffFilesBackupFolder is not None:
                # Create diff files backup folder if it does not already exist
                if not os.path.isdir(diffFilesBackupFolder):
                    if mktree(diffFilesBackupFolder) == -1:
                        if not keepDiffFiles:
                            keepDiffFiles = True
                            logger.warning("diff files will NOT be deleted but may be backed up manually")

                # Backup this imdb diff file to the backup folder if that folder exists and this diff file doesn't already exist there
                if os.path.isdir(diffFilesBackupFolder):
                    if not os.path.isfile(os.path.join(diffFilesBackupFolder,diff)):
                        try:
                            shutil.copy(diffFilePath,diffFilesBackupFolder)
                        except Exception as e:
                            logger.exception("Unable to copy %s to backup folder %s" % (diffFilePath, diffFilesBackupFolder))
                            if not keepDiffFiles:
                                keepDiffFiles = True
                                logger.warning("diff files will NOT be deleted but may be backed up manually")

        # Clean up imdb diff file if required
        if not keepDiffFiles:
            if os.path.isfile(diffFilePath):
                os.remove(diffFilePath)

        # Next we apply the following week's imdb diff files
        imdbListsDate += timedelta(weeks=1)

    # List files are all updated so re-Gzip them up and delete the old list files
    for f in os.listdir(tmpListsPath):
        if re.match(r".*\.list",f):
            try:
                cmdGZip = progGZip % os.path.join(tmpListsPath,f)
                subprocess.call(cmdGZip, shell=True)
            except Exception as e:
                logger.exception("Unable to Gzip imdb list file using: %s" % cmdGZip)
                break
            if os.path.isfile(os.path.join(tmpListsPath,f)):
                os.remove(os.path.join(tmpListsPath,f))

    # Now move the updated and compressed lists to the main lists folder, replacing the old list files
    for f in os.listdir(tmpListsPath):
        if re.match(r".*\.list.gz",f):
            # Delete the original compressed list file from ImdbListsPath if it exists
            if os.path.isfile(os.path.join(ImdbListsPath,f)):
                os.remove(os.path.join(ImdbListsPath,f))

            # Move the updated compressed list file to ImdbListsPath
            os.rename(os.path.join(tmpListsPath,f),os.path.join(ImdbListsPath,f))

    # Clean up the now-empty tmpListsPath temporary folder and anything left inside it
    deleteFolder(tmpListsPath)

    # Clean up imdb diff files if required
    # Note that this rmdir call will delete the folder only if it is empty. So if that folder was created, used and all
    # diff files deleted (possibly after being backed up) above then it should now be empty and will be removed.
    #
    # However, if the folder previously existed and contained some old diff files then those diff files will not be deleted.
    # To delete the folder and ALL of its contents regardless, replace os.rmdir() with a deleteFolder() call
    if not keepDiffFiles:
        os.rmdir(ImdbDiffsPath)
        # deleteFolder(ImdbDiffsPath)

    # If the imdb lists were successfully updated, even partially, then run my
    # DOS batch file "Update db from imdb lists.bat" to rebuild the imdbPy database
    # and relink and reintegrate my shadow tables data into it
    if patchedOKWith is not None:
        logger.info("imdb lists are updated up to imdb diffs file %s" % patchedOKWith)
        if RunAfterSuccessfulUpdate is not None:
            logger.info("Now running %s" % RunAfterSuccessfulUpdate)
            subprocess.call(RunAfterSuccessfulUpdate, shell=True)
# Set up logging
def initLogging(loggerName, logfilename):
    """Configure the module-wide ``logger`` global.

    A DEBUG-level logger named *loggerName* is created with a plain
    stdout handler; when *logfilename* is not None, a timestamped
    file handler is attached as well.
    """
    global logger
    logger = logging.getLogger(loggerName)
    logger.setLevel(logging.DEBUG)
    handlers = []
    if logfilename is not None:
        # Verbose format for the log file: message plus source location.
        file_handler = logging.FileHandler(logfilename)
        file_handler.setFormatter(logging.Formatter(
            '%(name)s %(levelname)s %(asctime)s %(message)s\t\t\t'
            '[%(module)s line %(lineno)d: %(funcName)s%(args)s]',
            datefmt='%Y-%m-%d %H:%M:%S'))
        handlers.append(file_handler)
    # Console output stays terse: just the message.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter('%(message)s'))
    handlers.append(console_handler)
    for handler in handlers:
        handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)
# Script entry point: configure the '__applydiffs__' logger (``logfile`` is
# presumably defined near the top of this script — not visible here) and then
# run the full download/patch/cleanup pipeline.
initLogging('__applydiffs__', logfile)
applyDiffs()
| 22,496 | Python | .py | 412 | 44.800971 | 177 | 0.648431 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,142 | conftest.py | cinemagoer_cinemagoer/tests/conftest.py | from pytest import fixture
import logging
import os
import time
from imdb import Cinemagoer
from imdb.parser.http import IMDbURLopener
# Don't let a failing log handler raise and abort the test run.
logging.raiseExceptions = False
# Seconds slept before each real (non-cached) network fetch; see
# retrieve_unicode_cached below.
DELAY = 0
# On-disk page cache shared across test runs, stored next to this file.
cache_dir = os.path.join(os.path.dirname(__file__), '.cache')
if not os.path.exists(cache_dir):
    os.makedirs(cache_dir)
# Keep a reference to the real method so it can be wrapped by the caching
# version and restored afterwards (see the ``ia`` fixture).
retrieve_unicode_orig = IMDbURLopener.retrieve_unicode
def retrieve_unicode_cached(self, url, size=-1):
    """Drop-in replacement for ``IMDbURLopener.retrieve_unicode`` that caches
    every fetched page on disk under ``cache_dir``.

    The cache key is the URL path (everything after the host) with the
    slashes joined by underscores.  On a cache miss the real network call is
    made (after sleeping ``DELAY`` seconds) and the page is stored for later
    runs.
    """
    print(url)  # deliberate: shows the fetched URL under ``pytest -s``
    key = "_".join(url.split("/")[3:])
    cache_file = os.path.join(cache_dir, key)
    if os.path.exists(cache_file):
        # BUG FIX: read/write the cache with an explicit UTF-8 encoding.
        # Relying on the platform default (e.g. cp1252 on Windows) could
        # crash or corrupt non-ASCII IMDb page content.
        with open(cache_file, 'r', encoding='utf-8') as f:
            content = f.read()
    else:
        time.sleep(DELAY)
        content = retrieve_unicode_orig(self, url, size=size)
        with open(cache_file, 'w', encoding='utf-8') as f:
            f.write(content)
    return content
# Optional S3 database URI; when set, the ``ia`` fixture below is parametrized
# over the 's3' access system in addition to 'http'.
s3_uri = os.getenv('CINEMAGOER_S3_URI')
@fixture(params=['http'] + (['s3'] if s3_uri is not None else []))
def ia(request):
    """Access to IMDb data.

    For the 'http' parameter the URL opener is temporarily swapped for the
    disk-caching wrapper and restored after the test; for 's3' a plain
    accessor pointed at ``s3_uri`` is yielded.
    """
    access_mode = request.param
    if access_mode == 's3':
        yield Cinemagoer('s3', uri=s3_uri)
    elif access_mode == 'http':
        IMDbURLopener.retrieve_unicode = retrieve_unicode_cached
        yield Cinemagoer('http')
        IMDbURLopener.retrieve_unicode = retrieve_unicode_orig
| 1,235 | Python | .py | 35 | 30.228571 | 66 | 0.675379 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_movie_official_sites_should_be_a_list(ia):
    """The Matrix (0133093) should list exactly 3 official sites."""
    matrix = ia.get_movie('0133093', info=['official sites'])
    assert len(matrix.get('official sites', [])) == 3

def test_movie_official_sites_if_none_should_be_excluded(ia):
    """Ates Parcasi has no official sites, so the key should be absent."""
    ates_parcasi = ia.get_movie('1863157', info=['official sites'])
    assert 'official sites' not in ates_parcasi

def test_movie_sound_clips_should_be_a_list(ia):
    """The Matrix should list a small number (2-4) of sound clips."""
    matrix = ia.get_movie('0133093', info=['official sites'])
    clips = matrix.get('sound clips', [])
    assert 1 < len(clips) < 5

def test_movie_sound_clips_if_none_should_be_excluded(ia):
    """Ates Parcasi has no sound clips, so the key should be absent."""
    ates_parcasi = ia.get_movie('1863157', info=['official sites'])
    assert 'sound clips' not in ates_parcasi
| 804 | Python | .py | 14 | 53.142857 | 78 | 0.6875 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,144 | test_http_person_bio.py | cinemagoer_cinemagoer/tests/test_http_person_bio.py | from pytest import mark
import re
def test_person_headshot_should_be_an_image_link(ia):
    """Keanu Reeves' headshot should be a jpg URL."""
    keanu = ia.get_person('0000206', info=['biography'])
    assert re.match(r'^https?://.*\.jpg$', keanu['headshot'])

def test_person_full_size_headshot_should_be_an_image_link(ia):
    """The full-size headshot should also be a jpg URL."""
    keanu = ia.get_person('0000206', info=['biography'])
    assert re.match(r'^https?://.*\.jpg$', keanu['full-size headshot'])

def test_person_headshot_if_none_should_be_excluded(ia):
    """Deni Gordon has no headshot, so the key should be absent."""
    deni = ia.get_person('0330139', info=['biography'])
    assert 'headshot' not in deni

def test_person_bio_is_present(ia):
    keanu = ia.get_person('0000206', info=['biography'])
    assert 'mini biography' in keanu

def test_person_birth_date_should_be_in_ymd_format(ia):
    astaire = ia.get_person('0000001', info=['biography'])
    assert astaire.get('birth date') == '1899-05-10'

def test_person_birth_date_without_month_and_date_should_be_in_y00_format(ia):
    """An unknown month/day should be padded with '00'."""
    mcclory = ia.get_person('0565883', info=['biography'])
    assert mcclory.get('birth date') == '1968-00-00'

def test_person_birth_date_without_itemprop_should_be_in_ymd_format(ia):
    bogart = ia.get_person('0000007', info=['biography'])
    assert bogart.get('birth date') == '1899-12-25'

def test_person_birth_notes_should_contain_birth_place(ia):
    astaire = ia.get_person('0000001', info=['biography'])
    assert astaire.get('birth notes') == 'Omaha, Nebraska, USA'

def test_person_death_date_should_be_in_ymd_format(ia):
    astaire = ia.get_person('0000001', info=['biography'])
    assert astaire.get('death date') == '1987-06-22'

def test_person_death_date_without_itemprop_should_be_in_ymd_format(ia):
    bogart = ia.get_person('0000007', info=['biography'])
    assert bogart.get('death date') == '1957-01-14'

def test_person_death_date_if_none_should_be_excluded(ia):
    """Julia Roberts is alive, so no death date should be present."""
    julia = ia.get_person('0000210', info=['biography'])
    assert 'death date' not in julia

def test_person_death_notes_should_contain_death_place_and_reason(ia):
    astaire = ia.get_person('0000001', info=['biography'])
    assert astaire['death notes'] == 'in Los Angeles, California, USA (pneumonia)'

def test_person_death_notes_if_none_should_be_excluded(ia):
    julia = ia.get_person('0000210', info=['biography'])
    assert 'death notes' not in julia

def test_person_birth_name_should_be_normalized(ia):
    julia = ia.get_person('0000210', info=['biography'])
    assert julia.get('birth name') == 'Julia Fiona Roberts'

def test_person_nicknames_if_single_should_be_a_list_of_names(ia):
    julia = ia.get_person('0000210', info=['biography'])
    assert julia.get('nick names') == ['Jules']

def test_person_nicknames_if_multiple_should_be_a_list_of_names(ia):
    keanu = ia.get_person('0000206', info=['biography'])
    assert keanu.get('nick names') == ['The Wall', 'The One']

def test_person_height_should_be_in_inches_and_meters(ia):
    julia = ia.get_person('0000210', info=['biography'])
    assert julia.get('height') == '5\' 8" (1.73 m)'

def test_person_height_if_none_should_be_excluded(ia):
    melies = ia.get_person('0617588', info=['biography'])
    assert 'height' not in melies

@mark.skip("FIXME: biography page change: from 'Spouses' it's now 'Family > Spouse")
def test_person_spouse_should_be_a_list(ia):
    julia = ia.get_person('0000210', info=['biography'])
    assert len(julia.get('spouse', [])) == 2

def test_person_trade_mark_should_be_a_list(ia):
    julia = ia.get_person('0000210', info=['biography'])
    assert len(julia.get('trade mark', [])) == 3

def test_person_trivia_should_be_a_list(ia):
    julia = ia.get_person('0000210', info=['biography'])
    assert len(julia.get('trivia', [])) > 90

def test_person_quotes_should_be_a_list(ia):
    julia = ia.get_person('0000210', info=['biography'])
    assert len(julia.get('quotes', [])) > 30

def test_person_salary_history_should_be_a_list(ia):
    julia = ia.get_person('0000210', info=['biography'])
    assert len(julia.get('salary history', [])) > 25
| 4,551 | Python | .py | 77 | 54.844156 | 84 | 0.690535 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_bottom_chart_should_contain_100_entries(ia):
    """The bottom chart should hold exactly 100 titles."""
    assert len(ia.get_bottom100_movies()) == 100

def test_bottom_chart_entries_should_have_rank(ia):
    """Ranks should run 1..100 in list order."""
    for position, entry in enumerate(ia.get_bottom100_movies(), start=1):
        assert entry['bottom 100 rank'] == position

def test_bottom_chart_entries_should_have_movie_id(ia):
    """Every entry should carry an all-digit movieID."""
    for entry in ia.get_bottom100_movies():
        assert entry.movieID.isdigit()

def test_bottom_chart_entries_should_have_title(ia):
    for entry in ia.get_bottom100_movies():
        assert 'title' in entry

def test_bottom_chart_entries_should_be_movies(ia):
    for entry in ia.get_bottom100_movies():
        assert entry['kind'] == 'movie'

def test_bottom_chart_entries_should_have_year(ia):
    for entry in ia.get_bottom100_movies():
        assert isinstance(entry['year'], int)

def test_bottom_chart_entries_should_have_low_ratings(ia):
    for entry in ia.get_bottom100_movies():
        assert entry['rating'] < 5.0

def test_bottom_chart_entries_should_have_minimal_number_of_votes(ia):
    # 1500 is the vote threshold stated by IMDb for this chart.
    for entry in ia.get_bottom100_movies():
        assert entry['votes'] >= 1500
| 1,303 | Python | .py | 31 | 36.709677 | 70 | 0.700318 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,146 | test_http_movie_season_episodes.py | cinemagoer_cinemagoer/tests/test_http_movie_season_episodes.py | from pytest import mark
def test_series_episodes_should_be_a_map_of_seasons_and_episodes(ia):
    """House M.D. should expose seasons 1-8 as keys of its episode map."""
    house = ia.get_movie('0412142', info=['episodes'])
    assert sorted(house.get('episodes')) == list(range(1, 9))

def test_series_episodes_with_unknown_season_should_have_placeholder_at_end(ia):
    """Doctor Who's episode map should end with an 'Unknown' season key."""
    who = ia.get_movie('0436992', info=['episodes'])
    assert list(who.get("episodes").keys()) == list(range(1, 16)) + ["Unknown"]

@mark.skip('episodes is {} instead of None')
def test_series_episodes_if_none_should_be_excluded(ia):
    """A single-episode record should not itself carry an 'episodes' key."""
    blink = ia.get_movie('1000252', info=['episodes'])
    assert 'episodes' not in blink

def test_series_episodes_should_contain_rating(ia):
    """Band of Brothers S01E01 should be rated between 8.3 and 9.0."""
    band = ia.get_movie('0185906', info=['episodes'])
    first_episode = band.get('episodes')[1][1]
    assert 8.3 <= first_episode['rating'] <= 9.0

@mark.skip('exact vote count only in JSON')
def test_series_episodes_should_contain_votes(ia):
    band = ia.get_movie('0185906', info=['episodes'])
    first_episode = band.get('episodes')[1][1]
    assert first_episode['votes'] > 4400

def test_update_series_seasons_single_int(ia):
    """A bare int selects exactly that season."""
    curb = ia.get_movie('0264235')  # Curb Your Enthusiasm
    ia.update_series_seasons(curb, season_nums=10)
    assert 'episodes' in curb
    assert list(curb['episodes']) == [10]

def test_update_series_seasons_range(ia):
    curb = ia.get_movie('0264235')  # Curb Your Enthusiasm
    ia.update_series_seasons(curb, season_nums=range(3, 10))
    assert 'episodes' in curb
    assert sorted(curb['episodes']) == list(range(3, 10))

def test_update_series_seasons_list(ia):
    curb = ia.get_movie('0264235')  # Curb Your Enthusiasm
    ia.update_series_seasons(curb, season_nums=[1, 3, 5])
    assert 'episodes' in curb
    assert sorted(curb['episodes']) == [1, 3, 5]

def test_update_series_seasons_tuple(ia):
    curb = ia.get_movie('0264235')  # Curb Your Enthusiasm
    ia.update_series_seasons(curb, season_nums=(1, 3, 5))
    assert 'episodes' in curb
    assert sorted(curb['episodes']) == [1, 3, 5]

def test_update_series_seasons_set(ia):
    curb = ia.get_movie('0264235')  # Curb Your Enthusiasm
    ia.update_series_seasons(curb, season_nums={1, 3, 5})
    assert 'episodes' in curb
    assert sorted(curb['episodes']) == [1, 3, 5]

def test_update_series_seasons_iterable(ia):
    """Any iterable of season numbers is accepted (here: odd numbers < 6)."""
    curb = ia.get_movie('0264235')  # Curb Your Enthusiasm
    ia.update_series_seasons(curb, season_nums=(i for i in range(6) if i % 2))
    assert 'episodes' in curb
    assert sorted(curb['episodes']) == [1, 3, 5]

def test_update_series_seasons_less_season_available(ia):
    """Requesting more seasons than exist should yield only the real ones."""
    band = ia.get_movie('0185906')  # Band of Brothers
    # Band of Brothers has a single season; ask for nine anyway.
    ia.update_series_seasons(band, season_nums=range(1, 10))
    assert 'episodes' in band
    assert list(band['episodes']) == [1]
| 3,172 | Python | .py | 58 | 50.310345 | 82 | 0.648867 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_references_to_titles_should_be_a_list(ia):
    """Julia Roberts' biography should reference roughly 70-100 titles."""
    julia = ia.get_person('0000210', info=['biography'])
    assert 70 < len(julia.get_titlesRefs()) < 100

def test_references_to_names_should_be_a_list(ia):
    """Julia Roberts' biography should reference roughly 100-150 names."""
    julia = ia.get_person('0000210', info=['biography'])
    assert 100 < len(julia.get_namesRefs()) < 150
| 416 | Python | .py | 8 | 47.75 | 75 | 0.692118 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_get_keyword_should_list_correct_number_of_movies(ia):
    """'colander' should match only a handful (5-10) of titles."""
    found = ia.get_keyword('colander')
    assert 5 <= len(found) <= 10

def test_get_keyword_if_too_many_should_list_upper_limit_of_movies(ia):
    """Popular keywords are capped at 50 results."""
    found = ia.get_keyword('computer')
    assert len(found) == 50

def test_get_keyword_entries_should_include_movie_id(ia):
    found = ia.get_keyword('colander')
    assert found[0].movieID == '0382932'

def test_get_keyword_entries_should_include_movie_title(ia):
    found = ia.get_keyword('colander')
    assert found[0]['title'] == 'Ratatouille'

def test_get_keyword_entries_should_include_movie_kind(ia):
    found = ia.get_keyword('colander')
    assert found[0]['kind'] == 'movie'

def test_get_keyword_entries_should_include_movie_year(ia):
    found = ia.get_keyword('colander')
    assert found[0]['year'] == 2007
| 853 | Python | .py | 18 | 43.166667 | 71 | 0.711515 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_movie_goofs(ia):
    """The Matrix should list well over a hundred goofs."""
    matrix = ia.get_movie('0133093', info=['goofs'])
    assert len(matrix.get('goofs', [])) > 120
| 141 | Python | .py | 4 | 31.25 | 51 | 0.620438 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,150 | test_http_search_movie.py | cinemagoer_cinemagoer/tests/test_http_search_movie.py | # -*- coding: utf-8 -*-
from pytest import mark
def test_search_movie_if_single_should_list_one_movie(ia):
    """A query with a unique match should return one fully described entry."""
    results = ia.search_movie('od instituta do proizvodnje')
    assert len(results) == 1
    only_hit = results[0]
    assert only_hit.movieID == '0483758'
    assert only_hit['kind'] == 'short'
    assert only_hit['title'] == 'Od instituta do proizvodnje'
    assert only_hit['year'] == 1971

def test_search_movie_should_list_default_number_of_movies(ia):
    """Without an explicit limit, 20 results are returned."""
    results = ia.search_movie('movie')
    assert len(results) == 20

@mark.skip(reason="number of results limit is not honored anymore")
def test_search_movie_limited_should_list_requested_number_of_movies(ia):
    results = ia.search_movie('ace in the hole', results=98)
    assert len(results) == 98

def test_search_movie_unlimited_should_list_correct_number_of_movies(ia):
    """A huge limit simply returns every available match."""
    results = ia.search_movie('ace in the hole', results=500)
    assert len(results) == 25

def test_search_movie_if_too_many_result_should_list_upper_limit_of_movies(ia):
    """Plain title searches are capped at 25 results."""
    results = ia.search_movie('matrix', results=500)
    assert len(results) == 25

def test_search_movie_if_none_should_be_empty(ia):
    """A query matching nothing yields an empty list."""
    results = ia.search_movie('ᚣ', results=500)
    assert results == []

def test_search_movie_entries_should_include_movie_id(ia):
    results = ia.search_movie('matrix')
    assert results[0].movieID == '0133093'

def test_search_movie_entries_should_include_movie_title(ia):
    results = ia.search_movie('matrix')
    assert results[0]['title'] == 'The Matrix'

def test_search_movie_entries_should_include_cover_url_if_available(ia):
    results = ia.search_movie('matrix')
    assert 'cover url' in results[0]

def test_search_movie_entries_should_include_movie_kind(ia):
    results = ia.search_movie('matrix')
    assert results[0]['kind'] == 'movie'

def test_search_movie_entries_should_include_movie_kind_if_other_than_movie(ia):
    """The 1993 Matrix TV series should be typed 'tv series'."""
    results = ia.search_movie('matrix')
    series_hits = [m for m in results if m.movieID == '0106062']
    assert len(series_hits) == 1
    assert series_hits[0]['kind'] == 'tv series'

def test_search_movie_entries_should_include_movie_year(ia):
    results = ia.search_movie('matrix')
    assert results[0]['year'] == 1999

@mark.skip(reason="index is no longer shown on search results")
def test_search_movie_entries_should_include_imdb_index(ia):
    results = ia.search_movie('blink')
    indexed = [m for m in results if m.movieID == '6544524']
    assert len(indexed) == 1
    assert indexed[0]['imdbIndex'] == 'XXIV'

def test_search_movie_entries_missing_imdb_index_should_be_excluded(ia):
    results = ia.search_movie('matrix')
    assert 'imdbIndex' not in results[0]

@mark.skip(reason="AKAs are no longer shown on search results")
def test_search_movie_entries_should_include_akas(ia):
    results = ia.search_movie('Una calibro 20 per lo specialista')
    with_aka = [m for m in results if m.movieID == '0072288']
    assert len(with_aka) == 1
    assert with_aka[0]['akas'] == ['Una calibro 20 per lo specialista']

def test_search_movie_entries_missing_akas_should_be_excluded(ia):
    results = ia.search_movie('matrix')
    assert 'akas' not in results[0]

@mark.skip(reason="episode title are no longer shown on search results")
def test_search_movie_episodes_should_include_season_and_number(ia):
    """'Swarley' is How I Met Your Mother S02E07."""
    results = ia.search_movie('swarley')
    swarley = [m for m in results if m.movieID == '0875360']
    assert len(swarley) == 1
    assert swarley[0]['season'] == 2
    assert swarley[0]['episode'] == 7
def test_search_movie_entries_tv_mini_series_should_have_correct_kind(ia):
    """The Capture (2019, movieID 8201186) should be typed 'tv mini series'."""
    movies = ia.search_movie('capture 2019')  # The Capture (2019)
    miniseries = [m for m in movies if m.movieID == '8201186']
    assert len(miniseries) == 1
    # BUG FIX: the kind comparison was a bare expression (no ``assert``), so a
    # wrong kind could never fail this test.
    assert miniseries[0]['kind'] == 'tv mini series'
| 3,928 | Python | .py | 75 | 48.173333 | 81 | 0.70972 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,151 | test_http_search_movie_advanced.py | cinemagoer_cinemagoer/tests/test_http_search_movie_advanced.py | from pytest import mark
import sys
def test_search_results_should_include_correct_number_of_works_by_default(ia):
    """With no explicit limit, 20 results should come back."""
    found = ia.search_movie_advanced('matrix')
    assert len(found) == 20

def test_search_results_should_include_correct_number_of_works(ia):
    """A large limit should yield well over 220 matches."""
    found = ia.search_movie_advanced('matrix', results=250)
    assert len(found) > 220

def test_search_results_should_include_correct_number_of_works_if_asked_less_than_available(ia):
    """The requested limit should be honored when below availability."""
    found = ia.search_movie_advanced('matrix', results=25)
    assert len(found) == 25

def test_found_movies_should_have_movie_ids(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    assert all(isinstance(m.movieID, str) for m in found)

def test_found_movies_should_have_titles(ia):
    """Every result should carry a string title (unicode on Python 2)."""
    found = ia.search_movie_advanced('matrix', results=50)
    title_types = (str, unicode) if sys.version_info < (3,) else str
    assert all(isinstance(m['title'], title_types) for m in found)

def test_selected_movie_should_have_correct_kind(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert matrix['kind'] == 'movie'

def test_selected_video_should_have_correct_kind(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    video = [m for m in found if m.movieID == '0295432'][0]
    assert video['kind'] == 'video movie'

def test_selected_tv_movie_should_have_correct_kind(ia):
    found = ia.search_movie_advanced('Sharknado', results=250)
    sharknado = [m for m in found if m.movieID == '2724064'][0]
    assert sharknado['kind'] == 'tv movie'

def test_selected_tv_short_should_have_correct_kind(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    tv_short = [m for m in found if m.movieID == '0274085'][0]
    assert tv_short['kind'] == 'tv short movie'

@mark.skip('apparently we can no longer tell a series from a movie, in search results')
def test_selected_tv_series_should_have_correct_kind(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    series = [m for m in found if m.movieID == '0106062'][0]
    assert series['kind'] == 'tv series'

def test_selected_ended_tv_series_should_have_correct_kind(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    ended_series = [m for m in found if m.movieID == '0364888'][0]
    assert ended_series['kind'] == 'tv series'

def test_selected_tv_episode_should_have_correct_kind(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    episode = [m for m in found if m.movieID == '0594932'][0]
    assert episode['kind'] == 'episode'

def test_selected_tv_special_should_have_correct_kind(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    special = [m for m in found if m.movieID == '1025014'][0]
    assert special['kind'] == 'tv special'

def test_selected_video_game_should_have_correct_kind(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    game = [m for m in found if m.movieID == '0277828'][0]
    assert game['kind'] == 'video game'
def test_selected_movie_should_have_correct_year(ia):
    """The Matrix should report 1999 as its year."""
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert matrix['year'] == 1999

def test_selected_ended_tv_series_should_have_correct_series_years(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    ended_series = [m for m in found if m.movieID == '0364888'][0]
    assert ended_series['series years'] == '2003-2004'

@mark.skip('skipped until we found another announced title')
def test_selected_unreleased_movie_should_have_correct_state(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    unreleased = [m for m in found if m.movieID == '9839912'][0]
    assert unreleased['state'] == 'Announced'

def test_selected_movie_should_have_correct_certificate(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert matrix['certificates'] == ['R']

def test_selected_movie_should_have_correct_runtime(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert matrix['runtimes'] == ['136']

def test_selected_movie_should_have_correct_genres(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert matrix['genres'] == ['Action', 'Sci-Fi']

def test_selected_movie_should_have_correct_rating(ia):
    """The rating should sit within 0.5 of 8.7."""
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert abs(matrix['rating'] - 8.7) < 0.5

def test_selected_movie_should_have_correct_number_of_votes(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert matrix['votes'] >= 1513744

def test_selected_movie_should_have_correct_metascore(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert abs(matrix['metascore'] - 73) < 5

def test_selected_movie_should_have_correct_gross(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert matrix['gross'] >= 171479930

def test_selected_movie_should_have_correct_plot(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert matrix['plot'].startswith('When a beautiful stranger')

def test_selected_movie_should_have_correct_director_imdb_ids(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    work = [m for m in found if m.movieID == '10838180'][0]
    assert [p.personID for p in work['directors']] == ['0905154']

def test_selected_work_should_have_correct_director_name(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    work = [m for m in found if m.movieID == '10838180'][0]
    assert [p['name'] for p in work['directors']] == ['Lana Wachowski']

def test_selected_work_should_have_correct_director_imdb_ids_if_multiple(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert [p.personID for p in matrix['directors']] == ['0905154', '0905152']

def test_selected_work_should_have_correct_director_names_if_multiple(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert [p['name'] for p in matrix['directors']] == ['Lana Wachowski', 'Lilly Wachowski']

def test_selected_work_should_have_correct_cast_imdb_id(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    work = [m for m in found if m.movieID == '11749868'][0]
    assert [p.personID for p in work['cast']] == ['4216365']

def test_selected_work_should_have_correct_cast_name(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    work = [m for m in found if m.movieID == '11749868'][0]
    assert [p['name'] for p in work['cast']] == ['Chris Harvey']

def test_selected_work_should_have_correct_cast_imdb_ids_if_multiple(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert [p.personID for p in matrix['cast']] == ['0000206', '0000401', '0005251', '0915989']

def test_selected_work_should_have_correct_cast_names_if_multiple(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    expected_cast = ['Keanu Reeves', 'Laurence Fishburne', 'Carrie-Anne Moss', 'Hugo Weaving']
    assert [p['name'] for p in matrix['cast']] == expected_cast
def test_selected_tv_episode_should_have_correct_title(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert making_of['title'] == "The Making of 'The Matrix'"

def test_selected_tv_episode_should_have_correct_year(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert making_of['year'] == 1999

def test_selected_tv_episode_should_have_correct_imdb_index(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    episode = [m for m in found if m.movieID == '1072112'][0]
    assert episode['imdbIndex'] == 'I'

def test_selected_tv_episode_should_have_correct_certificate(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    episode = [m for m in found if m.movieID == '1072112'][0]
    assert episode['certificates'] == ['TV-PG']

def test_selected_tv_episode_should_have_correct_runtime(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert making_of['runtimes'] == ['26']

def test_selected_tv_episode_should_have_correct_genres(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert making_of['genres'] == ['Documentary', 'Short']

def test_selected_tv_episode_should_have_correct_rating(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert abs(making_of['rating'] - 7.6) < 0.5

def test_selected_tv_episode_should_have_correct_number_of_votes(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert making_of['votes'] >= 14

def test_selected_tv_episode_should_have_correct_plot(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    review = [m for m in found if m.movieID == '10177094'][0]
    assert review['plot'].startswith('Roberto Leoni reviews The Matrix (1999)')

def test_selected_tv_episode_should_have_correct_director_imdb_ids(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert [p.personID for p in making_of['directors']] == ['0649609']

def test_selected_tv_episode_should_have_correct_director_names(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert [p['name'] for p in making_of['directors']] == ['Josh Oreck']

def test_selected_tv_episode_should_have_correct_cast_imdb_ids(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert [p.personID for p in making_of['cast']] == ['0000401', '0300665', '0303293', '0005251']

def test_selected_tv_episode_should_have_correct_cast_names(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    expected_cast = ['Laurence Fishburne', 'John Gaeta', "Robert 'Rock' Galotti", 'Carrie-Anne Moss']
    assert [p['name'] for p in making_of['cast']] == expected_cast

def test_selected_tv_episode_should_have_series(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert making_of['episode of']['kind'] == 'tv series'

def test_selected_tv_episode_should_have_correct_series_imdb_id(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert making_of['episode of'].movieID == '0318220'

def test_selected_tv_episode_should_have_correct_series_title(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    making_of = [m for m in found if m.movieID == '0594933'][0]
    assert making_of['episode of']['title'] == 'HBO First Look'

def test_selected_tv_episode_should_have_correct_series_year(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    episode = [m for m in found if m.movieID == '1072112'][0]
    assert episode['episode of']['year'] == 2001

def test_selected_tv_episode_should_have_correct_series_series_years(ia):
    found = ia.search_movie_advanced('matrix', results=250)
    episode = [m for m in found if m.movieID == '1072112'][0]
    assert episode['episode of']['series years'] == '2001-2012'

def test_selected_movie_should_have_cover_url(ia):
    found = ia.search_movie_advanced('matrix', results=50)
    matrix = [m for m in found if m.movieID == '0133093'][0]
    assert matrix['cover url'].endswith('.jpg')
def test_search_results_should_include_adult_titles_if_requested(ia):
movies = ia.search_movie_advanced('castello', adult=True, results=250)
movies_no_adult = ia.search_movie_advanced('castello', adult=False, results=250)
assert len(movies) > len(movies_no_adult)
def test_selected_adult_movie_should_have_correct_title(ia):
movies = ia.search_movie_advanced('matrix', adult=True, results=250)
selected = [m for m in movies if m.movieID == '0273126'][0]
assert selected['title'] == 'Blue Matrix'
def test_selected_adult_movie_should_have_adult_in_genres(ia):
movies = ia.search_movie_advanced('matrix', adult=True, results=250)
selected = [m for m in movies if m.movieID == '0273126'][0]
assert 'Adult' in selected['genres']
@mark.skip('IMDb sorting works in misterious ways')
def test_search_results_should_be_sortable_in_alphabetical_order_default_ascending(ia):
movies = ia.search_movie_advanced(title='matrix', sort='alpha')
titles = [m['title'] for m in movies]
# assert all(a <= b for a, b in zip(titles, titles[1:])) # fails due to IMDb
assert sum(1 if a > b else 0 for a, b in zip(titles, titles[1:])) <= 1
@mark.skip('IMDb sorting works in misterious ways')
def test_search_results_should_be_sortable_in_alphabetical_order_ascending(ia):
movies = ia.search_movie_advanced(title='matrix', sort='alpha', sort_dir='asc')
titles = [m['title'] for m in movies]
# assert all(a <= b for a, b in zip(titles, titles[1:])) # fails due to IMDb
assert sum(1 if a > b else 0 for a, b in zip(titles, titles[1:])) <= 1
@mark.skip('IMDb sorting works in misterious ways')
def test_search_results_should_be_sortable_in_alphabetical_order_descending(ia):
movies = ia.search_movie_advanced(title='matrix', sort='alpha', sort_dir='desc')
titles = [m['title'] for m in movies]
assert all(a >= b for a, b in zip(titles, titles[1:]))
def test_search_results_should_be_sortable_in_rating_order_default_descending(ia):
    """Rating sort defaults to descending order."""
    results = ia.search_movie_advanced(title='matrix', sort='user_rating')
    values = [entry.get('rating', 0) for entry in results]
    assert all(left >= right for left, right in zip(values, values[1:]))


def test_search_results_should_be_sortable_in_rating_order_ascending(ia):
    """Rating sort honours an explicit ascending direction."""
    results = ia.search_movie_advanced(title='matrix', sort='user_rating', sort_dir='asc')
    values = [entry.get('rating', float('inf')) for entry in results]
    assert all(left <= right for left, right in zip(values, values[1:]))


def test_search_results_should_be_sortable_in_rating_order_descending(ia):
    """Rating sort honours an explicit descending direction."""
    results = ia.search_movie_advanced(title='matrix', sort='user_rating', sort_dir='desc')
    values = [entry.get('rating', 0) for entry in results]
    assert all(left >= right for left, right in zip(values, values[1:]))
def test_search_results_should_be_sortable_in_votes_order_default_ascending(ia):
    """Vote-count sort defaults to ascending order."""
    results = ia.search_movie_advanced(title='matrix', sort='num_votes')
    counts = [entry.get('votes', float('inf')) for entry in results]
    assert all(left <= right for left, right in zip(counts, counts[1:]))


def test_search_results_should_be_sortable_in_votes_order_ascending(ia):
    """Vote-count sort honours an explicit ascending direction."""
    results = ia.search_movie_advanced(title='matrix', sort='num_votes', sort_dir='asc')
    counts = [entry.get('votes', float('inf')) for entry in results]
    assert all(left <= right for left, right in zip(counts, counts[1:]))


def test_search_results_should_be_sortable_in_votes_order_descending(ia):
    """Vote-count sort honours an explicit descending direction."""
    results = ia.search_movie_advanced(title='matrix', sort='num_votes', sort_dir='desc')
    counts = [entry.get('votes', 0) for entry in results]
    assert all(left >= right for left, right in zip(counts, counts[1:]))
def test_search_results_should_be_sortable_in_gross_order_default_ascending(ia):
    """US box-office sort defaults to ascending order."""
    results = ia.search_movie_advanced(title='matrix', sort='boxoffice_gross_us')
    earnings = [entry.get('gross', float('inf')) for entry in results]
    assert all(left <= right for left, right in zip(earnings, earnings[1:]))


def test_search_results_should_be_sortable_in_gross_order_ascending(ia):
    """US box-office sort honours an explicit ascending direction."""
    results = ia.search_movie_advanced(title='matrix', sort='boxoffice_gross_us', sort_dir='asc')
    earnings = [entry.get('gross', float('inf')) for entry in results]
    assert all(left <= right for left, right in zip(earnings, earnings[1:]))


def test_search_results_should_be_sortable_in_gross_order_descending(ia):
    """US box-office sort honours an explicit descending direction."""
    results = ia.search_movie_advanced(title='matrix', sort='boxoffice_gross_us', sort_dir='desc')
    earnings = [entry.get('gross', 0) for entry in results]
    assert all(left >= right for left, right in zip(earnings, earnings[1:]))
def test_search_results_should_be_sortable_in_runtime_order_default_ascending(ia):
    """Runtime sort should default to ascending order.

    Movie objects expose the running time under the 'runtimes' key (a list of
    minute strings), not 'runtime' — the sibling asc/desc tests read
    'runtimes'.  Reading the wrong key made every element fall back to
    infinity, so the original assertion was vacuously true.  float() is used
    instead of int() so the inf fallback does not raise OverflowError.
    """
    movies = ia.search_movie_advanced(title='matrix', sort='runtime')
    runtimes = [float(m.get('runtimes', [float('inf')])[0]) for m in movies]
    assert all(a <= b for a, b in zip(runtimes, runtimes[1:]))
def test_search_results_should_be_sortable_in_runtime_order_ascending(ia):
    """Runtime sort honours an explicit ascending direction."""
    results = ia.search_movie_advanced(title='matrix', sort='runtime', sort_dir='asc')
    minutes = [int(entry.get('runtimes', [float('inf')])[0]) for entry in results]
    assert all(left <= right for left, right in zip(minutes, minutes[1:]))


def test_search_results_should_be_sortable_in_runtime_order_descending(ia):
    """Runtime sort honours an explicit descending direction."""
    results = ia.search_movie_advanced(title='matrix', sort='runtime', sort_dir='desc')
    minutes = [int(entry.get('runtimes', [float('inf')])[0]) for entry in results]
    assert all(left >= right for left, right in zip(minutes, minutes[1:]))
def test_search_results_should_be_sortable_in_year_order_default_ascending(ia):
    """Year sort defaults to ascending order."""
    results = ia.search_movie_advanced(title='matrix', sort='year')
    years = [entry.get('year', float('inf')) for entry in results]
    assert all(left <= right for left, right in zip(years, years[1:]))


def test_search_results_should_be_sortable_in_year_order_ascending(ia):
    """Year sort honours an explicit ascending direction."""
    results = ia.search_movie_advanced(title='matrix', sort='year', sort_dir='asc')
    years = [entry.get('year', float('inf')) for entry in results]
    assert all(left <= right for left, right in zip(years, years[1:]))
# def test_search_results_should_be_sortable_in_year_order_descending(ia):
# movies = ia.search_movie_advanced(title='matrix', sort='year', sort_dir='desc')
# years = [m.get('year', float('inf')) for m in movies]
# assert all(a >= b for a, b in zip(years, years[1:]))
| 18,937 | Python | .py | 306 | 57.385621 | 106 | 0.695899 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,152 | test_http_movie_taglines.py | cinemagoer_cinemagoer/tests/test_http_movie_taglines.py | def test_movie_taglines_if_single_should_be_a_list_of_phrases(ia):
movie = ia.get_movie('0109151', info=['taglines']) # Matrix (V)
taglines = movie.get('taglines', [])
assert taglines == ["If humans don't want me... why'd they create me?"]
def test_movie_taglines_if_multiple_should_be_a_list_of_phrases(ia):
    """A movie with several taglines exposes all of them as a list."""
    manos = ia.get_movie('0060666', info=['taglines'])  # Manos
    phrases = manos.get('taglines', [])
    assert len(phrases) == 3
    assert phrases[0] == "It's Shocking! It's Beyond Your Imagination!"


def test_movie_taglines_if_none_should_be_excluded(ia):
    """A movie without taglines must not carry the key at all."""
    movie = ia.get_movie('1863157', info=['taglines'])  # Ates Parcasi
    assert 'taglines' not in movie
| 696 | Python | .py | 12 | 53.666667 | 75 | 0.680882 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,153 | test_http_movie_tech.py | cinemagoer_cinemagoer/tests/test_http_movie_tech.py | def test_movie_tech_sections(ia):
movie = ia.get_movie('0133093', info=['technical'])
tech = movie.get('tech', [])
assert set(tech.keys()) == set(['sound mix', 'color', 'aspect ratio', 'camera',
'laboratory', 'cinematographic process', 'printed film format',
'negative format', 'runtime', 'film length'])
| 389 | Python | .py | 6 | 49.833333 | 99 | 0.54047 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,154 | test_http_chart_top.py | cinemagoer_cinemagoer/tests/test_http_chart_top.py | def test_top_chart_should_contain_250_entries(ia):
chart = ia.get_top250_movies()
assert len(chart) == 250
def test_top_chart_entries_should_have_rank(ia):
    """Each entry carries its 1-based position in the chart."""
    chart = ia.get_top250_movies()
    for position, entry in enumerate(chart, start=1):
        assert entry['top 250 rank'] == position


def test_top_chart_entries_should_have_movie_id(ia):
    """Every chart entry has a purely numeric movieID."""
    chart = ia.get_top250_movies()
    assert all(entry.movieID.isdigit() for entry in chart)


def test_top_chart_entries_should_have_title(ia):
    """Every chart entry carries a title."""
    chart = ia.get_top250_movies()
    assert all('title' in entry for entry in chart)


def test_top_chart_entries_should_be_movies(ia):
    """The top-250 chart lists movies only."""
    chart = ia.get_top250_movies()
    assert all(entry['kind'] == 'movie' for entry in chart)


def test_top_chart_entries_should_have_year(ia):
    """Every chart entry has an integer production year."""
    chart = ia.get_top250_movies()
    assert all(isinstance(entry['year'], int) for entry in chart)


def test_top_chart_entries_should_have_high_ratings(ia):
    """All chart entries are highly rated."""
    chart = ia.get_top250_movies()
    assert all(entry['rating'] > 7.5 for entry in chart)


def test_top_chart_entries_should_have_minimal_number_of_votes(ia):
    """All chart entries meet IMDb's minimum vote count."""
    chart = ia.get_top250_movies()
    assert all(entry['votes'] >= 25000 for entry in chart)  # limit stated by IMDb
| 1,254 | Python | .py | 31 | 35.129032 | 67 | 0.688172 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,155 | test_http_movie_quotes.py | cinemagoer_cinemagoer/tests/test_http_movie_quotes.py | def test_movie_quotes(ia):
movie = ia.get_movie('0133093', info=['quotes'])
quotes = movie.get('quotes', [])
assert len(quotes) > 100
| 146 | Python | .py | 4 | 32.5 | 52 | 0.633803 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,156 | test_http_chart_popular_movies.py | cinemagoer_cinemagoer/tests/test_http_chart_popular_movies.py | def test_popular_movies_chart_should_contain_100_entries(ia):
chart = ia.get_popular100_movies()
assert len(chart) == 100
def test_popular_movies_chart_entries_should_have_rank(ia):
    """Each entry carries its 1-based position in the chart."""
    chart = ia.get_popular100_movies()
    for position, entry in enumerate(chart, start=1):
        assert entry['popular movies 100 rank'] == position


def test_popular_movies_chart_entries_should_have_movie_id(ia):
    """Every chart entry has a purely numeric movieID."""
    chart = ia.get_popular100_movies()
    assert all(entry.movieID.isdigit() for entry in chart)


def test_popular_movies_chart_entries_should_have_title(ia):
    """Every chart entry carries a title."""
    chart = ia.get_popular100_movies()
    assert all('title' in entry for entry in chart)


def test_popular_movies_chart_entries_should_be_movies(ia):
    """The popular-100 chart lists movies only."""
    chart = ia.get_popular100_movies()
    assert all(entry['kind'] == 'movie' for entry in chart)
| 831 | Python | .py | 19 | 38.526316 | 63 | 0.712687 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,157 | test_http_movie_full_credit.py | cinemagoer_cinemagoer/tests/test_http_movie_full_credit.py | def test_movie_full_credits(ia):
movie = ia.get_movie('0133093', info=['full credits']) # Matrix
assert 'cast' in movie
lcast = len(movie['cast'])
assert lcast > 38 and lcast < 42
def test_movie_full_credits_for_tv_show(ia):
    """Seinfeld's full credits list a cast in the expected size range."""
    show = ia.get_movie('0098904', info=['full credits'])  # Seinfeld
    assert 'cast' in show
    cast_size = len(show['cast'])
    assert 1300 < cast_size < 1350


def test_movie_full_credits_contains_headshot(ia):
    """The first cast member comes with a real (non-placeholder) headshot."""
    matrix = ia.get_movie('0133093', info=['main', 'full credits'])  # Matrix
    top_billed = matrix['cast'][0]  # Keanu Reeves
    assert 'headshot' in top_billed
    assert 'nopicture' not in top_billed['headshot']  # i.e. not the default image
| 689 | Python | .py | 13 | 48.615385 | 91 | 0.668155 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,158 | test_http_chart_boxoffice.py | cinemagoer_cinemagoer/tests/test_http_chart_boxoffice.py | def test_boxoffice_movies_must_return_10_results(ia):
chart = ia.get_boxoffice_movies()
assert len(chart) == 10
def test_top50_horrors_must_return_50_results(ia):
    """The horror top-50 chart has exactly 50 entries."""
    horror_chart = ia.get_top50_movies_by_genres('horror')
    assert len(horror_chart) == 50


def test_top50_action_thriller_tv_must_return_50_results(ia):
    """The combined action+thriller TV top-50 chart has exactly 50 entries."""
    tv_chart = ia.get_top50_tv_by_genres(['action', 'thriller'])
    assert len(tv_chart) == 50
| 403 | Python | .py | 9 | 40.666667 | 61 | 0.705128 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,159 | test_http_movie_connections.py | cinemagoer_cinemagoer/tests/test_http_movie_connections.py | def test_movie_followed_by_connections(ia):
movie = ia.get_movie('0133093', info=['connections'])
quotes = movie.get('connections', {}).get('followed by', [])
assert len(quotes) >= 5
def test_movie_spinoff_connections(ia):
    """The Matrix lists at least four spin-off connections."""
    matrix = ia.get_movie('0133093', info=['connections'])
    spinoffs = matrix.get('connections', {}).get('spin-off', [])
    assert len(spinoffs) >= 4
| 385 | Python | .py | 8 | 43.875 | 64 | 0.656 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,160 | test_http_chart_top_tv.py | cinemagoer_cinemagoer/tests/test_http_chart_top_tv.py | def test_top_tv_should_contain_100_entries(ia):
chart = ia.get_top250_tv()
assert len(chart) == 250
def test_top_tv_entries_should_have_rank(ia):
    """Each entry carries its 1-based position in the chart."""
    chart = ia.get_top250_tv()
    for position, entry in enumerate(chart, start=1):
        assert entry['top tv 250 rank'] == position


def test_top_tv_entries_should_have_movie_id(ia):
    """Every chart entry has a purely numeric movieID."""
    chart = ia.get_top250_tv()
    assert all(entry.movieID.isdigit() for entry in chart)


def test_top_tv_entries_should_have_title(ia):
    """Every chart entry carries a title."""
    chart = ia.get_top250_tv()
    assert all('title' in entry for entry in chart)


def test_top_tv_entries_should_be_movies(ia):
    """Chart entries report the 'movie' kind."""
    chart = ia.get_top250_tv()
    assert all(entry['kind'] == 'movie' for entry in chart)


def test_top_tv_entries_should_have_year(ia):
    """Every chart entry has an integer year."""
    chart = ia.get_top250_tv()
    assert all(isinstance(entry['year'], int) for entry in chart)


def test_top_tv_entries_should_have_high_ratings(ia):
    """All chart entries are highly rated."""
    chart = ia.get_top250_tv()
    assert all(entry['rating'] > 8.0 for entry in chart)


def test_top_tv_entries_should_have_minimal_number_of_votes(ia):
    """All chart entries meet IMDb's minimum vote count."""
    chart = ia.get_top250_tv()
    assert all(entry['votes'] >= 1500 for entry in chart)  # limit stated by IMDb
| 1,200 | Python | .py | 31 | 33.387097 | 64 | 0.672727 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,161 | test_http_movie_trivia.py | cinemagoer_cinemagoer/tests/test_http_movie_trivia.py | def test_movie_trivia(ia):
movie = ia.get_movie('0133093', info=['trivia'])
trivia = movie.get('trivia', [])
assert len(trivia) >= 5
| 145 | Python | .py | 4 | 32.25 | 52 | 0.624113 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,162 | test_http_movie_awards.py | cinemagoer_cinemagoer/tests/test_http_movie_awards.py | def test_movie_awards(ia):
movie = ia.get_movie('0133093', info=['awards'])
awards = movie.get('awards', [])
assert len(awards) > 80
| 145 | Python | .py | 4 | 32.25 | 52 | 0.631206 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,163 | test_http_movie_titles.py | cinemagoer_cinemagoer/tests/test_http_movie_titles.py | from pytest import mark
@mark.skip('obviously this depends on your country of origin')
def test_movie_localized_title(ia):
    """The localized title depends on the client's country."""
    tangerines = ia.get_movie('2991224', info=['main'])
    assert tangerines.get('localized title', '') in ('Tangerines - Mandarini', 'Tangerines')


def test_movie_original_title(ia):
    """The original (native-language) title is reported separately."""
    tangerines = ia.get_movie('2991224', info=['main'])
    assert tangerines.get('original title', '') == 'Mandariinid'


def test_movie_title(ia):
    """The plain title is the English-language one."""
    tangerines = ia.get_movie('2991224', info=['main'])
    assert tangerines.get('title', '') == 'Tangerines'
| 595 | Python | .py | 14 | 38.5 | 62 | 0.676522 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,164 | test_http_movie_parental_guide.py | cinemagoer_cinemagoer/tests/test_http_movie_parental_guide.py | def test_movie_parental_guide_contains_mpaa_rating(ia):
movie = ia.get_movie('0133093', info=['parents guide']) # Matrix
assert movie.get('mpaa') == "Rated R for sci-fi violence and brief language"
def test_movie_certificates_from_parental_guide(ia):
    """Certificates parsed from the parents guide include Argentina's."""
    matrix = ia.get_movie('0133093', info=['parents guide'])  # Matrix
    expected = {'country_code': 'AR', 'country': 'Argentina', 'certificate': '13',
                'note': '', 'full': 'Argentina:13'}
    assert expected in matrix.get('certificates', [])


def test_movie_advisories(ia):
    """Spoiler-level violence advisories contain a known entry."""
    matrix = ia.get_movie('0133093', info=['parents guide'])  # Matrix
    advisories = matrix.get('advisory spoiler violence')
    assert any('Mouse gets shot' in item for item in advisories)


def test_movie_advisory_votes(ia):
    """Advisory vote tallies and severity statuses are exposed."""
    matrix = ia.get_movie('0133093', info=['parents guide'])  # Matrix
    votes = matrix.get('advisory votes')
    assert votes['nudity']['votes']['Mild'] > 300
    assert votes['nudity']['status'] == 'Mild'
    assert votes['profanity']['status'] == 'Moderate'
| 1,002 | Python | .py | 16 | 58.25 | 116 | 0.663265 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,165 | test_http_person_awards.py | cinemagoer_cinemagoer/tests/test_http_person_awards.py | def test_person_awards(ia):
person = ia.get_person('0000206', info=['awards'])
awards = person.get('awards', [])
assert len(awards) > 20
| 149 | Python | .py | 4 | 33.25 | 54 | 0.641379 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,166 | test_http_search_person.py | cinemagoer_cinemagoer/tests/test_http_search_person.py | from pytest import mark
def test_search_person_should_list_default_number_of_people(ia):
    """A plain search returns the default page of 20 people."""
    results = ia.search_person('julia')
    assert len(results) == 20


def test_search_person_limited_should_list_requested_number_of_people(ia):
    """The results parameter caps the number of people returned."""
    results = ia.search_person('julia', results=11)
    assert len(results) == 11


def test_search_person_if_too_many_should_list_upper_limit_of_people(ia):
    """Requests beyond IMDb's page size are clamped to 25."""
    results = ia.search_person('john', results=500)
    assert len(results) == 25


def test_search_person_if_none_result_should_be_empty(ia):
    """A query with no matches yields an empty list."""
    results = ia.search_person('%e3%82%a2')
    assert results == []


def test_search_person_entries_should_include_person_id(ia):
    """The best match carries the expected personID."""
    results = ia.search_person('julia roberts')
    assert results[0].personID == '0000210'


def test_search_person_entries_should_include_person_name(ia):
    """The best match carries the person's name."""
    results = ia.search_person('julia roberts')
    assert results[0]['name'] == 'Julia Roberts'


def test_search_person_entries_should_include_headshot_if_available(ia):
    """A headshot URL is included when IMDb has one."""
    results = ia.search_person('julia roberts')
    assert 'headshot' in results[0]
def test_search_person_entries_with_aka_should_exclude_name_in_aka(ia):
    """A person matched through an AKA is listed under the real name."""
    results = ia.search_person('julia roberts')
    robertson = next((p for p in results if p['name'] == 'Julia Robertson'), None)
    assert robertson
    assert robertson['name'] == 'Julia Robertson'


@mark.skip(reason="imdbIndex no longer included in results")
def test_search_person_entries_should_include_person_index(ia):
    """The disambiguation index used to be part of the results."""
    results = ia.search_person('julia roberts')
    assert results[0]['imdbIndex'] == 'I'


@mark.skip(reason="no persons without imdbIndex in the first 20 results")
def test_search_person_entries_missing_index_should_be_excluded(ia):
    """People without a disambiguation index must not carry the key."""
    results = ia.search_person('julia roberts')
    assert 'imdbIndex' not in results[3]


@mark.skip(reason="AKAs no longer present in results?")
def test_search_person_entries_should_include_akas(ia):
    """AKAs used to be listed for matching entries."""
    results = ia.search_person('julia roberts')
    with_aka = [p for p in results if p.personID == '4691618']
    assert len(with_aka) == 1
    assert with_aka[0]['akas'] == ['Julia Robertson']


def test_search_person_entries_missing_akas_should_be_excluded(ia):
    """Entries without AKAs must not carry the key."""
    results = ia.search_person('julia roberts')
    assert 'akas' not in results[0]
| 2,333 | Python | .py | 48 | 44.020833 | 74 | 0.720478 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,167 | test_in_operator.py | cinemagoer_cinemagoer/tests/test_in_operator.py | def test_person_in_movie(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
person = ia.get_person('0000206', info=['main']) # Keanu Reeves
assert person in movie
def test_key_in_movie(ia):
    """`in` looks up info keys on a Movie."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert 'cast' in matrix


def test_movie_in_person(ia):
    """`in` finds a movie inside a person's filmography."""
    resurrections = ia.get_movie('10838180', info=['main'])  # The Matrix Resurrections
    keanu = ia.get_person('0000206', info=['main'])  # Keanu Reeves
    assert resurrections in keanu


def test_key_in_person(ia):
    """`in` looks up info keys on a Person."""
    keanu = ia.get_person('0000206')  # Keanu Reeves
    assert 'filmography' in keanu


def test_key_in_company(ia):
    """`in` looks up info keys on a Company."""
    pixar = ia.get_company('0017902', info=['main'])  # Pixar
    assert 'name' in pixar
| 755 | Python | .py | 17 | 40.117647 | 79 | 0.668493 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,168 | test_http_movie_combined.py | cinemagoer_cinemagoer/tests/test_http_movie_combined.py | from pytest import mark
import re
from imdb.Movie import Movie
from imdb.Person import Person
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
re_date = re.compile(r'[0-9]{1,2} (%s) [0-9]{4}' % '|'.join(months), re.I)
def test_movie_cover_url_should_be_an_image_link(ia):
    """The cover URL points at a JPEG over HTTP(S)."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert re.match(r'^https?://.*\.jpg$', matrix.get('cover url'))


def test_cover_url_if_none_should_be_excluded(ia):
    """Movies without a poster must not carry 'cover url'."""
    movie = ia.get_movie('3629794', info=['main'])  # Aslan
    assert 'cover url' not in movie


def test_videos_if_none_should_be_excluded(ia):
    """Movies without trailers must not carry 'videos'."""
    movie = ia.get_movie('7967312', info=['main'])  # Simple Worker Needed
    assert 'videos' not in movie
def test_movie_directors_should_be_a_list_of_persons(ia):
    """Both directors are exposed as Person objects."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    directors = list(matrix.get('directors', []))
    assert len(directors) == 2
    assert all(isinstance(entry, Person) for entry in directors)


def test_movie_directors_should_contain_correct_people(ia):
    """The directors are the Wachowskis, in credit order."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    ids = [entry.personID for entry in matrix.get('directors', [])]
    assert ids == ['0905154', '0905152']


def test_movie_directors_should_contain_person_names(ia):
    """Director entries carry the person names."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    names = [entry.get('name') for entry in matrix.get('directors', [])]
    assert names == ['Lana Wachowski', 'Lilly Wachowski']


def test_movie_writers_should_be_a_list_of_persons(ia):
    """Both writers are exposed as Person objects."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    writers = list(matrix.get('writers', []))
    assert len(writers) == 2
    assert all(isinstance(entry, Person) for entry in writers)


def test_movie_writers_should_contain_correct_people(ia):
    """The writers are the Wachowskis, in credit order."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    ids = [entry.personID for entry in matrix.get('writer', [])]
    assert ids == ['0905152', '0905154']


def test_movie_writers_should_contain_person_names(ia):
    """Writer entries carry the person names."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    names = [entry.get('name') for entry in matrix.get('writers', [])]
    assert names == ['Lilly Wachowski', 'Lana Wachowski']
def test_movie_title_should_not_have_year(ia):
    """The bare title carries no year suffix."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert matrix.get('title') == 'The Matrix'


def test_movie_title_tv_movie_should_not_include_type(ia):
    """TV movie titles are stripped of the '(TV)' marker."""
    tv_movie = ia.get_movie('0389150', info=['main'])  # Matrix (TV)
    assert tv_movie.get('title') == 'The Matrix Defence'


def test_movie_title_video_movie_should_not_include_type(ia):
    """Direct-to-video titles are stripped of the '(V)' marker."""
    video = ia.get_movie('0109151', info=['main'])  # Matrix (V)
    assert video.get('title') == 'Armitage III: Polymatrix'


def test_movie_title_video_game_should_not_include_type(ia):
    """Video game titles are stripped of the '(VG)' marker."""
    game = ia.get_movie('0390244', info=['main'])  # Matrix (VG)
    assert game.get('title') == 'The Matrix Online'


def test_movie_title_tv_series_should_not_have_quotes(ia):
    """Series titles are not wrapped in quotes."""
    series = ia.get_movie('0436992', info=['main'])  # Doctor Who
    assert series.get('title') == 'Doctor Who'


def test_movie_title_tv_mini_series_should_not_have_quotes(ia):
    """Mini-series titles are not wrapped in quotes."""
    mini = ia.get_movie('0185906', info=['main'])  # Band of Brothers
    assert mini.get('title') == 'Band of Brothers'


def test_movie_title_tv_episode_should_not_be_series_title(ia):
    """Episode titles are the episode's own name, not the series'."""
    episode = ia.get_movie('1000252', info=['main'])  # Doctor Who - Blink
    assert episode.get('title') == 'Blink'
def test_movie_year_should_be_an_integer(ia):
    """Release years are plain integers."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert matrix.get('year') == 1999


def test_movie_year_followed_by_kind_in_full_title_should_be_ok(ia):
    """The year is parsed even when a kind marker follows it in the title."""
    video = ia.get_movie('0109151', info=['main'])  # Matrix (V)
    assert video.get('year') == 1996


def test_movie_year_if_none_should_be_excluded(ia):
    """Titles without a year do not carry the key."""
    movie = ia.get_movie('3629794', info=['main'])  # Aslan
    assert 'year' not in movie


@mark.skip(reason="imdb index is not included anymore")
def test_movie_imdb_index_should_be_a_roman_number(ia):
    """The disambiguation index used to be a Roman numeral."""
    movie = ia.get_movie('3698420', info=['main'])  # Mother's Day IV
    assert movie.get('imdbIndex') == 'IV'


@mark.skip(reason="imdb index is not included anymore")
def test_movie_imdb_index_none_should_be_excluded(ia):
    """Titles without a disambiguation index never carried the key."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert 'imdbIndex' not in matrix
def test_movie_kind_none_should_be_movie(ia):
    """Plain features report kind 'movie'."""
    kind = ia.get_movie('0133093', info=['main']).get('kind')  # Matrix
    assert kind == 'movie'


def test_movie_kind_tv_movie_should_be_tv_movie(ia):
    """Made-for-TV features report kind 'tv movie'."""
    kind = ia.get_movie('0389150', info=['main']).get('kind')  # Matrix (TV)
    assert kind == 'tv movie'


def test_movie_kind_tv_special_should_be_tv_special(ia):
    """TV specials report kind 'tv special'."""
    kind = ia.get_movie('14544192', info=['main']).get('kind')  # Bo Burnham: Inside
    assert kind == 'tv special'


def test_movie_kind_video_movie_should_be_video_movie(ia):
    """Direct-to-video features report kind 'video movie'."""
    kind = ia.get_movie('0109151', info=['main']).get('kind')  # Matrix (V)
    assert kind == 'video movie'


def test_movie_kind_video_game_should_be_video_game(ia):
    """Games report kind 'video game'."""
    kind = ia.get_movie('0390244', info=['main']).get('kind')  # Matrix (VG)
    assert kind == 'video game'


def test_movie_kind_tv_series_should_be_tv_series(ia):
    """Series report kind 'tv series'."""
    kind = ia.get_movie('0436992', info=['main']).get('kind')  # Doctor Who
    assert kind == 'tv series'


def test_movie_kind_tv_mini_series_should_be_tv_mini_series(ia):
    """Mini-series report kind 'tv mini series'."""
    kind = ia.get_movie('0185906', info=['main']).get('kind')  # Band of Brothers
    assert kind == 'tv mini series'


def test_movie_kind_tv_series_episode_should_be_episode(ia):
    """Individual episodes report kind 'episode'."""
    kind = ia.get_movie('1000252', info=['main']).get('kind')  # Doctor Who - Blink
    assert kind == 'episode'
# def test_movie_kind_short_movie_should_be_short_movie(ia):
# movie = ia.get_movie('2971344', info=['main']) # Matrix (Short)
# assert movie.get('kind') == 'short movie'
# def test_movie_kind_tv_short_movie_should_be_tv_short_movie(ia):
# movie = ia.get_movie('0274085', info=['main']) # Matrix (TV Short)
# assert movie.get('kind') == 'tv short movie'
# def test_movie_kind_tv_special_should_be_tv_special(ia):
# movie = ia.get_movie('1985970', info=['main']) # Roast of Charlie Sheen
# assert movie.get('kind') == 'tv special'
def test_series_years_if_continuing_should_be_open_range(ia):
    """A still-running series has an open-ended year range."""
    series = ia.get_movie('0436992', info=['main'])  # Doctor Who
    assert series.get('series years') == '2005-'


def test_series_years_if_ended_should_be_closed_range(ia):
    """An ended series has a closed year range."""
    series = ia.get_movie('0412142', info=['main'])  # House M.D.
    assert series.get('series years') == '2004-2012'


def test_series_years_mini_series_ended_in_same_year_should_be_closed_range(ia):
    """A one-year mini-series still reports a closed range."""
    mini = ia.get_movie('0185906', info=['main'])  # Band of Brothers
    assert mini.get('series years') == '2001-2001'


def test_series_years_if_none_should_be_excluded(ia):
    """Episodes do not carry 'series years'."""
    episode = ia.get_movie('1000252', info=['main'])  # Doctor Who - Blink
    assert 'series years' not in episode


def test_series_number_of_episodes_should_be_an_integer(ia):
    """An episode knows the series' total episode count."""
    episode = ia.get_movie('2121964', info=['main'])  # House M.D. - 8/21
    assert episode.get('number of episodes') == 176


def test_series_number_of_episodes_if_none_should_be_excluded(ia):
    """The series itself does not carry 'number of episodes'."""
    series = ia.get_movie('0412142', info=['main'])  # House M.D.
    assert 'number of episodes' not in series
@mark.skip(reason="total episode number is not included anymore")
def test_episode_number_should_be_an_integer(ia):
    """Episodes used to carry their overall episode number."""
    episode = ia.get_movie('2121964', info=['main'])  # House M.D. - 8/21
    assert episode.get('episode number') == 175


@mark.skip(reason="total episode number is not included anymore")
def test_episode_number_if_none_should_be_excluded(ia):
    """The series itself never carried 'episode number'."""
    series = ia.get_movie('0412142', info=['main'])  # House M.D.
    assert 'episode number' not in series


def test_episode_previous_episode_should_be_an_imdb_id(ia):
    """'previous episode' holds the movieID of the prior episode."""
    episode = ia.get_movie('2121964', info=['main'])  # House M.D. - 8/21
    assert episode.get('previous episode') == '2121963'


def test_episode_previous_episode_if_none_should_be_excluded(ia):
    """A series premiere has no 'previous episode'."""
    premiere = ia.get_movie('0606035', info=['main'])  # House M.D. - 1/1
    assert 'previous episode' not in premiere


def test_episode_next_episode_should_be_an_imdb_id(ia):
    """'next episode' holds the movieID of the following episode."""
    episode = ia.get_movie('2121964', info=['main'])  # House M.D. - 8/21
    assert episode.get('next episode') == '2121965'


def test_episode_next_episode_if_none_should_be_excluded(ia):
    """The final episode has no 'next episode'."""
    finale = ia.get_movie('2121965', info=['main'])  # House M.D. - 8/22
    assert 'next episode' not in finale
def test_episode_of_series_should_have_title_year_and_kind(ia):
    """'episode of' points back to the parent series as a Movie."""
    episode = ia.get_movie('2121964', info=['main'])  # House M.D. - 8/21
    parent = episode.get('episode of')
    assert isinstance(parent, Movie)
    assert parent.movieID == '0412142'
    # NOTE: original title and year are no longer part of the reference data.
    assert parent.get('kind') == 'tv series'


def test_episode_of_mini_series_should_have_title_year_and_kind(ia):
    """'episode of' points back to the parent mini-series as a Movie."""
    episode = ia.get_movie('1247467', info=['main'])  # Band of Brothers - 4
    parent = episode.get('episode of')
    assert isinstance(parent, Movie)
    assert parent.movieID == '0185906'
    # NOTE: original title and year are no longer part of the reference data.
    assert parent.get('kind') == 'tv series'


def test_episode_of_series_if_none_should_be_excluded(ia):
    """The series itself does not carry 'episode of'."""
    series = ia.get_movie('0412142', info=['main'])  # House M.D.
    assert 'episode of' not in series
def test_movie_rating_should_be_between_1_and_10(ia):
    """Ratings are on IMDb's 1-10 scale."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert 1.0 <= matrix.get('rating') <= 10.0


def test_movie_rating_if_none_should_be_excluded(ia):
    """Unrated titles do not carry 'rating'."""
    movie = ia.get_movie('3629794', info=['main'])  # Aslan
    assert 'rating' not in movie


def test_movie_votes_should_be_an_integer(ia):
    """Vote counts are plain integers (well over a million here)."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert matrix.get('votes') > 1000000


def test_movie_votes_if_none_should_be_excluded(ia):
    """Titles without votes do not carry 'votes'."""
    movie = ia.get_movie('3629794', info=['main'])  # Aslan
    assert 'votes' not in movie


def test_movie_top250_rank_should_be_between_1_and_250(ia):
    """A top-250 title reports a rank within the chart."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert 1 <= matrix.get('top 250 rank') <= 250


def test_movie_top250_rank_if_none_should_be_excluded(ia):
    """Titles outside the chart do not carry 'top 250 rank'."""
    movie = ia.get_movie('1863157', info=['main'])  # Ates Parcasi
    assert 'top 250 rank' not in movie


def test_movie_bottom100_rank_should_be_between_1_and_100(ia):
    """A bottom-100 title reports a rank within the chart."""
    manos = ia.get_movie('0060666', info=['main'])  # Manos
    assert 1 <= manos.get('bottom 100 rank') <= 100


def test_movie_bottom100_rank_if_none_should_be_excluded(ia):
    """Titles outside the chart do not carry 'bottom 100 rank'."""
    movie = ia.get_movie('1863157', info=['main'])  # Ates Parcasi
    assert 'bottom 100 rank' not in movie
@mark.skip('seasons is an alias for number of seasons')
def test_series_season_titles_should_be_a_list_of_season_titles(ia):
    """Season titles are the stringified season numbers."""
    series = ia.get_movie('0436992', info=['main'])  # Doctor Who
    # 'unknown' seasons don't show up in the reference page.
    assert series.get('seasons', []) == [str(i) for i in range(1, 12)]


def test_series_season_titles_if_none_should_be_excluded(ia):
    """Episodes do not carry 'seasons'."""
    episode = ia.get_movie('1000252', info=['main'])  # Doctor Who - Blink
    assert 'seasons' not in episode


def test_series_number_of_seasons_should_be_numeric(ia):
    """'number of seasons' is a plain integer."""
    series = ia.get_movie('0412142', info=['main'])  # House M.D.
    assert series.get('number of seasons') == 8


def test_series_number_of_seasons_should_exclude_non_numeric_season_titles(ia):
    """Non-numeric season titles are not counted."""
    series = ia.get_movie('0436992', info=['main'])  # Doctor Who
    assert series.get('number of seasons') == 14
def test_episode_original_air_date_should_be_a_date(ia):
    """Episodes carry a 'D Mon YYYY' style air date."""
    episode = ia.get_movie('1000252', info=['main'])  # Doctor Who - Blink
    assert re_date.match(episode.get('original air date'))


def test_episode_original_air_date_if_none_should_be_excluded(ia):
    """The series itself has no 'original air date'."""
    series = ia.get_movie('0436992', info=['main'])  # Doctor Who
    assert 'original air date' not in series


def test_season_and_episode_numbers_should_be_integers(ia):
    """Episodes carry integer season and episode numbers."""
    episode = ia.get_movie('1000252', info=['main'])  # Doctor Who - Blink
    assert episode.get('season') == 3
    assert episode.get('episode') == 10


def test_season_and_episode_numbers_none_should_be_excluded(ia):
    """The series itself has neither 'season' nor 'episode'."""
    series = ia.get_movie('0436992', info=['main'])  # Doctor Who
    assert 'season' not in series
    assert 'episode' not in series
def test_movie_genres_if_single_should_be_a_list_of_genre_names(ia):
    """A single genre is still wrapped in a list."""
    documentary = ia.get_movie('0389150', info=['main'])  # Matrix (TV)
    assert documentary.get('genres', []) == ['Documentary']


def test_movie_genres_if_multiple_should_be_a_list_of_genre_names(ia):
    """Multiple genres are listed in order."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert matrix.get('genres', []) == ['Action', 'Sci-Fi']
# TODO: find a movie with no genre
def test_movie_plot_outline_should_be_a_longer_text(ia):
    """The plot outline is a full sentence about the movie."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    outline = matrix.get('plot outline')
    assert re.match(r'^Thomas A\. Anderson is a man .* human rebellion.$', outline)


def test_movie_plot_outline_none_should_be_excluded(ia):
    """Titles without an outline do not carry the key."""
    movie = ia.get_movie('1863157', info=['main'])  # Ates Parcasi
    assert 'plot outline' not in movie


@mark.skip(reason="mpaa rating is not included anymore")
def test_movie_mpaa_should_be_a_rating(ia):
    """The MPAA rating used to be part of the main info set."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert matrix.get('mpaa') == 'Rated R for sci-fi violence and brief language'


@mark.skip(reason="mpaa rating is not included anymore")
def test_movie_mpaa_none_should_be_excluded(ia):
    """Titles without an MPAA rating never carried the key."""
    movie = ia.get_movie('1863157', info=['main'])  # Ates Parcasi
    assert 'mpaa' not in movie
def test_movie_runtimes_single_should_be_a_list_in_minutes(ia):
    """A single runtime is a one-element list of minute strings."""
    matrix = ia.get_movie('0133093', info=['main'])  # Matrix
    assert matrix.get('runtimes', []) == ['136']


def test_movie_runtimes_with_countries_should_include_context(ia):
    """Country-specific runtimes collapse to the plain value."""
    suspiria = ia.get_movie('0076786', info=['main'])  # Suspiria
    assert suspiria.get('runtimes', []) == ['99']


def test_movie_runtimes_if_none_should_be_excluded(ia):
    """Video games have no 'runtimes' key."""
    game = ia.get_movie('0390244', info=['main'])  # Matrix (VG)
    assert 'runtimes' not in game
def test_movie_countries_if_single_should_be_a_list_of_country_names(ia):
    """A single production country is still wrapped in a list."""
    manos = ia.get_movie('0060666', info=['main'])  # Manos
    assert manos.get('countries', []) == ['United States']


def test_movie_countries_if_multiple_should_be_a_list_of_country_names(ia):
    """Multiple production countries are listed in order."""
    shining = ia.get_movie('0081505', info=['main'])  # Shining
    assert shining.get('countries', []) == ['United Kingdom', 'United States']


# TODO: find a movie with no country
def test_movie_country_codes_if_single_should_be_a_list_of_country_codes(ia):
    """Country codes mirror the country list (single entry)."""
    manos = ia.get_movie('0060666', info=['main'])  # Manos
    assert manos.get('country codes', []) == ['us']


def test_movie_country_codes_if_multiple_should_be_a_list_of_country_codes(ia):
    """Country codes mirror the country list (multiple entries)."""
    shining = ia.get_movie('0081505', info=['main'])  # Shining
    assert shining.get('country codes', []) == ['gb', 'us']
# TODO: find a movie with no country
def test_movie_languages_if_single_should_be_a_list_of_language_names(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert movie.get('languages', []) == ['English']
def test_movie_languages_if_multiple_should_be_a_list_of_language_names(ia):
movie = ia.get_movie('0043338', info=['main']) # Ace in the Hole
assert movie.get('languages', []) == ['English', 'Spanish', 'Latin']
def test_movie_languages_none_as_a_language_name_should_be_valid(ia):
movie = ia.get_movie('2971344', info=['main']) # Matrix (Short)
assert movie.get('languages', []) == ['None']
# TODO: find a movie with no language
def test_movie_language_codes_if_single_should_be_a_list_of_language_names(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert movie.get('language codes', []) == ['en']
def test_movie_language_codes_if_multiple_should_be_a_list_of_language_names(ia):
movie = ia.get_movie('0043338', info=['main']) # Ace in the Hole
assert movie.get('language codes', []) == ['en', 'es', 'la']
def test_movie_language_codes_zxx_as_a_language_code_should_be_valid(ia):
movie = ia.get_movie('2971344', info=['main']) # Matrix (Short)
assert movie.get('language codes', []) == ['zxx']
# TODO: find a movie with no language
def test_movie_colors_if_single_should_be_a_list_of_color_types(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert movie.get('color info', []) == ['Color']
def test_movie_colors_if_multiple_should_be_a_list_of_color_types(ia):
# this used to return multiple colors, now it only returns the first
movie = ia.get_movie('0120789', info=['main']) # Pleasantville
assert movie.get('color info', []) == ['Black and White']
def test_movie_cast_can_contain_notes(ia):
movie = ia.get_movie('0060666', info=['main']) # Manos
diane_adelson = movie['cast'][2]
assert str(diane_adelson.currentRole) == 'Margaret'
assert diane_adelson.notes == '(as Diane Mahree)'
def test_movie_colors_if_single_with_notes_should_include_notes(ia):
movie = ia.get_movie('0060666', info=['main']) # Manos
assert movie.get('color info', []) == ['Color::(Eastmancolor)']
def test_movie_colors_if_none_should_be_excluded(ia):
movie = ia.get_movie('0389150', info=['main']) # Matrix (TV)
assert 'color info' not in movie
def test_movie_aspect_ratio_should_be_a_number_to_one(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert movie.get('aspect ratio') == '2.39 : 1'
def test_movie_aspect_ratio_if_none_should_be_excluded(ia):
movie = ia.get_movie('1863157', info=['main']) # Ates Parcasi
assert 'aspect ratio' not in movie
def test_movie_sound_mix_if_single_should_be_a_list_of_sound_mix_types(ia):
movie = ia.get_movie('0063850', info=['main']) # If....
assert movie.get('sound mix', []) == ['Mono']
def test_movie_sound_mix_if_multiple_should_be_a_list_of_sound_mix_types(ia):
movie = ia.get_movie('0120789', info=['main']) # Pleasantville
assert movie.get('sound mix', []) == ['DTS', 'Dolby Digital', 'SDDS']
def test_movie_sound_mix_if_single_with_notes_should_include_notes(ia):
movie = ia.get_movie('0043338', info=['main']) # Ace in the Hole
assert movie.get('sound mix', []) == ['Mono::(Western Electric Recording)']
def test_movie_sound_mix_if_multiple_with_notes_should_include_notes(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
expected = set(['DTS::(Digital DTS Sound)', 'Dolby Digital', 'SDDS', 'Dolby Atmos'])
assert expected.issubset(set(movie.get('sound mix', [])))
def test_movie_sound_mix_if_none_should_be_excluded(ia):
movie = ia.get_movie('1863157', info=['main']) # Ates Parcasi
assert 'sound mix' not in movie
def test_movie_certificates_should_be_a_list_of_certificates(ia):
movie = ia.get_movie('1000252', info=['main']) # Doctor Who - Blink
assert movie.get('certificates', []) == [
'Australia:PG::(most episodes)',
'Brazil:12',
'Netherlands:9::(some episodes)',
'New Zealand:PG',
'Singapore:PG',
'South Africa:PG',
'United Kingdom:PG',
'United Kingdom:PG::(DVD rating)',
'United States:TV-PG'
]
def test_movie_certificates_if_none_should_be_excluded(ia):
movie = ia.get_movie('1863157', info=['main']) # Ates Parcasi
assert 'certificates' not in movie
def test_movie_cast_must_contain_items(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert len(movie.get('cast', [])) > 20
def test_movie_cast_must_be_in_plain_format(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert movie['cast'][0].data.get('name') == 'Keanu Reeves'
def test_movie_misc_sections_must_contain_items(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert len(movie.get('casting department', [])) == 3
def test_movie_misc_sections_must_be_in_plain_format(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert movie['casting department'][0].data.get('name') == 'Tim Littleton'
def test_movie_companies_sections_must_contain_items(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert len(movie.get('special effects companies', [])) == 6
def test_movie_box_office_should_be_a_dict(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert isinstance(movie.get('box office'), dict)
assert len(movie.get('box office', {})) == 3
def test_movie_contains_stars(ia):
movie = ia.get_movie('0133093', info=['main']) # Matrix
assert len(movie.get('stars', [])) >= 3
| 21,433 | Python | .py | 366 | 54.23224 | 101 | 0.659888 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_popular_tv_should_contain_100_entries(ia):
    """The popular-TV chart always holds exactly 100 shows."""
    chart = ia.get_popular100_tv()
    assert len(chart) == 100


def test_popular_tv_entries_should_have_rank(ia):
    """Entries carry a 1-based rank matching their chart position."""
    shows = ia.get_popular100_tv()
    for position, show in enumerate(shows, start=1):
        assert show['popular tv 100 rank'] == position


def test_popular_tv_entries_should_have_movie_id(ia):
    for show in ia.get_popular100_tv():
        assert show.movieID.isdigit()


def test_popular_tv_entries_should_have_title(ia):
    for show in ia.get_popular100_tv():
        assert 'title' in show


def test_popular_tv_entries_should_have_year(ia):
    for show in ia.get_popular100_tv():
        assert isinstance(show['year'], int)
| 763 | Python | .py | 19 | 34.947368 | 55 | 0.695652 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_movie_keywords_should_be_a_list_of_keywords(ia):
    """The keywords info set exposes a plain list of keyword slugs."""
    matrix = ia.get_movie('0133093', info=['keywords'])  # Matrix
    kws = matrix.get('keywords', [])
    assert 250 <= len(kws) <= 400
    assert {'computer-hacker', 'messiah', 'artificial-reality'} <= set(kws)


def test_movie_relevant_keywords_should_be_a_list_of_keywords(ia):
    """'relevant keywords' entries are dicts carrying a 'keyword' field."""
    matrix = ia.get_movie('0133093', info=['keywords'])  # Matrix
    relevant = matrix.get('relevant keywords', [])
    assert 250 <= len(relevant) <= 400
    assert 'artificial reality' in [entry['keyword'] for entry in relevant]


def test_movie_keywords_if_none_should_be_excluded(ia):
    movie = ia.get_movie('1863157', info=['keywords'])  # Ates Parcasi
    assert 'keywords' not in movie
| 746 | Python | .py | 13 | 53 | 87 | 0.689986 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_search_keyword_check_list_of_keywords(ia):
    assert 'reference to zoolander' in ia.search_keyword('zoolander')


def test_search_keyword_if_multiple_should_list_correct_number_of_keywords(ia):
    assert len(ia.search_keyword('messiah')) == 25


def test_search_keyword_if_too_many_should_list_upper_limit_of_keywords(ia):
    # the site caps keyword results at 25 per page
    assert len(ia.search_keyword('computer')) == 25


def test_search_keyword_if_none_result_should_be_empty(ia):
    assert ia.search_keyword('%e3%82%a2') == []


def test_get_keyword_pagination(ia):
    """Page 1 equals the default page; page 2 holds different titles."""
    default_page = ia.get_keyword('superhero')
    first_page = ia.get_keyword('superhero', page=1)
    second_page = ia.get_keyword('superhero', page=2)
    for idx in range(50):
        assert default_page[idx]['title'] == first_page[idx]['title']
        assert default_page[idx]['title'] != second_page[idx]['title']
| 1,034 | Python | .py | 19 | 49.631579 | 93 | 0.722939 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,172 | test_http_movie_plot.py | cinemagoer_cinemagoer/tests/test_http_movie_plot.py | import re
def test_movie_summary_should_be_some_text_with_author(ia):
    """Plot summaries end with their author's nick after a '::' separator."""
    matrix = ia.get_movie('0133093', info=['plot'])  # Matrix
    plots = matrix.get('plot', [])
    assert 3 <= len(plots) <= 10
    # pick the first summary signed by redcommander27 (empty if absent)
    sel_plot = next((p for p in plots if p.endswith('redcommander27')), '')
    assert re.match(r'^Thomas A. Anderson is a man.*As a rebel.*redcommander27$', sel_plot)


def test_movie_summary_if_none_should_be_excluded(ia):
    movie = ia.get_movie('1863157', info=['plot'])  # Ates Parcasi
    assert 'plot' not in movie


def test_movie_synopsis_should_be_some_text(ia):
    matrix = ia.get_movie('0133093', info=['plot'])  # Matrix
    synopsis = matrix.get('synopsis')
    assert len(synopsis) == 1
    assert re.match(r'^The screen fills with .* three Matrix movies\.$',
                    synopsis[0], re.M | re.I | re.DOTALL)


def test_movie_synopsis_if_none_should_be_excluded(ia):
    movie = ia.get_movie('1863157', info=['plot'])  # Ates Parcasi
    assert 'synopsis' not in movie
| 1,028 | Python | .py | 22 | 41.363636 | 110 | 0.656313 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_get_showtimes_contains_data(ia):
    """Every cinema entry lists at least one movie with showtimes."""
    listings = ia.get_showtimes()
    assert len(listings) > 0
    for cinema_entry in listings:
        assert 'cinema' in cinema_entry
        assert 'movies' in cinema_entry
        assert len(cinema_entry['movies']) > 0
        for showing in cinema_entry['movies']:
            assert 'movie' in showing
            assert 'showtimes' in showing
| 370 | Python | .py | 10 | 29.2 | 45 | 0.622222 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,174 | test_http_person_main.py | cinemagoer_cinemagoer/tests/test_http_person_main.py | import re
# Live-site expectations for Person objects ('main' and 'filmography' info
# sets). `ia` is the suite's IMDb access fixture; asserted values mirror
# current IMDb data.
def test_person_headshot_should_be_an_image_link(ia):
    person = ia.get_person('0000206', info=['main'])  # Keanu Reeves
    assert re.match(r'^https?://.*\.jpg$', person['headshot'])
def test_person_producer_is_in_filmography(ia):
    person = ia.get_person('0000206', info=['filmography'])  # Keanu Reeves
    assert 'producer' in person.get('filmography', {})
def test_person_filmography_includes_role(ia):
    person = ia.get_person('0000206', info=['filmography'])  # Keanu Reeves
    movies = person.get('filmography', {}).get('actor', {})
    assert 'John Wick' in [str(movie.currentRole) for movie in movies]
def test_person_with_id_redirect(ia):
    # a personID that IMDb redirects must resolve to the canonical imdbID
    person = ia.get_person('1890852', info=['main'])  # Aleksandr Karpov
    assert '0440022' == person.get('imdbID')
def test_person_name_in_data_should_be_plain(ia):
    # .data is the raw parsed dictionary behind the Person wrapper
    person = ia.get_person('0000206', info=['main'])  # Keanu Reeves
    assert person.data.get('name') == 'Keanu Reeves'
def test_person_canonical_name(ia):
    person = ia.get_person('0000206', info=['main'])  # Keanu Reeves
    assert person.get('canonical name') == 'Reeves, Keanu'
def test_person_headshot_if_none_should_be_excluded(ia):
    person = ia.get_person('0330139', info=['main'])  # Deni Gordon
    assert 'headshot' not in person
def test_person_name_should_not_be_canonicalized(ia):
    person = ia.get_person('0000206', info=['main'])  # Keanu Reeves
    assert person.get('name') == 'Keanu Reeves'
def test_person_name_should_not_have_birth_and_death_years(ia):
    person = ia.get_person('0000001', info=['main'])  # Fred Astaire
    assert person.get('name') == 'Fred Astaire'
def test_person_imdb_index_should_be_a_roman_number(ia):
    person = ia.get_person('0000210', info=['main'])  # Julia Roberts
    assert person.get('imdbIndex') == 'I'
def test_person_should_have_filmography(ia):
    person = ia.get_person('0000210', info=['filmography'])  # Julia Roberts
    filmoset = set(['actress', 'producer', 'soundtrack'])
    assert filmoset.issubset(set(person.get('filmography', {}).keys()))
def test_person_filmography_should_contain_movies(ia):
    person = ia.get_person('0000210', info=['filmography'])  # Julia Roberts
    assert len(person.get('filmography', {}).get('actress')) >= 20
def test_person_filmography_actor_and_actress_should_be_the_same(ia):
    # 'actor' and 'actress' are aliases for the same section
    person = ia.get_person('0000210', info=['filmography'])  # Julia Roberts
    assert person.get('actress') == person.get('actor')
def test_person_filmography_should_contain_many_roles(ia):
    person = ia.get_person('0000110', info=['filmography'])  # Kenneth Branagh
    filmography = person.get('filmography', {})
    assert len(filmography) > 9
    assert len(filmography.get('actor')) >= 70
    assert len(filmography.get('writer')) >= 9
    assert len(filmography.get('self')) >= 150
def test_person_imdb_index_if_none_should_be_excluded(ia):
    person = ia.get_person('0000206', info=['main'])  # Keanu Reeves
    assert 'imdbIndex' not in person
| 3,041 | Python | .py | 52 | 54.134615 | 80 | 0.685705 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,175 | test_http_search_company.py | cinemagoer_cinemagoer/tests/test_http_search_company.py | from pytest import mark
# Live-site expectations for company search. `ia` is the suite's IMDb
# access fixture; counts and IDs mirror current IMDb data.
def test_search_company_should_list_default_number_of_companies(ia):
    companies = ia.search_company('pixar')
    assert len(companies) == 13
@mark.skip(reason="number of results limit is not honored anymore")
def test_search_company_limited_should_list_requested_number_of_companies(ia):
    companies = ia.search_company('pixar', results=7)
    assert len(companies) == 7
def test_search_company_unlimited_should_list_correct_number_of_companies(ia):
    companies = ia.search_company('pixar', results=500)
    assert len(companies) == 13
def test_search_company_too_many_should_list_upper_limit_of_companies(ia):
    # the site caps company search results at 25
    companies = ia.search_company('pictures', results=500)
    assert len(companies) == 25
def test_search_company_if_none_result_should_be_empty(ia):
    companies = ia.search_company('%e3%82%a2')
    assert companies == []
def test_search_company_entries_should_include_company_id(ia):
    # either of the two known Pixar companyIDs may be returned first
    companies = ia.search_company('pixar')
    assert companies[0].companyID in ('0348691', '0017902')
def test_search_company_entries_should_include_company_name(ia):
    companies = ia.search_company('pixar')
    assert companies[0]['name'] == 'Pixar Animation Studios'
def test_search_company_entries_should_include_company_country(ia):
    companies = ia.search_company('pixar')
    assert companies[0]['country'] == '[United States]'  # shouldn't this be just 'ca'?
@mark.skip(reason="tested company always has a country: find another one")
def test_search_company_entries_missing_country_should_be_excluded(ia):
    companies = ia.search_company('pixar', results=500)
    company_without_country = [c for c in companies if c.companyID == '0115838']
    assert len(company_without_country) == 1
    assert 'country' not in company_without_country[0]
| 1,805 | Python | .py | 32 | 52.34375 | 89 | 0.74302 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_top_indian_chart_should_contain_250_entries(ia):
    """The top Indian movies chart always holds exactly 250 titles."""
    assert len(ia.get_top250_indian_movies()) == 250


def test_top_indian_chart_entries_should_have_rank(ia):
    """Entries carry a 1-based rank matching their chart position."""
    entries = ia.get_top250_indian_movies()
    for position, entry in enumerate(entries, start=1):
        assert entry['top indian 250 rank'] == position


def test_top_indian_chart_entries_should_have_movie_id(ia):
    for entry in ia.get_top250_indian_movies():
        assert entry.movieID.isdigit()


def test_top_indian_chart_entries_should_have_title(ia):
    for entry in ia.get_top250_indian_movies():
        assert 'title' in entry


def test_top_indian_chart_entries_should_be_movies(ia):
    for entry in ia.get_top250_indian_movies():
        assert entry['kind'] == 'movie'


def test_top_indian_chart_entries_should_have_year(ia):
    for entry in ia.get_top250_indian_movies():
        assert isinstance(entry['year'], int)


def test_top_indian_chart_entries_should_have_high_ratings(ia):
    for entry in ia.get_top250_indian_movies():
        assert entry['rating'] > 7
| 1,163 | Python | .py | 27 | 37.777778 | 63 | 0.702847 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_movie_release_info_raw_akas_must_be_a_list(ia):
    """'raw akas' parallels the parsed 'akas from release info' list."""
    matrix = ia.get_movie('0133093', info=['release info'])  # Matrix
    raw_akas = matrix.get('raw akas', [])
    assert len(raw_akas) >= 40
    assert len(raw_akas) == len(matrix.get('akas from release info'))


def test_movie_release_info_raw_release_dates_must_be_a_list(ia):
    """'raw release dates' parallels the parsed 'release dates' list."""
    matrix = ia.get_movie('0133093', info=['release info'])  # Matrix
    raw_dates = matrix.get('raw release dates', [])
    assert len(raw_dates) >= 56
    assert len(raw_dates) == len(matrix.get('release dates'))
| 525 | Python | .py | 10 | 48.1 | 70 | 0.653021 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
from pytest import fixture

import os
import sys

import imdb.locale

if sys.version_info.major >= 3:
    from importlib import reload


@fixture
def italian():
    """Temporarily switch imdb.locale to Italian.

    Yields the translation function of imdb.locale after reloading it with
    LANG=it_IT.UTF-8; on teardown the previous LANG value (or its absence)
    is restored and the module is reloaded again.
    """
    # os.environ["LANG"] would raise KeyError when LANG is unset (common on
    # Windows and on minimal CI images), so read it with .get() instead.
    saved_lang = os.environ.get("LANG")
    os.environ["LANG"] = "it_IT.UTF-8"
    reload(imdb.locale)
    yield imdb.locale._
    # Restore the exact previous state: delete the variable if it did not
    # exist before, otherwise put the old value back.
    if saved_lang is None:
        del os.environ["LANG"]
    else:
        os.environ["LANG"] = saved_lang
    reload(imdb.locale)


def test_locale_should_work(italian):
    assert italian("art-director") == "Direttore artistico"
| 483 | Python | .py | 17 | 24.882353 | 59 | 0.714597 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
import xml.etree.ElementTree as ET


def test_movie_xml(ia):
    """The XML serialization of a Movie must be well-formed."""
    matrix = ia.get_movie('0133093')  # Matrix
    serialized = matrix.asXML().encode('utf8', 'ignore')
    assert ET.fromstring(serialized) is not None
| 238 | Python | .py | 6 | 35.666667 | 50 | 0.691304 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_movie_reviews_should_be_a_list(ia):
    """User reviews are returned 25 per page."""
    dust_devil = ia.get_movie('0104155', info=['reviews'])  # Dust Devil
    assert len(dust_devil.get('reviews', [])) == 25


def test_movie_reviews_if_none_should_be_excluded(ia):
    movie = ia.get_movie('1863157', info=['reviews'])  # Ates Parcasi
    assert 'reviews' not in movie


def test_movie_critic_reviews_metascore(ia):
    """Critic reviews expose a metascore and a metacritic.com link."""
    matrix = ia.get_movie('0133093', info=['critic reviews'])  # The Matrix
    assert 65 < matrix.get('metascore') < 80
    assert 'metacritic.com' in matrix.get('metacritic url', '')
| 574 | Python | .py | 11 | 48 | 75 | 0.676786 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,181 | test_http_movie_votes.py | cinemagoer_cinemagoer/tests/test_http_movie_votes.py | from pytest import mark
# Live-site expectations for the 'vote details' info set. `ia` is the
# suite's IMDb access fixture; asserted ranges mirror current IMDb data.
def test_movie_votes_should_be_divided_into_10_slots(ia):
    # one slot per rating value, 1 through 10
    movie = ia.get_movie('0133093', info=['vote details'])  # Matrix
    votes = movie.get('number of votes', [])
    assert len(votes) == 10
def test_movie_votes_should_be_integers(ia):
    movie = ia.get_movie('0133093', info=['vote details'])  # Matrix
    votes = movie.get('number of votes', [])
    for vote in votes:
        assert isinstance(vote, int)
def test_movie_votes_median_should_be_an_integer(ia):
    movie = ia.get_movie('0133093', info=['vote details'])  # Matrix
    median = movie.get('median')
    assert median == 9
def test_movie_votes_mean_should_be_numeric(ia):
    movie = ia.get_movie('0133093', info=['vote details'])  # Matrix
    mean = movie.get('arithmetic mean')
    assert 8.5 <= mean <= 9
def test_movie_demographics_should_be_divided_into_multiple_categories(ia):
    movie = ia.get_movie('0133093', info=['vote details'])  # Matrix
    demographics = movie['demographics']
    assert len(demographics) >= 18
@mark.skip(reason="top 1000 voters parser doesn't seem to work")
def test_movie_demographics_votes_should_be_integers(ia):
    movie = ia.get_movie('0133093', info=['vote details'])  # Matrix
    top1000 = movie['demographics']['top 1000 voters']
    assert 890 <= top1000['votes'] <= 1000
@mark.skip(reason="top 1000 voters parser doesn't seem to work")
def test_movie_demographics_rating_should_be_numeric(ia):
    movie = ia.get_movie('0133093', info=['vote details'])  # Matrix
    top1000 = movie['demographics']['top 1000 voters']
    assert 8 <= top1000['rating'] <= 8.5
| 1,621 | Python | .py | 32 | 46.34375 | 75 | 0.689524 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
# public IMDb list used as a stable fixture for these tests
listId = "ls058726648"


def test_list_movies_entries_should_have_rank(ia):
    """List entries carry a 1-based rank matching their position."""
    entries = ia.get_movie_list(list_=listId)
    for position, entry in enumerate(entries, start=1):
        assert entry['rank'] == position


def test_list_movies_entries_should_have_movie_id(ia):
    for entry in ia.get_movie_list(list_=listId):
        assert entry.movieID.isdigit()


def test_list_movies_entries_should_have_title(ia):
    for entry in ia.get_movie_list(list_=listId):
        assert 'title' in entry


def test_list_entries_should_be_movies(ia):
    for entry in ia.get_movie_list(list_=listId):
        assert entry['kind'] == 'movie'
| 682 | Python | .py | 17 | 34.882353 | 54 | 0.692542 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
from pytest import mark


@mark.skip('creator parser is still broken see #275')
def test_series_has_creator(ia):
    house = ia.get_movie('0412142')  # House M.D.
    creator_ids = [p.personID for p in house.get('creator')]
    assert '0794914' in creator_ids


def test_series_full_cast_has_ids(ia):
    house = ia.get_movie('0412142', info=['full cast'])  # House M.D.
    # every cast member must carry a personID
    missing_ids = [p for p in house.get('cast', []) if p.personID is None]
    assert missing_ids == []
| 444 | Python | .py | 9 | 45.666667 | 73 | 0.675174 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_company_name_should_not_include_country(ia):
    """The company name must come back bare, without a country suffix."""
    pixar = ia.get_company('0017902', info=['main'])
    assert pixar.get('name') == 'Pixar Animation Studios'
| 163 | Python | .py | 3 | 50.666667 | 56 | 0.7 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
def test_person_other_works_should_contain_correct_number_of_works(ia):
    keanu = ia.get_person('0000206', info=['other works'])  # Keanu Reeves
    assert len(keanu.get('other works', [])) == 42


def test_person_other_works_should_contain_correct_work(ia):
    keanu = ia.get_person('0000206', info=['other works'])  # Keanu Reeves
    works = keanu.get('other works', [])
    assert works[0].startswith('(1995) Stage: Appeared')


def test_person_other_works_if_none_should_be_excluded(ia):
    person = ia.get_person('0330139', info=['other works'])  # Deni Gordon
    assert 'other works' not in person
| 665 | Python | .py | 11 | 56.181818 | 78 | 0.692308 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
from pytest import mark


@mark.skip("FIXME: this section changed name to 'More Like This' and divs/classes have changed too")
def test_movie_contains_recommendations(ia):
    matrix = ia.get_movie('0133093', info=['recommendations'])  # Matrix
    assert len(matrix.get('recommendations', [])) == 12
| 303 | Python | .py | 5 | 57.6 | 100 | 0.726351 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,187 | _exceptions.py | cinemagoer_cinemagoer/imdb/_exceptions.py | # Copyright 2004-2017 Davide Alberani <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the exception hierarchy used by the imdb package.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from imdb._logging import imdbpyLogger as logger
class IMDbError(Exception):
    """Base class for every exception raised by the imdb package."""

    def __init__(self, *args, **kwargs):
        """Log the exception with critical severity, then build it normally."""
        # Every raised exception also dispatches a critical log record, so
        # failures remain traceable even if the exception is later swallowed.
        logger.critical('%s exception raised; args: %s; kwds: %s',
                        self.__class__.__name__, args, kwargs,
                        exc_info=True)
        super(IMDbError, self).__init__(*args, **kwargs)
class IMDbDataAccessError(IMDbError):
    """Exception raised when is not possible to access needed data."""
class IMDbParserError(IMDbError):
    """Exception raised when an error occurred parsing the data."""
| 1,695 | Python | .py | 36 | 43.361111 | 82 | 0.731959 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,188 | linguistics.py | cinemagoer_cinemagoer/imdb/linguistics.py | # -*- coding: utf-8 -*-
# Copyright 2009-2017 Davide Alberani <[email protected]>
# 2012 Alberto Malagoli <albemala AT gmail.com>
# 2009 H. Turgut Uyar <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides functions and data to handle languages and articles
(in various languages) at the beginning of movie titles in a smart way.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# List of generic articles used when the language of the title is unknown (or
# we don't have information about articles in that language).
# XXX: Managing titles in a lot of different languages, a function to recognize
# an initial article can't be perfect; sometimes we'll stumble upon a short
# word that is an article in some language, but it's not in another; in these
# situations we have to choose if we want to interpret this little word
# as an article or not (remember that we don't know what the original language
# of the title was).
# Example: 'en' is (I suppose) an article in Some Language. Unfortunately it
# seems also to be a preposition in other languages (French?).
# Running a script over the whole list of titles (and aliases), I've found
# that 'en' is used as an article only 376 times, and as another thing 594
# times, so I've decided to _always_ consider 'en' as a non article.
#
# Here is a list of words that are _never_ considered as articles, complete
# with the count of times they are used one way or another:
# 'en' (376 vs 594), 'to' (399 vs 727), 'as' (198 vs 276), 'et' (79 vs 99),
# 'des' (75 vs 150), 'al' (78 vs 304), 'ye' (14 vs 70),
# 'da' (23 vs 298), "'n" (8 vs 12)
#
# I've left in the list 'i' (1939 vs 2151) and 'uno' (52 vs 56)
# I'm not sure what '-al' is, and so I've left it out...
#
# Generic list of articles in unicode:
# Fallback article list used by articlesDictsForLang()/spArticlesForLang()
# when the title's language is unknown or has no entry in LANG_ARTICLES.
GENERIC_ARTICLES = (
    'the', 'la', 'a', 'die', 'der', 'le', 'el', "l'", 'il', 'das', 'les', 'i', 'o', 'ein',
    'un', 'de', 'los', 'an', 'una', 'las', 'eine', 'den', 'het', 'gli', 'lo', 'os', 'ang',
    'oi', 'az', 'een', 'ha-', 'det', 'ta', 'al-', 'mga', "un'", 'uno', 'ett', 'dem', 'egy',
    'els', 'eines', 'Ï', 'Ç', 'Ôï', 'Ïé'
    # NOTE(review): the last four entries look mis-encoded (possibly Greek
    # articles stored in a legacy codepage) — confirm the intended characters
    # before changing them; they are matched against title data as-is.
)
# Lists of articles separated by language. If possible, the list should
# be sorted by frequency (not very important, but...)
# If you want to add a list of articles for another language, mail it
# to [email protected]
# Maps a language name to the tuple of its articles; looked up before
# falling back to GENERIC_ARTICLES.
LANG_ARTICLES = {
    'English': ('the', 'a', 'an'),
    'Italian': ('la', 'le', "l'", 'il', 'i', 'un', 'una', 'gli', 'lo', "un'", 'uno'),
    'Spanish': (
        'la', 'lo', 'el', 'las', 'un', 'los', 'una', 'al', 'del', 'unos', 'unas', 'uno'
    ),
    'French': ('le', "l'", 'la', 'les', 'un', 'une', 'des', 'au', 'du', 'à la', 'de la', 'aux'),
    'Portuguese': ('a', 'as', 'o', 'os', 'um', 'uns', 'uma', 'umas'),
    'Turkish': ()  # Some languages don't have articles.
}
# Bound method cached at module level to avoid the attribute lookup on
# every call in articlesDictsForLang()/spArticlesForLang().
LANG_ARTICLESget = LANG_ARTICLES.get
# Maps a language to countries where it is the main language.
# If you want to add an entry for another language or country, mail it at
# [email protected] .
LANG_COUNTRIES = {
'English': (
'Canada', 'Swaziland', 'Ghana', 'St. Lucia', 'Liberia', 'Jamaica', 'Bahamas',
'New Zealand', 'Lesotho', 'Kenya', 'Solomon Islands', 'United States', 'South Africa',
'St. Vincent and the Grenadines', 'Fiji', 'UK', 'Nigeria', 'Australia', 'USA',
'St. Kitts and Nevis', 'Belize', 'Sierra Leone', 'Gambia', 'Namibia', 'Micronesia',
'Kiribati', 'Grenada', 'Antigua and Barbuda', 'Barbados', 'Malta', 'Zimbabwe',
'Ireland', 'Uganda', 'Trinidad and Tobago', 'South Sudan', 'Guyana', 'Botswana',
'United Kingdom', 'Zambia'
),
'Italian': ('Italy', 'San Marino', 'Vatican City'),
'Spanish': (
'Spain', 'Mexico', 'Argentina', 'Bolivia', 'Guatemala', 'Uruguay', 'Peru', 'Cuba',
'Dominican Republic', 'Panama', 'Costa Rica', 'Ecuador', 'El Salvador', 'Chile',
'Equatorial Guinea', 'Spain', 'Colombia', 'Nicaragua', 'Venezuela', 'Honduras',
'Paraguay'
),
'French': (
'Cameroon', 'Burkina Faso', 'Dominica', 'Gabon', 'Monaco', 'France', "Cote d'Ivoire",
'Benin', 'Togo', 'Central African Republic', 'Mali', 'Niger', 'Congo, Republic of',
'Guinea', 'Congo, Democratic Republic of the', 'Luxembourg', 'Haiti', 'Chad',
'Burundi', 'Madagascar', 'Comoros', 'Senegal'
),
'Portuguese': (
'Portugal', 'Brazil', 'Sao Tome and Principe', 'Cape Verde', 'Angola', 'Mozambique',
'Guinea-Bissau'
),
'German': (
'Liechtenstein', 'Austria', 'West Germany', 'Switzerland', 'East Germany', 'Germany'
),
'Arabic': (
'Saudi Arabia', 'Kuwait', 'Jordan', 'Oman', 'Yemen', 'United Arab Emirates',
'Mauritania', 'Lebanon', 'Bahrain', 'Libya', 'Palestinian State (proposed)', 'Qatar',
'Algeria', 'Morocco', 'Iraq', 'Egypt', 'Djibouti', 'Sudan', 'Syria', 'Tunisia'
),
'Turkish': ('Turkey', 'Azerbaijan'),
'Swahili': ('Tanzania',),
'Swedish': ('Sweden',),
'Icelandic': ('Iceland',),
'Estonian': ('Estonia',),
'Romanian': ('Romania',),
'Samoan': ('Samoa',),
'Slovenian': ('Slovenia',),
'Tok Pisin': ('Papua New Guinea',),
'Palauan': ('Palau',),
'Macedonian': ('Macedonia',),
'Hindi': ('India',),
'Dutch': ('Netherlands', 'Belgium', 'Suriname'),
'Marshallese': ('Marshall Islands',),
'Korean': ('Korea, North', 'Korea, South', 'North Korea', 'South Korea'),
'Vietnamese': ('Vietnam',),
'Danish': ('Denmark',),
'Khmer': ('Cambodia',),
'Lao': ('Laos',),
'Somali': ('Somalia',),
'Filipino': ('Philippines',),
'Hungarian': ('Hungary',),
'Ukrainian': ('Ukraine',),
'Bosnian': ('Bosnia and Herzegovina',),
'Georgian': ('Georgia',),
'Lithuanian': ('Lithuania',),
'Malay': ('Brunei',),
'Tetum': ('East Timor',),
'Norwegian': ('Norway',),
'Armenian': ('Armenia',),
'Russian': ('Russia',),
'Slovak': ('Slovakia',),
'Thai': ('Thailand',),
'Croatian': ('Croatia',),
'Turkmen': ('Turkmenistan',),
'Nepali': ('Nepal',),
'Finnish': ('Finland',),
'Uzbek': ('Uzbekistan',),
'Albanian': ('Albania', 'Kosovo'),
'Hebrew': ('Israel',),
'Bulgarian': ('Bulgaria',),
'Greek': ('Cyprus', 'Greece'),
'Burmese': ('Myanmar',),
'Latvian': ('Latvia',),
'Serbian': ('Serbia',),
'Afar': ('Eritrea',),
'Catalan': ('Andorra',),
'Chinese': ('China', 'Taiwan'),
'Czech': ('Czech Republic', 'Czechoslovakia'),
'Bislama': ('Vanuatu',),
'Japanese': ('Japan',),
'Kinyarwanda': ('Rwanda',),
'Amharic': ('Ethiopia',),
'Persian': ('Afghanistan', 'Iran'),
'Tajik': ('Tajikistan',),
'Mongolian': ('Mongolia',),
'Dzongkha': ('Bhutan',),
'Urdu': ('Pakistan',),
'Polish': ('Poland',),
'Sinhala': ('Sri Lanka',),
}
# Maps countries to their main language.
# Inverted index built from LANG_COUNTRIES: country name -> language name.
# If a country ever appeared under more than one language, the last
# assignment during iteration would win.
COUNTRY_LANG = {}
for lang in LANG_COUNTRIES:
    for country in LANG_COUNTRIES[lang]:
        COUNTRY_LANG[country] = lang
def toUTF8(articles):
    """Encode every unicode article as UTF-8 and return them as a tuple."""
    return tuple(art.encode('utf8') for art in articles)
def toDicts(articles):
    """Given a list of unicode encoded articles, build two dictionaries
    (one with utf-8 encoded keys and another one with unicode keys) for
    faster membership tests."""
    encoded = toUTF8(articles)
    return {art: art for art in encoded}, {art: art for art in articles}
def addTrailingSpace(articles):
    """Return two lists (one utf-8 encoded, one unicode) built from the
    given unicode articles, adding a trailing space to every article whose
    last character is not an apostrophe or a dash."""
    encoded_articles = []
    unicode_articles = []
    for art in articles:
        if art[-1] not in ("'", '-'):
            art += ' '
        encoded_articles.append(art.encode('utf8'))
        unicode_articles.append(art)
    return encoded_articles, unicode_articles
# Caches.
# Per-language caches for articlesDictsForLang and spArticlesForLang,
# keyed by the language code (or None for the generic article set).
_ART_CACHE = {}
_SP_ART_CACHE = {}
def articlesDictsForLang(lang):
    """Return dictionaries of articles specific for the given language, or the
    default one if the language is not known.

    The result is a (utf8-keys dict, unicode-keys dict) pair, cached
    per language in _ART_CACHE."""
    if lang in _ART_CACHE:
        return _ART_CACHE[lang]
    # Fixed: missing attribute access (was "LANG_ARTICLESget(...)",
    # which raises NameError); use the dict's .get() with a fallback.
    artDicts = toDicts(LANG_ARTICLES.get(lang, GENERIC_ARTICLES))
    _ART_CACHE[lang] = artDicts
    return artDicts
def spArticlesForLang(lang):
    """Return lists of articles (plus optional trailing spaces) specific
    for the given language, or the default one if the language is not known.

    The result is a (utf8 list, unicode list) pair, cached per language
    in _SP_ART_CACHE."""
    if lang in _SP_ART_CACHE:
        return _SP_ART_CACHE[lang]
    # Fixed: missing attribute access (was "LANG_ARTICLESget(...)",
    # which raises NameError); use the dict's .get() with a fallback.
    spArticles = addTrailingSpace(LANG_ARTICLES.get(lang, GENERIC_ARTICLES))
    _SP_ART_CACHE[lang] = spArticles
    return spArticles
| 9,486 | Python | .py | 211 | 40.620853 | 96 | 0.628096 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,189 | _logging.py | cinemagoer_cinemagoer/imdb/_logging.py | # Copyright 2009-2017 Davide Alberani <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the logging facilities used by the imdb package.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
# Mapping from the textual level names accepted by setLevel() to the
# numeric constants of the logging module.
LEVELS = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warn': logging.WARNING,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL
}
# Main logger of the imdb package; submodules obtain children of this logger.
imdbpyLogger = logging.getLogger('imdbpy')
# By default records are emitted through a StreamHandler (stderr).
imdbpyStreamHandler = logging.StreamHandler()
imdbpyFormatter = logging.Formatter(
    '%(asctime)s %(levelname)s [%(name)s] %(pathname)s:%(lineno)d: %(message)s'
)
imdbpyStreamHandler.setFormatter(imdbpyFormatter)
imdbpyLogger.addHandler(imdbpyStreamHandler)
def setLevel(level):
    """Set the logging threshold of the main 'imdbpy' logger.

    Unknown level names fall back to logging.NOTSET."""
    normalized = level.lower().strip()
    imdbpyLogger.setLevel(LEVELS.get(normalized, logging.NOTSET))
    imdbpyLogger.log(logging.INFO, 'set logging threshold to "%s"',
                     logging.getLevelName(imdbpyLogger.level))
| 1,732 | Python | .py | 41 | 39.463415 | 82 | 0.756387 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,190 | cli.py | cinemagoer_cinemagoer/imdb/cli.py | # Copyright 2017 H. Turgut Uyar <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the command line interface for Cinemagoer.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from argparse import ArgumentParser
from imdb import VERSION, IMDb
DEFAULT_RESULT_SIZE = 20
def list_results(items, type_, n=None):
    """Print a numbered table of search results.

    items -- sequence of result objects;
    type_ -- kind of the items ('movie', 'person', 'character', 'company');
    n -- maximum number of rows to print (all of them when None).
    """
    if type_ == 'movie':
        field = 'title'
    else:
        field = 'name'
    print(' # IMDb id %s' % field)
    print('=== ======= %s' % ('=' * len(field),))
    for index, item in enumerate(items[:n], start=1):
        row = {
            'index': index,
            'imdb_id': getattr(item, type_ + 'ID'),
            'title': item['long imdb ' + field]
        }
        print('%(index)3d %(imdb_id)7s %(title)s' % row)
def search_item(args):
    """Run a search for the given item type and print the results.

    With --first, fetch and print the first match in full."""
    connection = IMDb()
    n = args.n if args.n is not None else DEFAULT_RESULT_SIZE
    if args.type == 'keyword':
        items = connection.search_keyword(args.key)
        if args.first:
            items = connection.get_keyword(items[0])
            list_results(items, type_='movie', n=n)
        else:
            print(' # keyword')
            print('=== =======')
            for index, keyword in enumerate(items[:n], start=1):
                print('%(index)3d %(kw)s' % {'index': index, 'kw': keyword})
        return
    # Dispatch to the right search method; argparse restricts the
    # choices, so the lookup can't fail for real invocations.
    search_methods = {
        'movie': connection.search_movie,
        'person': connection.search_person,
        'character': connection.search_character,
        'company': connection.search_company,
    }
    items = search_methods[args.type](args.key)
    if args.first:
        connection.update(items[0])
        print(items[0].summary())
    else:
        list_results(items, type_=args.type, n=args.n)
def get_item(args):
    """Retrieve a single item by its IMDb id (or keyword name) and print it."""
    connection = IMDb()
    if args.type == 'keyword':
        n = args.n if args.n is not None else DEFAULT_RESULT_SIZE
        movies = connection.get_keyword(args.key, results=n)
        list_results(movies, type_='movie')
        return
    # Dispatch to the right getter; argparse restricts the choices.
    getters = {
        'movie': connection.get_movie,
        'person': connection.get_person,
        'character': connection.get_character,
        'company': connection.get_company,
    }
    item = getters[args.type](args.key)
    print(item.summary())
def list_ranking(items, n=None):
    """Print a ranked movie listing with rating, votes, id and title.

    n -- maximum number of rows (DEFAULT_RESULT_SIZE when None).
    """
    print(' # rating votes IMDb id title')
    print('=== ====== ======= ======= =====')
    if n is None:
        n = DEFAULT_RESULT_SIZE
    for position, movie in enumerate(items[:n], start=1):
        row = {
            'index': position,
            'rating': movie.get('rating'),
            'votes': movie.get('votes'),
            'imdb_id': movie.movieID,
            'title': movie.get('long imdb title')
        }
        print('%(index)3d %(rating)s %(votes)7s %(imdb_id)7s %(title)s' % row)
def get_top_movies(args):
    """Print the IMDb top 250 movies (or details of the first one)."""
    connection = IMDb()
    movies = connection.get_top250_movies()
    if not args.first:
        list_ranking(movies, n=args.n)
        return
    connection.update(movies[0])
    print(movies[0].summary())
def get_bottom_movies(args):
    """Print the IMDb bottom 100 movies (or details of the first one)."""
    connection = IMDb()
    movies = connection.get_bottom100_movies()
    if not args.first:
        list_ranking(movies, n=args.n)
        return
    connection.update(movies[0])
    print(movies[0].summary())
def make_parser(prog):
    """Build and return the command line parser for the imdbpy tool."""
    item_types = ['movie', 'person', 'character', 'company', 'keyword']
    parser = ArgumentParser(prog)
    parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
    subparsers = parser.add_subparsers(metavar='command', dest='command')
    subparsers.required = True

    # "search": free-text search for items of a given type.
    search_parser = subparsers.add_parser('search', help='search for items')
    search_parser.add_argument('type', help='type of item to search for',
                               choices=item_types)
    search_parser.add_argument('key', help='title or name of item to search for')
    search_parser.add_argument('-n', type=int, help='number of items to list')
    search_parser.add_argument('--first', action='store_true', help='display only the first result')
    search_parser.set_defaults(func=search_item)

    # "get": direct retrieval by IMDb id (or keyword name).
    get_parser = subparsers.add_parser('get', help='retrieve information about an item')
    get_parser.add_argument('type', help='type of item to retrieve',
                            choices=item_types)
    get_parser.add_argument('key', help='IMDb id (or keyword name) of item to retrieve')
    get_parser.add_argument('-n', type=int, help='number of movies to list (only for keywords)')
    get_parser.set_defaults(func=get_item)

    # "top" / "bottom": ranked movie charts.
    top_parser = subparsers.add_parser('top', help='get top ranked movies')
    top_parser.add_argument('-n', type=int, help='number of movies to list')
    top_parser.add_argument('--first', action='store_true', help='display only the first result')
    top_parser.set_defaults(func=get_top_movies)

    bottom_parser = subparsers.add_parser('bottom', help='get bottom ranked movies')
    bottom_parser.add_argument('-n', type=int, help='number of movies to list')
    bottom_parser.add_argument('--first', action='store_true', help='display only the first result')
    bottom_parser.set_defaults(func=get_bottom_movies)
    return parser
def main(argv=None):
    """Entry point: parse command line arguments and run the chosen command."""
    if argv is None:
        argv = sys.argv
    parser = make_parser(prog='imdbpy')
    arguments = parser.parse_args(argv[1:])
    # Each subcommand registered its handler via set_defaults(func=...).
    arguments.func(arguments)


if __name__ == '__main__':
    main()
| 6,487 | Python | .py | 138 | 39.971014 | 108 | 0.642032 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,191 | utils.py | cinemagoer_cinemagoer/imdb/utils.py | # Copyright 2004-2022 Davide Alberani <[email protected]>
# 2009 H. Turgut Uyar <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides basic utilities for the imdb package.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import string
import sys
from copy import copy, deepcopy
from functools import total_ordering
from time import strftime, strptime
from imdb import linguistics
from imdb._exceptions import IMDbParserError
from imdb._logging import imdbpyLogger
# True when running under Python 2 (hexversion below 3.0).
PY2 = sys.hexversion < 0x3000000
# Logger for imdb.utils module.
_utils_logger = imdbpyLogger.getChild('utils')
# The regular expression for the "long" year format of IMDb, like
# "(1998)" and "(1986/II)", where the optional roman number (that I call
# "imdbIndex" after the slash is used for movies with the same title
# and year of release.
# XXX: probably L, C, D and M are far too much! ;-)
re_year_index = re.compile(r'\(([0-9\?]{4}([–-]([0-9\?]{4})?\s?)?(/[IVXLCDM]+)?)\)')
# Marker separating an episode title from its series information.
re_m_episode = re.compile(r'\(TV Episode\)\s+-\s+', re.I)
# "Season X | Episode Y -" prefix in series information.
re_m_series = re.compile(r'Season\s+(\d+)\s+\|\s+Episode\s+(\d+)\s+-', re.I)
# A parenthesized roman numeral (the imdbIndex).
re_m_imdbIndex = re.compile(r'\(([IVXLCDM]+)\)')
# Parenthesized kind markers appended to titles, e.g. "(TV Movie)".
re_m_kind = re.compile(
    r'\((TV episode|TV Series|TV mini[ -]series|mini|TV|Video|Video Game|VG|Short|TV Movie|TV Short|TV Special|V)\)',
    re.I
)
# Normalization of the kind markers above to the canonical kind strings.
KIND_MAP = {
    'tv': 'tv movie',
    'tv episode': 'episode',
    'v': 'video movie',
    'video': 'video movie',
    'vg': 'video game',
    'mini': 'tv mini series',
    'tv mini-series': 'tv mini series',
    'tv miniseries': 'tv mini series',
    'tv special': 'tv special'
}
# Match only the imdbIndex (for name strings).
re_index = re.compile(r'^\(([IVXLCDM]+)\)$')
# Match things inside parentheses.
re_parentheses = re.compile(r'(\(.*\))')
# Match the number of episodes.
re_episodes = re.compile(r'\s?\((\d+) episodes\)', re.I)
# Episode info in curly braces: optional title, optional (YYYY-MM-DD)
# air date and optional (#season.episode) marker.
re_episode_info = re.compile(
    r'{\s*(.+?)?\s?(\([0-9\?]{4}-[0-9\?]{1,2}-[0-9\?]{1,2}\))?\s?(\(#[0-9]+\.[0-9]+\))?}'
)
# Common suffixes in surnames.
_sname_suffixes = ('de', 'la', 'der', 'den', 'del', 'y', 'da', 'van',
                   'e', 'von', 'the', 'di', 'du', 'el', 'al')
def canonicalName(name):
    """Return the given name in canonical "Surname, Name" format.
    It assumes that name is in the 'Name Surname' format.

    Names that already contain ', ' are returned unchanged."""
    # XXX: some statistics (as of 17 Apr 2008, over 2288622 names):
    # - just a surname: 69476
    # - single surname, single name: 2209656
    # - composed surname, composed name: 9490
    # - composed surname, single name: 67606
    # (2: 59764, 3: 6862, 4: 728)
    # - single surname, composed name: 242310
    # (2: 229467, 3: 9901, 4: 2041, 5: 630)
    # - Jr.: 8025
    # Don't convert names already in the canonical format.
    if name.find(', ') != -1:
        return name
    joiner = '%s, %s'
    sur_joiner = '%s %s'
    sur_space = ' %s'
    space = ' '
    sname = name.split(' ')
    snl = len(sname)
    if snl == 2:
        # Just a name and a surname: how boring...
        name = joiner % (sname[1], sname[0])
    elif snl > 2:
        lsname = [x.lower() for x in sname]
        # Positions to probe for a surname prefix ('de', 'van', ...):
        # the first word and the one/two words just before the last.
        if snl == 3:
            _indexes = (0, snl - 2)
        else:
            _indexes = (0, snl - 2, snl - 3)
        # Check for common surname prefixes at the beginning and near the end.
        for index in _indexes:
            if lsname[index] not in _sname_suffixes:
                continue
            try:
                # Build the surname.
                surn = sur_joiner % (sname[index], sname[index + 1])
                del sname[index]
                del sname[index]
                try:
                    # Handle the "Jr." after the name.
                    if lsname[index + 2].startswith('jr'):
                        surn += sur_space % sname[index]
                        del sname[index]
                except (IndexError, ValueError):
                    pass
                name = joiner % (surn, space.join(sname))
                break
            except ValueError:
                continue
        else:
            # for-else: no prefix matched, so the last word is the surname.
            name = joiner % (sname[-1], space.join(sname[:-1]))
    return name
def normalizeName(name):
    """Return a name in the normal "Name Surname" format.

    Only names in the exact 'Surname, Name' two-part form are converted;
    everything else is returned unchanged."""
    parts = name.split(', ')
    if len(parts) == 2:
        return '%s %s' % (parts[1], parts[0])
    return name
def analyze_name(name, canonical=None):
    """Return a dictionary with the name and the optional imdbIndex
    keys, from the given string.

    If canonical is None (default), the name is stored in its own style.
    If canonical is True, the name is converted to canonical style.
    If canonical is False, the name is converted to normal format.

    raise an IMDbParserError exception if the name is not valid.
    """
    original_n = name
    # Only the part before the first ' aka ' is considered.
    name = name.split(' aka ')[0].strip()
    res = {}
    imdbIndex = ''
    opi = name.rfind('(')
    cpi = name.rfind(')')
    # Strip notes (but not if the name starts with a parenthesis).
    if opi not in (-1, 0) and cpi > opi:
        if re_index.match(name[opi:cpi + 1]):
            # The parenthesized part is a roman numeral: the imdbIndex.
            imdbIndex = name[opi + 1:cpi]
            name = name[:opi].rstrip()
        else:
            # XXX: for the birth and death dates case like " (1926-2004)"
            name = re_parentheses.sub('', name).strip()
    if not name:
        raise IMDbParserError('invalid name: "%s"' % original_n)
    if canonical is not None:
        if canonical:
            name = canonicalName(name)
        else:
            name = normalizeName(name)
    res['name'] = name
    if imdbIndex:
        res['imdbIndex'] = imdbIndex
    return res
def build_name(name_dict, canonical=None):
    """Given a dictionary that represents a "long" IMDb name,
    return a string.

    If canonical is None (default), the name is returned in the stored style.
    If canonical is True, the name is converted to canonical style.
    If canonical is False, the name is converted to normal format.
    """
    name = name_dict.get('canonical name') or name_dict.get('name', '')
    if not name:
        return ''
    if canonical is not None:
        name = canonicalName(name) if canonical else normalizeName(name)
    imdbIndex = name_dict.get('imdbIndex')
    if imdbIndex:
        name = '%s (%s)' % (name, imdbIndex)
    return name
# XXX: here only for backward compatibility. Find and remove any dependency.
# Module-level aliases kept for old code that imported these names
# directly from imdb.utils; new code should use the linguistics module.
_unicodeArticles = linguistics.GENERIC_ARTICLES
_articles = linguistics.toUTF8(_unicodeArticles)
articlesDicts = linguistics.articlesDictsForLang(None)
spArticles = linguistics.spArticlesForLang(None)
def canonicalTitle(title, lang=None, imdbIndex=None):
    """Return the title in the canonic format 'Movie Title, The';
    beware that it doesn't handle long imdb titles.

    The 'lang' argument can be used to specify the language of the title.
    """
    isUnicode = isinstance(title, str)
    # linguistics returns a (utf8-keys, unicode-keys) pair; indexing
    # with the bool selects the dictionary matching the string type.
    articlesDicts = linguistics.articlesDictsForLang(lang)
    try:
        if title.split(', ')[-1].lower() in articlesDicts[isUnicode]:
            # Already ends with an article: assume canonical format.
            return title
    except IndexError:
        pass
    _format = '%s%s, %s'
    ltitle = title.lower()
    if imdbIndex:
        imdbIndex = ' (%s)' % imdbIndex
    else:
        imdbIndex = ''
    spArticles = linguistics.spArticlesForLang(lang)
    for article in spArticles[isUnicode]:
        if ltitle.startswith(article):
            lart = len(article)
            # Move the leading article to the end of the title.
            title = _format % (title[lart:], imdbIndex, title[:lart])
            if article[-1] == ' ':
                title = title[:-1]
            break
    return title
def normalizeTitle(title, lang=None):
    """Return the title in the normal "The Title" format;
    beware that it doesn't handle long imdb titles, but only the
    title portion, without year[/imdbIndex] or special markup.

    The 'lang' argument can be used to specify the language of the title.
    """
    isUnicode = isinstance(title, str)
    stitle = title.split(', ')
    # (utf8-keys, unicode-keys) pair, indexed by the string type.
    articlesDicts = linguistics.articlesDictsForLang(lang)
    if len(stitle) > 1 and stitle[-1].lower() in articlesDicts[isUnicode]:
        # No space after articles ending with an apostrophe or a dash.
        sep = ' '
        if stitle[-1][-1] in ("'", '-'):
            sep = ''
        _format = '%s%s%s'
        _joiner = ', '
        # Move the trailing article back to the front.
        title = _format % (stitle[-1], sep, _joiner.join(stitle[:-1]))
    return title
def _split_series_episode(title):
    """Return the series and the episode titles; if this is not a
    series' episode, the returned series title is empty.

    This function recognizes two different styles:
        "The Series" An Episode (2005)
        "The Series" (2004) {An Episode (2005) (#season.episode)}"""
    series_title = ''
    episode_or_year = ''
    if title[-1:] == '}':
        # Title of the episode, as in the plain text data files.
        begin_eps = title.rfind('{')
        if begin_eps == -1:
            return '', ''
        series_title = title[:begin_eps].rstrip()
        # episode_or_year is returned with the {...}
        episode_or_year = title[begin_eps:].strip()
        if episode_or_year[:12] == '{SUSPENDED}}':
            # Suspended title: not a usable episode.
            return '', ''
    # XXX: works only with tv series; it's still unclear whether
    # IMDb will support episodes for tv mini series and tv movies...
    elif title[0:1] == '"':
        second_quot = title[1:].find('"') + 2
        if second_quot != 1:  # a second " was found.
            episode_or_year = title[second_quot:].lstrip()
            first_char = episode_or_year[0:1]
            if not first_char:
                return '', ''
            if first_char != '(':
                # There is not a (year) but the title of the episode;
                # that means this is an episode title, as returned by
                # the web server.
                series_title = title[:second_quot]
    return series_title, episode_or_year
def is_series_episode(title):
    """Return True if 'title' is an series episode."""
    series, _episode = _split_series_episode(title.strip())
    return bool(series)
def analyze_title(title, canonical=None, canonicalSeries=None, canonicalEpisode=None):
    """Analyze the given title and return a dictionary with the
    "stripped" title, the kind of the show ("movie", "tv series", etc.),
    the year of production and the optional imdbIndex (a roman number
    used to distinguish between movies with the same title and year).

    If canonical is None (default), the title is stored in its own style.
    If canonical is True, the title is converted to canonical style.
    If canonical is False, the title is converted to normal format.

    raise an IMDbParserError exception if the title is not valid.
    """
    # XXX: introduce the 'lang' argument?
    if canonical is not None:
        canonicalSeries = canonicalEpisode = canonical
    original_t = title
    result = {}
    title = title.split(' aka ')[0].strip().replace('""', '"')
    year = ''
    kind = ''
    imdbIndex = ''
    series_title, episode_or_year = _split_series_episode(title)
    if series_title:
        # It's an episode of a series.
        series_d = analyze_title(series_title, canonical=canonicalSeries)
        oad = sen = ep_year = ''
        # Plain text data files format.
        if episode_or_year[0:1] == '{' and episode_or_year[-1:] == '}':
            match = re_episode_info.findall(episode_or_year)
            if match:
                # Episode title, original air date and #season.episode
                episode_or_year, oad, sen = match[0]
                episode_or_year = episode_or_year.strip()
                if not oad:
                    # No year, but the title is something like (2005-04-12)
                    if episode_or_year and episode_or_year[0] == '(' and \
                            episode_or_year[-1:] == ')' and \
                            episode_or_year[1:2] != '#':
                        oad = episode_or_year
                        if oad[1:5] and oad[5:6] == '-':
                            try:
                                ep_year = int(oad[1:5])
                            except (TypeError, ValueError):
                                pass
                if not oad and not sen and episode_or_year.startswith('(#'):
                    sen = episode_or_year
        elif episode_or_year.startswith('Episode dated'):
            # Untitled episode: the title embeds the air date.
            oad = episode_or_year[14:]
            if oad[-4:].isdigit():
                try:
                    ep_year = int(oad[-4:])
                except (TypeError, ValueError):
                    pass
        episode_d = analyze_title(episode_or_year, canonical=canonicalEpisode)
        episode_d['kind'] = 'episode'
        episode_d['episode of'] = series_d
        if oad:
            episode_d['original air date'] = oad[1:-1]
            if ep_year and episode_d.get('year') is None:
                episode_d['year'] = ep_year
        if sen and sen[2:-1].find('.') != -1:
            # sen looks like '(#season.episode)'.
            seas, epn = sen[2:-1].split('.')
            if seas:
                # Set season and episode.
                try:
                    seas = int(seas)
                except ValueError:
                    pass
                try:
                    epn = int(epn)
                except ValueError:
                    pass
                episode_d['season'] = seas
                if epn:
                    episode_d['episode'] = epn
        return episode_d
    # First of all, search for the kind of show.
    # XXX: Number of entries at 17 Apr 2008:
    # movie: 379,871
    # episode: 483,832
    # tv movie: 61,119
    # tv series: 44,795
    # video movie: 57,915
    # tv mini series: 5,497
    # video game: 5,490
    # More up-to-date statistics: http://us.imdb.com/database_statistics
    epindex = re_m_episode.search(title)
    if epindex:
        # It's an episode of a series.
        kind = 'episode'
        series_title = title[epindex.end():]
        season_episode_match = re_m_series.match(series_title)
        if season_episode_match:
            result['season'] = int(season_episode_match.groups()[0])
            result['episode'] = int(season_episode_match.groups()[1])
            series_title = re_m_series.sub('', series_title)
        series_info = analyze_title(series_title)
        result['episode of'] = series_info.get('title')
        result['series year'] = series_info.get('year')
        title = title[:epindex.start()].strip()
    else:
        detected_kind = re_m_kind.findall(title)
        if detected_kind:
            kind = detected_kind[-1].lower().replace('-', '')
            kind = KIND_MAP.get(kind, kind)
            title = re_m_kind.sub('', title).strip()
    # Search for the year and the optional imdbIndex (a roman number).
    yi = re_year_index.findall(title)
    if yi:
        last_yi = yi[-1]
        year = last_yi[0]
        if last_yi[1]:
            imdbIndex = last_yi[1][1:]
            year = year[:-len(imdbIndex) - 1]
        i = title.rfind('(%s)' % last_yi[0])
        if i != -1:
            title = title[:i - 1].rstrip()
    if not imdbIndex:
        detect_imdbIndex = re_m_imdbIndex.findall(title)
        if detect_imdbIndex:
            imdbIndex = detect_imdbIndex[-1]
            title = re_m_imdbIndex.sub('', title).strip()
    # This is a tv (mini) series: strip the '"' at the begin and at the end.
    # XXX: strip('"') is not used for compatibility with Python 2.0.
    if title and title[0] == title[-1] == '"':
        if not kind:
            kind = 'tv series'
        title = title[1:-1].strip()
    if not title:
        raise IMDbParserError('invalid title: "%s"' % original_t)
    if canonical is not None:
        if canonical:
            title = canonicalTitle(title)
        else:
            title = normalizeTitle(title)
    result['title'] = title
    if year and year != '????':
        if '-' in year:
            # A year range means this is a series: keep it separately.
            result['series years'] = year
            year = year[:4]
        try:
            result['year'] = int(year)
        except (TypeError, ValueError):
            pass
    if imdbIndex:
        result['imdbIndex'] = imdbIndex.strip()
    result['kind'] = kind or 'movie'
    return result
# Date formats used on the web site and in the plain text data files.
_web_format = '%d %B %Y'
_ptdf_format = '(%Y-%m-%d)'


def _convertTime(title, fromPTDFtoWEB=True):
    """Convert a date expressed in the plain text data files format to
    the 'Episode dated ...' format used on the web site; if
    fromPTDFtoWEB is false, the inverse conversion is applied.

    Titles that don't parse are returned unchanged."""
    if fromPTDFtoWEB:
        from_format, to_format = _ptdf_format, _web_format
    else:
        from_format = 'Episode dated %s' % _web_format
        to_format = _ptdf_format
    try:
        parsed = strptime(title, from_format)
        converted = strftime(to_format, parsed)
    except ValueError:
        return title
    if fromPTDFtoWEB:
        # Drop the leading zero from the day of the month.
        if converted[0] == '0':
            converted = converted[1:]
        converted = 'Episode dated %s' % converted
    return converted
def build_title(title_dict, canonical=None, canonicalSeries=None,
                canonicalEpisode=None, ptdf=False, lang=None, _doYear=True, appendKind=True):
    """Given a dictionary that represents a "long" IMDb title,
    return a string.

    If canonical is None (default), the title is returned in the stored style.
    If canonical is True, the title is converted to canonical style.
    If canonical is False, the title is converted to normal format.

    lang can be used to specify the language of the title.

    If ptdf is true, the plain text data files format is used.
    """
    if canonical is not None:
        canonicalSeries = canonical
    pre_title = ''
    kind = title_dict.get('kind')
    episode_of = title_dict.get('episode of')
    if kind == 'episode' and episode_of is not None:
        # Works with both Movie instances and plain dictionaries.
        doYear = 0
        if ptdf:
            doYear = 1
        # XXX: for results coming from the new search page.
        if not isinstance(episode_of, (dict, _Container)):
            episode_of = {'title': episode_of, 'kind': 'tv series'}
            if 'series year' in title_dict:
                episode_of['year'] = title_dict['series year']
        pre_title = build_title(episode_of, canonical=canonicalSeries,
                                ptdf=False, _doYear=doYear)
        ep_dict = {'title': title_dict.get('title', ''),
                   'imdbIndex': title_dict.get('imdbIndex')}
        ep_title = ep_dict['title']
        if not ptdf:
            doYear = 1
            ep_dict['year'] = title_dict.get('year', '????')
            # '(YYYY-..)' style titles get converted to 'Episode dated ...'.
            if ep_title[0:1] == '(' and ep_title[-1:] == ')' and \
                    ep_title[1:5].isdigit():
                ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=True)
        else:
            doYear = 0
            if ep_title.startswith('Episode dated'):
                ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=False)
        episode_title = build_title(ep_dict,
                                    canonical=canonicalEpisode, ptdf=ptdf,
                                    _doYear=doYear)
        if ptdf:
            # Append air date and (#season.episode), then wrap in braces.
            oad = title_dict.get('original air date', '')
            if len(oad) == 10 and oad[4] == '-' and oad[7] == '-' and \
                    episode_title.find(oad) == -1:
                episode_title += ' (%s)' % oad
            seas = title_dict.get('season')
            if seas is not None:
                episode_title += ' (#%s' % seas
                episode = title_dict.get('episode')
                if episode is not None:
                    episode_title += '.%s' % episode
                episode_title += ')'
            episode_title = '{%s}' % episode_title
        return '%s %s' % (pre_title, episode_title)
    title = title_dict.get('title', '')
    imdbIndex = title_dict.get('imdbIndex', '')
    if not title:
        return ''
    if canonical is not None:
        if canonical:
            title = canonicalTitle(title, lang=lang, imdbIndex=imdbIndex)
        else:
            title = normalizeTitle(title, lang=lang)
    if pre_title:
        title = '%s %s' % (pre_title, title)
    if kind in ('tv series', 'tv mini series'):
        title = '"%s"' % title
    if _doYear:
        # NOTE(review): str() makes the '????' fallback unreachable when
        # 'year' is missing, since str(None) == 'None' is truthy — confirm
        # whether this is intended.
        year = str(title_dict.get('year')) or '????'
        imdbIndex = title_dict.get('imdbIndex')
        if not ptdf:
            if imdbIndex and (canonical is None or canonical):
                title += ' (%s)' % imdbIndex
            title += ' (%s)' % year
        else:
            title += ' (%s' % year
            if imdbIndex and (canonical is None or canonical):
                title += '/%s' % imdbIndex
            title += ')'
    if appendKind and kind:
        if kind == 'tv movie':
            title += ' (TV)'
        elif kind == 'video movie':
            title += ' (V)'
        elif kind == 'tv mini series':
            title += ' (mini)'
        elif kind == 'video game':
            title += ' (VG)'
    return title
def split_company_name_notes(name):
    """Return two strings, the first representing the company name,
    and the other representing the (optional) notes.

    Notes are a trailing parenthesized part, e.g. '(as Foo Inc.)'."""
    name = name.strip()
    notes = ''
    if name.endswith(')'):
        open_idx = name.find('(')
        if open_idx != -1:
            notes = name[open_idx:]
            name = name[:open_idx].rstrip()
    return name, notes
def analyze_company_name(name, stripNotes=False):
    """Return a dictionary with the name and the optional 'country'
    keys, from the given string.

    If stripNotes is true, tries to not consider optional notes.

    raise an IMDbParserError exception if the name is not valid.
    """
    if stripNotes:
        name = split_company_name_notes(name)[0]
    original_name = name
    name = name.strip()
    country = None
    if name.startswith('['):
        # The whole string is a country tag: drop the markup characters.
        name = re.sub(r'[!@#$\(\)\[\]]', '', name)
    elif name.endswith(']'):
        bracket_idx = name.rfind('[')
        if bracket_idx != -1:
            # Trailing '[xx]' is the country of the company.
            country = name[bracket_idx:]
            name = name[:bracket_idx].rstrip()
    if not name:
        raise IMDbParserError('invalid name: "%s"' % original_name)
    result = {'name': name}
    if country:
        result['country'] = country
    return result
def build_company_name(name_dict):
    """Given a dictionary that represents a "long" IMDb company name,
    return a string.

    The optional 'country' value is appended after the name."""
    company = name_dict.get('name')
    if not company:
        return ''
    country = name_dict.get('country')
    if country is None:
        return company
    return '%s %s' % (company, country)
@total_ordering
class _LastC:
    """Sentinel that sorts after (almost) anything else."""

    def __lt__(self, other):
        """A _LastC instance is never less than any other object."""
        return False

    def __eq__(self, other):
        """Compare equal to anything that is not itself a _LastC."""
        return not isinstance(other, self.__class__)


# Shared "missing value sorts last" sentinel used by the cmp* functions.
_last = _LastC()
def cmpMovies(m1, m2):
    """Compare two movies by year, in reverse order; the imdbIndex is checked
    for movies with the same year of production and title.

    Returns -1, 0 or 1 (old-style cmp semantics)."""
    # Sort tv series' episodes.
    m1e = m1.get('episode of')
    m2e = m2.get('episode of')
    if m1e is not None and m2e is not None:
        # Same series first, then season/episode in reverse order.
        cmp_series = cmpMovies(m1e, m2e)
        if cmp_series != 0:
            return cmp_series
        m1s = m1.get('season')
        m2s = m2.get('season')
        if m1s is not None and m2s is not None:
            if m1s < m2s:
                return 1
            elif m1s > m2s:
                return -1
            # NOTE(review): when 'episode' is missing for either movie this
            # compares against None, which raises TypeError on Python 3 —
            # confirm callers always supply 'episode' alongside 'season'.
            m1p = m1.get('episode')
            m2p = m2.get('episode')
            if m1p < m2p:
                return 1
            elif m1p > m2p:
                return -1
    try:
        # For an episode, the series' year is used.
        if m1e is None:
            m1y = int(m1.get('year', 0))
        else:
            m1y = int(m1e.get('year', 0))
    except ValueError:
        m1y = 0
    try:
        if m2e is None:
            m2y = int(m2.get('year', 0))
        else:
            m2y = int(m2e.get('year', 0))
    except ValueError:
        m2y = 0
    # Reverse chronological order: newer movies sort first.
    if m1y > m2y:
        return -1
    if m1y < m2y:
        return 1
    # Ok, these movies have the same production year...
    # m1t = m1.get('canonical title', _last)
    # m2t = m2.get('canonical title', _last)
    # It should works also with normal dictionaries (returned from searches).
    # if m1t is _last and m2t is _last:
    m1t = m1.get('title', _last)
    m2t = m2.get('title', _last)
    if m1t < m2t:
        return -1
    if m1t > m2t:
        return 1
    # Ok, these movies have the same title...
    m1i = m1.get('imdbIndex', _last)
    m2i = m2.get('imdbIndex', _last)
    if m1i > m2i:
        return -1
    if m1i < m2i:
        return 1
    m1id = getattr(m1, 'movieID', None)
    # Introduce this check even for other comparisons functions?
    # XXX: is it safe to check without knowing the data access system?
    # probably not a great idea. Check for 'kind', instead?
    if m1id is not None:
        m2id = getattr(m2, 'movieID', None)
        if m1id > m2id:
            return -1
        elif m1id < m2id:
            return 1
    return 0
def cmpPeople(p1, p2):
    """Compare two people by billingPos, name and imdbIndex.

    Returns -1, 0 or 1 (old-style cmp semantics); missing values are
    replaced by the _last sentinel so they sort after real values."""
    p1b = getattr(p1, 'billingPos', None) or _last
    p2b = getattr(p2, 'billingPos', None) or _last
    # NOTE(review): when both billingPos values are the _last sentinel,
    # `p1b > p2b` evaluates True (total_ordering derives __gt__ from
    # __lt__/__eq__, and _last == _last is False), so two people both
    # lacking billingPos compare as 1 and names are never compared —
    # confirm this is intended.
    if p1b > p2b:
        return 1
    if p1b < p2b:
        return -1
    p1n = p1.get('canonical name', _last)
    p2n = p2.get('canonical name', _last)
    if p1n is _last and p2n is _last:
        # Fall back to plain names when neither has a canonical name.
        p1n = p1.get('name', _last)
        p2n = p2.get('name', _last)
    if p1n > p2n:
        return 1
    if p1n < p2n:
        return -1
    p1i = p1.get('imdbIndex', _last)
    p2i = p2.get('imdbIndex', _last)
    if p1i > p2i:
        return 1
    if p1i < p2i:
        return -1
    return 0
def cmpCompanies(p1, p2):
    """Compare two companies.

    Order by 'long imdb name' (falling back to 'name' when neither has
    one), then by 'country'; returns -1, 0 or 1 (old-style cmp)."""
    p1n = p1.get('long imdb name', _last)
    p2n = p2.get('long imdb name', _last)
    if p1n is _last and p2n is _last:
        p1n = p1.get('name', _last)
        p2n = p2.get('name', _last)
    if p1n > p2n:
        return 1
    if p1n < p2n:
        return -1
    p1i = p1.get('country', _last)
    p2i = p2.get('country', _last)
    if p1i > p2i:
        return 1
    if p1i < p2i:
        return -1
    return 0
# References to titles, names and characters.
# XXX: find better regexp!
# Titles are referenced as _A Title (1999)_ (qv), optionally with an
# imdbIndex and/or a kind marker like (TV) or (mini).
re_titleRef = re.compile(
    r'_(.+?(?: \([0-9\?]{4}(?:/[IVXLCDM]+)?\))?(?: \(mini\)| \(TV\)| \(V\)| \(VG\))?)_ \(qv\)'
)
# Names are referenced as 'A Name' (qv).
# FIXME: doesn't match persons with ' in the name.
re_nameRef = re.compile(r"'([^']+?)' \(qv\)")
# Characters are referenced as #A Character# (qv).
# XXX: good choice? Are there characters with # in the name?
re_characterRef = re.compile(r"#([^']+?)# \(qv\)")
# Functions used to filter the text strings.
def modNull(s, titlesRefs, namesRefs, charactersRefs):
    """Identity filter: return the string unchanged."""
    return s
def modClearTitleRefs(s, titlesRefs, namesRefs, charactersRefs):
    """Strip the markup around title references, keeping the title text."""
    cleaned = re_titleRef.sub(r'\1', s)
    return cleaned
def modClearNameRefs(s, titlesRefs, namesRefs, charactersRefs):
    """Strip the markup around name references, keeping the name text."""
    cleaned = re_nameRef.sub(r'\1', s)
    return cleaned
def modClearCharacterRefs(s, titlesRefs, namesRefs, charactersRefs):
    """Strip the markup around character references, keeping the text."""
    cleaned = re_characterRef.sub(r'\1', s)
    return cleaned
def modClearRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove titles, names and characters references."""
s = modClearTitleRefs(s, {}, {}, {})
s = modClearCharacterRefs(s, {}, {}, {})
return modClearNameRefs(s, {}, {}, {})
def modifyStrings(o, modFunct, titlesRefs, namesRefs, charactersRefs):
    """Modify a string (or string values in a dictionary or strings
    in a list), using the provided modFunct function and titlesRefs
    namesRefs and charactersRefs references dictionaries.

    Returns the (possibly copied and modified) object."""
    # Notice that it doesn't go any deeper than the first two levels in a list.
    if isinstance(o, str):
        return modFunct(o, titlesRefs, namesRefs, charactersRefs)
    elif isinstance(o, (list, tuple, dict)):
        # Copy-on-write: the container is duplicated only if (and when)
        # one of its first-level string values actually gets modified.
        _stillorig = 1
        if isinstance(o, (list, tuple)):
            keys = range(len(o))
        else:
            keys = list(o.keys())
        for i in keys:
            v = o[i]
            if isinstance(v, str):
                if _stillorig:
                    # NOTE(review): copy() of a tuple is still a tuple, so
                    # the item assignment below would fail for a tuple of
                    # strings; presumably only lists and dicts reach this
                    # branch - verify.
                    o = copy(o)
                    _stillorig = 0
                o[i] = modFunct(v, titlesRefs, namesRefs, charactersRefs)
            elif isinstance(v, (list, tuple)):
                # Second level: modified in place (no copy is made here).
                modifyStrings(o[i], modFunct, titlesRefs, namesRefs, charactersRefs)
    return o
def date_and_notes(s):
    """Parse a (birth|death) string and split it into a (date, notes)
    tuple; a string that does not look like a date becomes notes."""
    s = s.strip()
    if not s:
        return '', ''
    # First words that mark the string as a (possibly approximate) date.
    date_markers = (
        'c.', 'january', 'february', 'march', 'april', 'may', 'june',
        'july', 'august', 'september', 'october', 'november', 'december',
        'ca.', 'circa', '????,')
    if s[0].isdigit() or s.split()[0].lower() in date_markers:
        # Anything after the first comma is treated as notes.
        date_part, sep, note_part = s.partition(',')
        if sep:
            date, notes = date_part, note_part.strip()
        else:
            date, notes = s, ''
    else:
        # Not a date at all: the whole string is notes.
        date, notes = '', s
    if date == '????':
        date = ''
    return date, notes
class RolesList(list):
    """A list of Person or Character instances, used for the currentRole
    property."""
    def __init__(self, *args, **kwds):
        # Optional free-form notes attached to the whole list of roles.
        self._notes = None
        super(RolesList, self).__init__(*args, **kwds)

    @property
    def notes(self):
        """Notes attached to this list of roles (None by default)."""
        return self._notes

    @notes.setter
    def notes(self, notes):
        self._notes = notes

    def __str__(self):
        """Join the string forms of the contained items with ' / '."""
        return ' / '.join(str(item) for item in self)
# Replace & with &amp;, but only if it's not already part of a charref.
_re_amp = re.compile(r'&(?![^a-zA-Z0-9_#]{1,5};)')


def escape4xml(value):
    """Escape characters that are not safe inside an XML text value;
    numbers are first converted to their string form."""
    if isinstance(value, (int, float)):
        value = str(value)
    value = _re_amp.sub('&amp;', value)
    # Ampersands are handled above, so these replacements are safe to
    # apply in any order.
    for plain, entity in (('"', '&quot;'), ("'", '&#39;'),
                          ('<', '&lt;'), ('>', '&gt;')):
        value = value.replace(plain, entity)
    if isinstance(value, bytes):
        value = value.decode('utf-8', 'xmlcharrefreplace')
    return value
def _refsToReplace(value, modFunct, titlesRefs, namesRefs, charactersRefs):
    """Return three lists - for movie titles, persons and characters names -
    with two items tuples: the first item is the reference once escaped
    by the user-provided modFunct function, the second is the same
    reference un-escaped.

    The lists are returned in (titles, names, characters) order; they are
    later used by _normalizeValue to restore reference markup that the
    XML escaping would otherwise mangle."""
    mRefs = []
    for refRe, refTemplate in [(re_titleRef, '_%s_ (qv)'),
                               (re_nameRef, "'%s' (qv)"),
                               (re_characterRef, '#%s# (qv)')]:
        theseRefs = []
        for theRef in refRe.findall(value):
            # refTemplate % theRef values don't change for a single
            # _Container instance, so this is a good candidate for a
            # cache or something - even if it's so rarely used that...
            # Moreover, it can grow - ia.update(...) - and change if
            # modFunct is modified.
            goodValue = modFunct(refTemplate % theRef, titlesRefs, namesRefs, charactersRefs)
            # Prevents problems with crap in plain text data files.
            # We should probably exclude invalid chars and string that
            # are too long in the re_*Ref expressions.
            if '_' in goodValue or len(goodValue) > 128:
                continue
            toReplace = escape4xml(goodValue)
            # Only the 'value' portion is replaced.
            replaceWith = goodValue.replace(theRef, escape4xml(theRef))
            theseRefs.append((toReplace, replaceWith))
        mRefs.append(theseRefs)
    return mRefs
def _handleTextNotes(s):
"""Split text::notes strings."""
ssplit = s.split('::', 1)
if len(ssplit) == 1:
return s
return '%s<notes>%s</notes>' % (ssplit[0], ssplit[1])
def _normalizeValue(value, withRefs=False, modFunct=None, titlesRefs=None,
                    namesRefs=None, charactersRefs=None):
    """Replace some chars that can't be present in a XML text.

    When *withRefs* is true, title/name/character references in the text
    are re-escaped through *modFunct* and restored after the XML escaping,
    so that the reference markup survives intact."""
    if not withRefs:
        value = _handleTextNotes(escape4xml(value))
    else:
        # Replace references that were accidentally escaped.
        replaceLists = _refsToReplace(value, modFunct, titlesRefs, namesRefs, charactersRefs)
        value = modFunct(value, titlesRefs or {}, namesRefs or {}, charactersRefs or {})
        value = _handleTextNotes(escape4xml(value))
        for replaceList in replaceLists:
            for toReplace, replaceWith in replaceList:
                value = value.replace(toReplace, replaceWith)
    return value
def _tag4TON(ton, addAccessSystem=False, _containerOnly=False):
    """Build a tag for the given _Container instance;
    both open and close tags are returned (as a tuple) - except for the
    #350 workaround below, where a single empty string is returned."""
    tag = ton.__class__.__name__.lower()
    what = 'name'
    if tag == 'movie':
        value = ton.get('long imdb title') or ton.get('title', '')
        what = 'title'
    else:
        value = ton.get('long imdb name') or ton.get('name', '')
    value = _normalizeValue(value)
    extras = ''
    # Serialize the currentRole (a single role or a list of roles).
    crl = ton.currentRole
    if crl:
        if not isinstance(crl, list):
            crl = [crl]
        for cr in crl:
            crTag = cr.__class__.__name__.lower()
            if PY2 and isinstance(cr, unicode):
                # A plain string carries no ID.
                crValue = cr
                crID = None
            else:
                crValue = cr.get('long imdb name') or ''
                crID = cr.getID()
            crValue = _normalizeValue(crValue)
            if crID is not None:
                extras += '<current-role><%s id="%s"><name>%s</name></%s>' % (
                    crTag, crID, crValue, crTag
                )
            else:
                extras += '<current-role><%s><name>%s</name></%s>' % (crTag, crValue, crTag)
            if hasattr(cr, 'notes'):
                extras += '<notes>%s</notes>' % _normalizeValue(cr.notes)
            extras += '</current-role>'
    theID = ton.getID()
    if theID is not None:
        beginTag = '<%s id="%s"' % (tag, theID)
        if addAccessSystem and ton.accessSystem:
            beginTag += ' access-system="%s"' % ton.accessSystem
        if not _containerOnly:
            beginTag += '><%s>%s</%s>' % (what, value, what)
        else:
            beginTag += '>'
    else:
        # workaround for #350
        beginTag=""
        if not _containerOnly:
            if value:
                beginTag = '<%s><%s>%s</%s>' % (tag, what, value, what)
            else:
                beginTag = '<%s>' % tag
    beginTag += extras
    if ton.notes:
        beginTag += '<notes>%s</notes>' % _normalizeValue(ton.notes)
    # NOTE(review): a bare string (not a tuple) is returned here, so
    # callers must be prepared to handle both return shapes - verify.
    if beginTag == "":
        return beginTag
    return beginTag, '</%s>' % tag
# Maps a dotted "fullpath" inside the serialized object to a
# (tagName, useKeyAttribute) pair: the original key is replaced by
# tagName and, when the second item is True, preserved in a 'key'
# attribute (see _tagAttr).
TAGS_TO_MODIFY = {
    'movie.parents-guide': ('item', True),
    'movie.number-of-votes': ('item', True),
    'movie.soundtrack.item': ('item', True),
    'movie.soundtrack.item.item': ('item', True),
    'movie.quotes': ('quote', False),
    'movie.quotes.quote': ('line', False),
    'movie.demographic': ('item', True),
    'movie.episodes': ('season', True),
    'movie.episodes.season': ('episode', True),
    'person.merchandising-links': ('item', True),
    'person.genres': ('item', True),
    'person.quotes': ('quote', False),
    'person.keywords': ('item', True),
    'character.quotes': ('item', True),
    'character.quotes.item': ('quote', False),
    'character.quotes.item.quote': ('line', False)
}
# Characters considered valid in an XML tag name.
_valid_chars = string.ascii_lowercase + '-' + string.digits
# NOTE(review): mapping _valid_chars onto themselves builds an identity
# translation table (translate() changes and deletes nothing); presumably
# invalid characters were meant to be stripped - verify against the
# fail-safe checks in _tagAttr.
_translator = str.maketrans(_valid_chars, _valid_chars) if not PY2 else \
    string.maketrans(_valid_chars, _valid_chars)
def _tagAttr(key, fullpath):
    """Return a tuple with a tag name and a (possibly empty) attribute,
    applying the conversions specified in TAGS_TO_MODIFY and checking
    that the tag is safe for a XML document."""
    attrs = {}
    _escapedKey = escape4xml(key)
    if fullpath in TAGS_TO_MODIFY:
        # Replace the key with a canonical tag name; the original key is
        # optionally preserved in a 'key' attribute.
        tagName, useTitle = TAGS_TO_MODIFY[fullpath]
        if useTitle:
            attrs['key'] = _escapedKey
    elif not isinstance(key, str):
        # Non-string key: remember its type in a 'keytype' attribute.
        # NOTE(review): the "<type '" form matches Python 2 type reprs
        # only; on Python 3 they look like "<class 'int'>" - verify.
        strType = str(type(key)).replace("<type '", "").replace("'>", "")
        attrs['keytype'] = strType
        tagName = str(key)
    else:
        tagName = key
    if isinstance(key, int):
        attrs['keytype'] = 'int'
    origTagName = tagName
    tagName = tagName.lower().replace(' ', '-')
    tagName = str(tagName).translate(_translator)
    if origTagName != tagName:
        # The tag was sanitized: keep the original key as an attribute.
        if 'key' not in attrs:
            attrs['key'] = _escapedKey
    if (not tagName) or tagName[0].isdigit() or tagName[0] == '-':
        # This is a fail-safe: we should never be here, since unpredictable
        # keys must be listed in TAGS_TO_MODIFY.
        # This will probably break the DTD/schema, but at least it will
        # produce a valid XML.
        tagName = 'item'
        _utils_logger.error('invalid tag: %s [%s]' % (_escapedKey, fullpath))
        attrs['key'] = _escapedKey
    return tagName, ' '.join(['%s="%s"' % i for i in list(attrs.items())])
def _seq2xml(seq, _l=None, withRefs=False, modFunct=None,
             titlesRefs=None, namesRefs=None, charactersRefs=None,
             _topLevel=True, key2infoset=None, fullpath=''):
    """Convert a sequence or a dictionary to a list of XML
    unicode strings.

    *seq* can be a dict, a list/tuple, a _Container or a scalar; the
    produced fragments are appended to (and returned as) the *_l* list.
    *fullpath* is the dotted path of the enclosing tags, used by
    _tagAttr to look up TAGS_TO_MODIFY."""
    if _l is None:
        _l = []
    if isinstance(seq, dict):
        for key in seq:
            value = seq[key]
            if isinstance(key, _Container):
                # Here we're assuming that a _Container is never a top-level
                # key (otherwise we should handle key2infoset).
                openTag, closeTag = _tag4TON(key)
                # So that fullpath will contains something meaningful.
                tagName = key.__class__.__name__.lower()
            else:
                tagName, attrs = _tagAttr(key, fullpath)
                openTag = '<%s' % tagName
                if attrs:
                    openTag += ' %s' % attrs
                if _topLevel and key2infoset and key in key2infoset:
                    openTag += ' infoset="%s"' % key2infoset[key]
                # Record the original scalar type of the value.
                if isinstance(value, int):
                    openTag += ' type="int"'
                elif isinstance(value, float):
                    openTag += ' type="float"'
                openTag += '>'
                closeTag = '</%s>' % tagName
            _l.append(openTag)
            _seq2xml(value, _l, withRefs, modFunct, titlesRefs,
                     namesRefs, charactersRefs, _topLevel=False,
                     fullpath='%s.%s' % (fullpath, tagName))
            _l.append(closeTag)
    elif isinstance(seq, (list, tuple)):
        tagName, attrs = _tagAttr('item', fullpath)
        beginTag = '<%s' % tagName
        if attrs:
            beginTag += ' %s' % attrs
        # beginTag += u'>'
        closeTag = '</%s>' % tagName
        for item in seq:
            if isinstance(item, _Container):
                # A _Container item produces its own open/close tags
                # (via _tag4TON, down the recursion).
                _seq2xml(item, _l, withRefs, modFunct, titlesRefs,
                         namesRefs, charactersRefs, _topLevel=False,
                         fullpath='%s.%s' % (fullpath, item.__class__.__name__.lower()))
            else:
                openTag = beginTag
                if isinstance(item, int):
                    openTag += ' type="int"'
                elif isinstance(item, float):
                    openTag += ' type="float"'
                openTag += '>'
                _l.append(openTag)
                _seq2xml(item, _l, withRefs, modFunct, titlesRefs,
                         namesRefs, charactersRefs, _topLevel=False,
                         fullpath='%s.%s' % (fullpath, tagName))
                _l.append(closeTag)
    else:
        if isinstance(seq, _Container):
            _l.extend(_tag4TON(seq))
        elif seq:
            # Text, ints, floats and the like.
            _l.append(_normalizeValue(seq, withRefs=withRefs,
                                      modFunct=modFunct,
                                      titlesRefs=titlesRefs,
                                      namesRefs=namesRefs,
                                      charactersRefs=charactersRefs))
    return _l
# Preamble for the XML output of asXML(); '%s' is replaced by the
# lowercased class name of the serialized object (the root tag).
_xmlHead = """<?xml version="1.0"?>
<!DOCTYPE %s SYSTEM "https://cinemagoer.github.io/static/dtd/cinemagoer.dtd">
"""
@total_ordering
class _Container(object):
    """Base class for Movie, Person, Character and Company classes.

    It implements the common dictionary-like protocol, the currentRole
    machinery and the XML serialization (asXML/getAsXML) helpers."""
    # The default sets of information retrieved.
    default_info = ()
    # Aliases for some not-so-intuitive keys.
    keys_alias = {}
    # List of keys to modify.
    keys_tomodify_list = ()
    # Function used to compare two instances of this class.
    cmpFunct = None
    # key that contains the cover/headshot
    _image_key = None
    def __init__(self, myID=None, data=None, notes='',
                 currentRole='', roleID=None, roleIsPerson=False,
                 accessSystem=None, titlesRefs=None, namesRefs=None,
                 charactersRefs=None, modFunct=None, *args, **kwds):
        """Initialize a Movie, Person, Character or Company object.
        *myID* -- your personal identifier for this object.
        *data* -- a dictionary used to initialize the object.
        *notes* -- notes for the person referred in the currentRole
                attribute; e.g.: '(voice)' or the alias used in the
                movie credits.
        *accessSystem* -- a string representing the data access system used.
        *currentRole* -- a Character instance representing the current role
                         or duty of a person in this movie, or a Person
                         object representing the actor/actress who played
                         a given character in a Movie. If a string is
                         passed, an object is automatically built.
        *roleID* -- if available, the characterID/personID of the currentRole
                    object.
        *roleIsPerson* -- when False (default) the currentRole is assumed
                        to be a Character object, otherwise a Person.
        *titlesRefs* -- a dictionary with references to movies.
        *namesRefs* -- a dictionary with references to persons.
        *charactersRefs* -- a dictionary with references to characters.
        *modFunct* -- function called returning text fields.
        """
        self.reset()
        self.accessSystem = accessSystem
        self.myID = myID
        if data is None:
            data = {}
        self.set_data(data, override=True)
        self.notes = notes
        if titlesRefs is None:
            titlesRefs = {}
        self.update_titlesRefs(titlesRefs)
        if namesRefs is None:
            namesRefs = {}
        self.update_namesRefs(namesRefs)
        if charactersRefs is None:
            charactersRefs = {}
        self.update_charactersRefs(charactersRefs)
        self.set_mod_funct(modFunct)
        self.keys_tomodify = {}
        for item in self.keys_tomodify_list:
            self.keys_tomodify[item] = None
        self._roleIsPerson = roleIsPerson
        # Local imports to avoid circular dependencies between modules.
        if not roleIsPerson:
            from imdb.Character import Character
            self._roleClass = Character
        else:
            from imdb.Person import Person
            self._roleClass = Person
        self.currentRole = currentRole
        if roleID:
            self.roleID = roleID
        self._init(*args, **kwds)
    def _get_roleID(self):
        """Return the characterID or personID of the currentRole object."""
        if not self.__role:
            return None
        if isinstance(self.__role, list):
            # Multiple roles: return the list of their IDs.
            return [x.getID() for x in self.__role]
        return self.currentRole.getID()
    def _set_roleID(self, roleID):
        """Set the characterID or personID of the currentRole object."""
        if not self.__role:
            # XXX: needed? Just ignore it? It's probably safer to
            # ignore it, to prevent some bugs in the parsers.
            # raise IMDbError,"Can't set ID of an empty Character/Person object."
            pass
        if not self._roleIsPerson:
            if not isinstance(roleID, (list, tuple)):
                if not (PY2 and isinstance(self.currentRole, unicode)):
                    self.currentRole.characterID = roleID
            else:
                for index, item in enumerate(roleID):
                    r = self.__role[index]
                    if PY2 and isinstance(r, unicode):
                        # A plain string can't carry an ID.
                        continue
                    r.characterID = item
        else:
            if not isinstance(roleID, (list, tuple)):
                self.currentRole.personID = roleID
            else:
                for index, item in enumerate(roleID):
                    r = self.__role[index]
                    if PY2 and isinstance(r, unicode):
                        continue
                    r.personID = item
    roleID = property(_get_roleID, _set_roleID,
                      doc="the characterID or personID of the currentRole object.")
    def _get_currentRole(self):
        """Return a Character or Person instance."""
        if self.__role:
            return self.__role
        # No role set: return an empty Character/Person object.
        return self._roleClass(name='', accessSystem=self.accessSystem, modFunct=self.modFunct)
    def _set_currentRole(self, role):
        """Set self.currentRole to a Character or Person instance."""
        if isinstance(role, str):
            if not role:
                self.__role = None
            else:
                self.__role = self._roleClass(name=role, modFunct=self.modFunct,
                                              accessSystem=self.accessSystem)
        elif isinstance(role, (list, tuple)):
            # Multiple roles: stored in a RolesList; plain strings are
            # promoted to Character/Person objects.
            self.__role = RolesList()
            for item in role:
                if isinstance(item, str):
                    self.__role.append(self._roleClass(name=item,
                                                       accessSystem=self.accessSystem,
                                                       modFunct=self.modFunct))
                else:
                    self.__role.append(item)
            if not self.__role:
                self.__role = None
        else:
            self.__role = role
    currentRole = property(_get_currentRole, _set_currentRole,
                           doc="The role of a Person in a Movie"
                               " or the interpreter of a Character in a Movie.")
    def _init(self, **kwds):
        # Hook for subclasses; called at the end of __init__.
        pass
    def get_fullsizeURL(self):
        """Return the full-size URL for this object."""
        if not (self._image_key and self._image_key in self.data):
            return None
        url = self.data[self._image_key] or ''
        ext_idx = url.rfind('.')
        if ext_idx == -1:
            return url
        # Strip the resizing directives from the image URL, keeping only
        # the base name and the extension.
        if '@' in url:
            return url[:url.rindex('@') + 1] + url[ext_idx:]
        else:
            prev_dot = url[:ext_idx].rfind('.')
            if prev_dot == -1:
                return url
            return url[:prev_dot] + url[ext_idx:]
    def reset(self):
        """Reset the object to an empty state (myID is cleared, too)."""
        self.data = {}
        self.myID = None
        self.notes = ''
        self.titlesRefs = {}
        self.namesRefs = {}
        self.charactersRefs = {}
        self.modFunct = modClearRefs
        self.current_info = []
        self.infoset2keys = {}
        self.key2infoset = {}
        self.__role = None
        self._reset()
    def _reset(self):
        # Hook for subclasses.
        pass
    def clear(self):
        """Reset the dictionary (unlike reset(), myID and modFunct are
        preserved)."""
        self.data.clear()
        self.notes = ''
        self.titlesRefs = {}
        self.namesRefs = {}
        self.charactersRefs = {}
        self.current_info = []
        self.infoset2keys = {}
        self.key2infoset = {}
        self.__role = None
        self._clear()
    def _clear(self):
        # Hook for subclasses.
        pass
    def get_current_info(self):
        """Return the current set of information retrieved."""
        return self.current_info
    def update_infoset_map(self, infoset, keys, mainInfoset):
        """Update the mappings between infoset and keys."""
        if keys is None:
            keys = []
        if mainInfoset is not None:
            theIS = mainInfoset
        else:
            theIS = infoset
        self.infoset2keys[theIS] = keys
        for key in keys:
            self.key2infoset[key] = theIS
    def set_current_info(self, ci):
        """Set the current set of information retrieved."""
        # XXX:Remove? It's never used and there's no way to update infoset2keys.
        self.current_info = ci
    def add_to_current_info(self, val, keys=None, mainInfoset=None):
        """Add a set of information to the current list."""
        if val not in self.current_info:
            self.current_info.append(val)
            self.update_infoset_map(val, keys, mainInfoset)
    def has_current_info(self, val):
        """Return true if the given set of information is in the list."""
        return val in self.current_info
    def set_mod_funct(self, modFunct):
        """Set the function used to modify the strings."""
        if modFunct is None:
            modFunct = modClearRefs
        self.modFunct = modFunct
    def update_titlesRefs(self, titlesRefs):
        """Update the dictionary with the references to movies."""
        self.titlesRefs.update(titlesRefs)
    def get_titlesRefs(self):
        """Return the dictionary with the references to movies."""
        return self.titlesRefs
    def update_namesRefs(self, namesRefs):
        """Update the dictionary with the references to names."""
        self.namesRefs.update(namesRefs)
    def get_namesRefs(self):
        """Return the dictionary with the references to names."""
        return self.namesRefs
    def update_charactersRefs(self, charactersRefs):
        """Update the dictionary with the references to characters."""
        self.charactersRefs.update(charactersRefs)
    def get_charactersRefs(self):
        """Return the dictionary with the references to characters."""
        return self.charactersRefs
    def set_data(self, data, override=False):
        """Set the movie data to the given dictionary; if 'override' is
        set, the previous data is removed, otherwise the two dictionary
        are merged.
        """
        if not override:
            self.data.update(data)
        else:
            self.data = data
    def getID(self):
        """Return movieID, personID, characterID or companyID."""
        raise NotImplementedError('override this method')
    def __lt__(self, other):
        """Compare two Movie, Person, Character or Company objects."""
        if self.cmpFunct is None:
            return False
        if not isinstance(other, self.__class__):
            return False
        return self.cmpFunct(other) == -1
    def __eq__(self, other):
        """Compare two Movie, Person, Character or Company objects."""
        if self.cmpFunct is None:
            return False
        if not isinstance(other, self.__class__):
            return False
        # NOTE(review): cmpFunct returns 0 when the two objects compare
        # equal, so this returns a falsy value exactly when they are
        # equal - this looks inverted (should it be `== 0`?); verify.
        return self.cmpFunct(other)
    def __hash__(self):
        """Hash for this object."""
        # XXX: does it always work correctly?
        theID = self.getID()
        if theID is not None and self.accessSystem not in ('UNKNOWN', None):
            # Handle 'http' and 'mobile' as they are the same access system.
            acs = self.accessSystem
            if acs in ('mobile', 'httpThin'):
                acs = 'http'
            # There must be some indication of the kind of the object, too.
            s4h = '%s:%s[%s]' % (self.__class__.__name__, theID, acs)
        else:
            s4h = repr(self)
        return hash(s4h)
    def isSame(self, other):
        """Return True if the two represent the same object."""
        return isinstance(other, self.__class__) and hash(self) == hash(other)
    def __len__(self):
        """Number of items in the data dictionary."""
        return len(self.data)
    def getAsXML(self, key, _with_add_keys=True):
        """Return a XML representation of the specified key, or None
        if empty. If _with_add_keys is False, dynamically generated
        keys are excluded."""
        # Prevent modifyStrings in __getitem__ to be called; if needed,
        # it will be called by the _normalizeValue function.
        origModFunct = self.modFunct
        self.modFunct = modNull
        # XXX: not totally sure it's a good idea, but could prevent
        # problems (i.e.: the returned string always contains
        # a DTD valid tag, and not something that can be only in
        # the keys_alias map).
        key = self.keys_alias.get(key, key)
        if (not _with_add_keys) and (key in self._additional_keys()):
            self.modFunct = origModFunct
            return None
        try:
            withRefs = False
            if key in self.keys_tomodify and \
                    origModFunct not in (None, modNull):
                withRefs = True
            value = self.get(key)
            if value is None:
                return None
            tag = self.__class__.__name__.lower()
            return ''.join(_seq2xml({key: value}, withRefs=withRefs,
                                    modFunct=origModFunct,
                                    titlesRefs=self.titlesRefs,
                                    namesRefs=self.namesRefs,
                                    charactersRefs=self.charactersRefs,
                                    key2infoset=self.key2infoset,
                                    fullpath=tag))
        finally:
            # Always restore the original modification function.
            self.modFunct = origModFunct
    def asXML(self, _with_add_keys=True):
        """Return a XML representation of the whole object.
        If _with_add_keys is False, dynamically generated keys are excluded."""
        # NOTE(review): _tag4TON can return a bare '' (see the #350
        # workaround), which would fail to unpack here - verify.
        beginTag, endTag = _tag4TON(self, addAccessSystem=True, _containerOnly=True)
        resList = [beginTag]
        for key in list(self.keys()):
            value = self.getAsXML(key, _with_add_keys=_with_add_keys)
            if not value:
                continue
            resList.append(value)
        resList.append(endTag)
        head = _xmlHead % self.__class__.__name__.lower()
        return head + ''.join(resList)
    def _getitem(self, key):
        """Handle special keys."""
        # Hook for subclasses; None means "not a special key".
        return None
    def __getitem__(self, key):
        """Return the value for a given key, checking key aliases;
        a KeyError exception is raised if the key is not found.
        """
        value = self._getitem(key)
        if value is not None:
            return value
        # Handle key aliases.
        if key in self.keys_alias and self.keys_alias[key] in self.data:
            rawData = self.data[self.keys_alias[key]]
        else:
            rawData = self.data[key]
        if key in self.keys_tomodify and \
                self.modFunct not in (None, modNull):
            try:
                return modifyStrings(rawData, self.modFunct, self.titlesRefs,
                                     self.namesRefs, self.charactersRefs)
            except RuntimeError as e:
                import warnings
                warnings.warn("RuntimeError in imdb.utils._Container.__getitem__;"
                              " if it's not a recursion limit exceeded, it's a bug:\n%s" % e)
        return rawData
    def __setitem__(self, key, item):
        """Directly store the item with the given key."""
        self.data[key] = item
    def __delitem__(self, key):
        """Remove the given section or key."""
        # XXX: how to remove an item of a section?
        del self.data[key]
    def _additional_keys(self):
        """Valid keys to append to the data.keys() list."""
        return []
    def keys(self):
        """Return a list of valid keys."""
        return list(self.data.keys()) + self._additional_keys()
    def items(self):
        """Return the items in the dictionary."""
        return [(k, self.get(k)) for k in list(self.keys())]
    # XXX: is this enough?
    def iteritems(self):
        return iter(self.data.items())
    def iterkeys(self):
        return iter(self.data.keys())
    def itervalues(self):
        return iter(self.data.values())
    def values(self):
        """Return the values in the dictionary."""
        return [self.get(k) for k in list(self.keys())]
    def has_key(self, key):
        """Return true if a given section is defined."""
        try:
            self.__getitem__(key)
        except KeyError:
            return False
        return True
    # XXX: really useful???
    #      consider also that this will confuse people who meant to
    #      call ia.update(movieObject, 'data set') instead.
    def update(self, dict):
        # NOTE: the parameter name shadows the dict builtin; kept for
        # backward compatibility.
        self.data.update(dict)
    def get(self, key, failobj=None):
        """Return the given section, or default if it's not found."""
        try:
            return self.__getitem__(key)
        except KeyError:
            return failobj
    def setdefault(self, key, failobj=None):
        if key not in self:
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __repr__(self):
        """String representation of an object."""
        raise NotImplementedError('override this method')
    def __str__(self):
        """Movie title or person name."""
        raise NotImplementedError('override this method')
    def __contains__(self, key):
        raise NotImplementedError('override this method')
    def append_item(self, key, item):
        """The item is appended to the list identified by the given key."""
        self.data.setdefault(key, []).append(item)
    def set_item(self, key, item):
        """Directly store the item with the given key."""
        self.data[key] = item
    def __bool__(self):
        """Return true if self.data contains something."""
        return bool(self.data)
    def __deepcopy__(self, memo):
        raise NotImplementedError('override this method')
    def copy(self):
        """Return a deep copy of the object itself."""
        return deepcopy(self)
def flatten(seq, toDescend=(list, dict, tuple), yieldDictKeys=False,
            onlyKeysType=(_Container,), scalar=None):
    """Iterate over nested lists and dictionaries; toDescend is a list
    or a tuple of types to be considered non-scalar; if yieldDictKeys is
    true, also dictionaries' keys are yielded; if scalar is not None, only
    items of the given type(s) are yielded."""
    if scalar is None or isinstance(seq, scalar):
        yield seq
    if isinstance(seq, toDescend):
        if isinstance(seq, (dict, _Container)):
            if yieldDictKeys:
                # Yield also the keys of the dictionary.
                for key in seq.keys():
                    for k in flatten(key, toDescend=toDescend,
                                     yieldDictKeys=yieldDictKeys,
                                     onlyKeysType=onlyKeysType, scalar=scalar):
                        # Keys are yielded only when of an "interesting"
                        # type (by default, _Container instances).
                        if onlyKeysType and isinstance(k, onlyKeysType):
                            yield k
            for value in seq.values():
                for v in flatten(value, toDescend=toDescend,
                                 yieldDictKeys=yieldDictKeys,
                                 onlyKeysType=onlyKeysType, scalar=scalar):
                    yield v
        elif not isinstance(seq, (str, bytes, int, float)):
            # Plain sequences (lists, tuples, ...): recurse on the items.
            for item in seq:
                for i in flatten(item, toDescend=toDescend,
                                 yieldDictKeys=yieldDictKeys,
                                 onlyKeysType=onlyKeysType, scalar=scalar):
                    yield i
| 60,548 | Python | .py | 1,472 | 31.505435 | 117 | 0.570711 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,192 | __init__.py | cinemagoer_cinemagoer/imdb/__init__.py | # Copyright 2004-2021 Davide Alberani <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This package can be used to retrieve information about a movie or a person
from the IMDb database. It can fetch data through different media such as
the IMDb web pages, or a SQL database.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from imdb.version import __version__
__all__ = ['Cinemagoer', 'IMDb', 'IMDbError', 'Movie', 'Person', 'Character', 'Company',
'available_access_systems']
VERSION = __version__
import os
import sys
from pkgutil import find_loader
from types import FunctionType, MethodType
from imdb import Character, Company, Movie, Person
from imdb._exceptions import IMDbDataAccessError, IMDbError
from imdb._logging import imdbpyLogger as _imdb_logger
from imdb.utils import build_company_name, build_name, build_title
PY2 = sys.hexversion < 0x3000000
if PY2:
import ConfigParser as configparser
else:
import configparser
_aux_logger = _imdb_logger.getChild('aux')
# URLs of the main pages for movies, persons, characters and queries.
imdbURL_base = 'https://www.imdb.com/'
# NOTE: the urls below will be removed in a future version.
# please use the values in the 'urls' attribute
# of the IMDbBase subclass instance.
# https://www.imdb.com/title/
imdbURL_movie_base = '%stitle/' % imdbURL_base
# https://www.imdb.com/title/tt%s/
imdbURL_movie_main = imdbURL_movie_base + 'tt%s/'
# https://www.imdb.com/name/
imdbURL_person_base = '%sname/' % imdbURL_base
# https://www.imdb.com/name/nm%s/
imdbURL_person_main = imdbURL_person_base + 'nm%s/'
# https://www.imdb.com/character/
imdbURL_character_base = '%scharacter/' % imdbURL_base
# https://www.imdb.com/character/ch%s/
imdbURL_character_main = imdbURL_character_base + 'ch%s/'
# https://www.imdb.com/company/
imdbURL_company_base = '%scompany/' % imdbURL_base
# https://www.imdb.com/company/co%s/
imdbURL_company_main = imdbURL_company_base + 'co%s/'
# https://www.imdb.com/search/keyword/?keywords=%s
imdbURL_keyword_main = imdbURL_base + 'search/keyword/?keywords=%s'
# https://www.imdb.com/chart/top
imdbURL_top250 = imdbURL_base + 'chart/top'
# https://www.imdb.com/chart/bottom
imdbURL_bottom100 = imdbURL_base + 'chart/bottom'
# https://www.imdb.com/find/?%s
imdbURL_find = imdbURL_base + 'find/?%s'
# https://www.imdb.com/list/
imdbURL_list_base = imdbURL_base + 'list/'
# Names of the configuration files, looked up in this order.
confFileNames = ['cinemagoer.cfg', 'imdbpy.cfg']
class ConfigParserWithCase(configparser.ConfigParser):
    """A case-sensitive parser for configuration files."""
    def __init__(self, defaults=None, confFile=None, *args, **kwds):
        """Initialize the parser.

        *defaults* -- defaults values.
        *confFile* -- the file (or list of files) to parse; when None,
                      a set of default locations is searched."""
        if PY2:
            configparser.ConfigParser.__init__(self, defaults=defaults)
        else:
            super(configparser.ConfigParser, self).__init__(defaults=defaults)
        if confFile is None:
            for confFileName in confFileNames:
                dotFileName = '.' + confFileName
                # Current and home directory.
                confFile = [os.path.join(os.getcwd(), confFileName),
                            os.path.join(os.getcwd(), dotFileName),
                            os.path.join(os.path.expanduser('~'), confFileName),
                            os.path.join(os.path.expanduser('~'), dotFileName)]
                if os.name == 'posix':
                    sep = getattr(os.path, 'sep', '/')
                    # /etc/ and /etc/conf.d/
                    confFile.append(os.path.join(sep, 'etc', confFileName))
                    confFile.append(os.path.join(sep, 'etc', 'conf.d', confFileName))
                else:
                    # etc subdirectory of sys.prefix, for non-unix systems.
                    confFile.append(os.path.join(sys.prefix, 'etc', confFileName))
            # NOTE(review): only the candidates built from the last name in
            # confFileNames are consulted here; presumably cinemagoer.cfg
            # was meant to be tried before imdbpy.cfg - verify.
        elif isinstance(confFile, str):
            # Bug fix: an explicitly passed confFile used to be ignored
            # (the read loop was nested inside the `confFile is None`
            # branch); a single file name is normalized to a list so it
            # isn't iterated character by character.
            confFile = [confFile]
        for fname in confFile:
            try:
                self.read(fname)
            except (configparser.MissingSectionHeaderError,
                    configparser.ParsingError) as e:
                # Logger.warn is a deprecated alias of Logger.warning.
                _aux_logger.warning('Troubles reading config file: %s' % e)
            # Stop at the first valid file.
            if self.has_section('imdbpy'):
                break
    def optionxform(self, optionstr):
        """Option names are case sensitive."""
        return optionstr
    def _manageValue(self, value):
        """Custom substitutions for values: booleans and None are
        converted to the corresponding Python objects; everything
        else is returned unchanged."""
        if not isinstance(value, str):
            return value
        vlower = value.lower()
        if vlower in ('1', 'on', 'false', '0', 'off', 'yes', 'no', 'true'):
            return self._convert_to_boolean(vlower)
        elif vlower == 'none':
            return None
        return value
    def get(self, section, option, *args, **kwds):
        """Return the value of an option from a given section."""
        value = configparser.ConfigParser.get(self, section, option, *args, **kwds)
        return self._manageValue(value)
    def items(self, section, *args, **kwds):
        """Return a list of (key, value) tuples of items of the
        given section."""
        if section != 'DEFAULT' and not self.has_section(section):
            return []
        keys = configparser.ConfigParser.options(self, section)
        return [(k, self.get(section, k, *args, **kwds)) for k in keys]
    def getDict(self, section):
        """Return a dictionary of items of the specified section."""
        return dict(self.items(section))
def IMDb(accessSystem=None, *arguments, **keywords):
    """Return an instance of the appropriate data access class.

    *accessSystem* selects the preferred access system ('http', 's3',
    'sql', ...); when None, 'auto' or 'config', the configuration file
    is consulted, falling back to 'http'.  The remaining arguments are
    forwarded to the access system's constructor.  Raises IMDbError for
    an unknown or not-installed access system."""
    if accessSystem is None or accessSystem in ('auto', 'config'):
        try:
            cfg_file = ConfigParserWithCase(*arguments, **keywords)
            # Parameters set by the code take precedence.
            kwds = cfg_file.getDict('imdbpy')
            if 'accessSystem' in kwds:
                accessSystem = kwds['accessSystem']
                del kwds['accessSystem']
            else:
                accessSystem = 'http'
            kwds.update(keywords)
            keywords = kwds
        except Exception as e:
            # Logger.warn is a deprecated alias of Logger.warning.
            _imdb_logger.warning('Unable to read configuration file; complete error: %s' % e)
            # It just LOOKS LIKE a bad habit: we tried to read config
            # options from some files, but something is gone horribly
            # wrong: ignore everything and pretend we were called with
            # the 'http' accessSystem.
            accessSystem = 'http'
    if 'loggingLevel' in keywords:
        _imdb_logger.setLevel(keywords['loggingLevel'])
        del keywords['loggingLevel']
    if 'loggingConfig' in keywords:
        logCfg = keywords['loggingConfig']
        del keywords['loggingConfig']
        try:
            import logging.config
            logging.config.fileConfig(os.path.expanduser(logCfg))
        except Exception as e:
            _imdb_logger.warning('unable to read logger config: %s' % e)
    # Imports are done lazily, so that only the chosen access system
    # (and its dependencies) is actually loaded.
    if accessSystem in ('http', 'https', 'web', 'html'):
        from .parser.http import IMDbHTTPAccessSystem
        return IMDbHTTPAccessSystem(*arguments, **keywords)
    if accessSystem in ('s3', 's3dataset', 'imdbws'):
        from .parser.s3 import IMDbS3AccessSystem
        return IMDbS3AccessSystem(*arguments, **keywords)
    elif accessSystem in ('sql', 'db', 'database'):
        try:
            from .parser.sql import IMDbSqlAccessSystem
        except ImportError:
            raise IMDbError('the sql access system is not installed')
        return IMDbSqlAccessSystem(*arguments, **keywords)
    else:
        raise IMDbError('unknown kind of data access system: "%s"' % accessSystem)


# Cinemagoer alias
Cinemagoer = IMDb
def available_access_systems():
    """Return the list of available data access systems.

    The historic implementation used pkgutil/importlib find_loader,
    which is deprecated and removed in Python 3.12; probe the optional
    parser packages with importlib.util.find_spec instead, treating any
    import failure (e.g. a missing parent package) as "not available".
    """
    from importlib.util import find_spec
    asList = []
    for module_name, system in (('imdb.parser.http', 'http'),
                                ('imdb.parser.sql', 'sql')):
        try:
            if find_spec(module_name) is not None:
                asList.append(system)
        except (ImportError, ValueError):
            # Missing parent package or half-initialized module: the
            # corresponding access system is simply unavailable.
            continue
    return asList
# XXX: I'm not sure this is a good guess.
# I suppose that an argument of the IMDb function can be used to
# set a default encoding for the output, and then Movie, Person and
# Character objects can use this default encoding, returning strings.
# Anyway, passing unicode strings to search_movie(), search_person()
# and search_character() methods is always safer.
encoding = getattr(sys.stdin, 'encoding', '') or sys.getdefaultencoding()
class IMDbBase:
"""The base class used to search for a movie/person/character and
to get a Movie/Person/Character object.
This class cannot directly fetch data of any kind and so you
have to search the "real" code into a subclass."""
# The name of the preferred access system (MUST be overridden
# in the subclasses).
accessSystem = 'UNKNOWN'
# Whether to re-raise caught exceptions or not.
_reraise_exceptions = False
def __init__(self, defaultModFunct=None, results=20, keywordsResults=100,
*arguments, **keywords):
"""Initialize the access system.
If specified, defaultModFunct is the function used by
default by the Person, Movie and Character objects, when
accessing their text fields.
"""
# The function used to output the strings that need modification (the
# ones containing references to movie titles and person names).
self._defModFunct = defaultModFunct
# Number of results to get.
try:
results = int(results)
except (TypeError, ValueError):
results = 20
if results < 1:
results = 20
self._results = results
try:
keywordsResults = int(keywordsResults)
except (TypeError, ValueError):
keywordsResults = 100
if keywordsResults < 1:
keywordsResults = 100
self._keywordsResults = keywordsResults
self._reraise_exceptions = keywords.get('reraiseExceptions', True)
self.set_imdb_urls(keywords.get('imdbURL_base') or imdbURL_base)
def set_imdb_urls(self, imdbURL_base):
"""Set the urls used accessing the IMDb site."""
imdbURL_base = imdbURL_base.strip().strip('"\'')
if not imdbURL_base.startswith(('https://', 'http://')):
imdbURL_base = 'https://%s' % imdbURL_base
if not imdbURL_base.endswith('/'):
imdbURL_base = '%s/' % imdbURL_base
# http://www.imdb.com/title/
imdbURL_movie_base = '%stitle/' % imdbURL_base
# http://www.imdb.com/title/tt%s/
imdbURL_movie_main = imdbURL_movie_base + 'tt%s/'
# http://www.imdb.com/name/
imdbURL_person_base = '%sname/' % imdbURL_base
# http://www.imdb.com/name/nm%s/
imdbURL_person_main = imdbURL_person_base + 'nm%s/'
# http://www.imdb.com/character/
imdbURL_character_base = '%scharacter/' % imdbURL_base
# http://www.imdb.com/character/ch%s/
imdbURL_character_main = imdbURL_character_base + 'ch%s/'
# http://www.imdb.com/company/
imdbURL_company_base = '%scompany/' % imdbURL_base
# http://www.imdb.com/company/co%s/
imdbURL_company_main = imdbURL_company_base + 'co%s/'
# http://www.imdb.com/keyword/%s/
imdbURL_keyword_main = imdbURL_base + '/search/keyword?keywords=%s'
# http://www.imdb.com/chart/top
imdbURL_top250 = imdbURL_base + 'chart/top'
# http://www.imdb.com/chart/bottom
imdbURL_bottom100 = imdbURL_base + 'chart/bottom'
# http://www.imdb.com/chart/moviemeter
imdbURL_moviemeter100 = imdbURL_base + 'chart/moviemeter'
# http://www.imdb.com/chart/tvmeter
imdbURL_tvmeter100 = imdbURL_base + 'chart/tvmeter'
# http://www.imdb.com/chart/toptv
imdbURL_toptv250 = imdbURL_base + 'chart/toptv'
# https://www.imdb.com/india/top-rated-indian-movies
imdbURL_topindian250 = imdbURL_base + 'india/top-rated-indian-movies'
# http://www.imdb.com/chart/boxoffice/
imdbURL_boxoffice = imdbURL_base + 'chart/boxoffice/'
# http://www.imdb.com/find/?%s
imdbURL_find = imdbURL_base + 'find/?%s'
# http://www.imdb.com/search/title?%s
imdbURL_search_movie_advanced = imdbURL_base + 'search/title/?%s'
# http://www.imdb.com/list/
imdbURL_list_base = imdbURL_base + 'list/'
# https://www.imdb.com/showtimes
imdbURL_showtimes = imdbURL_base + 'showtimes'
self.urls = dict(
movie_base=imdbURL_movie_base,
movie_main=imdbURL_movie_main,
person_base=imdbURL_person_base,
person_main=imdbURL_person_main,
character_base=imdbURL_character_base,
character_main=imdbURL_character_main,
company_base=imdbURL_company_base,
company_main=imdbURL_company_main,
keyword_main=imdbURL_keyword_main,
top250=imdbURL_top250,
bottom100=imdbURL_bottom100,
moviemeter100=imdbURL_moviemeter100,
tvmeter100=imdbURL_tvmeter100,
toptv250=imdbURL_toptv250,
topindian250=imdbURL_topindian250,
find=imdbURL_find,
movie_list=imdbURL_list_base,
search_movie_advanced=imdbURL_search_movie_advanced,
boxoffice=imdbURL_boxoffice,
showtimes=imdbURL_showtimes)
def _normalize_movieID(self, movieID):
"""Normalize the given movieID."""
# By default, do nothing.
return movieID
def _normalize_personID(self, personID):
"""Normalize the given personID."""
# By default, do nothing.
return personID
def _normalize_characterID(self, characterID):
"""Normalize the given characterID."""
# By default, do nothing.
return characterID
def _normalize_companyID(self, companyID):
"""Normalize the given companyID."""
# By default, do nothing.
return companyID
def _get_real_movieID(self, movieID):
"""Handle title aliases."""
# By default, do nothing.
return movieID
def _get_real_personID(self, personID):
"""Handle name aliases."""
# By default, do nothing.
return personID
def _get_real_characterID(self, characterID):
"""Handle character name aliases."""
# By default, do nothing.
return characterID
def _get_real_companyID(self, companyID):
"""Handle company name aliases."""
# By default, do nothing.
return companyID
def _get_infoset(self, prefname):
"""Return methods with the name starting with prefname."""
infoset = []
excludes = ('%sinfoset' % prefname,)
preflen = len(prefname)
for name in dir(self.__class__):
if name.startswith(prefname) and name not in excludes:
member = getattr(self.__class__, name)
if isinstance(member, (MethodType, FunctionType)):
infoset.append(name[preflen:].replace('_', ' '))
return infoset
def get_movie_infoset(self):
"""Return the list of info set available for movies."""
return self._get_infoset('get_movie_')
def get_person_infoset(self):
"""Return the list of info set available for persons."""
return self._get_infoset('get_person_')
def get_character_infoset(self):
"""Return the list of info set available for characters."""
return self._get_infoset('get_character_')
def get_company_infoset(self):
"""Return the list of info set available for companies."""
return self._get_infoset('get_company_')
def get_movie(self, movieID, info=Movie.Movie.default_info, modFunct=None):
"""Return a Movie object for the given movieID.
The movieID is something used to univocally identify a movie;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Movie
object when accessing its text fields (like 'plot')."""
movieID = self._normalize_movieID(movieID)
movieID = self._get_real_movieID(movieID)
movie = Movie.Movie(movieID=movieID, accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
movie.set_mod_funct(modFunct)
self.update(movie, info)
return movie
get_episode = get_movie
def _search_movie(self, title, results):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_movie(self, title, results=None, _episodes=False):
"""Return a list of Movie objects for a query for the given title.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
if not _episodes:
res = self._search_movie(title, results)
else:
res = self._search_episode(title, results)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res if mi and md][:results]
def _get_movie_list(self, list_, results):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_movie_list(self, list_, results=None):
"""Return a list of Movie objects for a list id as input """
res = self._get_movie_list(list_, results)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res if mi and md][:results]
def _search_movie_advanced(self, title=None, adult=None, results=None, sort=None, sort_dir=None):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_movie_advanced(self, title=None, adult=None, results=None, sort=None, sort_dir=None):
"""Return a list of Movie objects for a query for the given title.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
res = self._search_movie_advanced(title=title, adult=adult, results=results, sort=sort, sort_dir=sort_dir)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res if mi and md][:results]
def _search_episode(self, title, results):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_episode(self, title, results=None):
"""Return a list of Movie objects for a query for the given title.
The results argument is the maximum number of results to return;
this method searches only for titles of tv (mini) series' episodes."""
return self.search_movie(title, results=results, _episodes=True)
def get_person(self, personID, info=Person.Person.default_info, modFunct=None):
"""Return a Person object for the given personID.
The personID is something used to univocally identify a person;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Person
object when accessing its text fields (like 'mini biography')."""
personID = self._normalize_personID(personID)
personID = self._get_real_personID(personID)
person = Person.Person(personID=personID, accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
person.set_mod_funct(modFunct)
self.update(person, info)
return person
def _search_person(self, name, results):
"""Return a list of tuples (personID, {personData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_person(self, name, results=None):
"""Return a list of Person objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
res = self._search_person(name, results)
return [Person.Person(personID=self._get_real_personID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res if pi and pd][:results]
def get_character(self, characterID, info=Character.Character.default_info,
modFunct=None):
"""Return a Character object for the given characterID.
The characterID is something used to univocally identify a character;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Character
object when accessing its text fields (like 'biography')."""
characterID = self._normalize_characterID(characterID)
characterID = self._get_real_characterID(characterID)
character = Character.Character(characterID=characterID,
accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
character.set_mod_funct(modFunct)
self.update(character, info)
return character
def _search_character(self, name, results):
"""Return a list of tuples (characterID, {characterData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_character(self, name, results=None):
"""Return a list of Character objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
res = self._search_character(name, results)
return [Character.Character(characterID=self._get_real_characterID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res if pi and pd][:results]
def get_company(self, companyID, info=Company.Company.default_info,
modFunct=None):
"""Return a Company object for the given companyID.
The companyID is something used to univocally identify a company;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Company
object when accessing its text fields (none, so far)."""
companyID = self._normalize_companyID(companyID)
companyID = self._get_real_companyID(companyID)
company = Company.Company(companyID=companyID, accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
company.set_mod_funct(modFunct)
self.update(company, info)
return company
def _search_company(self, name, results):
"""Return a list of tuples (companyID, {companyData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_company(self, name, results=None):
"""Return a list of Company objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
res = self._search_company(name, results)
return [Company.Company(companyID=self._get_real_companyID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res if pi and pd][:results]
def _search_keyword(self, keyword, results):
"""Return a list of 'keyword' strings."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_keyword(self, keyword, results=None):
"""Search for existing keywords, similar to the given one."""
if results is None:
results = self._keywordsResults
try:
results = int(results)
except (ValueError, OverflowError):
results = 100
return self._search_keyword(keyword, results)
def _get_keyword(self, keyword, results, page):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_keyword(self, keyword, results=None, page=None):
"""Return a list of movies for the given keyword."""
if results is None:
results = self._keywordsResults
try:
results = int(results)
except (ValueError, OverflowError):
results = 100
res = self._get_keyword(keyword, results, page)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res][:results]
def _get_top_bottom_movies(self, kind):
"""Return the list of the top 250 or bottom 100 movies."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
# This method must return a list of (movieID, {movieDict})
# tuples. The kind parameter can be 'top' or 'bottom'.
raise NotImplementedError('override this method')
def get_top250_movies(self):
"""Return the list of the top 250 movies."""
res = self._get_top_bottom_movies('top')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def get_bottom100_movies(self):
"""Return the list of the bottom 100 movies."""
res = self._get_top_bottom_movies('bottom')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def get_top250_tv(self):
"""Return the list of the top 250 tv shows."""
res = self._get_top_bottom_movies('toptv')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def get_popular100_movies(self):
"""Return the list of the 100 most popular movies."""
res = self._get_top_bottom_movies('moviemeter')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def get_popular100_tv(self):
"""Return the list of the 100 most popular tv shows."""
res = self._get_top_bottom_movies('tvmeter')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def get_top250_indian_movies(self):
"""Return the list of the top 250 indian movies."""
res = self._get_top_bottom_movies('topindian250')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def get_boxoffice_movies(self):
"""Return the list of the top box office movies."""
res = self._get_top_bottom_movies('boxoffice')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def _get_top_movies_or_tv_by_genres(self, genres, filter_content):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_top50_movies_by_genres(self, genres):
"""Return the list of the top 50 movies by genres.
:sig: (Union[str, List[str]]) -> List
:param genres: Name genre or list of genre's names."""
if isinstance(genres, list):
genres = ','.join(map(str, genres))
movies_filter = '&title_type=feature'
res = self._get_top_movies_or_tv_by_genres(genres, movies_filter)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def get_top50_tv_by_genres(self, genres):
"""Return the list of the top 50 tv series and mini series by genres.
:sig: (Union[str, List[str]]) -> List
:param genres: Name genre or list of genre's names."""
if isinstance(genres, list):
genres = ','.join(map(str, genres))
tv_filter = '&title_type=tv_series,mini_series'
res = self._get_top_movies_or_tv_by_genres(genres, tv_filter)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def _get_showtimes(self):
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_showtimes(self):
"""Return a list of objects like this:
[{'cinema': 'Cinema Name', 'address and contacts': '...',
'movies': [{'movie': MovieObject, 'showtimes': 'showtimes info'}}, ...]"""
return self._get_showtimes()
def new_movie(self, *arguments, **keywords):
"""Return a Movie object."""
# XXX: not really useful...
return Movie.Movie(accessSystem=self.accessSystem, *arguments, **keywords)
def new_person(self, *arguments, **keywords):
"""Return a Person object."""
# XXX: not really useful...
return Person.Person(accessSystem=self.accessSystem, *arguments, **keywords)
def new_character(self, *arguments, **keywords):
"""Return a Character object."""
# XXX: not really useful...
return Character.Character(accessSystem=self.accessSystem, *arguments, **keywords)
def new_company(self, *arguments, **keywords):
"""Return a Company object."""
# XXX: not really useful...
return Company.Company(accessSystem=self.accessSystem, *arguments, **keywords)
def update(self, mop, info=None, override=0):
"""Given a Movie, Person, Character or Company object with only
partial information, retrieve the required set of information.
info is the list of sets of information to retrieve.
If override is set, the information are retrieved and updated
even if they're already in the object."""
# XXX: should this be a method of the Movie/Person/Character/Company
# classes? NO! What for instances created by external functions?
mopID = None
prefix = ''
if isinstance(mop, Movie.Movie):
mopID = mop.movieID
prefix = 'movie'
elif isinstance(mop, Person.Person):
mopID = mop.personID
prefix = 'person'
elif isinstance(mop, Character.Character):
mopID = mop.characterID
prefix = 'character'
elif isinstance(mop, Company.Company):
mopID = mop.companyID
prefix = 'company'
else:
raise IMDbError('object ' + repr(mop) +
' is not a Movie, Person, Character or Company instance')
if mopID is None:
# XXX: enough? It's obvious that there are Characters
# objects without characterID, so I think they should
# just do nothing, when an i.update(character) is tried.
if prefix == 'character':
return
raise IMDbDataAccessError('supplied object has null movieID, personID or companyID')
if mop.accessSystem == self.accessSystem:
aSystem = self
else:
aSystem = IMDb(mop.accessSystem)
if info is None:
info = mop.default_info
elif info == 'all':
if isinstance(mop, Movie.Movie):
info = self.get_movie_infoset()
elif isinstance(mop, Person.Person):
info = self.get_person_infoset()
elif isinstance(mop, Character.Character):
info = self.get_character_infoset()
else:
info = self.get_company_infoset()
if not isinstance(info, (tuple, list)):
info = (info,)
res = {}
for i in info:
if i in mop.current_info and not override:
continue
if not i:
continue
_imdb_logger.debug('retrieving "%s" info set', i)
try:
method = getattr(aSystem, 'get_%s_%s' % (prefix, i.replace(' ', '_')))
except AttributeError:
_imdb_logger.error('unknown information set "%s"', i)
# Keeps going.
method = lambda *x: {}
try:
ret = method(mopID)
except Exception:
_imdb_logger.critical(
'caught an exception retrieving or parsing "%s" info set'
' for mopID "%s" (accessSystem: %s)',
i, mopID, mop.accessSystem, exc_info=True
)
ret = {}
# If requested by the user, reraise the exception.
if self._reraise_exceptions:
raise
keys = None
if 'data' in ret:
res.update(ret['data'])
if isinstance(ret['data'], dict):
keys = list(ret['data'].keys())
if 'info sets' in ret:
for ri in ret['info sets']:
mop.add_to_current_info(ri, keys, mainInfoset=i)
else:
mop.add_to_current_info(i, keys)
if 'titlesRefs' in ret:
mop.update_titlesRefs(ret['titlesRefs'])
if 'namesRefs' in ret:
mop.update_namesRefs(ret['namesRefs'])
if 'charactersRefs' in ret:
mop.update_charactersRefs(ret['charactersRefs'])
mop.set_data(res, override=0)
def update_series_seasons(self, mop, season_nums, override=0):
"""Given a Movie object with only retrieve the season data.
season_nums is the list of the specific seasons to retrieve.
If override is set, the information are retrieved and updated
even if they're already in the object."""
mopID = None
if isinstance(mop, Movie.Movie):
mopID = mop.movieID
else:
raise IMDbError('object ' + repr(mop) + ' is not a Movie instance')
if mopID is None:
raise IMDbDataAccessError('supplied object has null movieID, personID or companyID')
if mop.accessSystem == self.accessSystem:
aSystem = self
else:
aSystem = IMDb(mop.accessSystem)
info = 'episodes'
res = {}
if info in mop.current_info and not override:
return
_imdb_logger.debug('retrieving "%s" info set', info)
try:
method = getattr(aSystem, 'get_movie_episodes')
except AttributeError:
_imdb_logger.error('unknown information set "%s"', info)
# Keeps going.
method = lambda *x: {}
try:
ret = method(mopID, season_nums)
except Exception:
_imdb_logger.critical(
'caught an exception retrieving or parsing "%s" info set'
' for mopID "%s" (accessSystem: %s)',
info, mopID, mop.accessSystem, exc_info=True
)
ret = {}
# If requested by the user, reraise the exception.
if self._reraise_exceptions:
raise
keys = None
if 'data' in ret:
res.update(ret['data'])
if isinstance(ret['data'], dict):
keys = list(ret['data'].keys())
if 'info sets' in ret:
for ri in ret['info sets']:
mop.add_to_current_info(ri, keys, mainInfoset=info)
else:
mop.add_to_current_info(info, keys)
if 'titlesRefs' in ret:
mop.update_titlesRefs(ret['titlesRefs'])
if 'namesRefs' in ret:
mop.update_namesRefs(ret['namesRefs'])
if 'charactersRefs' in ret:
mop.update_charactersRefs(ret['charactersRefs'])
mop.set_data(res, override=0)
def get_imdbMovieID(self, movieID):
"""Translate a movieID in an imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_imdbPersonID(self, personID):
"""Translate a personID in a imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_imdbCharacterID(self, characterID):
"""Translate a characterID in a imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_imdbCompanyID(self, companyID):
"""Translate a companyID in a imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def _searchIMDb(self, kind, ton, title_kind=None):
"""Search the IMDb www server for the given title or name."""
if not ton:
return None
ton = ton.strip('"')
aSystem = IMDb()
if kind == 'tt':
searchFunct = aSystem.search_movie
check = 'long imdb title'
elif kind == 'nm':
searchFunct = aSystem.search_person
check = 'long imdb name'
elif kind == 'char':
searchFunct = aSystem.search_character
check = 'long imdb name'
elif kind == 'co':
# XXX: are [COUNTRY] codes included in the results?
searchFunct = aSystem.search_company
check = 'long imdb name'
try:
searchRes = searchFunct(ton)
except IMDbError:
return None
# When only one result is returned, assume it was from an
# exact match.
if len(searchRes) == 1:
return searchRes[0].getID()
title_only_matches = []
for item in searchRes:
# Return the first perfect match.
if item[check].strip('"') == ton:
# For titles do additional check for kind
if kind != 'tt' or title_kind == item['kind']:
return item.getID()
elif kind == 'tt':
title_only_matches.append(item.getID())
# imdbpy2sql.py could detected wrong type, so if no title and kind
# matches found - collect all results with title only match
# Return list of IDs if multiple matches (can happen when searching
# titles with no title_kind specified)
# Example: DB: Band of Brothers "tv series" vs "tv mini-series"
if title_only_matches:
if len(title_only_matches) == 1:
return title_only_matches[0]
else:
return title_only_matches
return None
def title2imdbID(self, title, kind=None):
"""Translate a movie title (in the plain text data files format)
to an imdbID.
Try an Exact Primary Title search on IMDb;
return None if it's unable to get the imdbID;
Always specify kind: movie, tv series, video game etc. or search can
return list of IDs if multiple matches found
"""
return self._searchIMDb('tt', title, kind)
def name2imdbID(self, name):
"""Translate a person name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('nm', name)
def character2imdbID(self, name):
"""Translate a character name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('char', name)
def company2imdbID(self, name):
"""Translate a company name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('co', name)
def get_imdbID(self, mop):
"""Return the imdbID for the given Movie, Person, Character or Company
object."""
imdbID = None
if mop.accessSystem == self.accessSystem:
aSystem = self
else:
aSystem = IMDb(mop.accessSystem)
if isinstance(mop, Movie.Movie):
if mop.movieID is not None:
imdbID = aSystem.get_imdbMovieID(mop.movieID)
else:
imdbID = aSystem.title2imdbID(build_title(mop, canonical=0,
ptdf=0, appendKind=False),
mop['kind'])
elif isinstance(mop, Person.Person):
if mop.personID is not None:
imdbID = aSystem.get_imdbPersonID(mop.personID)
else:
imdbID = aSystem.name2imdbID(build_name(mop, canonical=False))
elif isinstance(mop, Character.Character):
if mop.characterID is not None:
imdbID = aSystem.get_imdbCharacterID(mop.characterID)
else:
# canonical=0 ?
imdbID = aSystem.character2imdbID(build_name(mop, canonical=False))
elif isinstance(mop, Company.Company):
if mop.companyID is not None:
imdbID = aSystem.get_imdbCompanyID(mop.companyID)
else:
imdbID = aSystem.company2imdbID(build_company_name(mop))
else:
raise IMDbError('object ' + repr(mop) +
' is not a Movie, Person or Character instance')
return imdbID
    def get_imdbURL(self, mop):
        """Return the main IMDb URL for the given Movie, Person,
        Character or Company object, or None if unable to get it."""
        # Resolving the imdbID may delegate to another access system
        # (see get_imdbID), which can involve a remote search.
        imdbID = self.get_imdbID(mop)
        if imdbID is None:
            return None
        # NOTE(review): these are the module-level default URL patterns,
        # not self.urls, so a custom imdbURL_base passed to __init__ is
        # ignored here — confirm whether that is intended.
        if isinstance(mop, Movie.Movie):
            url_firstPart = imdbURL_movie_main
        elif isinstance(mop, Person.Person):
            url_firstPart = imdbURL_person_main
        elif isinstance(mop, Character.Character):
            url_firstPart = imdbURL_character_main
        elif isinstance(mop, Company.Company):
            url_firstPart = imdbURL_company_main
        else:
            raise IMDbError('object ' + repr(mop) +
                            ' is not a Movie, Person, Character or Company instance')
        # Each *_main pattern contains a single %s slot for the ID.
        return url_firstPart % imdbID
def get_special_methods(self):
"""Return the special methods defined by the subclass."""
sm_dict = {}
base_methods = []
for name in dir(IMDbBase):
member = getattr(IMDbBase, name)
if isinstance(member, (MethodType, FunctionType)):
base_methods.append(name)
for name in dir(self.__class__):
if name.startswith('_') or name in base_methods or \
name.startswith('get_movie_') or \
name.startswith('get_person_') or \
name.startswith('get_company_') or \
name.startswith('get_character_'):
continue
member = getattr(self.__class__, name)
if isinstance(member, (MethodType, FunctionType)):
sm_dict.update({name: member.__doc__})
return sm_dict
| 48,283 | Python | .py | 987 | 38.771023 | 114 | 0.622618 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,193 | Person.py | cinemagoer_cinemagoer/imdb/Person.py | # Copyright 2004-2019 Davide Alberani <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the Person class, used to store information about
a given person.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import deepcopy
from imdb.utils import _Container, analyze_name, build_name, canonicalName, cmpPeople, flatten, normalizeName
class Person(_Container):
    """A Person.
    Every information about a person can be accessed as::
        personObject['information']
    to get a list of the kind of information stored in a
    Person object, use the keys() method; some useful aliases
    are defined (as "biography" for the "mini biography" key);
    see the keys_alias dictionary.
    """
    # The default sets of information retrieved.
    default_info = ('main', 'filmography', 'biography')
    # Aliases for some not-so-intuitive keys.
    keys_alias = {
        'biography': 'mini biography',
        'bio': 'mini biography',
        'aka': 'akas',
        'also known as': 'akas',
        'nick name': 'nick names',
        'nicks': 'nick names',
        'nickname': 'nick names',
        'nicknames': 'nick names',
        'miscellaneouscrew': 'miscellaneous crew',
        'crewmembers': 'miscellaneous crew',
        'misc': 'miscellaneous crew',
        'guest': 'notable tv guest appearances',
        'guests': 'notable tv guest appearances',
        'tv guest': 'notable tv guest appearances',
        'guest appearances': 'notable tv guest appearances',
        'spouses': 'spouse',
        'salary': 'salary history',
        'salaries': 'salary history',
        'otherworks': 'other works',
        "maltin's biography": "biography from leonard maltin's movie encyclopedia",
        "leonard maltin's biography": "biography from leonard maltin's movie encyclopedia",
        'real name': 'birth name',
        'where are they now': 'where now',
        'personal quotes': 'quotes',
        'mini-biography author': 'imdb mini-biography by',
        'biography author': 'imdb mini-biography by',
        'genre': 'genres',
        'portrayed': 'portrayed in',
        'keys': 'keywords',
        'trademarks': 'trade mark',
        'trade mark': 'trade mark',
        'trade marks': 'trade mark',
        'trademark': 'trade mark',
        'pictorials': 'pictorial',
        'magazine covers': 'magazine cover photo',
        'magazine-covers': 'magazine cover photo',
        'tv series episodes': 'episodes',
        'tv-series episodes': 'episodes',
        'articles': 'article',
        'keyword': 'keywords'
    }
    # 'nick names'???
    # NOTE(review): presumably the keys whose text is filtered through
    # the modFunct function (see _Container) -- confirm.
    keys_tomodify_list = (
        'mini biography', 'spouse', 'quotes', 'other works',
        'salary history', 'trivia', 'trade mark', 'news',
        'books', 'biographical movies', 'portrayed in',
        'where now', 'interviews', 'article',
        "biography from leonard maltin's movie encyclopedia"
    )
    # Data key that stores the image URL (see 'full-size headshot' below).
    _image_key = 'headshot'
    cmpFunct = cmpPeople
    def _init(self, **kwds):
        """Initialize a Person object.
        *personID* -- the unique identifier for the person.
        *name* -- the name of the Person, if not in the data dictionary.
        *myName* -- the nickname you use for this person.
        *myID* -- your personal id for this person.
        *data* -- a dictionary used to initialize the object.
        *currentRole* -- a Character instance representing the current role
                         or duty of a person in this movie, or a Person
                         object representing the actor/actress who played
                         a given character in a Movie. If a string is
                         passed, an object is automatically build.
        *roleID* -- if available, the characterID/personID of the currentRole
                    object.
        *roleIsPerson* -- when False (default) the currentRole is assumed
                          to be a Character object, otherwise a Person.
        *notes* -- notes about the given person for a specific movie
                   or role (e.g.: the alias used in the movie credits).
        *accessSystem* -- a string representing the data access system used.
        *titlesRefs* -- a dictionary with references to movies.
        *namesRefs* -- a dictionary with references to persons.
        *modFunct* -- function called returning text fields.
        *billingPos* -- position of this person in the credits list.
        """
        name = kwds.get('name')
        if name and 'name' not in self.data:
            self.set_name(name)
        self.personID = kwds.get('personID', None)
        self.myName = kwds.get('myName', '')
        self.billingPos = kwds.get('billingPos', None)
    def _reset(self):
        """Reset the Person object."""
        self.personID = None
        self.myName = ''
        self.billingPos = None
    def _clear(self):
        """Reset the dictionary."""
        self.billingPos = None
    def set_name(self, name):
        """Set the name of the person."""
        d = analyze_name(name, canonical=False)
        self.data.update(d)
    def _additional_keys(self):
        """Valid keys to append to the data.keys() list."""
        addkeys = []
        if 'name' in self.data:
            addkeys += ['canonical name', 'long imdb name',
                        'long imdb canonical name']
        if 'headshot' in self.data:
            addkeys += ['full-size headshot']
        return addkeys
    def _getitem(self, key):
        """Handle special keys."""
        if 'name' in self.data:
            if key == 'name':
                return normalizeName(self.data['name'])
            elif key == 'canonical name':
                return canonicalName(self.data['name'])
            elif key == 'long imdb name':
                return build_name(self.data, canonical=False)
            elif key == 'long imdb canonical name':
                return build_name(self.data, canonical=True)
        if key == 'full-size headshot':
            return self.get_fullsizeURL()
        elif key not in self.data:
            filmography = self.data.get('filmography', {})
            if key in filmography:
                return self.data['filmography'][key]
            elif key == 'actor' and 'actress' in filmography:
                # we can also access the 'actress' key using 'actor'
                return filmography['actress']
        return None
    def getID(self):
        """Return the personID."""
        return self.personID
    def __bool__(self):
        """The Person is "false" if the self.data does not contain a name."""
        # XXX: check the name and the personID?
        return 'name' in self.data
    def __contains__(self, item):
        """Return true if this Person has worked in the given Movie,
        or if the given Character was played by this Person."""
        from .Character import Character
        from .Movie import Movie
        if isinstance(item, Movie):
            for m in flatten(self.data, yieldDictKeys=True, scalar=Movie):
                if item.isSame(m):
                    return True
        elif isinstance(item, Character):
            for m in flatten(self.data, yieldDictKeys=True, scalar=Movie):
                if item.isSame(m.currentRole):
                    return True
        elif isinstance(item, str):
            return item in self.data
        return False
    def isSameName(self, other):
        """Return true if two persons have the same name and imdbIndex
        and/or personID.
        """
        if not isinstance(other, self.__class__):
            return False
        if 'name' in self.data and \
                'name' in other.data and \
                build_name(self.data, canonical=True) == \
                build_name(other.data, canonical=True):
            return True
        if self.accessSystem == other.accessSystem and \
                self.personID and self.personID == other.personID:
            return True
        return False
    isSamePerson = isSameName  # XXX: just for backward compatibility.
    def __deepcopy__(self, memo):
        """Return a deep copy of a Person instance."""
        p = Person(name='', personID=self.personID, myName=self.myName,
                   myID=self.myID, data=deepcopy(self.data, memo),
                   currentRole=deepcopy(self.currentRole, memo),
                   roleIsPerson=self._roleIsPerson,
                   notes=self.notes, accessSystem=self.accessSystem,
                   titlesRefs=deepcopy(self.titlesRefs, memo),
                   namesRefs=deepcopy(self.namesRefs, memo),
                   charactersRefs=deepcopy(self.charactersRefs, memo))
        p.current_info = list(self.current_info)
        p.set_mod_funct(self.modFunct)
        p.billingPos = self.billingPos
        return p
    def __repr__(self):
        """String representation of a Person object."""
        # XXX: add also currentRole and notes, if present?
        return '<Person id:%s[%s] name:_%s_>' % (
            self.personID, self.accessSystem, self.get('long imdb name')
        )
    def __str__(self):
        """Simply print the short name."""
        return self.get('name', '')
    def summary(self):
        """Return a string with a pretty-printed summary for the person."""
        if not self:
            return ''
        s = 'Person\n=====\nName: %s\n' % self.get('long imdb canonical name', '')
        bdate = self.get('birth date')
        if bdate:
            s += 'Birth date: %s' % bdate
            bnotes = self.get('birth notes')
            if bnotes:
                s += ' (%s)' % bnotes
            s += '.\n'
        ddate = self.get('death date')
        if ddate:
            s += 'Death date: %s' % ddate
            dnotes = self.get('death notes')
            if dnotes:
                s += ' (%s)' % dnotes
            s += '.\n'
        bio = self.get('mini biography')
        if bio:
            s += 'Biography: %s\n' % bio[0]
        director = self.get('director')
        if director:
            d_list = [x.get('long imdb canonical title', '') for x in director[:3]]
            s += 'Last movies directed: %s.\n' % '; '.join(d_list)
        act = self.get('actor') or self.get('actress')
        if act:
            a_list = [x.get('long imdb canonical title', '') for x in act[:5]]
            s += 'Last movies acted: %s.\n' % '; '.join(a_list)
        return s
| 11,146 | Python | .py | 251 | 34.63745 | 109 | 0.594662 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,194 | Movie.py | cinemagoer_cinemagoer/imdb/Movie.py | # Copyright 2004-2018 Davide Alberani <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the Movie class, used to store information about
a given movie.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import deepcopy
from imdb import linguistics
from imdb.utils import _Container, analyze_title, build_title, canonicalTitle, cmpMovies, flatten
class Movie(_Container):
    """A Movie.
    Every information about a movie can be accessed as::
        movieObject['information']
    to get a list of the kind of information stored in a
    Movie object, use the keys() method; some useful aliases
    are defined (as "casting" for the "casting director" key); see
    the keys_alias dictionary.
    """
    # The default sets of information retrieved.
    default_info = ('main', 'plot')
    # Aliases for some not-so-intuitive keys.
    keys_alias = {
        'tv schedule': 'airing',
        'user rating': 'rating',
        'plot summary': 'plot',
        'plot summaries': 'plot',
        'directed by': 'director',
        'directors': 'director',
        'writers': 'writer',
        'actors': 'cast',
        'actresses': 'cast',
        'aka': 'akas',
        'also known as': 'akas',
        'country': 'countries',
        'production country': 'countries',
        'production countries': 'countries',
        'genre': 'genres',
        'runtime': 'runtimes',
        'lang': 'languages',
        'color': 'color info',
        'cover': 'cover url',
        'full-size cover': 'full-size cover url',
        'seasons': 'number of seasons',
        'language': 'languages',
        'certificate': 'certificates',
        'certifications': 'certificates',
        'certification': 'certificates',
        'episodes number': 'number of episodes',
        'faq': 'faqs',
        'technical': 'tech',
        'frequently asked questions': 'faqs'
    }
    # NOTE(review): presumably the keys whose text is filtered through
    # the modFunct function (see _Container) -- confirm.
    keys_tomodify_list = (
        'plot', 'trivia', 'alternate versions', 'goofs',
        'quotes', 'dvd', 'laserdisc', 'news', 'soundtrack',
        'crazy credits', 'business', 'supplements',
        'video review', 'faqs'
    )
    # Data key that stores the image URL (see 'full-size cover url' below).
    _image_key = 'cover url'
    cmpFunct = cmpMovies
    def _init(self, **kwds):
        """Initialize a Movie object.
        *movieID* -- the unique identifier for the movie.
        *title* -- the title of the Movie, if not in the data dictionary.
        *myTitle* -- your personal title for the movie.
        *myID* -- your personal identifier for the movie.
        *data* -- a dictionary used to initialize the object.
        *currentRole* -- a Character instance representing the current role
                         or duty of a person in this movie, or a Person
                         object representing the actor/actress who played
                         a given character in a Movie. If a string is
                         passed, an object is automatically build.
        *roleID* -- if available, the characterID/personID of the currentRole
                    object.
        *roleIsPerson* -- when False (default) the currentRole is assumed
                          to be a Character object, otherwise a Person.
        *notes* -- notes for the person referred in the currentRole
                   attribute; e.g.: '(voice)'.
        *accessSystem* -- a string representing the data access system used.
        *titlesRefs* -- a dictionary with references to movies.
        *namesRefs* -- a dictionary with references to persons.
        *charactersRefs* -- a dictionary with references to characters.
        *modFunct* -- function called returning text fields.
        """
        title = kwds.get('title')
        if title and 'title' not in self.data:
            self.set_title(title)
        self.movieID = kwds.get('movieID', None)
        self.myTitle = kwds.get('myTitle', '')
    def _reset(self):
        """Reset the Movie object."""
        self.movieID = None
        self.myTitle = ''
    def set_title(self, title):
        """Set the title of the movie."""
        d_title = analyze_title(title)
        self.data.update(d_title)
    def _additional_keys(self):
        """Valid keys to append to the data.keys() list."""
        addkeys = []
        if 'title' in self.data:
            addkeys += ['canonical title', 'long imdb title',
                        'long imdb canonical title',
                        'smart canonical title',
                        'smart long imdb canonical title']
        if 'episode of' in self.data:
            addkeys += ['long imdb episode title', 'series title',
                        'canonical series title', 'episode title',
                        'canonical episode title',
                        'smart canonical series title',
                        'smart canonical episode title']
        if 'cover url' in self.data:
            addkeys += ['full-size cover url']
        return addkeys
    def guessLanguage(self):
        """Guess the language of the title of this movie; returns None
        if there are no hints."""
        lang = self.get('languages')
        if lang:
            lang = lang[0]
        else:
            # Fall back to the main language of the first country.
            country = self.get('countries')
            if country:
                lang = linguistics.COUNTRY_LANG.get(country[0])
        return lang
    def smartCanonicalTitle(self, title=None, lang=None):
        """Return the canonical title, guessing its language.
        The title can be forced with the 'title' argument (internally
        used) and the language can be forced with the 'lang' argument,
        otherwise it's auto-detected."""
        if title is None:
            title = self.data.get('title', '')
        if lang is None:
            lang = self.guessLanguage()
        return canonicalTitle(title, lang=lang)
    def _getSeriesTitle(self, obj):
        """Get the title from a Movie object or return the string itself."""
        if isinstance(obj, Movie):
            return obj.get('title', '')
        return obj
    def _getitem(self, key):
        """Handle special keys."""
        # Episode-specific keys are resolved first, for episodes.
        if 'episode of' in self.data:
            if key == 'long imdb episode title':
                return build_title(self.data)
            elif key == 'series title':
                return self._getSeriesTitle(self.data['episode of'])
            elif key == 'canonical series title':
                ser_title = self._getSeriesTitle(self.data['episode of'])
                return canonicalTitle(ser_title)
            elif key == 'smart canonical series title':
                ser_title = self._getSeriesTitle(self.data['episode of'])
                return self.smartCanonicalTitle(ser_title)
            elif key == 'episode title':
                return self.data.get('title', '')
            elif key == 'canonical episode title':
                return canonicalTitle(self.data.get('title', ''))
            elif key == 'smart canonical episode title':
                return self.smartCanonicalTitle(self.data.get('title', ''))
        if 'title' in self.data:
            if key == 'title':
                return self.data['title']
            elif key == 'long imdb title':
                return build_title(self.data)
            elif key == 'canonical title':
                return canonicalTitle(self.data['title'])
            elif key == 'smart canonical title':
                return self.smartCanonicalTitle(self.data['title'])
            elif key == 'long imdb canonical title':
                return build_title(self.data, canonical=True)
            elif key == 'smart long imdb canonical title':
                return build_title(self.data, canonical=True, lang=self.guessLanguage())
        if key == 'full-size cover url':
            return self.get_fullsizeURL()
        return None
    def getID(self):
        """Return the movieID."""
        return self.movieID
    def __bool__(self):
        """The Movie is "false" if the self.data does not contain a title."""
        # XXX: check the title and the movieID?
        return 'title' in self.data
    def isSameTitle(self, other):
        """Return true if this and the compared object have the same
        long imdb title and/or movieID.
        """
        # XXX: obsolete?
        if not isinstance(other, self.__class__):
            return False
        if 'title' in self.data and 'title' in other.data and \
                build_title(self.data, canonical=False) == build_title(other.data, canonical=False):
            return True
        if self.accessSystem == other.accessSystem and \
                self.movieID is not None and self.movieID == other.movieID:
            return True
        return False
    isSameMovie = isSameTitle  # XXX: just for backward compatibility.
    def __contains__(self, item):
        """Return true if the given Person object is listed in this Movie,
        or if the given Character is represented in this Movie."""
        from .Character import Character
        from .Company import Company
        from .Person import Person
        if isinstance(item, Person):
            for p in flatten(self.data, yieldDictKeys=True, scalar=Person,
                             toDescend=(list, dict, tuple, Movie)):
                if item.isSame(p):
                    return True
        elif isinstance(item, Character):
            for p in flatten(self.data, yieldDictKeys=True, scalar=Person,
                             toDescend=(list, dict, tuple, Movie)):
                if item.isSame(p.currentRole):
                    return True
        elif isinstance(item, Company):
            for c in flatten(self.data, yieldDictKeys=True, scalar=Company,
                             toDescend=(list, dict, tuple, Movie)):
                if item.isSame(c):
                    return True
        elif isinstance(item, str):
            return item in self.data
        return False
    def __deepcopy__(self, memo):
        """Return a deep copy of a Movie instance."""
        m = Movie(title='', movieID=self.movieID, myTitle=self.myTitle,
                  myID=self.myID, data=deepcopy(self.data, memo),
                  currentRole=deepcopy(self.currentRole, memo),
                  roleIsPerson=self._roleIsPerson,
                  notes=self.notes, accessSystem=self.accessSystem,
                  titlesRefs=deepcopy(self.titlesRefs, memo),
                  namesRefs=deepcopy(self.namesRefs, memo),
                  charactersRefs=deepcopy(self.charactersRefs, memo))
        m.current_info = list(self.current_info)
        m.set_mod_funct(self.modFunct)
        return m
    def __repr__(self):
        """String representation of a Movie object."""
        # XXX: add also currentRole and notes, if present?
        if 'long imdb episode title' in self:
            title = self.get('long imdb episode title')
        else:
            title = self.get('long imdb title')
        return '<Movie id:%s[%s] title:_%s_>' % (self.movieID, self.accessSystem, title)
    def __str__(self):
        """Simply print the short title."""
        return self.get('title', '')
    def summary(self):
        """Return a string with a pretty-printed summary for the movie."""
        if not self:
            return ''
        def _nameAndRole(personList, joiner=', '):
            """Build a pretty string with name and role."""
            nl = []
            for person in personList:
                n = person.get('name', '')
                if person.currentRole:
                    n += ' (%s)' % person.currentRole
                nl.append(n)
            return joiner.join(nl)
        s = 'Movie\n=====\nTitle: %s\n' % self.get('long imdb canonical title', '')
        genres = self.get('genres')
        if genres:
            s += 'Genres: %s.\n' % ', '.join(genres)
        director = self.get('director')
        if director:
            s += 'Director: %s.\n' % _nameAndRole(director)
        writer = self.get('writer')
        if writer:
            s += 'Writer: %s.\n' % _nameAndRole(writer)
        cast = self.get('cast')
        if cast:
            cast = cast[:5]
            s += 'Cast: %s.\n' % _nameAndRole(cast)
        runtime = self.get('runtimes')
        if runtime:
            s += 'Runtime: %s.\n' % ', '.join(runtime)
        countries = self.get('countries')
        if countries:
            s += 'Country: %s.\n' % ', '.join(countries)
        lang = self.get('languages')
        if lang:
            s += 'Language: %s.\n' % ', '.join(lang)
        rating = self.get('rating')
        if rating:
            s += 'Rating: %s' % rating
            nr_votes = self.get('votes')
            if nr_votes:
                s += ' (%s votes)' % nr_votes
            s += '.\n'
        plot = self.get('plot')
        if not plot:
            plot = self.get('plot summary')
            if plot:
                plot = [plot]
        if plot:
            plot = plot[0]
            # Strip the '::author' part, if present.
            i = plot.find('::')
            if i != -1:
                plot = plot[:i]
            s += 'Plot: %s' % plot
        return s
| 13,798 | Python | .py | 314 | 33.191083 | 100 | 0.575931 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,195 | Character.py | cinemagoer_cinemagoer/imdb/Character.py | # Copyright 2007-2019 Davide Alberani <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the Character class, used to store information about
a given character.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import deepcopy
from imdb._exceptions import IMDbParserError
from imdb.utils import _Container, analyze_name, build_name, cmpPeople, flatten
class Character(_Container):
    """A Character.
    Every information about a character can be accessed as::
        characterObject['information']
    to get a list of the kind of information stored in a
    Character object, use the keys() method; some useful aliases
    are defined (as "also known as" for the "akas" key);
    see the keys_alias dictionary.
    """
    # The default sets of information retrieved.
    default_info = ('main', 'filmography', 'biography')
    # Aliases for some not-so-intuitive keys.
    keys_alias = {
        'mini biography': 'biography',
        'bio': 'biography',
        'character biography': 'biography',
        'character biographies': 'biography',
        'biographies': 'biography',
        'character bio': 'biography',
        'aka': 'akas',
        'also known as': 'akas',
        'alternate names': 'akas',
        'personal quotes': 'quotes',
        'keys': 'keywords',
        'keyword': 'keywords'
    }
    keys_tomodify_list = ('biography', 'quotes')
    cmpFunct = cmpPeople
    def _init(self, **kwds):
        """Initialize a Character object.
        *characterID* -- the unique identifier for the character.
        *name* -- the name of the Character, if not in the data dictionary.
        *myName* -- the nickname you use for this character.
        *myID* -- your personal id for this character.
        *data* -- a dictionary used to initialize the object.
        *notes* -- notes about the given character.
        *accessSystem* -- a string representing the data access system used.
        *titlesRefs* -- a dictionary with references to movies.
        *namesRefs* -- a dictionary with references to persons.
        *charactersRefs* -- a dictionary with references to characters.
        *modFunct* -- function called returning text fields.
        """
        name = kwds.get('name')
        if name and 'name' not in self.data:
            self.set_name(name)
        self.characterID = kwds.get('characterID', None)
        self.myName = kwds.get('myName', '')
    def _reset(self):
        """Reset the Character object."""
        self.characterID = None
        self.myName = ''
    def set_name(self, name):
        """Set the name of the character."""
        try:
            d = analyze_name(name)
            self.data.update(d)
        except IMDbParserError:
            # Unparsable names are silently ignored.
            pass
    def _additional_keys(self):
        """Valid keys to append to the data.keys() list."""
        addkeys = []
        if 'name' in self.data:
            addkeys += ['long imdb name']
        if 'headshot' in self.data:
            addkeys += ['full-size headshot']
        return addkeys
    def _getitem(self, key):
        """Handle special keys."""
        # XXX: can a character have an imdbIndex?
        if 'name' in self.data:
            if key == 'long imdb name':
                return build_name(self.data)
        return None
    def getID(self):
        """Return the characterID."""
        return self.characterID
    def __bool__(self):
        """The Character is "false" if the self.data does not contain a name."""
        # XXX: check the name and the characterID?
        return bool(self.data.get('name'))
    def __contains__(self, item):
        """Return true if this Character was portrayed in the given Movie
        or it was impersonated by the given Person."""
        from .Movie import Movie
        from .Person import Person
        if isinstance(item, Person):
            for m in flatten(self.data, yieldDictKeys=True, scalar=Movie):
                if item.isSame(m.currentRole):
                    return True
        elif isinstance(item, Movie):
            for m in flatten(self.data, yieldDictKeys=True, scalar=Movie):
                if item.isSame(m):
                    return True
        elif isinstance(item, str):
            return item in self.data
        return False
    def isSameName(self, other):
        """Return true if two character have the same name
        and/or characterID."""
        if not isinstance(other, self.__class__):
            return False
        if 'name' in self.data and 'name' in other.data and \
                build_name(self.data, canonical=False) == build_name(other.data, canonical=False):
            return True
        if self.accessSystem == other.accessSystem and \
                self.characterID is not None and \
                self.characterID == other.characterID:
            return True
        return False
    isSameCharacter = isSameName  # backward-compatible alias.
    def __deepcopy__(self, memo):
        """Return a deep copy of a Character instance."""
        c = Character(name='', characterID=self.characterID,
                      myName=self.myName, myID=self.myID,
                      data=deepcopy(self.data, memo),
                      notes=self.notes, accessSystem=self.accessSystem,
                      titlesRefs=deepcopy(self.titlesRefs, memo),
                      namesRefs=deepcopy(self.namesRefs, memo),
                      charactersRefs=deepcopy(self.charactersRefs, memo))
        c.current_info = list(self.current_info)
        c.set_mod_funct(self.modFunct)
        return c
    def __repr__(self):
        """String representation of a Character object."""
        return '<Character id:%s[%s] name:_%s_>' % (
            self.characterID, self.accessSystem, self.get('name')
        )
    def __str__(self):
        """Simply print the short name."""
        return self.get('name', '')
    def summary(self):
        """Return a string with a pretty-printed summary for the character."""
        if not self:
            return ''
        s = 'Character\n=====\nName: %s\n' % self.get('name', '')
        bio = self.get('biography')
        if bio:
            s += 'Biography: %s\n' % bio[0]
        filmo = self.get('filmography')
        if filmo:
            a_list = [x.get('long imdb canonical title', '') for x in filmo[:5]]
            s += 'Last movies with this character: %s.\n' % '; '.join(a_list)
        return s
| 7,142 | Python | .py | 166 | 34.325301 | 98 | 0.618849 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,196 | helpers.py | cinemagoer_cinemagoer/imdb/helpers.py | # Copyright 2006-2018 Davide Alberani <[email protected]>
# 2012 Alberto Malagoli <albemala AT gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides functions not used directly by the imdb package,
but useful for Cinemagoer-based programs.
"""
# XXX: Find better names for the functions in this module.
from __future__ import absolute_import, division, print_function, unicode_literals
import difflib
import gettext
import re
import sys
from imdb.locale import _
# True only when running under Python 2.
PY2 = sys.hexversion < 0x3000000
if PY2:
    from cgi import escape
else:
    # On Python 3 the HTML-escaping helper lives in the html module.
    from html import escape
# The modClearRefs can be used to strip names and titles references from
# the strings in Movie and Person objects.
from imdb import IMDb, imdbURL_character_base, imdbURL_movie_base, imdbURL_person_base
from imdb._exceptions import IMDbError
from imdb.Character import Character
from imdb.Company import Company
from imdb.linguistics import COUNTRY_LANG
from imdb.Movie import Movie
from imdb.Person import Person
from imdb.utils import TAGS_TO_MODIFY, _tagAttr, re_characterRef, re_nameRef, re_titleRef
# Select the "imdbpy" catalog for gettext translations.
gettext.textdomain('imdbpy')
# An http:// URL, up to the first whitespace (case-insensitive match).
_re_href = re.compile(r'(http://.+?)(?=\s|$)', re.I)
# Bound method cached at module level.
_re_hrefsub = _re_href.sub
def makeCgiPrintEncoding(encoding):
    """Make a function to pretty-print strings for the web.
    *encoding* -- name of the codec used to encode the returned bytes.
    """
    def cgiPrint(s):
        # HTML-escape the string (including quotes), then encode it;
        # characters not representable in the target charset are
        # replaced with XML character references.
        s = escape(s, quote=1)
        if isinstance(s, str):
            s = s.encode(encoding, 'xmlcharrefreplace')
        return s
    # Set the docstring explicitly: the original code used the form
    # '"""...""" % encoding' as the first statement of cgiPrint, which
    # is an expression statement rather than a docstring, leaving
    # cgiPrint.__doc__ set to None.
    cgiPrint.__doc__ = ('Encode the given string using the %s encoding, and '
                        'replace chars outside the given charset with XML '
                        'char references.' % encoding)
    return cgiPrint
# Default pretty-printer: cgiPrint uses the utf8 encoding.
cgiPrint = makeCgiPrintEncoding('utf8')
# Regular expression for %(varname)s substitutions.
re_subst = re.compile(r'%\((.+?)\)s')
# Regular expression for <if condition>....</if condition> clauses.
re_conditional = re.compile(r'<if\s+(.+?)\s*>(.+?)</if\s+\1\s*>')
def makeTextNotes(replaceTxtNotes):
    """Create a function useful to handle text[::optional_note] values.
    replaceTxtNotes is a format string which can reference the
    %(text)s and %(notes)s values.
    Portions of the string wrapped in <if key>...</if key> markers are
    kept only when the corresponding part of the value exists; e.g.:
    <if notes>[%(notes)s]</if notes> becomes '[notes]' when a note is
    present, and disappears otherwise.
    The returned function is suitable to be passed as the applyToValues
    argument of the makeObject2Txt function."""
    def _replacer(value):
        # Non-string values pass through untouched.
        if not isinstance(value, str):
            return value
        result = replaceTxtNotes
        parts = value.split('::', 1)
        # Keys actually present in the value; used to decide which
        # conditional sections survive.
        present = {}
        if parts[0]:
            present['text'] = True
            result = result.replace('%(text)s', parts[0])
        if len(parts) == 2:
            present['notes'] = True
            result = result.replace('%(notes)s', parts[1])
        else:
            result = result.replace('%(notes)s', '')
        def _dropMissing(match):
            # Keep the conditional body only when its key is present.
            if match.group(1) in present:
                return match.group(2)
            return ''
        while re_conditional.search(result):
            result = re_conditional.sub(_dropMissing, result)
        return result
    return _replacer
def makeObject2Txt(movieTxt=None, personTxt=None, characterTxt=None,
                   companyTxt=None, joiner=' / ',
                   applyToValues=lambda x: x, _recurse=True):
    """Return a function useful to pretty-print Movie, Person,
    Character and Company instances.
    *movieTxt* -- how to format a Movie object.
    *personTxt* -- how to format a Person object.
    *characterTxt* -- how to format a Character object.
    *companyTxt* -- how to format a Company object.
    *joiner* -- string used to join a list of objects.
    *applyToValues* -- function to apply to values.
    *_recurse* -- if True (default) manage only the given object.
    """
    # Some useful defaults.
    if movieTxt is None:
        movieTxt = '%(long imdb title)s'
    if personTxt is None:
        personTxt = '%(long imdb name)s'
    if characterTxt is None:
        characterTxt = '%(long imdb name)s'
    if companyTxt is None:
        companyTxt = '%(long imdb name)s'
    def object2txt(obj, _limitRecursion=None):
        """Pretty-print objects."""
        # Prevent unlimited recursion.
        if _limitRecursion is None:
            _limitRecursion = 0
        elif _limitRecursion > 5:
            return ''
        _limitRecursion += 1
        # Lists/tuples and dicts are rendered by joining their
        # (recursively formatted) items.
        if isinstance(obj, (list, tuple)):
            return joiner.join([object2txt(o, _limitRecursion=_limitRecursion)
                                for o in obj])
        elif isinstance(obj, dict):
            # XXX: not exactly nice, neither useful, I fear.
            return joiner.join(
                ['%s::%s' % (object2txt(k, _limitRecursion=_limitRecursion),
                             object2txt(v, _limitRecursion=_limitRecursion))
                 for k, v in list(obj.items())]
            )
        # NOTE(review): objData is populated below but never used.
        objData = {}
        if isinstance(obj, Movie):
            objData['movieID'] = obj.movieID
            outs = movieTxt
        elif isinstance(obj, Person):
            objData['personID'] = obj.personID
            outs = personTxt
        elif isinstance(obj, Character):
            objData['characterID'] = obj.characterID
            outs = characterTxt
        elif isinstance(obj, Company):
            objData['companyID'] = obj.companyID
            outs = companyTxt
        else:
            # Unknown objects are returned unchanged.
            return obj
        def _excludeFalseConditionals(matchobj):
            # Return an empty string if the conditional is false/empty.
            condition = matchobj.group(1)
            proceed = obj.get(condition) or getattr(obj, condition, None)
            if proceed:
                return matchobj.group(2)
            else:
                return ''
        while re_conditional.search(outs):
            outs = re_conditional.sub(_excludeFalseConditionals, outs)
        # Substitute every %(key)s with the (possibly recursively
        # formatted) value of that key.
        for key in re_subst.findall(outs):
            value = obj.get(key) or getattr(obj, key, None)
            if not isinstance(value, str):
                if not _recurse:
                    if value:
                        value = str(value)
                if value:
                    value = object2txt(value, _limitRecursion=_limitRecursion)
            elif value:
                value = applyToValues(str(value))
            if not value:
                value = ''
            elif not isinstance(value, str):
                value = str(value)
            outs = outs.replace('%(' + key + ')s', value)
        return outs
    return object2txt
def makeModCGILinks(movieTxt, personTxt, characterTxt=None, encoding='utf8'):
    """Make a function used to pretty-print movies and persons references;
    movieTxt and personTxt are the strings used for the substitutions.
    movieTxt must contain %(movieID)s and %(title)s, while personTxt
    must contain %(personID)s and %(name)s and characterTxt %(characterID)s
    and %(name)s; characterTxt is optional, for backward compatibility."""
    _cgiPrint = makeCgiPrintEncoding(encoding)

    def modCGILinks(s, titlesRefs, namesRefs, characterRefs=None):
        """Substitute movies and persons references."""
        if characterRefs is None:
            characterRefs = {}
        # XXX: look ma'... more nested scopes! <g>

        def _replaceMovie(match):
            # Replace a title reference with its formatted link, when known.
            to_replace = match.group(1)
            item = titlesRefs.get(to_replace)
            if item:
                movieID = item.movieID
                to_replace = movieTxt % {
                    'movieID': movieID,
                    'title': str(_cgiPrint(to_replace), encoding, 'xmlcharrefreplace')
                }
            return to_replace

        def _replacePerson(match):
            # Replace a name reference with its formatted link, when known.
            to_replace = match.group(1)
            item = namesRefs.get(to_replace)
            if item:
                personID = item.personID
                to_replace = personTxt % {
                    'personID': personID,
                    'name': str(_cgiPrint(to_replace), encoding, 'xmlcharrefreplace')
                }
            return to_replace

        def _replaceCharacter(match):
            # Replace a character reference; a missing characterTxt or
            # characterID leaves the text untouched.
            to_replace = match.group(1)
            if characterTxt is None:
                return to_replace
            item = characterRefs.get(to_replace)
            if item:
                characterID = item.characterID
                if characterID is None:
                    return to_replace
                to_replace = characterTxt % {
                    'characterID': characterID,
                    'name': str(_cgiPrint(to_replace), encoding, 'xmlcharrefreplace')
                }
            return to_replace
        # Escape HTML metacharacters in the source text before inserting
        # generated markup.  Bug fix: the previous code performed the
        # no-op replace('<', '<') / replace('>', '>') — evidently a
        # mangled version of the &lt;/&gt; entity escaping.
        s = s.replace('<', '&lt;').replace('>', '&gt;')
        s = _re_hrefsub(r'<a href="\1">\1</a>', s)
        s = re_titleRef.sub(_replaceMovie, s)
        s = re_nameRef.sub(_replacePerson, s)
        s = re_characterRef.sub(_replaceCharacter, s)
        return s
    # Expose the format strings for introspection by callers.
    modCGILinks.movieTxt = movieTxt
    modCGILinks.personTxt = personTxt
    modCGILinks.characterTxt = characterTxt
    return modCGILinks
# links to the imdb.com web site.
_movieTxt = '<a href="' + imdbURL_movie_base + 'tt%(movieID)s">%(title)s</a>'
_personTxt = '<a href="' + imdbURL_person_base + 'nm%(personID)s">%(name)s</a>'
_characterTxt = '<a href="' + imdbURL_character_base + \
                'ch%(characterID)s">%(name)s</a>'
# Ready-to-use reference replacers that turn title/name/character
# references into imdb.com HTML links (UTF-8 and pure-ASCII variants).
modHtmlLinks = makeModCGILinks(movieTxt=_movieTxt, personTxt=_personTxt,
                               characterTxt=_characterTxt)
modHtmlLinksASCII = makeModCGILinks(movieTxt=_movieTxt, personTxt=_personTxt,
                                    characterTxt=_characterTxt,
                                    encoding='ascii')
def sortedSeasons(m):
    """Return a sorted list of seasons of the given series."""
    # 'episodes' maps season keys to {episode-key: episode} dictionaries.
    return sorted(m.get('episodes', {}).keys())
def sortedEpisodes(m, season=None):
    """Return a sorted list of episodes of the given series,
    considering only the specified season(s) (every season, if None)."""
    # Normalize the season argument to a list of season keys.
    if season is None:
        wanted_seasons = sortedSeasons(m)
    elif isinstance(season, (tuple, list)):
        wanted_seasons = season
    else:
        wanted_seasons = [season]
    all_seasons = m.get('episodes', {})
    episodes = []
    for season_key in wanted_seasons:
        season_episodes = all_seasons.get(season_key, {})
        # Episodes are emitted in ascending episode-key order.
        for episode_key in sorted(season_episodes.keys()):
            episodes.append(season_episodes[episode_key])
    return episodes
# Idea and portions of the code courtesy of none none (dclist at gmail.com)
# Matches an imdbID anywhere in a URL: a two-letter type prefix
# (nm=person, tt=movie, ch=character, co=company) plus 7-8 digits.
_re_imdbIDurl = re.compile(r'\b(nm|tt|ch|co)([0-9]{7,8})\b')


def get_byURL(url, info=None, args=None, kwds=None):
    """Return a Movie, Person, Character or Company object for the given URL;
    info is the info set to retrieve, args and kwds are respectively a list
    and a dictionary or arguments to initialize the data access system.
    Returns None if unable to correctly parse the url; can raise
    exceptions if unable to retrieve the data."""
    # Parse the URL first: this way the data access system is not
    # instantiated at all when the URL contains no usable imdbID.
    match = _re_imdbIDurl.search(url)
    if not match:
        return None
    imdbtype = match.group(1)
    imdbID = match.group(2)
    if args is None:
        args = []
    if kwds is None:
        kwds = {}
    ia = IMDb(*args, **kwds)
    # Dispatch on the imdbID prefix.
    if imdbtype == 'tt':
        return ia.get_movie(imdbID, info=info)
    elif imdbtype == 'nm':
        return ia.get_person(imdbID, info=info)
    elif imdbtype == 'ch':
        return ia.get_character(imdbID, info=info)
    elif imdbtype == 'co':
        return ia.get_company(imdbID, info=info)
    # Unreachable with the current regex; kept as a safety net.
    return None
# Idea and portions of code courtesy of Basil Shubin.
# Beware that these information are now available directly by
# the Movie/Person/Character instances.
def fullSizeCoverURL(obj):
    """Given an URL string or a Movie, Person or Character instance,
    returns an URL to the full-size version of the cover/headshot,
    or None otherwise.

    This function is obsolete: the same information is available as
    the 'full-size cover url' and 'full-size headshot' keys,
    respectively for movies and persons/characters."""
    # Simply delegate to the object's own accessor.
    full_url = obj.get_fullsizeURL()
    return full_url
def keyToXML(key):
    """Return a key (the ones used to access information in Movie and
    other classes instances) converted to the style of the XML output."""
    # _tagAttr returns the XML tag name first; attributes are discarded.
    converted = _tagAttr(key, '')
    return converted[0]
def translateKey(key):
    """Translate a given key."""
    # Normalize the key to its XML form, then run it through gettext.
    xml_style_key = keyToXML(key)
    return _(xml_style_key)
# Maps tags to classes.
_MAP_TOP_OBJ = {
    'person': Person,
    'movie': Movie,
    'character': Character,
    'company': Company
}
# Tags to be converted to lists.  Used only for membership tests, so the
# values are irrelevant (None for TAGS_TO_MODIFY entries, classes for
# the _MAP_TOP_OBJ ones).
_TAGS_TO_LIST = dict([(x[0], None) for x in list(TAGS_TO_MODIFY.values())])
_TAGS_TO_LIST.update(_MAP_TOP_OBJ)
def tagToKey(tag):
    """Return the name of the tag, taking it from the 'key' attribute,
    if present."""
    key_attr = tag.get('key')
    if not key_attr:
        # No (or empty) 'key' attribute: fall back to the tag name itself.
        return tag.tag
    if tag.get('keytype') == 'int':
        # Keys explicitly marked as integers are converted back.
        return int(key_attr)
    return key_attr
def _valueWithType(tag, tagValue):
"""Return tagValue, handling some type conversions."""
tagType = tag.get('type')
if tagType == 'int':
tagValue = int(tagValue)
elif tagType == 'float':
tagValue = float(tagValue)
return tagValue
# Extra tags to get (if values were not already read from title/name).
_titleTags = ('imdbindex', 'kind', 'year')
# Extra tags for persons/characters and companies, respectively.
_nameTags = ('imdbindex',)
_companyTags = ('imdbindex', 'country')
def parseTags(tag, _topLevel=True, _as=None, _infoset2keys=None, _key2infoset=None):
    """Recursively parse a tree of tags.

    *tag* -- the element to parse (an lxml element; getchildren() is used).
    *_topLevel* -- internal: true only for the outermost call.
    *_as* -- internal: access system propagated to nested objects.
    *_infoset2keys*, *_key2infoset* -- internal: accumulators mapping
    info sets to keys and vice versa.
    """
    # The returned object (usually a _Container subclass, but it can
    # be a string, an int, a float, a list or a dictionary).
    item = None
    if _infoset2keys is None:
        _infoset2keys = {}
    if _key2infoset is None:
        _key2infoset = {}
    name = tagToKey(tag)
    firstChild = (tag.getchildren() or [None])[0]
    tagStr = (tag.text or '').strip()
    if not tagStr and name == 'item':
        # Handles 'item' tags containing text and a 'notes' sub-tag.
        tagContent = tag.getchildren()
        if tagContent and tagContent[0].text:
            tagStr = (tagContent[0].text or '').strip()
    # Record which info set this key belongs to.
    infoset = tag.get('infoset')
    if infoset:
        _key2infoset[name] = infoset
        _infoset2keys.setdefault(infoset, []).append(name)
    # Here we use tag.name to avoid tags like <item title="company">
    if tag.tag in _MAP_TOP_OBJ:
        # One of the subclasses of _Container.
        item = _MAP_TOP_OBJ[name]()
        # Propagate the access system down the tree (the first one found
        # wins).
        itemAs = tag.get('access-system')
        if itemAs:
            if not _as:
                _as = itemAs
        else:
            itemAs = _as
        item.accessSystem = itemAs
        tagsToGet = []
        theID = tag.get('id')
        if name == 'movie':
            item.movieID = theID
            tagsToGet = _titleTags
            ttitle = tag.find('title')
            if ttitle is not None:
                item.set_title(ttitle.text)
                tag.remove(ttitle)
        else:
            # Persons, characters and companies all carry a name.
            if name == 'person':
                item.personID = theID
                tagsToGet = _nameTags
                theName = tag.find('long imdb canonical name')
                if not theName:
                    theName = tag.find('name')
            elif name == 'character':
                item.characterID = theID
                tagsToGet = _nameTags
                theName = tag.find('name')
            elif name == 'company':
                item.companyID = theID
                tagsToGet = _companyTags
                theName = tag.find('name')
            if theName is not None:
                item.set_name(theName.text)
                tag.remove(theName)
        # Collect extra tags not already derived from the title/name.
        for t in tagsToGet:
            if t in item.data:
                continue
            dataTag = tag.find(t)
            if dataTag is not None:
                item.data[tagToKey(dataTag)] = _valueWithType(dataTag, dataTag.text)
        notesTag = tag.find('notes')
        if notesTag is not None:
            item.notes = notesTag.text
            tag.remove(notesTag)
        # Nested objects ('episode of' series, current role) are parsed
        # recursively and removed so the generic loop below skips them.
        episodeOf = tag.find('episode-of')
        if episodeOf is not None:
            item.data['episode of'] = parseTags(episodeOf, _topLevel=False,
                                                _as=_as, _infoset2keys=_infoset2keys,
                                                _key2infoset=_key2infoset)
            tag.remove(episodeOf)
        cRole = tag.find('current-role')
        if cRole is not None:
            cr = parseTags(cRole, _topLevel=False, _as=_as,
                           _infoset2keys=_infoset2keys, _key2infoset=_key2infoset)
            item.currentRole = cr
            tag.remove(cRole)
        # XXX: big assumption, here. What about Movie instances used
        # as keys in dictionaries? What about other keys (season and
        # episode number, for example?)
        if not _topLevel:
            # tag.extract()
            return item
        _adder = lambda key, value: item.data.update({key: value})
    elif tagStr:
        # Leaf tag with text: append notes (if any) or apply the declared
        # type conversion, then return the plain value.
        tagNotes = tag.find('notes')
        if tagNotes is not None:
            notes = (tagNotes.text or '').strip()
            if notes:
                tagStr += '::%s' % notes
        else:
            tagStr = _valueWithType(tag, tagStr)
        return tagStr
    elif firstChild is not None:
        # Container tag: become a list or a dict, depending on the kind
        # of the first child.
        firstChildName = tagToKey(firstChild)
        if firstChildName in _TAGS_TO_LIST:
            item = []
            _adder = lambda key, value: item.append(value)
        else:
            item = {}
            _adder = lambda key, value: item.update({key: value})
    else:
        # Empty tag: an (empty) dictionary.
        item = {}
        _adder = lambda key, value: item.update({name: value})
    # Generic recursion over the remaining children.
    for subTag in tag.getchildren():
        subTagKey = tagToKey(subTag)
        # Exclude dynamically generated keys.
        if tag.tag in _MAP_TOP_OBJ and subTagKey in item._additional_keys():
            continue
        subItem = parseTags(subTag, _topLevel=False, _as=_as,
                            _infoset2keys=_infoset2keys, _key2infoset=_key2infoset)
        if subItem:
            _adder(subTagKey, subItem)
    if _topLevel and name in _MAP_TOP_OBJ:
        # Add information about 'info sets', but only to the top-level object.
        item.infoset2keys = _infoset2keys
        item.key2infoset = _key2infoset
        item.current_info = list(_infoset2keys.keys())
    return item
def parseXML(xml):
    """Parse a XML string, returning an appropriate object (usually an
    instance of a subclass of _Container."""
    # lxml is imported lazily, so it is only required when XML parsing
    # is actually requested.
    from lxml import etree
    root = etree.fromstring(xml)
    return parseTags(root)
# Matches "(<language> title)" notes, capturing the language name.
_re_akas_lang = re.compile('(?:[(])([a-zA-Z]+?)(?: title[)])')
# Matches any parenthesized section; used to strip notes from country names.
_re_akas_country = re.compile(r'\(.*?\)')
# akasLanguages, sortAKAsBySimilarity and getAKAsInLanguage code
# copyright of Alberto Malagoli (refactoring by Davide Alberani).
def akasLanguages(movie):
    """Given a movie, return a list of tuples in (lang, AKA) format;
    lang can be None, if unable to detect."""
    lang_and_aka = []
    akas = set((movie.get('akas') or []) + (movie.get('akas from release info') or []))
    for aka in akas:
        # Split the aka into (title, notes); the trailing parenthesized
        # section carries country/language information.
        match = re.search(r'^(.*) \((.*?)\)', aka)
        if match is None:
            # Bug fix: the previous code unconditionally called .group()
            # on the match and crashed with AttributeError on akas
            # without a parenthesized section; such akas carry no
            # language information.
            lang_and_aka.append((None, aka))
            continue
        title, notes = match.group(1, 2)
        # search for something like "(... title)" where ... is a language
        language = _re_akas_lang.search(notes)
        if language:
            language = language.groups()[0]
        else:
            # split countries using , and keep only the first one (it's sufficient)
            country = notes.split(',')[0]
            # remove parenthesis
            country = _re_akas_country.sub('', country).strip()
            # given the country, get corresponding language from dictionary
            language = COUNTRY_LANG.get(country)
        lang_and_aka.append((language, title))
    return lang_and_aka
def sortAKAsBySimilarity(movie, title, _titlesOnly=True, _preferredLang=None):
    """Return a list of movie AKAs, sorted by their similarity to
    the given title.
    If _titlesOnly is not True, similarity information are returned.
    If _preferredLang is specified, AKAs in the given language will get
    a higher score.
    The return is a list of title, or a list of tuples if _titlesOnly is False."""
    # Kept for parity with the original implementation; the result was
    # never used, but the call may warm internal caches.
    movie.guessLanguage()
    target = title.lower()
    # The original title is always a candidate (no language attached).
    original_score = difflib.SequenceMatcher(None, movie['title'].lower(),
                                             target).ratio()
    scores = [(original_score, movie['title'], None)]
    for language, aka in akasLanguages(movie):
        # estimate string distance between the current aka and the given title
        score = difflib.SequenceMatcher(None, aka.lower(), target).ratio()
        # akas in the preferred language get a boost
        if _preferredLang and _preferredLang == language:
            score += 1
        scores.append((score, aka, language))
    scores.sort(reverse=True)
    if _titlesOnly:
        return [entry[1] for entry in scores]
    return scores
def getAKAsInLanguage(movie, lang, _searchedTitle=None):
    """Return a list of AKAs of a movie, in the specified language.
    If _searchedTitle is given, the AKAs are sorted by their similarity
    to it."""
    akas = []
    for language, aka in akasLanguages(movie):
        if lang == language:
            akas.append(aka)
    if _searchedTitle:
        searched = _searchedTitle.lower()
        scores = []
        for aka in akas:
            # Bug fix: list.append() was called with two arguments
            # (a TypeError) and the SequenceMatcher was never reduced
            # with .ratio(); build (similarity, aka) tuples so the
            # sort below is meaningful.
            ratio = difflib.SequenceMatcher(None, aka.lower(), searched).ratio()
            scores.append((ratio, aka))
        scores.sort(reverse=True)
        akas = [x[1] for x in scores]
    return akas
def resizeImage(image, width=None, height=None, crop=None, custom_regex=None):
    """Return resized and cropped image url.

    *image* -- the original Amazon media URL.
    *width*, *height* -- optional target dimensions (SX/SY directives).
    *crop* -- optional sequence of crop values (CR directive).
    *custom_regex* -- optional regex overriding the default URL matcher.

    Raises IMDbError when the URL does not match the expected pattern."""
    pattern = custom_regex or r'https://m.media-amazon.com/images/\w/\w+'
    matches = re.findall(pattern, image)
    if not matches:
        raise IMDbError('Image url not matched. Original url: "%s"' % (image))
    result = matches[0]
    # Reconstruct the '@' markers present in the original URL, then
    # append the Amazon resizing directives.
    if "@@" in image:
        result += '@'
    if "@" not in image:
        result += '._V1_'
    else:
        result += '@._V1_'
    if width:
        result += 'SX%s_' % width
    if height:
        result += 'SY%s_' % height
    if crop:
        result += 'CR%s_' % ','.join(crop)
    result += '.jpg'
    return result
| 23,686 | Python | .py | 562 | 33.371886 | 95 | 0.615395 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,197 | Company.py | cinemagoer_cinemagoer/imdb/Company.py | # Copyright 2008-2017 Davide Alberani <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the company class, used to store information about
a given company.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import deepcopy
from imdb.utils import _Container, analyze_company_name, build_company_name, cmpCompanies, flatten
class Company(_Container):
    """A company.

    Every information about a company can be accessed as::

        companyObject['information']

    to get a list of the kind of information stored in a
    company object, use the keys() method; some useful aliases
    are defined (as "also known as" for the "akas" key);
    see the keys_alias dictionary.
    """
    # The default sets of information retrieved.
    default_info = ('main',)

    # Aliases for some not-so-intuitive keys.
    keys_alias = {
        'distributor': 'distributors',
        'special effects company': 'special effects companies',
        'other company': 'miscellaneous companies',
        'miscellaneous company': 'miscellaneous companies',
        'other companies': 'miscellaneous companies',
        'misc companies': 'miscellaneous companies',
        'misc company': 'miscellaneous companies',
        'production company': 'production companies'
    }

    # No keys need the text-modification function for companies.
    keys_tomodify_list = ()

    # Comparison function used by the base _Container class.
    cmpFunct = cmpCompanies

    def _init(self, **kwds):
        """Initialize a company object.

        *companyID* -- the unique identifier for the company.
        *name* -- the name of the company, if not in the data dictionary.
        *myName* -- the nickname you use for this company.
        *myID* -- your personal id for this company.
        *data* -- a dictionary used to initialize the object.
        *notes* -- notes about the given company.
        *accessSystem* -- a string representing the data access system used.
        *titlesRefs* -- a dictionary with references to movies.
        *namesRefs* -- a dictionary with references to persons.
        *charactersRefs* -- a dictionary with references to companies.
        *modFunct* -- function called returning text fields.
        """
        name = kwds.get('name')
        # A 'name' already present in the data dictionary wins over the
        # keyword argument.
        if name and 'name' not in self.data:
            self.set_name(name)
        self.companyID = kwds.get('companyID', None)
        self.myName = kwds.get('myName', '')

    def _reset(self):
        """Reset the company object."""
        self.companyID = None
        self.myName = ''

    def set_name(self, name):
        """Set the name of the company, splitting off a trailing
        parenthesized section as notes."""
        # Company diverges a bit from other classes, being able
        # to directly handle its "notes". AND THAT'S PROBABLY A BAD IDEA!
        oname = name = name.strip()
        notes = ''
        # A trailing "(...)" section is treated as notes, not as part
        # of the company name.
        if name.endswith(')'):
            fparidx = name.find('(')
            if fparidx != -1:
                notes = name[fparidx:]
                name = name[:fparidx].rstrip()
        # If notes are already set, keep the full original string as name.
        if self.notes:
            name = oname
        d = analyze_company_name(name)
        self.data.update(d)
        if notes and not self.notes:
            self.notes = notes

    def _additional_keys(self):
        """Valid keys to append to the data.keys() list."""
        if 'name' in self.data:
            return ['long imdb name']
        return []

    def _getitem(self, key):
        """Handle special keys."""
        # XXX: can a company have an imdbIndex?
        if 'name' in self.data:
            if key == 'long imdb name':
                return build_company_name(self.data)
        return None

    def getID(self):
        """Return the companyID."""
        return self.companyID

    def __bool__(self):
        """The company is "false" if the self.data does not contain a name."""
        # XXX: check the name and the companyID?
        return bool(self.data.get('name'))

    def __contains__(self, item):
        """Return true if this company and the given Movie are related,
        or if the given string is a key of this company's data."""
        # Imported locally to avoid a circular import.
        from .Movie import Movie
        if isinstance(item, Movie):
            for m in flatten(self.data, yieldDictKeys=True, scalar=Movie):
                if item.isSame(m):
                    return True
        elif isinstance(item, str):
            return item in self.data
        return False

    def isSameName(self, other):
        """Return true if two company have the same name
        and/or companyID."""
        if not isinstance(other, self.__class__):
            return False
        # Compare normalized (built) names first, then fall back to
        # comparing IDs within the same access system.
        if 'name' in self.data and \
                'name' in other.data and \
                build_company_name(self.data) == \
                build_company_name(other.data):
            return True
        if self.accessSystem == other.accessSystem and \
                self.companyID is not None and \
                self.companyID == other.companyID:
            return True
        return False

    # Alias kept for a more explicit call site.
    isSameCompany = isSameName

    def __deepcopy__(self, memo):
        """Return a deep copy of a company instance."""
        c = Company(name='', companyID=self.companyID,
                    myName=self.myName, myID=self.myID,
                    data=deepcopy(self.data, memo),
                    notes=self.notes, accessSystem=self.accessSystem,
                    titlesRefs=deepcopy(self.titlesRefs, memo),
                    namesRefs=deepcopy(self.namesRefs, memo),
                    charactersRefs=deepcopy(self.charactersRefs, memo))
        c.current_info = list(self.current_info)
        c.set_mod_funct(self.modFunct)
        return c

    def __repr__(self):
        """String representation of a Company object."""
        return '<Company id:%s[%s] name:_%s_>' % (
            self.companyID, self.accessSystem, self.get('long imdb name')
        )

    def __str__(self):
        """Simply print the short name."""
        return self.get('name', '')

    def summary(self):
        """Return a string with a pretty-printed summary for the company."""
        if not self:
            return ''
        s = 'Company\n=======\nName: %s\n' % self.get('name', '')
        # Show at most five movies for each kind of relationship.
        for k in ('distributor', 'production company', 'miscellaneous company',
                  'special effects company'):
            d = self.get(k, [])[:5]
            if not d:
                continue
            s += 'Last movies from this company (%s): %s.\n' % (
                k, '; '.join([x.get('long imdb title', '') for x in d])
            )
        return s
| 7,112 | Python | .py | 165 | 34.2 | 98 | 0.612628 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,198 | imdbpy-sr.po | cinemagoer_cinemagoer/imdb/locale/imdbpy-sr.po | # Gettext message file for imdbpy
msgid ""
msgstr ""
"Project-Id-Version: imdbpy\n"
"POT-Creation-Date: 2010-03-18 14:35+0000\n"
"PO-Revision-Date: YYYY-MM-DD HH:MM+0000\n"
"Last-Translator: YOUR NAME <YOUR@EMAIL>\n"
"Language-Team: TEAM NAME <TEAM@EMAIL>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Domain: imdbpy\n"
"Language-Code: en\n"
"Language-Name: English\n"
"Preferred-Encodings: utf-8\n"
# Default: Actor
msgid "actor"
msgstr "глумац"
# Default: Actress
msgid "actress"
msgstr "глумица"
# Default: Adaption
msgid "adaption"
msgstr "адаптација"
# Default: Additional information
msgid "additional-information"
msgstr "Додатне Информације"
# Default: Admissions
msgid "admissions"
msgstr "пријеми"
# Default: Agent address
msgid "agent-address"
msgstr "адреса агента"
# Default: Airing
msgid "airing"
msgstr "емитовање"
# Default: Akas
msgid "akas"
msgstr "псеудоним"
# Default: Akas from release info
msgid "akas-from-release-info"
msgstr "такође познато из информација о издању"
# Default: All products
msgid "all-products"
msgstr "сви производи"
# Default: Alternate language version of
msgid "alternate-language-version-of"
msgstr "верзија на алтернативном језику"
# Default: Alternate versions
msgid "alternate-versions"
msgstr "алтернативне верзије"
# Default: Amazon reviews
msgid "amazon-reviews"
msgstr "Амазонове критике"
# Default: Analog left
msgid "analog-left"
msgstr "аналогно лево"
# Default: Analog right
msgid "analog-right"
msgstr "аналогно десно"
# Default: Animation department
msgid "animation-department"
msgstr "одељење за анимацију"
# Default: Archive footage
msgid "archive-footage"
msgstr "архивски снимци"
# Default: Arithmetic mean
msgid "arithmetic-mean"
msgstr "аритметичка средина"
# Default: Art department
msgid "art-department"
msgstr "уметничко одељење"
# Default: Art direction
msgid "art-direction"
msgstr "уметничко усмерење"
# Default: Art director
msgid "art-director"
msgstr "уметнички директор"
# Default: Article
msgid "article"
msgstr "чланак"
# Default: Asin
msgid "asin"
msgstr ""
# Default: Aspect ratio
msgid "aspect-ratio"
msgstr "однос ширине и висине"
# Default: Assigner
msgid "assigner"
msgstr ""
# Default: Assistant director
msgid "assistant-director"
msgstr "асистент директора"
# Default: Auctions
msgid "auctions"
msgstr "аукције"
# Default: Audio noise
msgid "audio-noise"
msgstr "аудио шум"
# Default: Audio quality
msgid "audio-quality"
msgstr "квалитет звука"
# Default: Award
msgid "award"
msgstr "награда"
# Default: Awards
msgid "awards"
msgstr "награде"
# Default: Biographical movies
msgid "biographical-movies"
msgstr "биографски филмови"
# Default: Biography
msgid "biography"
msgstr "биографија"
# Default: Biography print
msgid "biography-print"
msgstr ""
# Default: Birth date
msgid "birth-date"
msgstr "Датум рођења"
# Default: Birth name
msgid "birth-name"
msgstr "рођено име"
# Default: Birth notes
msgid "birth-notes"
msgstr "белешке о рођењу"
# Default: Body
msgid "body"
msgstr "тело"
# Default: Book
msgid "book"
msgstr "књига"
# Default: Books
msgid "books"
msgstr "књиге"
# Default: Bottom 100 rank
msgid "bottom-100-rank"
msgstr ""
# Default: Budget
msgid "budget"
msgstr "буџет"
# Default: Business
msgid "business"
msgstr "бизнис"
# Default: By arrangement with
msgid "by-arrangement-with"
msgstr "по договору са"
# Default: Camera
msgid "camera"
msgstr "камера"
# Default: Camera and electrical department
msgid "camera-and-electrical-department"
msgstr "одељење за камере и електронику"
# Default: Canonical episode title
msgid "canonical-episode-title"
msgstr ""
# Default: Canonical name
msgid "canonical-name"
msgstr ""
# Default: Canonical series title
msgid "canonical-series-title"
msgstr ""
# Default: Canonical title
msgid "canonical-title"
msgstr ""
# Default: Cast
msgid "cast"
msgstr "глумци"
# Default: Casting department
msgid "casting-department"
msgstr "подела улога"
# Default: Casting director
msgid "casting-director"
msgstr "шеф одељења за расподелу улога"
# Default: Catalog number
msgid "catalog-number"
msgstr "каталошки број"
# Default: Category
msgid "category"
msgstr "категорија"
# Default: Certificate
msgid "certificate"
msgstr "потврда"
# Default: Certificates
msgid "certificates"
msgstr "потврде"
# Default: Certification
msgid "certification"
msgstr "сертификација"
# Default: Channel
msgid "channel"
msgstr "канал"
# Default: Character
msgid "character"
msgstr "карактер"
# Default: Cinematographer
msgid "cinematographer"
msgstr "кинематограф"
# Default: Cinematographic process
msgid "cinematographic-process"
msgstr "кинематографски процес"
# Default: Close captions teletext ld g
msgid "close-captions-teletext-ld-g"
msgstr ""
# Default: Color info
msgid "color-info"
msgstr "информације о боји"
# Default: Color information
msgid "color-information"
msgstr "информације о боји"
# Default: Color rendition
msgid "color-rendition"
msgstr "преношење боја"
# Default: Company
msgid "company"
msgstr "компанија"
# Default: Complete cast
msgid "complete-cast"
msgstr "комплетна глумачка екипа"
# Default: Complete crew
msgid "complete-crew"
msgstr "комплетна посада"
# Default: Composer
msgid "composer"
msgstr "композитор"
# Default: Connections
msgid "connections"
msgstr "везе"
# Default: Contrast
msgid "contrast"
msgstr "контраст"
# Default: Copyright holder
msgid "copyright-holder"
msgstr "носилац ауторских права"
# Default: Costume department
msgid "costume-department"
msgstr "одељење за костиме"
# Default: Costume designer
msgid "costume-designer"
msgstr "костимограф"
# Default: Countries
msgid "countries"
msgstr "земље"
# Default: Country
msgid "country"
msgstr "земља"
# Default: Courtesy of
msgid "courtesy-of"
msgstr "љубазношћу"
# Default: Cover
msgid "cover"
msgstr "насловница"
# Default: Cover url
msgid "cover-url"
msgstr "адреса насловнице"
# Default: Crazy credits
msgid "crazy-credits"
msgstr ""
# Default: Creator
msgid "creator"
msgstr "стваралац"
# Default: Current role
msgid "current-role"
msgstr "тренутна улога"
# Default: Database
msgid "database"
msgstr "база података"
# Default: Date
msgid "date"
msgstr "датум"
# Default: Death date
msgid "death-date"
msgstr "датум смрти"
# Default: Death notes
msgid "death-notes"
msgstr "смртне белешке"
# Default: Demographic
msgid "demographic"
msgstr "демографски"
# Default: Description
msgid "description"
msgstr "опис"
# Default: Dialogue intellegibility
msgid "dialogue-intellegibility"
msgstr "разумљивост дијалога"
# Default: Digital sound
msgid "digital-sound"
msgstr "дигитални звук"
# Default: Director
msgid "director"
msgstr "директор"
# Default: Disc format
msgid "disc-format"
msgstr "формат диска"
# Default: Disc size
msgid "disc-size"
msgstr "величина диска"
# Default: Distributors
msgid "distributors"
msgstr "дистрибутери"
# Default: Dvd
msgid "dvd"
msgstr "ДВД"
# Default: Dvd features
msgid "dvd-features"
msgstr "ДВД карактеристике"
# Default: Dvd format
msgid "dvd-format"
msgstr "ДВД формат"
# Default: Dvds
msgid "dvds"
msgstr "ДВД-ови"
# Default: Dynamic range
msgid "dynamic-range"
msgstr "динамички опсег"
# Default: Edited from
msgid "edited-from"
msgstr "уређено од"
# Default: Edited into
msgid "edited-into"
msgstr "уређивао у"
# Default: Editor
msgid "editor"
msgstr "уредник"
# Default: Editorial department
msgid "editorial-department"
msgstr "редакција"
# Default: Episode
msgid "episode"
msgstr "епизода"
# Default: Episode of
msgid "episode-of"
msgstr "епизода"
# Default: Episode title
msgid "episode-title"
msgstr "наслов епизоде"
# Default: Episodes
msgid "episodes"
msgstr "епизоде"
# Default: Episodes rating
msgid "episodes-rating"
msgstr "рејтинг епизода"
# Default: Essays
msgid "essays"
msgstr "есеји"
# Default: External reviews
msgid "external-reviews"
msgstr "екстерне критике"
# Default: Faqs
msgid "faqs"
msgstr "честа питања и одговори"
# Default: Feature
msgid "feature"
msgstr "одлика"
# Default: Featured in
msgid "featured-in"
msgstr "представљена у"
# Default: Features
msgid "features"
msgstr "карактеристике"
# Default: Film negative format
msgid "film-negative-format"
msgstr "филм негативног формата"
# Default: Filming dates
msgid "filming-dates"
msgstr "датуми снимања"
# Default: Filmography
msgid "filmography"
msgstr "филмографија"
# Default: Followed by
msgid "followed-by"
msgstr "затим"
# Default: Follows
msgid "follows"
msgstr "у наставку"
# Default: For
msgid "for"
msgstr "за"
# Default: Frequency response
msgid "frequency-response"
msgstr ""
# Default: From
msgid "from"
msgstr "од"
# Default: Full article link
msgid "full-article-link"
msgstr "цео линк до чланка"
# Default: Full size cover url
msgid "full-size-cover-url"
msgstr "адреса насловнице у пуној величини"
# Default: Full size headshot
msgid "full-size-headshot"
msgstr "портрет у пуној величини"
# Default: Genres
msgid "genres"
msgstr "жанрови"
# Default: Goofs
msgid "goofs"
msgstr "глупане"
# Default: Gross
msgid "gross"
msgstr ""
# Default: Group genre
msgid "group-genre"
msgstr "групни жанр"
# Default: Headshot
msgid "headshot"
msgstr "портрет"
# Default: Height
msgid "height"
msgstr "висина"
# Default: Imdbindex
msgid "imdbindex"
msgstr "ИМдБ индекс"
# Default: In development
msgid "in-development"
msgstr "у развоју"
# Default: Interview
msgid "interview"
msgstr "интервју"
# Default: Interviews
msgid "interviews"
msgstr "интервјуи"
# Default: Introduction
msgid "introduction"
msgstr "увод"
# Default: Item
msgid "item"
msgstr "ставка"
# Default: Keywords
msgid "keywords"
msgstr "кључне речи"
# Default: Kind
msgid "kind"
msgstr "врста"
# Default: Label
msgid "label"
msgstr "етикета"
# Default: Laboratory
msgid "laboratory"
msgstr "лабораторија"
# Default: Language
msgid "language"
msgstr "Језик"
# Default: Languages
msgid "languages"
msgstr "језици"
# Default: Laserdisc
msgid "laserdisc"
msgstr "ласерски диск"
# Default: Laserdisc title
msgid "laserdisc-title"
msgstr "наслов ласерског диска"
# Default: Length
msgid "length"
msgstr "дужина"
# Default: Line
msgid "line"
msgstr "линија"
# Default: Link
msgid "link"
msgstr "веза"
# Default: Link text
msgid "link-text"
msgstr "текст везе"
# Default: Literature
msgid "literature"
msgstr "књижевност"
# Default: Locations
msgid "locations"
msgstr "локације"
# Default: Long imdb canonical name
msgid "long-imdb-canonical-name"
msgstr ""
# Default: Long imdb canonical title
msgid "long-imdb-canonical-title"
msgstr ""
# Default: Long imdb episode title
msgid "long-imdb-episode-title"
msgstr "дуг ИМдБ наслов епизоде"
# Default: Long imdb name
msgid "long-imdb-name"
msgstr "дуго ИМдБ име"
# Default: Long imdb title
msgid "long-imdb-title"
msgstr "дуг ИМдБ наслов"
# Default: Magazine cover photo
msgid "magazine-cover-photo"
msgstr "насловна фотографија часописа"
# Default: Make up
msgid "make-up"
msgstr "надокнадити"
# Default: Master format
msgid "master-format"
msgstr "главни формат"
# Default: Median
msgid "median"
msgstr "медијана"
# Default: Merchandising links
msgid "merchandising-links"
msgstr "трговачке везе"
# Default: Mini biography
msgid "mini-biography"
msgstr "мини биографија"
# Default: Misc links
msgid "misc-links"
msgstr "разне везе"
# Default: Miscellaneous companies
msgid "miscellaneous-companies"
msgstr "разне компаније"
# Default: Miscellaneous crew
msgid "miscellaneous-crew"
msgstr "разна екипа"
# Default: Movie
msgid "movie"
msgstr "филм"
# Default: Mpaa
msgid "mpaa"
msgstr ""
# Default: Music department
msgid "music-department"
msgstr "музичко одељење"
# Default: Name
msgid "name"
msgstr "име"
# Default: News
msgid "news"
msgstr "вести"
# Default: Newsgroup reviews
msgid "newsgroup-reviews"
msgstr ""
# Default: Nick names
msgid "nick-names"
msgstr "надимци"
# Default: Notes
msgid "notes"
msgstr "белешке"
# Default: Novel
msgid "novel"
msgstr "роман"
# Default: Number
msgid "number"
msgstr "број"
# Default: Number of chapter stops
msgid "number-of-chapter-stops"
msgstr "број заустављања поглавља"
# Default: Number of episodes
msgid "number-of-episodes"
msgstr "број епизода"
# Default: Number of seasons
msgid "number-of-seasons"
msgstr "број сезона"
# Default: Number of sides
msgid "number-of-sides"
msgstr "број страница"
# Default: Number of votes
msgid "number-of-votes"
msgstr "број гласова"
# Default: Official retail price
msgid "official-retail-price"
msgstr "званична малопродајна цена"
# Default: Official sites
msgid "official-sites"
msgstr "званичне странице"
# Default: Opening weekend
msgid "opening-weekend"
msgstr "отварање викенда"
# Default: Original air date
msgid "original-air-date"
msgstr "оригинални датум емитовања"
# Default: Original music
msgid "original-music"
msgstr "оригинална музика"
# Default: Original title
msgid "original-title"
msgstr "оригинални наслов"
# Default: Other literature
msgid "other-literature"
msgstr "друга литература"
# Default: Other works
msgid "other-works"
msgstr "друга дела"
# Default: Parents guide
msgid "parents-guide"
msgstr "инструкције за родитеље"
# Default: Performed by
msgid "performed-by"
msgstr "извођено од"
# Default: Person
msgid "person"
msgstr "особа"
# Default: Photo sites
msgid "photo-sites"
msgstr "фото сајтови"
# Default: Pictorial
msgid "pictorial"
msgstr "сликовито"
# Default: Picture format
msgid "picture-format"
msgstr "формат слике"
# Default: Plot
msgid "plot"
msgstr "заплет"
# Default: Plot outline
msgid "plot-outline"
msgstr "обрис заплета"
# Default: Portrayed in
msgid "portrayed-in"
msgstr "приказан у"
# Default: Pressing plant
msgid "pressing-plant"
msgstr "постројење за пресовање"
# Default: Printed film format
msgid "printed-film-format"
msgstr "формат штампаног филма"
# Default: Printed media reviews
msgid "printed-media-reviews"
msgstr "критике штампаних медија"
# Default: Producer
msgid "producer"
msgstr "произвођач"
# Default: Production companies
msgid "production-companies"
msgstr "производне компаније"
# Default: Production country
msgid "production-country"
msgstr "земља производње"
# Default: Production dates
msgid "production-dates"
msgstr "датуми производње"
# Default: Production design
msgid "production-design"
msgstr "дизајн производње"
# Default: Production designer
msgid "production-designer"
msgstr "сценограф"
# Default: Production manager
msgid "production-manager"
msgstr "руководилац производње"
# Default: Production process protocol
msgid "production-process-protocol"
msgstr "протокол производног процеса"
# Default: Quality of source
msgid "quality-of-source"
msgstr "квалитет извора"
# Default: Quality program
msgid "quality-program"
msgstr "квалитетни програм"
# Default: Quote
msgid "quote"
msgstr "цитат"
# Default: Quotes
msgid "quotes"
msgstr "цитати"
# Default: Rating
msgid "rating"
msgstr "рејтинг"
# Default: Recommendations
msgid "recommendations"
msgstr "препоруке"
# Default: Referenced in
msgid "referenced-in"
msgstr "упућено у"
# Default: References
msgid "references"
msgstr "референце"
# Default: Region
msgid "region"
msgstr "регион"
# Default: Release country
msgid "release-country"
msgstr "држава издања"
# Default: Release date
msgid "release-date"
msgstr "Датум издања"
# Default: Release dates
msgid "release-dates"
msgstr "датуми издања"
# Default: Remade as
msgid "remade-as"
msgstr "преправљен као"
# Default: Remake of
msgid "remake-of"
msgstr "преправка од"
# Default: Rentals
msgid "rentals"
msgstr "изнајмљивање"
# Default: Result
msgid "result"
msgstr "резултат"
# Default: Review
msgid "review"
msgstr "преглед"
# Default: Review author
msgid "review-author"
msgstr "рецензије аутора"
# Default: Review kind
msgid "review-kind"
msgstr "врста рецензија"
# Default: Runtime
msgid "runtime"
msgstr "време приказвања"
# Default: Runtimes
msgid "runtimes"
msgstr "времена приказивања"
# Default: Salary history
msgid "salary-history"
msgstr "историја плата"
# Default: Screenplay teleplay
msgid "screenplay-teleplay"
msgstr ""
# Default: Season
msgid "season"
msgstr "сезона"
# Default: Second unit director or assistant director
msgid "second-unit-director-or-assistant-director"
msgstr "заменик директора"
# Default: Self
msgid "self"
msgstr "себе"
# Default: Series animation department
msgid "series-animation-department"
msgstr "одсек за анимацију серије"
# Default: Series art department
msgid "series-art-department"
msgstr ""
# Default: Series assistant directors
msgid "series-assistant-directors"
msgstr ""
# Default: Series camera department
msgid "series-camera-department"
msgstr ""
# Default: Series casting department
msgid "series-casting-department"
msgstr ""
# Default: Series cinematographers
msgid "series-cinematographers"
msgstr ""
# Default: Series costume department
msgid "series-costume-department"
msgstr ""
# Default: Series editorial department
msgid "series-editorial-department"
msgstr "уредништво серије"
# Default: Series editors
msgid "series-editors"
msgstr "уредници серија"
# Default: Series make up department
msgid "series-make-up-department"
msgstr ""
# Default: Series miscellaneous
msgid "series-miscellaneous"
msgstr ""
# Default: Series music department
msgid "series-music-department"
msgstr "музичко одељење серије"
# Default: Series producers
msgid "series-producers"
msgstr "продуценти серија"
# Default: Series production designers
msgid "series-production-designers"
msgstr "дизајнери серијске производње"
# Default: Series production managers
msgid "series-production-managers"
msgstr "руководиоци серијске производње"
# Default: Series sound department
msgid "series-sound-department"
msgstr "серија звучни одсек"
# Default: Series special effects department
msgid "series-special-effects-department"
msgstr "серија одељење за специјалне ефекте"
# Default: Series stunts
msgid "series-stunts"
msgstr "серијске вратоломије"
# Default: Series title
msgid "series-title"
msgstr "наслов серије"
# Default: Series transportation department
msgid "series-transportation-department"
msgstr "серијско одељење за транспорт"
# Default: Series visual effects department
msgid "series-visual-effects-department"
msgstr "серијско одељење за визуелне ефекте"
# Default: Series writers
msgid "series-writers"
msgstr "писци серија"
# Default: Series years
msgid "series-years"
msgstr "серије година"
# Default: Set decoration
msgid "set-decoration"
msgstr "декорација"
# Default: Sharpness
msgid "sharpness"
msgstr "оштрина"
# Default: Similar to
msgid "similar-to"
msgstr "слично"
# Default: Smart canonical episode title
msgid "smart-canonical-episode-title"
msgstr ""
# Default: Smart canonical series title
msgid "smart-canonical-series-title"
msgstr ""
# Default: Smart canonical title
msgid "smart-canonical-title"
msgstr ""
# Default: Smart long imdb canonical title
msgid "smart-long-imdb-canonical-title"
msgstr ""
# Default: Sound clips
msgid "sound-clips"
msgstr "звучни клипови"
# Default: Sound crew
msgid "sound-crew"
msgstr "звучна екипа"
# Default: Sound encoding
msgid "sound-encoding"
msgstr "кодирање звука"
# Default: Sound mix
msgid "sound-mix"
msgstr "звучни микс"
# Default: Soundtrack
msgid "soundtrack"
msgstr "звучна трака"
# Default: Spaciality
msgid "spaciality"
msgstr "просторност"
# Default: Special effects
msgid "special-effects"
msgstr "специјални ефекти"
# Default: Special effects companies
msgid "special-effects-companies"
msgstr "компаније за специјалне ефекте"
# Default: Special effects department
msgid "special-effects-department"
msgstr "одељење за специјалне ефекте"
# Default: Spin off
msgid "spin-off"
msgstr ""
# Default: Spin off from
msgid "spin-off-from"
msgstr ""
# Default: Spoofed in
msgid "spoofed-in"
msgstr "подваљен"
# Default: Spoofs
msgid "spoofs"
msgstr "подвале"
# Default: Spouse
msgid "spouse"
msgstr "супруга"
# Default: Status of availablility
msgid "status-of-availablility"
msgstr "доступност"
# Default: Studio
msgid "studio"
msgstr "студио"
# Default: Studios
msgid "studios"
msgstr "студија"
# Default: Stunt performer
msgid "stunt-performer"
msgstr "каскадер"
# Default: Stunts
msgid "stunts"
msgstr "вратоломије"
# Default: Subtitles
msgid "subtitles"
msgstr "титлови"
# Default: Supplement
msgid "supplement"
msgstr "додатак"
# Default: Supplements
msgid "supplements"
msgstr "суплементи"
# Default: Synopsis
msgid "synopsis"
msgstr "резиме"
# Default: Taglines
msgid "taglines"
msgstr "слогани"
# Default: Tech info
msgid "tech-info"
msgstr "техничке информације"
# Default: Thanks
msgid "thanks"
msgstr "хвала"
# Default: Time
msgid "time"
msgstr "време"
# Default: Title
msgid "title"
msgstr "наслов"
# Default: Titles in this product
msgid "titles-in-this-product"
msgstr "наслови у овом производу"
# Default: To
msgid "to"
msgstr "до"
# Default: Top 250 rank
msgid "top-250-rank"
msgstr ""
# Default: Trade mark
msgid "trade-mark"
msgstr "заштитни знак"
# Default: Transportation department
msgid "transportation-department"
msgstr "транспортно одељење"
# Default: Trivia
msgid "trivia"
msgstr "тривијалности"
# Default: Tv
msgid "tv"
msgstr "ТВ"
# Default: Under license from
msgid "under-license-from"
msgstr "по лиценци од"
# Default: Unknown link
msgid "unknown-link"
msgstr "непозната веза"
# Default: Upc
msgid "upc"
msgstr ""
# Default: Version of
msgid "version-of"
msgstr "верзија од"
# Default: Vhs
msgid "vhs"
msgstr ""
# Default: Video
msgid "video"
msgstr "видео"
# Default: Video artifacts
msgid "video-artifacts"
msgstr "видео артефакти"
# Default: Video clips
msgid "video-clips"
msgstr "видео исечци"
# Default: Video noise
msgid "video-noise"
msgstr "видео шум"
# Default: Video quality
msgid "video-quality"
msgstr "Видео квалитет"
# Default: Video standard
msgid "video-standard"
msgstr "видео стандард"
# Default: Visual effects
msgid "visual-effects"
msgstr "визуелни ефекти"
# Default: Votes
msgid "votes"
msgstr "гласови"
# Default: Votes distribution
msgid "votes-distribution"
msgstr "расподела гласова"
# Default: Weekend gross
msgid "weekend-gross"
msgstr ""
# Default: Where now
msgid "where-now"
msgstr "где сада"
# Default: With
msgid "with"
msgstr "са"
# Default: Writer
msgid "writer"
msgstr "писац"
# Default: Written by
msgid "written-by"
msgstr "написао"
# Default: Year
msgid "year"
msgstr "година"
# Default: Zshops
msgid "zshops"
msgstr ""
| 25,808 | Python | .py | 979 | 21.604699 | 53 | 0.806061 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
5,199 | imdbpy-pt_BR.po | cinemagoer_cinemagoer/imdb/locale/imdbpy-pt_BR.po | # Gettext message file for imdbpy
# Translators:
# Wagner Marques Oliveira <[email protected]>, 2015
msgid ""
msgstr ""
"Project-Id-Version: Cinemagoer\n"
"POT-Creation-Date: 2010-03-18 14:35+0000\n"
"PO-Revision-Date: 2016-03-28 20:40+0000\n"
"Last-Translator: Wagner Marques Oliveira <[email protected]>\n"
"Language-Team: Portuguese (Brazil) (http://www.transifex.com/davide_alberani/imdbpy/language/pt_BR/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Domain: imdbpy\n"
"Language: pt_BR\n"
"Language-Code: en\n"
"Language-Name: English\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Preferred-Encodings: utf-8\n"
# Default: Actor
msgid "actor"
msgstr "ator"
# Default: Actress
msgid "actress"
msgstr "atriz"
# Default: Adaption
msgid "adaption"
msgstr "adaptação"
# Default: Additional information
msgid "additional-information"
msgstr "informação-adicional"
# Default: Admissions
msgid "admissions"
msgstr "admissões"
# Default: Agent address
msgid "agent-address"
msgstr "endereço-de-agente"
# Default: Airing
msgid "airing"
msgstr "no ar"
# Default: Akas
msgid "akas"
msgstr "também conhecido como"
# Default: Akas from release info
msgid "akas-from-release-info"
msgstr "também conhecido como (das informações de lançamento)"
# Default: All products
msgid "all-products"
msgstr "todos-produtos"
# Default: Alternate language version of
msgid "alternate-language-version-of"
msgstr "Versão em idioma alternativo de"
# Default: Alternate versions
msgid "alternate-versions"
msgstr "Versões alternativas"
# Default: Amazon reviews
msgid "amazon-reviews"
msgstr "Avaliações da Amazon"
# Default: Analog left
msgid "analog-left"
msgstr ""
# Default: Analog right
msgid "analog-right"
msgstr ""
# Default: Animation department
msgid "animation-department"
msgstr "Departamento de animação"
# Default: Archive footage
msgid "archive-footage"
msgstr "Arquivo de filmagem"
# Default: Arithmetic mean
msgid "arithmetic-mean"
msgstr "Média aritmética"
# Default: Art department
msgid "art-department"
msgstr "Departamento de arte"
# Default: Art direction
msgid "art-direction"
msgstr "Direção da arte"
# Default: Art director
msgid "art-director"
msgstr "Diretor de arte"
# Default: Article
msgid "article"
msgstr "Artigo"
# Default: Asin
msgid "asin"
msgstr "ASIN"
# Default: Aspect ratio
msgid "aspect-ratio"
msgstr "Proporção da tela"
# Default: Assigner
msgid "assigner"
msgstr ""
# Default: Assistant director
msgid "assistant-director"
msgstr "Diretor assistente"
# Default: Auctions
msgid "auctions"
msgstr "Leilões"
# Default: Audio noise
msgid "audio-noise"
msgstr ""
# Default: Audio quality
msgid "audio-quality"
msgstr "Qualidade de áudio"
# Default: Award
msgid "award"
msgstr "Prêmio"
# Default: Awards
msgid "awards"
msgstr "Prêmios"
# Default: Biographical movies
msgid "biographical-movies"
msgstr "Filmes biográficos"
# Default: Biography
msgid "biography"
msgstr "Biografia"
# Default: Biography print
msgid "biography-print"
msgstr ""
# Default: Birth date
msgid "birth-date"
msgstr "Data de nascimento"
# Default: Birth name
msgid "birth-name"
msgstr "Nome de nascença"
# Default: Birth notes
msgid "birth-notes"
msgstr "Notas de nascimento"
# Default: Body
msgid "body"
msgstr "Corpo"
# Default: Book
msgid "book"
msgstr "Livro"
# Default: Books
msgid "books"
msgstr "Livros"
# Default: Bottom 100 rank
msgid "bottom-100-rank"
msgstr ""
# Default: Budget
msgid "budget"
msgstr ""
# Default: Business
msgid "business"
msgstr ""
# Default: By arrangement with
msgid "by-arrangement-with"
msgstr ""
# Default: Camera
msgid "camera"
msgstr ""
# Default: Camera and electrical department
msgid "camera-and-electrical-department"
msgstr ""
# Default: Canonical episode title
msgid "canonical-episode-title"
msgstr ""
# Default: Canonical name
msgid "canonical-name"
msgstr ""
# Default: Canonical series title
msgid "canonical-series-title"
msgstr ""
# Default: Canonical title
msgid "canonical-title"
msgstr ""
# Default: Cast
msgid "cast"
msgstr ""
# Default: Casting department
msgid "casting-department"
msgstr ""
# Default: Casting director
msgid "casting-director"
msgstr ""
# Default: Catalog number
msgid "catalog-number"
msgstr ""
# Default: Category
msgid "category"
msgstr ""
# Default: Certificate
msgid "certificate"
msgstr ""
# Default: Certificates
msgid "certificates"
msgstr ""
# Default: Certification
msgid "certification"
msgstr ""
# Default: Channel
msgid "channel"
msgstr ""
# Default: Character
msgid "character"
msgstr ""
# Default: Cinematographer
msgid "cinematographer"
msgstr ""
# Default: Cinematographic process
msgid "cinematographic-process"
msgstr ""
# Default: Close captions teletext ld g
msgid "close-captions-teletext-ld-g"
msgstr ""
# Default: Color info
msgid "color-info"
msgstr ""
# Default: Color information
msgid "color-information"
msgstr ""
# Default: Color rendition
msgid "color-rendition"
msgstr ""
# Default: Company
msgid "company"
msgstr ""
# Default: Complete cast
msgid "complete-cast"
msgstr ""
# Default: Complete crew
msgid "complete-crew"
msgstr ""
# Default: Composer
msgid "composer"
msgstr ""
# Default: Connections
msgid "connections"
msgstr ""
# Default: Contrast
msgid "contrast"
msgstr ""
# Default: Copyright holder
msgid "copyright-holder"
msgstr ""
# Default: Costume department
msgid "costume-department"
msgstr ""
# Default: Costume designer
msgid "costume-designer"
msgstr ""
# Default: Countries
msgid "countries"
msgstr ""
# Default: Country
msgid "country"
msgstr ""
# Default: Courtesy of
msgid "courtesy-of"
msgstr ""
# Default: Cover
msgid "cover"
msgstr ""
# Default: Cover url
msgid "cover-url"
msgstr ""
# Default: Crazy credits
msgid "crazy-credits"
msgstr ""
# Default: Creator
msgid "creator"
msgstr ""
# Default: Current role
msgid "current-role"
msgstr ""
# Default: Database
msgid "database"
msgstr ""
# Default: Date
msgid "date"
msgstr ""
# Default: Death date
msgid "death-date"
msgstr ""
# Default: Death notes
msgid "death-notes"
msgstr ""
# Default: Demographic
msgid "demographic"
msgstr ""
# Default: Description
msgid "description"
msgstr ""
# Default: Dialogue intellegibility
msgid "dialogue-intellegibility"
msgstr ""
# Default: Digital sound
msgid "digital-sound"
msgstr ""
# Default: Director
msgid "director"
msgstr ""
# Default: Disc format
msgid "disc-format"
msgstr ""
# Default: Disc size
msgid "disc-size"
msgstr ""
# Default: Distributors
msgid "distributors"
msgstr ""
# Default: Dvd
msgid "dvd"
msgstr ""
# Default: Dvd features
msgid "dvd-features"
msgstr ""
# Default: Dvd format
msgid "dvd-format"
msgstr ""
# Default: Dvds
msgid "dvds"
msgstr ""
# Default: Dynamic range
msgid "dynamic-range"
msgstr ""
# Default: Edited from
msgid "edited-from"
msgstr ""
# Default: Edited into
msgid "edited-into"
msgstr ""
# Default: Editor
msgid "editor"
msgstr ""
# Default: Editorial department
msgid "editorial-department"
msgstr ""
# Default: Episode
msgid "episode"
msgstr ""
# Default: Episode of
msgid "episode-of"
msgstr ""
# Default: Episode title
msgid "episode-title"
msgstr ""
# Default: Episodes
msgid "episodes"
msgstr ""
# Default: Episodes rating
msgid "episodes-rating"
msgstr ""
# Default: Essays
msgid "essays"
msgstr ""
# Default: External reviews
msgid "external-reviews"
msgstr ""
# Default: Faqs
msgid "faqs"
msgstr ""
# Default: Feature
msgid "feature"
msgstr ""
# Default: Featured in
msgid "featured-in"
msgstr ""
# Default: Features
msgid "features"
msgstr ""
# Default: Film negative format
msgid "film-negative-format"
msgstr ""
# Default: Filming dates
msgid "filming-dates"
msgstr ""
# Default: Filmography
msgid "filmography"
msgstr ""
# Default: Followed by
msgid "followed-by"
msgstr ""
# Default: Follows
msgid "follows"
msgstr ""
# Default: For
msgid "for"
msgstr ""
# Default: Frequency response
msgid "frequency-response"
msgstr ""
# Default: From
msgid "from"
msgstr ""
# Default: Full article link
msgid "full-article-link"
msgstr ""
# Default: Full size cover url
msgid "full-size-cover-url"
msgstr ""
# Default: Full size headshot
msgid "full-size-headshot"
msgstr ""
# Default: Genres
msgid "genres"
msgstr ""
# Default: Goofs
msgid "goofs"
msgstr ""
# Default: Gross
msgid "gross"
msgstr ""
# Default: Group genre
msgid "group-genre"
msgstr ""
# Default: Headshot
msgid "headshot"
msgstr ""
# Default: Height
msgid "height"
msgstr ""
# Default: Imdbindex
msgid "imdbindex"
msgstr ""
# Default: In development
msgid "in-development"
msgstr ""
# Default: Interview
msgid "interview"
msgstr ""
# Default: Interviews
msgid "interviews"
msgstr ""
# Default: Introduction
msgid "introduction"
msgstr ""
# Default: Item
msgid "item"
msgstr ""
# Default: Keywords
msgid "keywords"
msgstr ""
# Default: Kind
msgid "kind"
msgstr ""
# Default: Label
msgid "label"
msgstr ""
# Default: Laboratory
msgid "laboratory"
msgstr ""
# Default: Language
msgid "language"
msgstr ""
# Default: Languages
msgid "languages"
msgstr ""
# Default: Laserdisc
msgid "laserdisc"
msgstr ""
# Default: Laserdisc title
msgid "laserdisc-title"
msgstr ""
# Default: Length
msgid "length"
msgstr ""
# Default: Line
msgid "line"
msgstr ""
# Default: Link
msgid "link"
msgstr ""
# Default: Link text
msgid "link-text"
msgstr ""
# Default: Literature
msgid "literature"
msgstr ""
# Default: Locations
msgid "locations"
msgstr ""
# Default: Long imdb canonical name
msgid "long-imdb-canonical-name"
msgstr ""
# Default: Long imdb canonical title
msgid "long-imdb-canonical-title"
msgstr ""
# Default: Long imdb episode title
msgid "long-imdb-episode-title"
msgstr ""
# Default: Long imdb name
msgid "long-imdb-name"
msgstr ""
# Default: Long imdb title
msgid "long-imdb-title"
msgstr ""
# Default: Magazine cover photo
msgid "magazine-cover-photo"
msgstr ""
# Default: Make up
msgid "make-up"
msgstr ""
# Default: Master format
msgid "master-format"
msgstr ""
# Default: Median
msgid "median"
msgstr ""
# Default: Merchandising links
msgid "merchandising-links"
msgstr ""
# Default: Mini biography
msgid "mini-biography"
msgstr ""
# Default: Misc links
msgid "misc-links"
msgstr ""
# Default: Miscellaneous companies
msgid "miscellaneous-companies"
msgstr ""
# Default: Miscellaneous crew
msgid "miscellaneous-crew"
msgstr ""
# Default: Movie
msgid "movie"
msgstr ""
# Default: Mpaa
msgid "mpaa"
msgstr ""
# Default: Music department
msgid "music-department"
msgstr ""
# Default: Name
msgid "name"
msgstr ""
# Default: News
msgid "news"
msgstr ""
# Default: Newsgroup reviews
msgid "newsgroup-reviews"
msgstr ""
# Default: Nick names
msgid "nick-names"
msgstr ""
# Default: Notes
msgid "notes"
msgstr ""
# Default: Novel
msgid "novel"
msgstr ""
# Default: Number
msgid "number"
msgstr ""
# Default: Number of chapter stops
msgid "number-of-chapter-stops"
msgstr ""
# Default: Number of episodes
msgid "number-of-episodes"
msgstr ""
# Default: Number of seasons
msgid "number-of-seasons"
msgstr ""
# Default: Number of sides
msgid "number-of-sides"
msgstr ""
# Default: Number of votes
msgid "number-of-votes"
msgstr ""
# Default: Official retail price
msgid "official-retail-price"
msgstr ""
# Default: Official sites
msgid "official-sites"
msgstr ""
# Default: Opening weekend
msgid "opening-weekend"
msgstr ""
# Default: Original air date
msgid "original-air-date"
msgstr ""
# Default: Original music
msgid "original-music"
msgstr ""
# Default: Original title
msgid "original-title"
msgstr ""
# Default: Other literature
msgid "other-literature"
msgstr ""
# Default: Other works
msgid "other-works"
msgstr ""
# Default: Parents guide
msgid "parents-guide"
msgstr ""
# Default: Performed by
msgid "performed-by"
msgstr ""
# Default: Person
msgid "person"
msgstr ""
# Default: Photo sites
msgid "photo-sites"
msgstr ""
# Default: Pictorial
msgid "pictorial"
msgstr ""
# Default: Picture format
msgid "picture-format"
msgstr ""
# Default: Plot
msgid "plot"
msgstr ""
# Default: Plot outline
msgid "plot-outline"
msgstr ""
# Default: Portrayed in
msgid "portrayed-in"
msgstr ""
# Default: Pressing plant
msgid "pressing-plant"
msgstr ""
# Default: Printed film format
msgid "printed-film-format"
msgstr ""
# Default: Printed media reviews
msgid "printed-media-reviews"
msgstr ""
# Default: Producer
msgid "producer"
msgstr ""
# Default: Production companies
msgid "production-companies"
msgstr ""
# Default: Production country
msgid "production-country"
msgstr ""
# Default: Production dates
msgid "production-dates"
msgstr ""
# Default: Production design
msgid "production-design"
msgstr ""
# Default: Production designer
msgid "production-designer"
msgstr ""
# Default: Production manager
msgid "production-manager"
msgstr ""
# Default: Production process protocol
msgid "production-process-protocol"
msgstr ""
# Default: Quality of source
msgid "quality-of-source"
msgstr ""
# Default: Quality program
msgid "quality-program"
msgstr ""
# Default: Quote
msgid "quote"
msgstr ""
# Default: Quotes
msgid "quotes"
msgstr ""
# Default: Rating
msgid "rating"
msgstr ""
# Default: Recommendations
msgid "recommendations"
msgstr ""
# Default: Referenced in
msgid "referenced-in"
msgstr ""
# Default: References
msgid "references"
msgstr ""
# Default: Region
msgid "region"
msgstr ""
# Default: Release country
msgid "release-country"
msgstr ""
# Default: Release date
msgid "release-date"
msgstr ""
# Default: Release dates
msgid "release-dates"
msgstr ""
# Default: Remade as
msgid "remade-as"
msgstr ""
# Default: Remake of
msgid "remake-of"
msgstr ""
# Default: Rentals
msgid "rentals"
msgstr ""
# Default: Result
msgid "result"
msgstr ""
# Default: Review
msgid "review"
msgstr ""
# Default: Review author
msgid "review-author"
msgstr ""
# Default: Review kind
msgid "review-kind"
msgstr ""
# Default: Runtime
msgid "runtime"
msgstr ""
# Default: Runtimes
msgid "runtimes"
msgstr ""
# Default: Salary history
msgid "salary-history"
msgstr ""
# Default: Screenplay teleplay
msgid "screenplay-teleplay"
msgstr ""
# Default: Season
msgid "season"
msgstr ""
# Default: Second unit director or assistant director
msgid "second-unit-director-or-assistant-director"
msgstr ""
# Default: Self
msgid "self"
msgstr ""
# Default: Series animation department
msgid "series-animation-department"
msgstr ""
# Default: Series art department
msgid "series-art-department"
msgstr ""
# Default: Series assistant directors
msgid "series-assistant-directors"
msgstr ""
# Default: Series camera department
msgid "series-camera-department"
msgstr ""
# Default: Series casting department
msgid "series-casting-department"
msgstr ""
# Default: Series cinematographers
msgid "series-cinematographers"
msgstr ""
# Default: Series costume department
msgid "series-costume-department"
msgstr ""
# Default: Series editorial department
msgid "series-editorial-department"
msgstr ""
# Default: Series editors
msgid "series-editors"
msgstr ""
# Default: Series make up department
msgid "series-make-up-department"
msgstr ""
# Default: Series miscellaneous
msgid "series-miscellaneous"
msgstr ""
# Default: Series music department
msgid "series-music-department"
msgstr ""
# Default: Series producers
msgid "series-producers"
msgstr ""
# Default: Series production designers
msgid "series-production-designers"
msgstr ""
# Default: Series production managers
msgid "series-production-managers"
msgstr ""
# Default: Series sound department
msgid "series-sound-department"
msgstr ""
# Default: Series special effects department
msgid "series-special-effects-department"
msgstr ""
# Default: Series stunts
msgid "series-stunts"
msgstr ""
# Default: Series title
msgid "series-title"
msgstr ""
# Default: Series transportation department
msgid "series-transportation-department"
msgstr ""
# Default: Series visual effects department
msgid "series-visual-effects-department"
msgstr ""
# Default: Series writers
msgid "series-writers"
msgstr ""
# Default: Series years
msgid "series-years"
msgstr ""
# Default: Set decoration
msgid "set-decoration"
msgstr ""
# Default: Sharpness
msgid "sharpness"
msgstr ""
# Default: Similar to
msgid "similar-to"
msgstr ""
# Default: Smart canonical episode title
msgid "smart-canonical-episode-title"
msgstr ""
# Default: Smart canonical series title
msgid "smart-canonical-series-title"
msgstr ""
# Default: Smart canonical title
msgid "smart-canonical-title"
msgstr ""
# Default: Smart long imdb canonical title
msgid "smart-long-imdb-canonical-title"
msgstr ""
# Default: Sound clips
msgid "sound-clips"
msgstr ""
# Default: Sound crew
msgid "sound-crew"
msgstr ""
# Default: Sound encoding
msgid "sound-encoding"
msgstr ""
# Default: Sound mix
msgid "sound-mix"
msgstr ""
# Default: Soundtrack
msgid "soundtrack"
msgstr ""
# Default: Spaciality
msgid "spaciality"
msgstr ""
# Default: Special effects
msgid "special-effects"
msgstr ""
# Default: Special effects companies
msgid "special-effects-companies"
msgstr ""
# Default: Special effects department
msgid "special-effects-department"
msgstr ""
# Default: Spin off
msgid "spin-off"
msgstr ""
# Default: Spin off from
msgid "spin-off-from"
msgstr ""
# Default: Spoofed in
msgid "spoofed-in"
msgstr ""
# Default: Spoofs
msgid "spoofs"
msgstr ""
# Default: Spouse
msgid "spouse"
msgstr ""
# Default: Status of availablility
msgid "status-of-availablility"
msgstr ""
# Default: Studio
msgid "studio"
msgstr ""
# Default: Studios
msgid "studios"
msgstr ""
# Default: Stunt performer
msgid "stunt-performer"
msgstr ""
# Default: Stunts
msgid "stunts"
msgstr ""
# Default: Subtitles
msgid "subtitles"
msgstr ""
# Default: Supplement
msgid "supplement"
msgstr ""
# Default: Supplements
msgid "supplements"
msgstr ""
# Default: Synopsis
msgid "synopsis"
msgstr ""
# Default: Taglines
msgid "taglines"
msgstr ""
# Default: Tech info
msgid "tech-info"
msgstr ""
# Default: Thanks
msgid "thanks"
msgstr ""
# Default: Time
msgid "time"
msgstr ""
# Default: Title
msgid "title"
msgstr ""
# Default: Titles in this product
msgid "titles-in-this-product"
msgstr ""
# Default: To
msgid "to"
msgstr ""
# Default: Top 250 rank
msgid "top-250-rank"
msgstr ""
# Default: Trade mark
msgid "trade-mark"
msgstr ""
# Default: Transportation department
msgid "transportation-department"
msgstr ""
# Default: Trivia
msgid "trivia"
msgstr ""
# Default: Tv
msgid "tv"
msgstr ""
# Default: Under license from
msgid "under-license-from"
msgstr ""
# Default: Unknown link
msgid "unknown-link"
msgstr ""
# Default: Upc
msgid "upc"
msgstr ""
# Default: Version of
msgid "version-of"
msgstr ""
# Default: Vhs
msgid "vhs"
msgstr ""
# Default: Video
msgid "video"
msgstr ""
# Default: Video artifacts
msgid "video-artifacts"
msgstr ""
# Default: Video clips
msgid "video-clips"
msgstr ""
# Default: Video noise
msgid "video-noise"
msgstr ""
# Default: Video quality
msgid "video-quality"
msgstr "Qualidade de vídeo"
# Default: Video standard
msgid "video-standard"
msgstr ""
# Default: Visual effects
msgid "visual-effects"
msgstr "Efeitos Visuais"
# Default: Votes
msgid "votes"
msgstr "Votos"
# Default: Votes distribution
msgid "votes-distribution"
msgstr "Distribuição de votos"
# Default: Weekend gross
msgid "weekend-gross"
msgstr ""
# Default: Where now
msgid "where-now"
msgstr ""
# Default: With
msgid "with"
msgstr "Com"
# Default: Writer
msgid "writer"
msgstr "Escritor(a)"
# Default: Written by
msgid "written-by"
msgstr "Escrito por"
# Default: Year
msgid "year"
msgstr "Ano"
# Default: Zshops
msgid "zshops"
msgstr ""
| 19,695 | Python | .py | 982 | 18.698574 | 104 | 0.78401 | cinemagoer/cinemagoer | 1,219 | 352 | 80 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.