index | package | name | docstring | code | signature
---|---|---|---|---|---
723,939 | fiscalyear | __ne__ | null | def __ne__(self, other: object) -> bool:
if isinstance(other, FiscalYear):
return self._fiscal_year != other._fiscal_year
else:
raise TypeError(
f"can't compare '{type(self).__name__}' to '{type(other).__name__}'"
)
| (self, other: object) -> bool |
723,940 | fiscalyear | __new__ | Constructor.
:param fiscal_year: The fiscal year
:returns: A newly constructed FiscalYear object
:raises ValueError: If ``fiscal_year`` is out of range
| def __new__(cls, fiscal_year: int) -> "FiscalYear":
"""Constructor.
:param fiscal_year: The fiscal year
:returns: A newly constructed FiscalYear object
:raises ValueError: If ``fiscal_year`` is out of range
"""
fiscal_year = _check_year(fiscal_year)
self = super(FiscalYear, cls).__new__(cls)
self._fiscal_year = fiscal_year
return self
| (cls, fiscal_year: int) -> fiscalyear.FiscalYear |
723,941 | fiscalyear | __repr__ | Convert to formal string, for repr().
>>> fy = FiscalYear(2017)
>>> repr(fy)
'FiscalYear(2017)'
| def __repr__(self) -> str:
"""Convert to formal string, for repr().
>>> fy = FiscalYear(2017)
>>> repr(fy)
'FiscalYear(2017)'
"""
return f"{self.__class__.__name__}({self._fiscal_year})"
| (self) -> str |
723,942 | fiscalyear | __str__ | Convert to informal string, for str().
>>> fy = FiscalYear(2017)
>>> str(fy)
'FY2017'
| def __str__(self) -> str:
"""Convert to informal string, for str().
>>> fy = FiscalYear(2017)
>>> str(fy)
'FY2017'
"""
return f"FY{self._fiscal_year}"
| (self) -> str |
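
A short usage sketch tying together the `FiscalYear` rows above (`__new__`, `__ne__`, `__repr__`, `__str__`); it only exercises behavior visible in those code cells.

```python
from fiscalyear import FiscalYear

fy = FiscalYear(2017)
print(repr(fy))                # 'FiscalYear(2017)'
print(str(fy))                 # 'FY2017'
print(fy != FiscalYear(2018))  # True: compares the stored _fiscal_year values
# Comparing against a non-FiscalYear object raises TypeError, per __ne__ above.
```
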
723,943 | fiscalyear | _FiscalMixin | Mixin for FiscalDate and FiscalDateTime that
provides the following common attributes in addition to
those provided by datetime.date and datetime.datetime:
| class _FiscalMixin:
"""Mixin for FiscalDate and FiscalDateTime that
provides the following common attributes in addition to
those provided by datetime.date and datetime.datetime:
"""
@property
def fiscal_year(self) -> int:
""":returns: The fiscal year"""
fiscal_self = cast(Union["FiscalDate", "FiscalDateTime"], self)
# The fiscal year can be at most 1 year away from the calendar year
if fiscal_self in FiscalYear(fiscal_self.year):
return fiscal_self.year
elif fiscal_self in FiscalYear(fiscal_self.year + 1):
return fiscal_self.year + 1
else:
return fiscal_self.year - 1
@property
def fiscal_quarter(self) -> int:
""":returns: The fiscal quarter"""
fiscal_self = cast(Union["FiscalDate", "FiscalDateTime"], self)
for quarter in range(1, 5):
q = FiscalQuarter(fiscal_self.fiscal_year, quarter)
if fiscal_self in q:
break
return quarter
@property
def fiscal_month(self) -> int:
""":returns: The fiscal month"""
fiscal_self = cast(Union["FiscalDate", "FiscalDateTime"], self)
for month in range(1, 13):
m = FiscalMonth(fiscal_self.fiscal_year, month)
if fiscal_self in m:
break
return month
@property
def fiscal_day(self) -> int:
""":returns: The fiscal day"""
fiscal_self = cast(Union["FiscalDate", "FiscalDateTime"], self)
fiscal_year = FiscalYear(fiscal_self.fiscal_year)
year_start = fiscal_year.start
if isinstance(fiscal_self, FiscalDate):
delta = cast(datetime.date, fiscal_self) - year_start.date()
else:
delta = fiscal_self - year_start
return delta.days + 1
@property
def prev_fiscal_year(self) -> FiscalYear:
""":returns: The previous fiscal year"""
return FiscalYear(self.fiscal_year - 1)
@property
def next_fiscal_year(self) -> FiscalYear:
""":returns: The next fiscal year"""
return FiscalYear(self.fiscal_year + 1)
@property
def prev_fiscal_quarter(self) -> FiscalQuarter:
""":returns: The previous fiscal quarter"""
fiscal_quarter = FiscalQuarter(self.fiscal_year, self.fiscal_quarter)
return fiscal_quarter.prev_fiscal_quarter
@property
def next_fiscal_quarter(self) -> FiscalQuarter:
""":returns: The next fiscal quarter"""
fiscal_quarter = FiscalQuarter(self.fiscal_year, self.fiscal_quarter)
return fiscal_quarter.next_fiscal_quarter
@property
def prev_fiscal_month(self) -> FiscalMonth:
""":returns: The previous fiscal month"""
fiscal_month = FiscalMonth(self.fiscal_year, self.fiscal_month)
return fiscal_month.prev_fiscal_month
@property
def next_fiscal_month(self) -> FiscalMonth:
""":returns: The next fiscal month"""
fiscal_month = FiscalMonth(self.fiscal_year, self.fiscal_month)
return fiscal_month.next_fiscal_month
@property
def prev_fiscal_day(self) -> FiscalDay:
""":returns: The previous fiscal day"""
fiscal_day = FiscalDay(self.fiscal_year, self.fiscal_day)
return fiscal_day.prev_fiscal_day
@property
def next_fiscal_day(self) -> FiscalDay:
""":returns: The next fiscal day"""
fiscal_day = FiscalDay(self.fiscal_year, self.fiscal_day)
return fiscal_day.next_fiscal_day
| () |
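
A hedged sketch of how the `_FiscalMixin` properties above surface on `FiscalDate`; it assumes the library's default calendar (fiscal year starting October 1 of the previous calendar year), so the printed values are illustrative only.

```python
from fiscalyear import FiscalDate

d = FiscalDate(2017, 1, 15)   # calendar date: January 15, 2017
print(d.fiscal_year)          # 2017 under the default October-start calendar
print(d.fiscal_quarter)       # 2 (Oct-Dec is Q1, Jan-Mar is Q2 by default)
print(d.fiscal_day)           # ordinal day within the fiscal year
print(d.next_fiscal_quarter)  # a FiscalQuarter instance
```
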
723,944 | fiscalyear | _Hashable | A class to make Fiscal objects hashable | class _Hashable:
"""A class to make Fiscal objects hashable"""
def __hash__(self) -> int:
"""Unique hash of an object instance based on __slots__
:returns: a unique hash
"""
return hash(tuple(map(lambda x: getattr(self, x), self.__slots__)))
| () |
723,946 | fiscalyear | _check_day | Check if ``day`` is a valid day of month.
:param month: The month to test
:param day: The day to test
:return: The day
:raises ValueError: If ``month`` or ``day`` is out of range
| def _check_day(month: int, day: int) -> int:
"""Check if ``day`` is a valid day of month.
:param month: The month to test
:param day: The day to test
:return: The day
:raises ValueError: If ``month`` or ``day`` is out of range
"""
month = _check_month(month)
# Find the last day of the month
# Use a non-leap year
max_day = calendar.monthrange(2001, month)[1]
if 1 <= day <= max_day:
return day
else:
raise ValueError(f"day {day} is out of range")
| (month: int, day: int) -> int |
723,947 | fiscalyear | _check_fiscal_day | Check if ``day`` is a valid day of the fiscal year.
:param fiscal_year: The fiscal year to test
:param fiscal_day: The fiscal day to test
:return: The fiscal day
:raises ValueError: If ``year`` or ``day`` is out of range
| def _check_fiscal_day(fiscal_year: int, fiscal_day: int) -> int:
"""Check if ``day`` is a valid day of the fiscal year.
:param fiscal_year: The fiscal year to test
:param fiscal_day: The fiscal day to test
:return: The fiscal day
:raises ValueError: If ``year`` or ``day`` is out of range
"""
fiscal_year = _check_year(fiscal_year)
# Find the length of the year
max_day = 366 if FiscalYear(fiscal_year).isleap else 365
if 1 <= fiscal_day <= max_day:
return fiscal_day
else:
raise ValueError(f"fiscal_day {fiscal_day} is out of range")
| (fiscal_year: int, fiscal_day: int) -> int |
723,948 | fiscalyear | _check_month | Check if ``month`` is a valid month.
:param month: The month to test
:return: The month
:raises ValueError: If ``month`` is out of range
| def _check_month(month: int) -> int:
"""Check if ``month`` is a valid month.
:param month: The month to test
:return: The month
:raises ValueError: If ``month`` is out of range
"""
if 1 <= month <= 12:
return month
else:
raise ValueError(f"month {month} is out of range")
| (month: int) -> int |
723,949 | fiscalyear | _check_quarter | Check if ``quarter`` is a valid quarter.
:param quarter: The quarter to test
:return: The quarter
:raises ValueError: If ``quarter`` is out of range
| def _check_quarter(quarter: int) -> int:
"""Check if ``quarter`` is a valid quarter.
:param quarter: The quarter to test
:return: The quarter
:raises ValueError: If ``quarter`` is out of range
"""
if MIN_QUARTER <= quarter <= MAX_QUARTER:
return quarter
else:
raise ValueError(f"quarter {quarter} is out of range")
| (quarter: int) -> int |
723,950 | fiscalyear | _check_year | Check if ``year`` is a valid year.
:param year: The year to test
:return: The year
:raises ValueError: If ``year`` is out of range
| def _check_year(year: int) -> int:
"""Check if ``year`` is a valid year.
:param year: The year to test
:return: The year
:raises ValueError: If ``year`` is out of range
"""
if datetime.MINYEAR <= year <= datetime.MAXYEAR:
return year
else:
raise ValueError(f"year {year} is out of range")
| (year: int) -> int |
723,951 | fiscalyear | _validate_fiscal_calendar_params | Raise an Exception if the calendar parameters are invalid.
:param start_year: Relationship between the start of the fiscal year and
the calendar year. Possible values: ``'previous'`` or ``'same'``.
:param start_month: The first month of the fiscal year
:param start_day: The first day of the first month of the fiscal year
:raises ValueError: If ``start_year`` is not ``'previous'`` or ``'same'``
:raises ValueError: If ``start_month`` or ``start_day`` is out of range
| def _validate_fiscal_calendar_params(
start_year: str, start_month: int, start_day: int
) -> None:
"""Raise an Exception if the calendar parameters are invalid.
:param start_year: Relationship between the start of the fiscal year and
the calendar year. Possible values: ``'previous'`` or ``'same'``.
:param start_month: The first month of the fiscal year
:param start_day: The first day of the first month of the fiscal year
:raises ValueError: If ``start_year`` is not ``'previous'`` or ``'same'``
:raises ValueError: If ``start_month`` or ``start_day`` is out of range
"""
if start_year not in ["previous", "same"]:
msg = f"'start_year' must be either 'previous' or 'same', not: '{start_year}'"
raise ValueError(msg)
_check_day(start_month, start_day)
| (start_year: str, start_month: int, start_day: int) -> NoneType |
723,956 | fiscalyear | fiscal_calendar | A context manager that lets you modify the start of the fiscal calendar
inside the scope of a with-statement.
:param start_year: Relationship between the start of the fiscal year and
the calendar year. Possible values: ``'previous'`` or ``'same'``.
:param start_month: The first month of the fiscal year
:param start_day: The first day of the first month of the fiscal year
:raises ValueError: If ``start_year`` is not ``'previous'`` or ``'same'``
:raises ValueError: If ``start_month`` or ``start_day`` is out of range
| @property
def next_fiscal_year(self) -> "FiscalYear":
""":returns: The next fiscal year"""
return FiscalYear(self._fiscal_year + 1)
| (start_year: Optional[str] = None, start_month: Optional[int] = None, start_day: Optional[int] = None) -> Iterator[NoneType] |
723,957 | fiscalyear | setup_fiscal_calendar | Modify the start of the fiscal calendar.
:param start_year: Relationship between the start of the fiscal year and
the calendar year. Possible values: ``'previous'`` or ``'same'``.
:param start_month: The first month of the fiscal year
:param start_day: The first day of the first month of the fiscal year
:raises ValueError: If ``start_year`` is not ``'previous'`` or ``'same'``
:raises ValueError: If ``start_month`` or ``start_day`` is out of range
| def setup_fiscal_calendar(
start_year: Optional[str] = None,
start_month: Optional[int] = None,
start_day: Optional[int] = None,
) -> None:
"""Modify the start of the fiscal calendar.
:param start_year: Relationship between the start of the fiscal year and
the calendar year. Possible values: ``'previous'`` or ``'same'``.
:param start_month: The first month of the fiscal year
:param start_day: The first day of the first month of the fiscal year
:raises ValueError: If ``start_year`` is not ``'previous'`` or ``'same'``
:raises ValueError: If ``start_month`` or ``start_day`` is out of range
"""
global START_YEAR, START_MONTH, START_DAY
# If arguments are omitted, use the currently active values.
start_year = START_YEAR if start_year is None else start_year
start_month = START_MONTH if start_month is None else start_month
start_day = START_DAY if start_day is None else start_day
_validate_fiscal_calendar_params(start_year, start_month, start_day)
START_YEAR = start_year
START_MONTH = start_month
START_DAY = start_day
| (start_year: Optional[str] = None, start_month: Optional[int] = None, start_day: Optional[int] = None) -> NoneType |
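
A brief sketch of the two configuration entry points documented above: `setup_fiscal_calendar` changes the module-level start globally, while `fiscal_calendar` (per its docstring and signature) scopes the change to a with-block. The April and June starts below are arbitrary examples.

```python
import fiscalyear

# Switch globally to a fiscal year that starts April 1 of the same calendar year.
fiscalyear.setup_fiscal_calendar(start_year='same', start_month=4, start_day=1)

# Temporarily use a June 1 start; the previous settings are restored on exit.
with fiscalyear.fiscal_calendar(start_month=6):
    print(fiscalyear.FiscalYear(2020).start)
```
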
723,958 | displayfx | DisplayFx | null | class DisplayFx:
def __init__(self, p_max_val, p_verbose=False, p_msg='', p_bar_len=50):
self.bar_end_pos = 0
self.bar_start_pos = 0
self.bar_len = p_bar_len
self.calibrate = 0
self.leader_str = ''
self.marker_len = 0
self.markers = []
self.marker_slice = 0
self.max_val = p_max_val
self.progress = 0
self.silent = p_verbose
self.msg = p_msg
self.bar_len = max(self.bar_len, 20)
self.leader_str = '{:.<{leaderLen}}'.format('', leaderLen=self.bar_len)
if self.max_val <= 1:
self.markers = ['100%']
elif self.max_val == 2:
self.markers = ['50%', '100%']
elif self.max_val == 3:
self.markers = ['33%', '67%', '100%']
elif self.max_val == 4:
self.markers = ['25%', '50%', '75%', '100%']
else:
self.markers = ['20%', '40%', '60%', '80%', '100%']
self.marker_qty = len(self.markers)
self.marker_slice = self.bar_len / self.marker_qty
# self.remInc = ( self.bar_len / self.marker_qty ) - self.marker_slice
for i in range(self.marker_qty):
self.marker_len = len(self.markers[i])
self.bar_end_pos = round(self.marker_slice * (i + 1))
self.leader_str = (
self.leader_str[: self.bar_end_pos - self.marker_len]
+ self.markers[i]
+ self.leader_str[self.bar_end_pos :]
)
if self.max_val >= self.bar_len:
self.marker_slice = self.bar_len / self.max_val
if not self.silent:
print(f'{self.msg}', end='', flush=True)
if self.max_val == 0:
print(self.leader_str)
# end __init__
def update(self, p_i):
if not self.silent:
# self.barCurrPos = 0
if p_i == 0:
self.calibrate = 1
self.progress = (p_i + self.calibrate) / self.max_val
self.bar_end_pos = round(self.progress * self.bar_len)
if self.bar_end_pos > self.bar_start_pos:
print(
self.leader_str[self.bar_start_pos : self.bar_end_pos],
end='',
flush=True,
)
self.bar_start_pos = self.bar_end_pos
# self.marker_slice += self.marker_slice
if p_i + self.calibrate == self.max_val:
print()
| (p_max_val, p_verbose=False, p_msg='', p_bar_len=50) |
723,959 | displayfx | __init__ | null | def __init__(self, p_max_val, p_verbose=False, p_msg='', p_bar_len=50):
self.bar_end_pos = 0
self.bar_start_pos = 0
self.bar_len = p_bar_len
self.calibrate = 0
self.leader_str = ''
self.marker_len = 0
self.markers = []
self.marker_slice = 0
self.max_val = p_max_val
self.progress = 0
self.silent = p_verbose
self.msg = p_msg
self.bar_len = max(self.bar_len, 20)
self.leader_str = '{:.<{leaderLen}}'.format('', leaderLen=self.bar_len)
if self.max_val <= 1:
self.markers = ['100%']
elif self.max_val == 2:
self.markers = ['50%', '100%']
elif self.max_val == 3:
self.markers = ['33%', '67%', '100%']
elif self.max_val == 4:
self.markers = ['25%', '50%', '75%', '100%']
else:
self.markers = ['20%', '40%', '60%', '80%', '100%']
self.marker_qty = len(self.markers)
self.marker_slice = self.bar_len / self.marker_qty
# self.remInc = ( self.bar_len / self.marker_qty ) - self.marker_slice
for i in range(self.marker_qty):
self.marker_len = len(self.markers[i])
self.bar_end_pos = round(self.marker_slice * (i + 1))
self.leader_str = (
self.leader_str[: self.bar_end_pos - self.marker_len]
+ self.markers[i]
+ self.leader_str[self.bar_end_pos :]
)
if self.max_val >= self.bar_len:
self.marker_slice = self.bar_len / self.max_val
if not self.silent:
print(f'{self.msg}', end='', flush=True)
if self.max_val == 0:
print(self.leader_str)
| (self, p_max_val, p_verbose=False, p_msg='', p_bar_len=50) |
723,960 | displayfx | update | null | def update(self, p_i):
if not self.silent:
# self.barCurrPos = 0
if p_i == 0:
self.calibrate = 1
self.progress = (p_i + self.calibrate) / self.max_val
self.bar_end_pos = round(self.progress * self.bar_len)
if self.bar_end_pos > self.bar_start_pos:
print(
self.leader_str[self.bar_start_pos : self.bar_end_pos],
end='',
flush=True,
)
self.bar_start_pos = self.bar_end_pos
# self.marker_slice += self.marker_slice
if p_i + self.calibrate == self.max_val:
print()
| (self, p_i) |
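
A minimal usage sketch inferred from the `DisplayFx` code above, assuming the class is importable directly from the `displayfx` package: construct it with the number of iterations, then call `update(i)` once per loop iteration (note that `p_verbose=True` is stored as `silent` and suppresses output).

```python
from displayfx import DisplayFx

items = range(250)  # hypothetical work items
fx = DisplayFx(len(items), p_msg='Processing: ')
for i, item in enumerate(items):
    # ... do the real work for `item` here ...
    fx.update(i)    # extends the bar whenever the next slice of progress is reached
```
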
723,961 | pytest_xvfb | Xvfb | null | class Xvfb:
def __init__(self, config: pytest.Config) -> None:
self.width = int(config.getini("xvfb_width"))
self.height = int(config.getini("xvfb_height"))
self.colordepth = int(config.getini("xvfb_colordepth"))
self.args = config.getini("xvfb_args") or []
self.xauth = config.getini("xvfb_xauth")
self.backend = config.getoption("--xvfb-backend")
self.display = None
self._virtual_display = None
def start(self) -> None:
self._virtual_display = pyvirtualdisplay.Display( # type: ignore[attr-defined]
backend=self.backend,
size=(self.width, self.height),
color_depth=self.colordepth,
use_xauth=self.xauth,
extra_args=self.args,
)
assert self._virtual_display is not None # mypy
self._virtual_display.start()
self.display = self._virtual_display.display
assert self._virtual_display.is_alive()
def stop(self) -> None:
if self.display is not None: # starting worked
self._virtual_display.stop()
| (config: 'pytest.Config') -> 'None' |
723,962 | pytest_xvfb | __init__ | null | def __init__(self, config: pytest.Config) -> None:
self.width = int(config.getini("xvfb_width"))
self.height = int(config.getini("xvfb_height"))
self.colordepth = int(config.getini("xvfb_colordepth"))
self.args = config.getini("xvfb_args") or []
self.xauth = config.getini("xvfb_xauth")
self.backend = config.getoption("--xvfb-backend")
self.display = None
self._virtual_display = None
| (self, config: _pytest.config.Config) -> NoneType |
723,963 | pytest_xvfb | start | null | def start(self) -> None:
self._virtual_display = pyvirtualdisplay.Display( # type: ignore[attr-defined]
backend=self.backend,
size=(self.width, self.height),
color_depth=self.colordepth,
use_xauth=self.xauth,
extra_args=self.args,
)
assert self._virtual_display is not None # mypy
self._virtual_display.start()
self.display = self._virtual_display.display
assert self._virtual_display.is_alive()
| (self) -> NoneType |
723,964 | pytest_xvfb | stop | null | def stop(self) -> None:
if self.display is not None: # starting worked
self._virtual_display.stop()
| (self) -> NoneType |
723,965 | pytest_xvfb | XvfbExitedError | null | class XvfbExitedError(Exception):
pass
| null |
723,967 | pytest_xvfb | has_executable | null | def has_executable(name: str) -> bool:
# http://stackoverflow.com/a/28909933/2085149
return any(
os.access(os.path.join(path, name), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)
)
| (name: str) -> bool |
723,968 | pytest_xvfb | is_xdist_master | null | def is_xdist_master(config: pytest.Config) -> bool:
return config.getoption("dist", "no") != "no" and not os.environ.get(
"PYTEST_XDIST_WORKER"
)
| (config: _pytest.config.Config) -> bool |
723,971 | pytest_xvfb | pytest_addoption | null | def pytest_addoption(parser: pytest.Parser) -> None:
group = parser.getgroup("xvfb")
group.addoption("--no-xvfb", action="store_true", help="Disable Xvfb for tests.")
group.addoption(
"--xvfb-backend",
action="store",
choices=["xvfb", "xvnc", "xephyr"],
help="Use Xephyr or Xvnc instead of Xvfb for tests. Will be ignored if --no-xvfb is given.",
)
parser.addini("xvfb_width", "Width of the Xvfb display", default="800")
parser.addini("xvfb_height", "Height of the Xvfb display", default="600")
parser.addini("xvfb_colordepth", "Color depth of the Xvfb display", default="16")
parser.addini("xvfb_args", "Additional arguments for Xvfb", type="args")
parser.addini(
"xvfb_xauth",
"Generate an Xauthority token for Xvfb. Needs xauth.",
default=False,
type="bool",
)
| (parser: _pytest.config.argparsing.Parser) -> NoneType |
723,972 | pytest_xvfb | pytest_collection_modifyitems | null | def pytest_collection_modifyitems(items: list[pytest.Item]) -> None:
for item in items:
if item.get_closest_marker("no_xvfb") and xvfb_instance is not None:
skipif_marker = pytest.mark.skipif(True, reason="Skipped with Xvfb")
item.add_marker(skipif_marker)
| (items: list[_pytest.nodes.Item]) -> NoneType |
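
A small sketch of how the `no_xvfb` marker handled above is used from a test module; when the plugin has started an Xvfb display, such tests are skipped by `pytest_collection_modifyitems`.

```python
import pytest


@pytest.mark.no_xvfb
def test_needs_real_display():
    # Runs only when no Xvfb display was started by the plugin.
    assert True
```
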
723,973 | pytest_xvfb | pytest_configure | null | def pytest_configure(config: pytest.Config) -> None:
global xvfb_instance
no_xvfb = config.getoption("--no-xvfb") or is_xdist_master(config)
backend = config.getoption("--xvfb-backend")
if no_xvfb:
pass
elif backend is None and not has_executable("Xvfb"):
# soft fail
if sys.platform.startswith("linux") and "DISPLAY" in os.environ:
print(
"pytest-xvfb could not find Xvfb. "
"You can install it to prevent windows from being shown."
)
elif (
backend == "xvfb"
and not has_executable("Xvfb")
or backend == "xvnc"
and not has_executable("Xvnc")
or backend == "xephyr"
and not has_executable("Xephyr")
):
raise pytest.UsageError(f"xvfb backend {backend} requested but not installed.")
else:
xvfb_instance = Xvfb(config)
xvfb_instance.start()
config.addinivalue_line("markers", "no_xvfb: Skip test when using Xvfb")
| (config: _pytest.config.Config) -> NoneType |
723,975 | pytest_xvfb | shutdown_xvfb | null | def shutdown_xvfb() -> None:
if xvfb_instance is not None:
xvfb_instance.stop()
| () -> NoneType |
723,978 | coolname.exceptions | InitializationError | Custom exception for all generator initialization errors. | class InitializationError(Exception):
"""Custom exception for all generator initialization errors."""
pass
| null |
723,979 | coolname.impl | RandomGenerator |
This class provides random name generation interface.
Create an instance of this class if you want to create custom
configuration.
If default implementation is enough, just use `generate`,
`generate_slug` and other exported functions.
| class RandomGenerator:
"""
This class provides random name generation interface.
Create an instance of this class if you want to create custom
configuration.
If default implementation is enough, just use `generate`,
`generate_slug` and other exported functions.
"""
def __init__(self, config, rand=None):
self.random = rand # sets _random and _randrange
config = dict(config)
_validate_config(config)
lists = {}
_create_lists(config, lists, 'all', [])
self._lists = {}
for key, listdef in config.items():
# Other generators independent from 'all'
if listdef.get(_CONF.FIELD.GENERATOR) and key not in lists:
_create_lists(config, lists, key, [])
if key == 'all' or key.isdigit() or listdef.get(_CONF.FIELD.GENERATOR):
if key.isdigit():
pattern = int(key)
elif key == 'all':
pattern = None
else:
pattern = key
self._lists[pattern] = lists[key]
self._lists[None] = self._lists[None].squash(True, {})
# Should we avoid duplicates?
try:
ensure_unique = config['all'][_CONF.FIELD.ENSURE_UNIQUE]
if not isinstance(ensure_unique, bool):
raise ValueError(f'expected boolean, got {ensure_unique!r}')
self._ensure_unique = ensure_unique
except KeyError:
self._ensure_unique = False
except ValueError as ex:
raise ConfigurationError(f'Invalid {_CONF.FIELD.ENSURE_UNIQUE} value: {ex}')
# Should we avoid duplicating prefixes?
try:
self._check_prefix = int(config['all'][_CONF.FIELD.ENSURE_UNIQUE_PREFIX])
if self._check_prefix <= 0:
raise ValueError(f'expected a positive integer, got {self._check_prefix!r}')
except KeyError:
self._check_prefix = None
except ValueError as ex:
raise ConfigurationError(f'Invalid {_CONF.FIELD.ENSURE_UNIQUE_PREFIX} value: {ex}')
# Get max slug length
try:
self._max_slug_length = int(config['all'][_CONF.FIELD.MAX_SLUG_LENGTH])
except KeyError:
self._max_slug_length = None
except ValueError as ex:
raise ConfigurationError(f'Invalid {_CONF.FIELD.MAX_SLUG_LENGTH} value: {ex}')
# Make sure that generate() does not go into long loop.
# Default generator is a special case, we don't need check.
if (not config['all'].get('__nocheck') and
self._ensure_unique or self._check_prefix or self._max_slug_length):
self._check_not_hanging()
# Fire it up
assert self.generate_slug()
@property
def random(self):
return self._random
@random.setter
def random(self, rand):
if rand:
self._random = rand
else:
self._random = random
self._randrange = self._random.randrange
def generate(self, pattern: Union[None, str, int] = None) -> List[str]:
"""
Generates and returns random name as a list of strings.
"""
lst = self._lists[pattern]
while True:
result = lst[self._randrange(lst.length)]
# 1. Check that there are no duplicates
# 2. Check that there are no duplicate prefixes
# 3. Check max slug length
n = len(result)
if (self._ensure_unique and len(set(result)) != n or
self._check_prefix and len(set(x[:self._check_prefix] for x in result)) != n or
self._max_slug_length and sum(len(x) for x in result) + n - 1 > self._max_slug_length):
continue
return result
def generate_slug(self, pattern: Union[None, str, int] = None) -> str:
"""
Generates and returns random name as a slug.
"""
return '-'.join(self.generate(pattern))
def get_combinations_count(self, pattern: Union[None, str, int] = None) -> int:
"""
Returns total number of unique combinations
for the given pattern.
"""
lst = self._lists[pattern]
return lst.length
def _dump(self, stream, pattern=None, object_ids=False):
"""Dumps current tree into a text stream."""
return self._lists[pattern]._dump(stream, '', object_ids=object_ids) # noqa
def _check_not_hanging(self):
"""
Rough check that generate() will not hang or be very slow.
Raises ConfigurationError if generate() spends too much time in retry loop.
Issues a warnings.warn() if there is a risk of slowdown.
"""
# (field_name, field_value, predicate, warning_msg, exception_msg)
# predicate(g) is a function that returns True if generated combination g must be rejected,
# see checks in generate()
checks = []
# ensure_unique can lead to infinite loops for some tiny erroneous configs
if self._ensure_unique:
checks.append((
_CONF.FIELD.ENSURE_UNIQUE,
self._ensure_unique,
lambda g: len(set(g)) != len(g),
'{generate} may be slow because a significant fraction of combinations contain repeating words and {field_name} is set', # noqa
'Impossible to generate with {field_name}'
))
#
# max_slug_length can easily slow down or block generation if set too small
if self._max_slug_length:
checks.append((
_CONF.FIELD.MAX_SLUG_LENGTH,
self._max_slug_length,
lambda g: sum(len(x) for x in g) + len(g) - 1 > self._max_slug_length,
'{generate} may be slow because a significant fraction of combinations exceed {field_name}={field_value}', # noqa
'Impossible to generate with {field_name}={field_value}'
))
# Perform the relevant checks for all generators, starting from 'all'
n = 100
warning_threshold = 20 # fail probability: 0.04 for 2 attempts, 0.008 for 3 attempts, etc.
for lst_id, lst in sorted(self._lists.items(), key=lambda x: '' if x is None else str(x)):
context = {'generate': 'coolname.generate({})'.format('' if lst_id is None else repr(lst_id))}
# For each generator, perform checks
for field_name, field_value, predicate, warning_msg, exception_msg in checks:
context.update({'field_name': field_name, 'field_value': field_value})
bad_count = 0
for _ in range(n):
if predicate(lst[randrange(lst.length)]):
bad_count += 1
if bad_count >= n:
raise ConfigurationError(exception_msg.format(**context))
elif bad_count >= warning_threshold:
import warnings
warnings.warn(warning_msg.format(**context))
| (config, rand=None) |
723,980 | coolname.impl | __init__ | null | def __init__(self, config, rand=None):
self.random = rand # sets _random and _randrange
config = dict(config)
_validate_config(config)
lists = {}
_create_lists(config, lists, 'all', [])
self._lists = {}
for key, listdef in config.items():
# Other generators independent from 'all'
if listdef.get(_CONF.FIELD.GENERATOR) and key not in lists:
_create_lists(config, lists, key, [])
if key == 'all' or key.isdigit() or listdef.get(_CONF.FIELD.GENERATOR):
if key.isdigit():
pattern = int(key)
elif key == 'all':
pattern = None
else:
pattern = key
self._lists[pattern] = lists[key]
self._lists[None] = self._lists[None].squash(True, {})
# Should we avoid duplicates?
try:
ensure_unique = config['all'][_CONF.FIELD.ENSURE_UNIQUE]
if not isinstance(ensure_unique, bool):
raise ValueError(f'expected boolean, got {ensure_unique!r}')
self._ensure_unique = ensure_unique
except KeyError:
self._ensure_unique = False
except ValueError as ex:
raise ConfigurationError(f'Invalid {_CONF.FIELD.ENSURE_UNIQUE} value: {ex}')
# Should we avoid duplicating prefixes?
try:
self._check_prefix = int(config['all'][_CONF.FIELD.ENSURE_UNIQUE_PREFIX])
if self._check_prefix <= 0:
raise ValueError(f'expected a positive integer, got {self._check_prefix!r}')
except KeyError:
self._check_prefix = None
except ValueError as ex:
raise ConfigurationError(f'Invalid {_CONF.FIELD.ENSURE_UNIQUE_PREFIX} value: {ex}')
# Get max slug length
try:
self._max_slug_length = int(config['all'][_CONF.FIELD.MAX_SLUG_LENGTH])
except KeyError:
self._max_slug_length = None
except ValueError as ex:
raise ConfigurationError(f'Invalid {_CONF.FIELD.MAX_SLUG_LENGTH} value: {ex}')
# Make sure that generate() does not go into long loop.
# Default generator is a special case, we don't need check.
if (not config['all'].get('__nocheck') and
self._ensure_unique or self._check_prefix or self._max_slug_length):
self._check_not_hanging()
# Fire it up
assert self.generate_slug()
| (self, config, rand=None) |
723,981 | coolname.impl | _check_not_hanging |
Rough check that generate() will not hang or be very slow.
Raises ConfigurationError if generate() spends too much time in retry loop.
Issues a warnings.warn() if there is a risk of slowdown.
| def _check_not_hanging(self):
"""
Rough check that generate() will not hang or be very slow.
Raises ConfigurationError if generate() spends too much time in retry loop.
Issues a warnings.warn() if there is a risk of slowdown.
"""
# (field_name, field_value, predicate, warning_msg, exception_msg)
# predicate(g) is a function that returns True if generated combination g must be rejected,
# see checks in generate()
checks = []
# ensure_unique can lead to infinite loops for some tiny erroneous configs
if self._ensure_unique:
checks.append((
_CONF.FIELD.ENSURE_UNIQUE,
self._ensure_unique,
lambda g: len(set(g)) != len(g),
'{generate} may be slow because a significant fraction of combinations contain repeating words and {field_name} is set', # noqa
'Impossible to generate with {field_name}'
))
#
# max_slug_length can easily slow down or block generation if set too small
if self._max_slug_length:
checks.append((
_CONF.FIELD.MAX_SLUG_LENGTH,
self._max_slug_length,
lambda g: sum(len(x) for x in g) + len(g) - 1 > self._max_slug_length,
'{generate} may be slow because a significant fraction of combinations exceed {field_name}={field_value}', # noqa
'Impossible to generate with {field_name}={field_value}'
))
# Perform the relevant checks for all generators, starting from 'all'
n = 100
warning_threshold = 20 # fail probability: 0.04 for 2 attempts, 0.008 for 3 attempts, etc.
for lst_id, lst in sorted(self._lists.items(), key=lambda x: '' if x is None else str(x)):
context = {'generate': 'coolname.generate({})'.format('' if lst_id is None else repr(lst_id))}
# For each generator, perform checks
for field_name, field_value, predicate, warning_msg, exception_msg in checks:
context.update({'field_name': field_name, 'field_value': field_value})
bad_count = 0
for _ in range(n):
if predicate(lst[randrange(lst.length)]):
bad_count += 1
if bad_count >= n:
raise ConfigurationError(exception_msg.format(**context))
elif bad_count >= warning_threshold:
import warnings
warnings.warn(warning_msg.format(**context))
| (self) |
723,982 | coolname.impl | _dump | Dumps current tree into a text stream. | def _dump(self, stream, pattern=None, object_ids=False):
"""Dumps current tree into a text stream."""
return self._lists[pattern]._dump(stream, '', object_ids=object_ids) # noqa
| (self, stream, pattern=None, object_ids=False) |
723,983 | coolname.impl | generate |
Generates and returns random name as a list of strings.
| def generate(self, pattern: Union[None, str, int] = None) -> List[str]:
"""
Generates and returns random name as a list of strings.
"""
lst = self._lists[pattern]
while True:
result = lst[self._randrange(lst.length)]
# 1. Check that there are no duplicates
# 2. Check that there are no duplicate prefixes
# 3. Check max slug length
n = len(result)
if (self._ensure_unique and len(set(result)) != n or
self._check_prefix and len(set(x[:self._check_prefix] for x in result)) != n or
self._max_slug_length and sum(len(x) for x in result) + n - 1 > self._max_slug_length):
continue
return result
| (self, pattern: Union[NoneType, str, int] = None) -> List[str] |
723,984 | coolname.impl | generate_slug |
Generates and returns random name as a slug.
| def generate_slug(self, pattern: Union[None, str, int] = None) -> str:
"""
Generates and returns random name as a slug.
"""
return '-'.join(self.generate(pattern))
| (self, pattern: Union[NoneType, str, int] = None) -> str |
723,985 | coolname.impl | get_combinations_count |
Returns total number of unique combinations
for the given pattern.
| def get_combinations_count(self, pattern: Union[None, str, int] = None) -> int:
"""
Returns total number of unique combinations
for the given pattern.
"""
lst = self._lists[pattern]
return lst.length
| (self, pattern: Union[NoneType, str, int] = None) -> int |
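
A hedged example of driving `RandomGenerator` with a custom configuration. The schema keys used here (`type`, `lists`, `words`) follow coolname's documented custom-config format as best understood; treat them, and the word lists, as illustrative assumptions.

```python
from coolname import RandomGenerator

generator = RandomGenerator({
    'all': {
        'type': 'cartesian',
        'lists': ['adjectives', 'animals'],
    },
    'adjectives': {'type': 'words', 'words': ['swift', 'calm', 'bright']},
    'animals': {'type': 'words', 'words': ['otter', 'falcon', 'lynx']},
})

print(generator.generate_slug())           # e.g. 'calm-falcon'
print(generator.get_combinations_count())  # 9 (3 adjectives x 3 animals)
```
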
723,990 | coolname.impl | replace_random | Replaces random number generator for the default RandomGenerator instance. | def replace_random(rand):
"""Replaces random number generator for the default RandomGenerator instance."""
_default.random = rand
| (rand) |
723,991 | pdfrw.objects.pdfdict | IndirectPdfDict | IndirectPdfDict is a convenience class. You could
create a direct PdfDict and then set indirect = True on it,
or you could just create an IndirectPdfDict.
| class IndirectPdfDict(PdfDict):
''' IndirectPdfDict is a convenience class. You could
create a direct PdfDict and then set indirect = True on it,
or you could just create an IndirectPdfDict.
'''
indirect = True
| (*args, **kw) |
723,992 | pdfrw.objects.pdfdict | __getattr__ | If the attribute doesn't exist on the dictionary object,
try to slap a '/' in front of it and get it out
of the actual dictionary itself.
| def __getattr__(self, name, PdfName=PdfName):
''' If the attribute doesn't exist on the dictionary object,
try to slap a '/' in front of it and get it out
of the actual dictionary itself.
'''
return self.get(PdfName(name))
| (self, name, PdfName=<pdfrw.objects.pdfname.PdfName object at 0x7ff9a47dfeb0>) |
723,994 | pdfrw.objects.pdfdict | __init__ | null | def __init__(self, *args, **kw):
if args:
if len(args) == 1:
args = args[0]
self.update(args)
if isinstance(args, PdfDict):
self.indirect = args.indirect
self._stream = args.stream
for key, value in iteritems(kw):
setattr(self, key, value)
| (self, *args, **kw) |
723,995 | pdfrw.objects.pdfdict | __iter__ | null | def __iter__(self):
for key, value in self.iteritems():
yield key
| (self) |
723,996 | pdfrw.objects.pdfdict | __setattr__ | Set an attribute on the dictionary. Handle the keywords
indirect, stream, and _stream specially (for content objects)
| def __setattr__(self, name, value, special=_special.get,
PdfName=PdfName, vars=vars):
''' Set an attribute on the dictionary. Handle the keywords
indirect, stream, and _stream specially (for content objects)
'''
info = special(name)
if info is None:
self[PdfName(name)] = value
else:
name, setlen = info
vars(self)[name] = value
if setlen:
notnone = value is not None
self.Length = notnone and PdfObject(len(value)) or None
| (self, name, value, special=<built-in method get of dict object at 0x7ff9a47ffa40>, PdfName=<pdfrw.objects.pdfname.PdfName object at 0x7ff9a47dfeb0>, vars=<built-in function vars>) |
723,997 | pdfrw.objects.pdfdict | __setitem__ | null | def __setitem__(self, name, value, setter=dict.__setitem__,
BasePdfName=BasePdfName, isinstance=isinstance):
if not isinstance(name, BasePdfName):
raise PdfParseError('Dict key %s is not a PdfName' % repr(name))
if value is not None:
setter(self, name, value)
elif name in self:
del self[name]
| (self, name, value, setter=<slot wrapper '__setitem__' of 'dict' objects>, BasePdfName=<class 'pdfrw.objects.pdfname.BasePdfName'>, isinstance=<built-in function isinstance>) |
723,999 | pdfrw.objects.pdfdict | get | Get a value out of the dictionary,
after resolving any indirect objects.
| def get(self, key, dictget=dict.get, isinstance=isinstance,
PdfIndirect=PdfIndirect):
''' Get a value out of the dictionary,
after resolving any indirect objects.
'''
value = dictget(self, key)
if isinstance(value, PdfIndirect):
# We used to use self[key] here, but that does an
# unwanted check on the type of the key (github issue #98).
# Python will keep the old key object in the dictionary,
# so that check is not necessary.
value = value.real_value()
if value is not None:
dict.__setitem__(self, key, value)
else:
del self[key]
return value
| (self, key, dictget=<method 'get' of 'dict' objects>, isinstance=<built-in function isinstance>, PdfIndirect=<class 'pdfrw.objects.pdfindirect.PdfIndirect'>) |
724,001 | pdfrw.objects.pdfdict | iteritems | Iterate over the dictionary, resolving any unresolved objects
| def iteritems(self, dictiter=iteritems,
isinstance=isinstance, PdfIndirect=PdfIndirect,
BasePdfName=BasePdfName):
''' Iterate over the dictionary, resolving any unresolved objects
'''
for key, value in list(dictiter(self)):
if isinstance(value, PdfIndirect):
self[key] = value = value.real_value()
if value is not None:
if not isinstance(key, BasePdfName):
raise PdfParseError('Dict key %s is not a PdfName' %
repr(key))
yield key, value
| (self, dictiter=<method 'items' of 'dict' objects>, isinstance=<built-in function isinstance>, PdfIndirect=<class 'pdfrw.objects.pdfindirect.PdfIndirect'>, BasePdfName=<class 'pdfrw.objects.pdfname.BasePdfName'>) |
724,002 | pdfrw.objects.pdfdict | iterkeys | null | def iterkeys(self):
return iter(self)
| (self) |
724,003 | pdfrw.objects.pdfdict | itervalues | null | def itervalues(self):
for key, value in self.iteritems():
yield value
| (self) |
724,004 | pdfrw.objects.pdfdict | keys | null | def keys(self):
return list((key for key, value in self.iteritems()))
| (self) |
724,005 | pdfrw.objects.pdfdict | pop | null | def pop(self, key):
value = self.get(key)
del self[key]
return value
| (self, key) |
724,006 | pdfrw.objects.pdfdict | popitem | null | def popitem(self):
key, value = dict.popitem(self)
if isinstance(value, PdfIndirect):
value = value.real_value()
return value
| (self) |
724,007 | pdfrw.objects.pdfdict | values | null | def values(self):
return list((value for key, value in self.iteritems()))
| (self) |
724,008 | pdfrw.pagemerge | PageMerge | A PageMerge object can have 0 or 1 underlying pages
(that get edited with the results of the merge)
and 0-n RectXObjs that can be applied before or
after the underlying page.
| class PageMerge(list):
''' A PageMerge object can have 0 or 1 underlying pages
(that get edited with the results of the merge)
and 0-n RectXObjs that can be applied before or
after the underlying page.
'''
page = None
mbox = None
cbox = None
resources = None
rotate = None
contents = None
def __init__(self, page=None):
if page is not None:
self.setpage(page)
def setpage(self, page):
if page.Type != PdfName.Page:
raise TypeError("Expected page")
self.append(None) # Placeholder
self.page = page
inheritable = page.inheritable
self.mbox = inheritable.MediaBox
self.cbox = inheritable.CropBox
self.resources = inheritable.Resources
self.rotate = inheritable.Rotate
self.contents = page.Contents
def __add__(self, other):
if isinstance(other, dict):
other = [other]
for other in other:
self.add(other)
return self
def add(self, obj, prepend=False, **kw):
if kw:
obj = RectXObj(obj, **kw)
elif obj.Type == PdfName.Page:
obj = RectXObj(obj)
if prepend:
self.insert(0, obj)
else:
self.append(obj)
return self
def render(self):
def do_xobjs(xobj_list, restore_first=False):
content = ['Q'] if restore_first else []
for obj in xobj_list:
index = PdfName('pdfrw_%d' % (key_offset + len(xobjs)))
if xobjs.setdefault(index, obj) is not obj:
raise KeyError("XObj key %s already in use" % index)
content.append('%s Do' % index)
return PdfDict(indirect=True, stream='\n'.join(content))
mbox = self.mbox
cbox = self.cbox
page = self.page
old_contents = self.contents
resources = self.resources or PdfDict()
key_offset = 0
xobjs = resources.XObject
if xobjs is None:
xobjs = resources.XObject = PdfDict()
else:
allkeys = xobjs.keys()
if allkeys:
keys = (x for x in allkeys if x.startswith('/pdfrw_'))
keys = (x for x in keys if x[7:].isdigit())
keys = sorted(keys, key=lambda x: int(x[7:]))
key_offset = (int(keys[-1][7:]) + 1) if keys else 0
key_offset -= len(allkeys)
if old_contents is None:
new_contents = do_xobjs(self)
else:
isdict = isinstance(old_contents, PdfDict)
old_contents = [old_contents] if isdict else old_contents
new_contents = PdfArray()
index = self.index(None)
if index:
new_contents.append(do_xobjs(self[:index]))
index += 1
if index < len(self):
# There are elements to add after the original page contents,
# so push the graphics state to the stack. Restored below.
new_contents.append(PdfDict(indirect=True, stream='q'))
new_contents.extend(old_contents)
if index < len(self):
# Restore graphics state and add other elements.
new_contents.append(do_xobjs(self[index:], restore_first=True))
if mbox is None:
cbox = None
mbox = self.xobj_box
mbox[0] = min(0, mbox[0])
mbox[1] = min(0, mbox[1])
page = PdfDict(indirect=True) if page is None else page
page.Type = PdfName.Page
page.Resources = resources
page.MediaBox = mbox
page.CropBox = cbox
page.Rotate = self.rotate
page.Contents = new_contents
return page
@property
def xobj_box(self):
''' Return the smallest box that encloses every object
in the list.
'''
a, b, c, d = zip(*(xobj.box for xobj in self))
return PdfArray((min(a), min(b), max(c), max(d)))
| (page=None) |
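
A hedged watermarking sketch built on the `PageMerge` API shown above (`add`, `prepend`, `render`). The file names are placeholders, and `PdfReader`/`PdfWriter` are assumed to come from the same pdfrw package.

```python
from pdfrw import PdfReader, PdfWriter, PageMerge

reader = PdfReader('input.pdf')              # hypothetical input file
stamp = PdfReader('stamp.pdf').pages[0]      # hypothetical overlay page
watermark = PageMerge().add(stamp).render()  # render the stamp as a reusable XObject page

for page in reader.pages:
    # prepend=True draws the watermark underneath the existing page content
    PageMerge(page).add(watermark, prepend=True).render()

PdfWriter('output.pdf', trailer=reader).write()
```
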
724,009 | pdfrw.pagemerge | __add__ | null | def __add__(self, other):
if isinstance(other, dict):
other = [other]
for other in other:
self.add(other)
return self
| (self, other) |
724,010 | pdfrw.pagemerge | __init__ | null | def __init__(self, page=None):
if page is not None:
self.setpage(page)
| (self, page=None) |
724,011 | pdfrw.pagemerge | add | null | def add(self, obj, prepend=False, **kw):
if kw:
obj = RectXObj(obj, **kw)
elif obj.Type == PdfName.Page:
obj = RectXObj(obj)
if prepend:
self.insert(0, obj)
else:
self.append(obj)
return self
| (self, obj, prepend=False, **kw) |
724,012 | pdfrw.pagemerge | render | null | def render(self):
def do_xobjs(xobj_list, restore_first=False):
content = ['Q'] if restore_first else []
for obj in xobj_list:
index = PdfName('pdfrw_%d' % (key_offset + len(xobjs)))
if xobjs.setdefault(index, obj) is not obj:
raise KeyError("XObj key %s already in use" % index)
content.append('%s Do' % index)
return PdfDict(indirect=True, stream='\n'.join(content))
mbox = self.mbox
cbox = self.cbox
page = self.page
old_contents = self.contents
resources = self.resources or PdfDict()
key_offset = 0
xobjs = resources.XObject
if xobjs is None:
xobjs = resources.XObject = PdfDict()
else:
allkeys = xobjs.keys()
if allkeys:
keys = (x for x in allkeys if x.startswith('/pdfrw_'))
keys = (x for x in keys if x[7:].isdigit())
keys = sorted(keys, key=lambda x: int(x[7:]))
key_offset = (int(keys[-1][7:]) + 1) if keys else 0
key_offset -= len(allkeys)
if old_contents is None:
new_contents = do_xobjs(self)
else:
isdict = isinstance(old_contents, PdfDict)
old_contents = [old_contents] if isdict else old_contents
new_contents = PdfArray()
index = self.index(None)
if index:
new_contents.append(do_xobjs(self[:index]))
index += 1
if index < len(self):
# There are elements to add after the original page contents,
# so push the graphics state to the stack. Restored below.
new_contents.append(PdfDict(indirect=True, stream='q'))
new_contents.extend(old_contents)
if index < len(self):
# Restore graphics state and add other elements.
new_contents.append(do_xobjs(self[index:], restore_first=True))
if mbox is None:
cbox = None
mbox = self.xobj_box
mbox[0] = min(0, mbox[0])
mbox[1] = min(0, mbox[1])
page = PdfDict(indirect=True) if page is None else page
page.Type = PdfName.Page
page.Resources = resources
page.MediaBox = mbox
page.CropBox = cbox
page.Rotate = self.rotate
page.Contents = new_contents
return page
| (self) |
724,013 | pdfrw.pagemerge | setpage | null | def setpage(self, page):
if page.Type != PdfName.Page:
raise TypeError("Expected page")
self.append(None) # Placeholder
self.page = page
inheritable = page.inheritable
self.mbox = inheritable.MediaBox
self.cbox = inheritable.CropBox
self.resources = inheritable.Resources
self.rotate = inheritable.Rotate
self.contents = page.Contents
| (self, page) |
724,014 | pdfrw.objects.pdfarray | PdfArray | A PdfArray maps the PDF file array object into a Python list.
It has an indirect attribute which defaults to False.
| class PdfArray(list):
''' A PdfArray maps the PDF file array object into a Python list.
It has an indirect attribute which defaults to False.
'''
indirect = False
def __init__(self, source=[]):
self._resolve = self._resolver
self.extend(source)
def _resolver(self, isinstance=isinstance, enumerate=enumerate,
listiter=list.__iter__, PdfIndirect=PdfIndirect,
resolved=_resolved, PdfNull=PdfObject('null')):
for index, value in enumerate(list.__iter__(self)):
if isinstance(value, PdfIndirect):
value = value.real_value()
if value is None:
value = PdfNull
self[index] = value
self._resolve = resolved
def __getitem__(self, index, listget=list.__getitem__):
self._resolve()
return listget(self, index)
try:
def __getslice__(self, i, j, listget=list.__getslice__):
self._resolve()
return listget(self, i, j)
except AttributeError:
pass
def __iter__(self, listiter=list.__iter__):
self._resolve()
return listiter(self)
def count(self, item):
self._resolve()
return list.count(self, item)
def index(self, item):
self._resolve()
return list.index(self, item)
def remove(self, item):
self._resolve()
return list.remove(self, item)
def sort(self, *args, **kw):
self._resolve()
return list.sort(self, *args, **kw)
def pop(self, *args):
self._resolve()
return list.pop(self, *args)
def __reversed__(self):
self._resolve()
return list.__reversed__(self)
| (source=[]) |
724,015 | pdfrw.objects.pdfarray | __getitem__ | null | def __getitem__(self, index, listget=list.__getitem__):
self._resolve()
return listget(self, index)
| (self, index, listget=<method '__getitem__' of 'list' objects>) |
724,016 | pdfrw.objects.pdfarray | __init__ | null | def __init__(self, source=[]):
self._resolve = self._resolver
self.extend(source)
| (self, source=[]) |
724,017 | pdfrw.objects.pdfarray | __iter__ | null | def __iter__(self, listiter=list.__iter__):
self._resolve()
return listiter(self)
| (self, listiter=<slot wrapper '__iter__' of 'list' objects>) |
724,018 | pdfrw.objects.pdfarray | __reversed__ | null | def __reversed__(self):
self._resolve()
return list.__reversed__(self)
| (self) |
724,019 | pdfrw.objects.pdfarray | _resolver | null | def _resolver(self, isinstance=isinstance, enumerate=enumerate,
listiter=list.__iter__, PdfIndirect=PdfIndirect,
resolved=_resolved, PdfNull=PdfObject('null')):
for index, value in enumerate(list.__iter__(self)):
if isinstance(value, PdfIndirect):
value = value.real_value()
if value is None:
value = PdfNull
self[index] = value
self._resolve = resolved
| (self, isinstance=<built-in function isinstance>, enumerate=<class 'enumerate'>, listiter=<slot wrapper '__iter__' of 'list' objects>, PdfIndirect=<class 'pdfrw.objects.pdfindirect.PdfIndirect'>, resolved=<function _resolved at 0x7ff9a4801bd0>, PdfNull='null') |
724,020 | pdfrw.objects.pdfarray | count | null | def count(self, item):
self._resolve()
return list.count(self, item)
| (self, item) |
724,021 | pdfrw.objects.pdfarray | index | null | def index(self, item):
self._resolve()
return list.index(self, item)
| (self, item) |
724,022 | pdfrw.objects.pdfarray | pop | null | def pop(self, *args):
self._resolve()
return list.pop(self, *args)
| (self, *args) |
724,023 | pdfrw.objects.pdfarray | remove | null | def remove(self, item):
self._resolve()
return list.remove(self, item)
| (self, item) |
724,024 | pdfrw.objects.pdfarray | sort | null | def sort(self, *args, **kw):
self._resolve()
return list.sort(self, *args, **kw)
| (self, *args, **kw) |
724,025 | pdfrw.objects.pdfdict | PdfDict | PdfDict objects are subclassed dictionaries
with the following features:
- Every key in the dictionary starts with "/"
- A dictionary item can be deleted by assigning it to None
- Keys that (after the initial "/") conform to Python
naming conventions can also be accessed (set and retrieved)
as attributes of the dictionary. E.g. mydict.Page is the
same thing as mydict['/Page']
- Private attributes (not in the PDF space) can be set
on the dictionary object attribute dictionary by using
the private attribute:
mydict.private.foo = 3
mydict.foo = 5
x = mydict.foo # x will now contain 3
y = mydict['/foo'] # y will now contain 5
Most standard adobe dictionary keys start with an upper case letter,
so to avoid conflicts, it is best to start private attributes with
lower case letters.
- PdfDicts have the following read-only properties:
- private -- as discussed above, provides write access to
dictionary's attributes
- inheritable -- this creates and returns a "view" attribute
that will search through the object hierarchy for
any desired attribute, such as /Rotate or /MediaBox
- PdfDicts also have the following special attributes:
- indirect is not stored in the PDF dictionary, but in the object's
attribute dictionary
- stream is also stored in the object's attribute dictionary
and will also update the stream length.
- _stream will store in the object's attribute dictionary without
updating the stream length.
It is possible, for example, to have a PDF name such as "/indirect"
or "/stream", but you cannot access such a name as an attribute:
mydict.indirect -- accesses object's attribute dictionary
mydict["/indirect"] -- accesses actual PDF dictionary
| class PdfDict(dict):
''' PdfDict objects are subclassed dictionaries
with the following features:
- Every key in the dictionary starts with "/"
- A dictionary item can be deleted by assigning it to None
- Keys that (after the initial "/") conform to Python
naming conventions can also be accessed (set and retrieved)
as attributes of the dictionary. E.g. mydict.Page is the
same thing as mydict['/Page']
- Private attributes (not in the PDF space) can be set
on the dictionary object attribute dictionary by using
the private attribute:
mydict.private.foo = 3
mydict.foo = 5
x = mydict.foo # x will now contain 3
y = mydict['/foo'] # y will now contain 5
Most standard adobe dictionary keys start with an upper case letter,
so to avoid conflicts, it is best to start private attributes with
lower case letters.
- PdfDicts have the following read-only properties:
- private -- as discussed above, provides write access to
dictionary's attributes
- inheritable -- this creates and returns a "view" attribute
that will search through the object hierarchy for
any desired attribute, such as /Rotate or /MediaBox
- PdfDicts also have the following special attributes:
- indirect is not stored in the PDF dictionary, but in the object's
attribute dictionary
- stream is also stored in the object's attribute dictionary
and will also update the stream length.
- _stream will store in the object's attribute dictionary without
updating the stream length.
It is possible, for example, to have a PDF name such as "/indirect"
or "/stream", but you cannot access such a name as an attribute:
mydict.indirect -- accesses object's attribute dictionary
mydict["/indirect"] -- accesses actual PDF dictionary
'''
indirect = False
stream = None
_special = dict(indirect=('indirect', False),
stream=('stream', True),
_stream=('stream', False),
)
def __setitem__(self, name, value, setter=dict.__setitem__,
BasePdfName=BasePdfName, isinstance=isinstance):
if not isinstance(name, BasePdfName):
raise PdfParseError('Dict key %s is not a PdfName' % repr(name))
if value is not None:
setter(self, name, value)
elif name in self:
del self[name]
def __init__(self, *args, **kw):
if args:
if len(args) == 1:
args = args[0]
self.update(args)
if isinstance(args, PdfDict):
self.indirect = args.indirect
self._stream = args.stream
for key, value in iteritems(kw):
setattr(self, key, value)
def __getattr__(self, name, PdfName=PdfName):
''' If the attribute doesn't exist on the dictionary object,
try to slap a '/' in front of it and get it out
of the actual dictionary itself.
'''
return self.get(PdfName(name))
def get(self, key, dictget=dict.get, isinstance=isinstance,
PdfIndirect=PdfIndirect):
''' Get a value out of the dictionary,
after resolving any indirect objects.
'''
value = dictget(self, key)
if isinstance(value, PdfIndirect):
# We used to use self[key] here, but that does an
# unwanted check on the type of the key (github issue #98).
# Python will keep the old key object in the dictionary,
# so that check is not necessary.
value = value.real_value()
if value is not None:
dict.__setitem__(self, key, value)
else:
del self[key]
return value
def __getitem__(self, key):
return self.get(key)
def __setattr__(self, name, value, special=_special.get,
PdfName=PdfName, vars=vars):
''' Set an attribute on the dictionary. Handle the keywords
indirect, stream, and _stream specially (for content objects)
'''
info = special(name)
if info is None:
self[PdfName(name)] = value
else:
name, setlen = info
vars(self)[name] = value
if setlen:
notnone = value is not None
self.Length = notnone and PdfObject(len(value)) or None
def iteritems(self, dictiter=iteritems,
isinstance=isinstance, PdfIndirect=PdfIndirect,
BasePdfName=BasePdfName):
''' Iterate over the dictionary, resolving any unresolved objects
'''
for key, value in list(dictiter(self)):
if isinstance(value, PdfIndirect):
self[key] = value = value.real_value()
if value is not None:
if not isinstance(key, BasePdfName):
raise PdfParseError('Dict key %s is not a PdfName' %
repr(key))
yield key, value
def items(self):
return list(self.iteritems())
def itervalues(self):
for key, value in self.iteritems():
yield value
def values(self):
return list((value for key, value in self.iteritems()))
def keys(self):
return list((key for key, value in self.iteritems()))
def __iter__(self):
for key, value in self.iteritems():
yield key
def iterkeys(self):
return iter(self)
def copy(self):
return type(self)(self)
def pop(self, key):
value = self.get(key)
del self[key]
return value
def popitem(self):
key, value = dict.popitem(self)
if isinstance(value, PdfIndirect):
value = value.real_value()
return value
def inheritable(self):
''' Search through ancestors as needed for inheritable
dictionary items.
NOTE: You might think it would be a good idea
to cache this class, but then you'd have to worry
about it pointing to the wrong dictionary if you
made a copy of the object...
'''
return _DictSearch(self)
inheritable = property(inheritable)
def private(self):
''' Allows setting private metadata for use in
processing (not sent to PDF file).
See note on inheritable
'''
return _Private(self)
private = property(private)
| (*args, **kw) |
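
A short sketch of the attribute conventions the `PdfDict` docstring describes: `/`-prefixed keys via attribute access, `private` for Python-only metadata, and deletion by assigning None.

```python
from pdfrw import PdfDict, PdfName

d = PdfDict()
d.Type = PdfName.Page   # stored under the PDF key '/Type'
print(d['/Type'])       # /Page

d.private.note = 'kept on the Python object, not written to the PDF'
print(d.note)           # the private value shadows dictionary lookup

d.Type = None           # assigning None deletes the key
print('/Type' in d)     # False
```
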
724,042 | pdfrw.pdfreader | PdfReader | null | class PdfReader(PdfDict):
def findindirect(self, objnum, gennum, PdfIndirect=PdfIndirect, int=int):
''' Return a previously loaded indirect object, or create
a placeholder for it.
'''
key = int(objnum), int(gennum)
result = self.indirect_objects.get(key)
if result is None:
self.indirect_objects[key] = result = PdfIndirect(key)
self.deferred_objects.add(key)
result._loader = self.loadindirect
return result
def readarray(self, source, PdfArray=PdfArray):
''' Found a [ token. Parse the tokens after that.
'''
specialget = self.special.get
result = []
pop = result.pop
append = result.append
for value in source:
if value in ']R':
if value == ']':
break
generation = pop()
value = self.findindirect(pop(), generation)
else:
func = specialget(value)
if func is not None:
value = func(source)
append(value)
return PdfArray(result)
def readdict(self, source, PdfDict=PdfDict):
''' Found a << token. Parse the tokens after that.
'''
specialget = self.special.get
result = PdfDict()
next = source.next
tok = next()
while tok != '>>':
if not tok.startswith('/'):
source.error('Expected PDF /name object')
tok = next()
continue
key = tok
value = next()
func = specialget(value)
if func is not None:
value = func(source)
tok = next()
else:
tok = next()
if value.isdigit() and tok.isdigit():
tok2 = next()
if tok2 != 'R':
source.error('Expected "R" following two integers')
tok = tok2
continue
value = self.findindirect(value, tok)
tok = next()
result[key] = value
return result
def empty_obj(self, source, PdfObject=PdfObject):
''' Some silly git put an empty object in the
file. Back up so the caller sees the endobj.
'''
source.floc = source.tokstart
def badtoken(self, source):
''' Didn't see that coming.
'''
source.exception('Unexpected delimiter')
def findstream(self, obj, tok, source, len=len):
''' Figure out if there is a content stream
following an object, and return the start
pointer to the content stream if so.
(We can't read it yet, because we might not
know how long it is, because Length might
be an indirect object.)
'''
fdata = source.fdata
startstream = source.tokstart + len(tok)
gotcr = fdata[startstream] == '\r'
startstream += gotcr
gotlf = fdata[startstream] == '\n'
startstream += gotlf
if not gotlf:
if not gotcr:
source.error(r'stream keyword not followed by \n')
else:
source.warning(r"stream keyword terminated "
r"by \r without \n")
return startstream
def readstream(self, obj, startstream, source, exact_required=False,
streamending='endstream endobj'.split(), int=int):
fdata = source.fdata
length = int(obj.Length)
source.floc = target_endstream = startstream + length
endit = source.multiple(2)
obj._stream = fdata[startstream:target_endstream]
if endit == streamending:
return
if exact_required:
source.exception('Expected endstream endobj')
# The length attribute does not match the distance between the
# stream and endstream keywords.
# TODO: Extract maxstream from dictionary of object offsets
# and use rfind instead of find.
maxstream = len(fdata) - 20
endstream = fdata.find('endstream', startstream, maxstream)
source.floc = startstream
room = endstream - startstream
if endstream < 0:
source.error('Could not find endstream')
return
if (length == room + 1 and
fdata[startstream - 2:startstream] == '\r\n'):
source.warning(r"stream keyword terminated by \r without \n")
obj._stream = fdata[startstream - 1:target_endstream - 1]
return
source.floc = endstream
if length > room:
source.error('stream /Length attribute (%d) appears to '
'be too big (size %d) -- adjusting',
length, room)
obj.stream = fdata[startstream:endstream]
return
if fdata[target_endstream:endstream].rstrip():
source.error('stream /Length attribute (%d) appears to '
'be too small (size %d) -- adjusting',
length, room)
obj.stream = fdata[startstream:endstream]
return
endobj = fdata.find('endobj', endstream, maxstream)
if endobj < 0:
source.error('Could not find endobj after endstream')
return
if fdata[endstream:endobj].rstrip() != 'endstream':
source.error('Unexpected data between endstream and endobj')
return
source.error('Illegal endstream/endobj combination')
def loadindirect(self, key, PdfDict=PdfDict,
isinstance=isinstance):
result = self.indirect_objects.get(key)
if not isinstance(result, PdfIndirect):
return result
source = self.source
offset = int(self.source.obj_offsets.get(key, '0'))
if not offset:
source.warning("Did not find PDF object %s", key)
return None
# Read the object header and validate it
objnum, gennum = key
source.floc = offset
objid = source.multiple(3)
ok = len(objid) == 3
ok = ok and objid[0].isdigit() and int(objid[0]) == objnum
ok = ok and objid[1].isdigit() and int(objid[1]) == gennum
ok = ok and objid[2] == 'obj'
if not ok:
source.floc = offset
source.next()
objheader = '%d %d obj' % (objnum, gennum)
fdata = source.fdata
offset2 = (fdata.find('\n' + objheader) + 1 or
fdata.find('\r' + objheader) + 1)
if (not offset2 or
fdata.find(fdata[offset2 - 1] + objheader, offset2) > 0):
source.warning("Expected indirect object '%s'", objheader)
return None
source.warning("Indirect object %s found at incorrect "
"offset %d (expected offset %d)",
objheader, offset2, offset)
source.floc = offset2 + len(objheader)
# Read the object, and call special code if it starts
# an array or dictionary
obj = source.next()
func = self.special.get(obj)
if func is not None:
obj = func(source)
self.indirect_objects[key] = obj
self.deferred_objects.remove(key)
# Mark the object as indirect, and
# just return it if it is a simple object.
obj.indirect = key
tok = source.next()
if tok == 'endobj':
return obj
# Should be a stream. Either that or it's broken.
isdict = isinstance(obj, PdfDict)
if isdict and tok == 'stream':
self.readstream(obj, self.findstream(obj, tok, source), source)
return obj
# Houston, we have a problem, but let's see if it
# is easily fixable. Leaving out a space before endobj
# is apparently an easy mistake to make on generation
# (Because it won't be noticed unless you are specifically
# generating an indirect object that doesn't end with any
# sort of delimiter.) It is so common that things like
# okular just handle it.
if isinstance(obj, PdfObject) and obj.endswith('endobj'):
source.error('No space or delimiter before endobj')
obj = PdfObject(obj[:-6])
else:
source.error("Expected 'endobj'%s token",
isdict and " or 'stream'" or '')
obj = PdfObject('')
obj.indirect = key
self.indirect_objects[key] = obj
return obj
def read_all(self):
deferred = self.deferred_objects
prev = set()
while 1:
new = deferred - prev
if not new:
break
prev |= deferred
for key in new:
self.loadindirect(key)
def decrypt_all(self):
self.read_all()
if self.crypt_filters is not None:
crypt.decrypt_objects(
self.indirect_objects.values(), self.stream_crypt_filter,
self.crypt_filters)
def uncompress(self):
self.read_all()
uncompress(self.indirect_objects.values())
def load_stream_objects(self, object_streams):
# read object streams
objs = []
for num in object_streams:
obj = self.findindirect(num, 0).real_value()
assert obj.Type == '/ObjStm'
objs.append(obj)
# read objects from stream
if objs:
# Decrypt
if self.crypt_filters is not None:
crypt.decrypt_objects(
objs, self.stream_crypt_filter, self.crypt_filters)
# Decompress
uncompress(objs)
for obj in objs:
objsource = PdfTokens(obj.stream, 0, False)
next = objsource.next
offsets = []
firstoffset = int(obj.First)
while objsource.floc < firstoffset:
offsets.append((int(next()), firstoffset + int(next())))
for num, offset in offsets:
# Read the object, and call special code if it starts
# an array or dictionary
objsource.floc = offset
sobj = next()
func = self.special.get(sobj)
if func is not None:
sobj = func(objsource)
key = (num, 0)
self.indirect_objects[key] = sobj
if key in self.deferred_objects:
self.deferred_objects.remove(key)
# Mark the object as indirect, and
# add it to the list of streams if it starts a stream
sobj.indirect = key
def findxref(self, fdata):
''' Find the cross reference section at the end of a file
'''
startloc = fdata.rfind('startxref')
if startloc < 0:
raise PdfParseError('Did not find "startxref" at end of file')
source = PdfTokens(fdata, startloc, False, self.verbose)
tok = source.next()
assert tok == 'startxref' # (We just checked this...)
tableloc = source.next_default()
if not tableloc.isdigit():
source.exception('Expected table location')
if source.next_default().rstrip().lstrip('%') != 'EOF':
source.exception('Expected %%EOF')
return startloc, PdfTokens(fdata, int(tableloc), True, self.verbose)
def parse_xref_stream(self, source, int=int, range=range,
enumerate=enumerate, islice=itertools.islice,
defaultdict=collections.defaultdict,
hexlify=binascii.hexlify):
''' Parse (one of) the cross-reference file section(s)
'''
def readint(s, lengths):
offset = 0
for length in itertools.cycle(lengths):
next = offset + length
yield int(hexlify(s[offset:next]), 16) if length else None
offset = next
setdefault = source.obj_offsets.setdefault
next = source.next
# check for xref stream object
objid = source.multiple(3)
ok = len(objid) == 3
ok = ok and objid[0].isdigit()
ok = ok and objid[1] == 'obj'
ok = ok and objid[2] == '<<'
if not ok:
source.exception('Expected xref stream start')
obj = self.readdict(source)
if obj.Type != PdfName.XRef:
source.exception('Expected dict type of /XRef')
tok = next()
self.readstream(obj, self.findstream(obj, tok, source), source, True)
old_strm = obj.stream
if not uncompress([obj], True):
source.exception('Could not decompress Xref stream')
stream = obj.stream
# Fix for issue #76 -- goofy compressed xref stream
# that is NOT ACTUALLY COMPRESSED
stream = stream if stream is not old_strm else convert_store(old_strm)
num_pairs = obj.Index or PdfArray(['0', obj.Size])
num_pairs = [int(x) for x in num_pairs]
num_pairs = zip(num_pairs[0::2], num_pairs[1::2])
entry_sizes = [int(x) for x in obj.W]
if len(entry_sizes) != 3:
source.exception('Invalid entry size')
object_streams = defaultdict(list)
get = readint(stream, entry_sizes)
for objnum, size in num_pairs:
for cnt in range(size):
xtype, p1, p2 = islice(get, 3)
if xtype in (1, None):
if p1:
setdefault((objnum, p2 or 0), p1)
elif xtype == 2:
object_streams[p1].append((objnum, p2))
objnum += 1
obj.private.object_streams = object_streams
return obj
def parse_xref_table(self, source, int=int, range=range):
''' Parse (one of) the cross-reference file section(s)
'''
setdefault = source.obj_offsets.setdefault
next = source.next
# plain xref table
start = source.floc
try:
while 1:
tok = next()
if tok == 'trailer':
return
startobj = int(tok)
for objnum in range(startobj, startobj + int(next())):
offset = int(next())
generation = int(next())
inuse = next()
if inuse == 'n':
if offset != 0:
setdefault((objnum, generation), offset)
elif inuse != 'f':
raise ValueError
except:
pass
try:
# Table formatted incorrectly.
# See if we can figure it out anyway.
end = source.fdata.rindex('trailer', start)
table = source.fdata[start:end].splitlines()
for line in table:
tokens = line.split()
if len(tokens) == 2:
objnum = int(tokens[0])
elif len(tokens) == 3:
offset, generation, inuse = (int(tokens[0]),
int(tokens[1]), tokens[2])
if offset != 0 and inuse == 'n':
setdefault((objnum, generation), offset)
objnum += 1
elif tokens:
log.error('Invalid line in xref table: %s' %
repr(line))
raise ValueError
log.warning('Badly formatted xref table')
source.floc = end
next()
except:
source.floc = start
source.exception('Invalid table format')
def parsexref(self, source):
''' Parse (one of) the cross-reference file section(s)
'''
next = source.next
try:
tok = next()
except StopIteration:
tok = ''
if tok.isdigit():
return self.parse_xref_stream(source), True
elif tok == 'xref':
self.parse_xref_table(source)
tok = next()
if tok != '<<':
source.exception('Expected "<<" starting catalog')
return self.readdict(source), False
else:
source.exception('Expected "xref" keyword or xref stream object')
def readpages(self, node):
pagename = PdfName.Page
pagesname = PdfName.Pages
catalogname = PdfName.Catalog
typename = PdfName.Type
kidname = PdfName.Kids
try:
result = []
stack = [node]
append = result.append
pop = stack.pop
while stack:
node = pop()
nodetype = node[typename]
if nodetype == pagename:
append(node)
elif nodetype == pagesname:
stack.extend(reversed(node[kidname]))
elif nodetype == catalogname:
stack.append(node[pagesname])
else:
log.error('Expected /Page or /Pages dictionary, got %s' %
repr(node))
return result
except (AttributeError, TypeError) as s:
log.error('Invalid page tree: %s' % s)
return []
def _parse_encrypt_info(self, source, password, trailer):
"""Check password and initialize crypt filters."""
# Create and check password key
key = crypt.create_key(password, trailer)
if not crypt.check_user_password(key, trailer):
source.warning('User password does not validate')
# Create default crypt filters
private = self.private
crypt_filters = self.crypt_filters
version = int(trailer.Encrypt.V or 0)
if version in (1, 2):
crypt_filter = crypt.RC4CryptFilter(key)
private.stream_crypt_filter = crypt_filter
private.string_crypt_filter = crypt_filter
elif version == 4:
if PdfName.CF in trailer.Encrypt:
for name, params in iteritems(trailer.Encrypt.CF):
if name == PdfName.Identity:
continue
cfm = params.CFM
if cfm == PdfName.AESV2:
crypt_filters[name] = crypt.AESCryptFilter(key)
elif cfm == PdfName.V2:
crypt_filters[name] = crypt.RC4CryptFilter(key)
else:
source.warning(
'Unsupported crypt filter: {}, {}'.format(
name, cfm))
# Read default stream filter
if PdfName.StmF in trailer.Encrypt:
name = trailer.Encrypt.StmF
if name in crypt_filters:
private.stream_crypt_filter = crypt_filters[name]
else:
source.warning(
'Invalid crypt filter name in /StmF:'
' {}'.format(name))
# Read default string filter
if PdfName.StrF in trailer.Encrypt:
name = trailer.Encrypt.StrF
if name in crypt_filters:
private.string_crypt_filter = crypt_filters[name]
else:
source.warning(
'Invalid crypt filter name in /StrF:'
' {}'.format(name))
else:
source.warning(
'Unsupported Encrypt version: {}'.format(version))
def __init__(self, fname=None, fdata=None, decompress=False,
decrypt=False, password='', disable_gc=True, verbose=True):
self.private.verbose = verbose
# Runs a lot faster with GC off.
disable_gc = disable_gc and gc.isenabled()
if disable_gc:
gc.disable()
try:
if fname is not None:
assert fdata is None
# Allow reading preexisting streams like pyPdf
if hasattr(fname, 'read'):
fdata = fname.read()
else:
try:
f = open(fname, 'rb')
fdata = f.read()
f.close()
except IOError:
raise PdfParseError('Could not read PDF file %s' %
fname)
assert fdata is not None
fdata = convert_load(fdata)
if not fdata.startswith('%PDF-'):
startloc = fdata.find('%PDF-')
if startloc >= 0:
log.warning('PDF header not at beginning of file')
else:
lines = fdata.lstrip().splitlines()
if not lines:
raise PdfParseError('Empty PDF file!')
raise PdfParseError('Invalid PDF header: %s' %
repr(lines[0]))
self.private.version = fdata[5:8]
endloc = fdata.rfind('%EOF')
if endloc < 0:
raise PdfParseError('EOF mark not found: %s' %
repr(fdata[-20:]))
endloc += 6
junk = fdata[endloc:]
fdata = fdata[:endloc]
if junk.rstrip('\00').strip():
log.warning('Extra data at end of file')
private = self.private
private.indirect_objects = {}
private.deferred_objects = set()
private.special = {'<<': self.readdict,
'[': self.readarray,
'endobj': self.empty_obj,
}
for tok in r'\ ( ) < > { } ] >> %'.split():
self.special[tok] = self.badtoken
startloc, source = self.findxref(fdata)
private.source = source
# Find all the xref tables/streams, and
# then deal with them backwards.
xref_list = []
while 1:
source.obj_offsets = {}
trailer, is_stream = self.parsexref(source)
prev = trailer.Prev
if prev is None:
token = source.next()
if token != 'startxref' and not xref_list:
source.warning('Expected "startxref" '
'at end of xref table')
break
xref_list.append((source.obj_offsets, trailer, is_stream))
source.floc = int(prev)
# Handle document encryption
private.crypt_filters = None
if decrypt and PdfName.Encrypt in trailer:
identity_filter = crypt.IdentityCryptFilter()
crypt_filters = {
PdfName.Identity: identity_filter
}
private.crypt_filters = crypt_filters
private.stream_crypt_filter = identity_filter
private.string_crypt_filter = identity_filter
if not crypt.HAS_CRYPTO:
raise PdfParseError(
'Install PyCrypto to enable encryption support')
self._parse_encrypt_info(source, password, trailer)
if is_stream:
self.load_stream_objects(trailer.object_streams)
while xref_list:
later_offsets, later_trailer, is_stream = xref_list.pop()
source.obj_offsets.update(later_offsets)
if is_stream:
trailer.update(later_trailer)
self.load_stream_objects(later_trailer.object_streams)
else:
trailer = later_trailer
trailer.Prev = None
if (trailer.Version and
float(trailer.Version) > float(self.version)):
self.private.version = trailer.Version
if decrypt:
self.decrypt_all()
trailer.Encrypt = None
if is_stream:
self.Root = trailer.Root
self.Info = trailer.Info
self.ID = trailer.ID
self.Size = trailer.Size
self.Encrypt = trailer.Encrypt
else:
self.update(trailer)
# self.read_all_indirect(source)
private.pages = self.readpages(self.Root)
if decompress:
self.uncompress()
# For compatibility with pyPdf
private.numPages = len(self.pages)
finally:
if disable_gc:
gc.enable()
# For compatibility with pyPdf
def getPage(self, pagenum):
return self.pages[pagenum]
| (fname=None, fdata=None, decompress=False, decrypt=False, password='', disable_gc=True, verbose=True) |
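A brief usage sketch for the reader as a whole ('input.pdf' is a placeholder path): the constructor parses the xref sections, after which the trailer keys and the flattened page list are available as attributes.

from pdfrw import PdfReader

reader = PdfReader('input.pdf')
print(reader.version)            # version string from the %PDF- header, e.g. '1.4'
print(reader.Root.Type)          # '/Catalog'
print(len(reader.pages))         # flattened page list built by readpages()
print(reader.pages[0].MediaBox)  # may be None if inherited; see page.inheritable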
724,045 | pdfrw.pdfreader | __init__ | null | def __init__(self, fname=None, fdata=None, decompress=False,
decrypt=False, password='', disable_gc=True, verbose=True):
self.private.verbose = verbose
# Runs a lot faster with GC off.
disable_gc = disable_gc and gc.isenabled()
if disable_gc:
gc.disable()
try:
if fname is not None:
assert fdata is None
# Allow reading preexisting streams like pyPdf
if hasattr(fname, 'read'):
fdata = fname.read()
else:
try:
f = open(fname, 'rb')
fdata = f.read()
f.close()
except IOError:
raise PdfParseError('Could not read PDF file %s' %
fname)
assert fdata is not None
fdata = convert_load(fdata)
if not fdata.startswith('%PDF-'):
startloc = fdata.find('%PDF-')
if startloc >= 0:
log.warning('PDF header not at beginning of file')
else:
lines = fdata.lstrip().splitlines()
if not lines:
raise PdfParseError('Empty PDF file!')
raise PdfParseError('Invalid PDF header: %s' %
repr(lines[0]))
self.private.version = fdata[5:8]
endloc = fdata.rfind('%EOF')
if endloc < 0:
raise PdfParseError('EOF mark not found: %s' %
repr(fdata[-20:]))
endloc += 6
junk = fdata[endloc:]
fdata = fdata[:endloc]
if junk.rstrip('\00').strip():
log.warning('Extra data at end of file')
private = self.private
private.indirect_objects = {}
private.deferred_objects = set()
private.special = {'<<': self.readdict,
'[': self.readarray,
'endobj': self.empty_obj,
}
for tok in r'\ ( ) < > { } ] >> %'.split():
self.special[tok] = self.badtoken
startloc, source = self.findxref(fdata)
private.source = source
# Find all the xref tables/streams, and
# then deal with them backwards.
xref_list = []
while 1:
source.obj_offsets = {}
trailer, is_stream = self.parsexref(source)
prev = trailer.Prev
if prev is None:
token = source.next()
if token != 'startxref' and not xref_list:
source.warning('Expected "startxref" '
'at end of xref table')
break
xref_list.append((source.obj_offsets, trailer, is_stream))
source.floc = int(prev)
# Handle document encryption
private.crypt_filters = None
if decrypt and PdfName.Encrypt in trailer:
identity_filter = crypt.IdentityCryptFilter()
crypt_filters = {
PdfName.Identity: identity_filter
}
private.crypt_filters = crypt_filters
private.stream_crypt_filter = identity_filter
private.string_crypt_filter = identity_filter
if not crypt.HAS_CRYPTO:
raise PdfParseError(
'Install PyCrypto to enable encryption support')
self._parse_encrypt_info(source, password, trailer)
if is_stream:
self.load_stream_objects(trailer.object_streams)
while xref_list:
later_offsets, later_trailer, is_stream = xref_list.pop()
source.obj_offsets.update(later_offsets)
if is_stream:
trailer.update(later_trailer)
self.load_stream_objects(later_trailer.object_streams)
else:
trailer = later_trailer
trailer.Prev = None
if (trailer.Version and
float(trailer.Version) > float(self.version)):
self.private.version = trailer.Version
if decrypt:
self.decrypt_all()
trailer.Encrypt = None
if is_stream:
self.Root = trailer.Root
self.Info = trailer.Info
self.ID = trailer.ID
self.Size = trailer.Size
self.Encrypt = trailer.Encrypt
else:
self.update(trailer)
# self.read_all_indirect(source)
private.pages = self.readpages(self.Root)
if decompress:
self.uncompress()
# For compatibility with pyPdf
private.numPages = len(self.pages)
finally:
if disable_gc:
gc.enable()
| (self, fname=None, fdata=None, decompress=False, decrypt=False, password='', disable_gc=True, verbose=True) |
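The optional flags handled by this constructor can be exercised as follows; the file name and password are placeholders, and decrypt=True requires PyCrypto to be installed (see the check above).

from pdfrw import PdfReader

reader = PdfReader('secured.pdf',
                   decrypt=True,      # validates the password and sets up crypt filters
                   password='user-password',
                   decompress=True)   # expands compressed streams after loading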
724,049 | pdfrw.pdfreader | _parse_encrypt_info | Check password and initialize crypt filters. | def _parse_encrypt_info(self, source, password, trailer):
"""Check password and initialize crypt filters."""
# Create and check password key
key = crypt.create_key(password, trailer)
if not crypt.check_user_password(key, trailer):
source.warning('User password does not validate')
# Create default crypt filters
private = self.private
crypt_filters = self.crypt_filters
version = int(trailer.Encrypt.V or 0)
if version in (1, 2):
crypt_filter = crypt.RC4CryptFilter(key)
private.stream_crypt_filter = crypt_filter
private.string_crypt_filter = crypt_filter
elif version == 4:
if PdfName.CF in trailer.Encrypt:
for name, params in iteritems(trailer.Encrypt.CF):
if name == PdfName.Identity:
continue
cfm = params.CFM
if cfm == PdfName.AESV2:
crypt_filters[name] = crypt.AESCryptFilter(key)
elif cfm == PdfName.V2:
crypt_filters[name] = crypt.RC4CryptFilter(key)
else:
source.warning(
'Unsupported crypt filter: {}, {}'.format(
name, cfm))
# Read default stream filter
if PdfName.StmF in trailer.Encrypt:
name = trailer.Encrypt.StmF
if name in crypt_filters:
private.stream_crypt_filter = crypt_filters[name]
else:
source.warning(
'Invalid crypt filter name in /StmF:'
' {}'.format(name))
# Read default string filter
if PdfName.StrF in trailer.Encrypt:
name = trailer.Encrypt.StrF
if name in crypt_filters:
private.string_crypt_filter = crypt_filters[name]
else:
source.warning(
'Invalid crypt filter name in /StrF:'
' {}'.format(name))
else:
source.warning(
'Unsupported Encrypt version: {}'.format(version))
| (self, source, password, trailer) |
724,050 | pdfrw.pdfreader | badtoken | Didn't see that coming.
| def badtoken(self, source):
''' Didn't see that coming.
'''
source.exception('Unexpected delimiter')
| (self, source) |
724,052 | pdfrw.pdfreader | decrypt_all | null | def decrypt_all(self):
self.read_all()
if self.crypt_filters is not None:
crypt.decrypt_objects(
self.indirect_objects.values(), self.stream_crypt_filter,
self.crypt_filters)
| (self) |
724,053 | pdfrw.pdfreader | empty_obj | Some silly git put an empty object in the
file. Back up so the caller sees the endobj.
| def empty_obj(self, source, PdfObject=PdfObject):
''' Some silly git put an empty object in the
file. Back up so the caller sees the endobj.
'''
source.floc = source.tokstart
| (self, source, PdfObject=<class 'pdfrw.objects.pdfobject.PdfObject'>) |
724,054 | pdfrw.pdfreader | findindirect | Return a previously loaded indirect object, or create
a placeholder for it.
| def findindirect(self, objnum, gennum, PdfIndirect=PdfIndirect, int=int):
''' Return a previously loaded indirect object, or create
a placeholder for it.
'''
key = int(objnum), int(gennum)
result = self.indirect_objects.get(key)
if result is None:
self.indirect_objects[key] = result = PdfIndirect(key)
self.deferred_objects.add(key)
result._loader = self.loadindirect
return result
| (self, objnum, gennum, PdfIndirect=<class 'pdfrw.objects.pdfindirect.PdfIndirect'>, int=<class 'int'>) |
724,055 | pdfrw.pdfreader | findstream | Figure out if there is a content stream
following an object, and return the start
pointer to the content stream if so.
(We can't read it yet, because we might not
know how long it is, because Length might
be an indirect object.)
| def findstream(self, obj, tok, source, len=len):
''' Figure out if there is a content stream
following an object, and return the start
pointer to the content stream if so.
(We can't read it yet, because we might not
know how long it is, because Length might
be an indirect object.)
'''
fdata = source.fdata
startstream = source.tokstart + len(tok)
gotcr = fdata[startstream] == '\r'
startstream += gotcr
gotlf = fdata[startstream] == '\n'
startstream += gotlf
if not gotlf:
if not gotcr:
source.error(r'stream keyword not followed by \n')
else:
source.warning(r"stream keyword terminated "
r"by \r without \n")
return startstream
| (self, obj, tok, source, len=<built-in function len>) |
724,056 | pdfrw.pdfreader | findxref | Find the cross reference section at the end of a file
| def findxref(self, fdata):
''' Find the cross reference section at the end of a file
'''
startloc = fdata.rfind('startxref')
if startloc < 0:
raise PdfParseError('Did not find "startxref" at end of file')
source = PdfTokens(fdata, startloc, False, self.verbose)
tok = source.next()
assert tok == 'startxref' # (We just checked this...)
tableloc = source.next_default()
if not tableloc.isdigit():
source.exception('Expected table location')
if source.next_default().rstrip().lstrip('%') != 'EOF':
source.exception('Expected %%EOF')
return startloc, PdfTokens(fdata, int(tableloc), True, self.verbose)
| (self, fdata) |
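For orientation, the last few lines of a classic (non-xref-stream) PDF that this method scans backwards for look roughly like the following; the offsets are illustrative. The number after startxref is the byte offset of the most recent cross-reference section, which is handed to a fresh PdfTokens instance.

trailer
<< /Size 92 /Root 1 0 R >>
startxref
117024
%%EOF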
724,058 | pdfrw.pdfreader | getPage | null | def getPage(self, pagenum):
return self.pages[pagenum]
| (self, pagenum) |
724,064 | pdfrw.pdfreader | load_stream_objects | null | def load_stream_objects(self, object_streams):
# read object streams
objs = []
for num in object_streams:
obj = self.findindirect(num, 0).real_value()
assert obj.Type == '/ObjStm'
objs.append(obj)
# read objects from stream
if objs:
# Decrypt
if self.crypt_filters is not None:
crypt.decrypt_objects(
objs, self.stream_crypt_filter, self.crypt_filters)
# Decompress
uncompress(objs)
for obj in objs:
objsource = PdfTokens(obj.stream, 0, False)
next = objsource.next
offsets = []
firstoffset = int(obj.First)
while objsource.floc < firstoffset:
offsets.append((int(next()), firstoffset + int(next())))
for num, offset in offsets:
# Read the object, and call special code if it starts
# an array or dictionary
objsource.floc = offset
sobj = next()
func = self.special.get(sobj)
if func is not None:
sobj = func(objsource)
key = (num, 0)
self.indirect_objects[key] = sobj
if key in self.deferred_objects:
self.deferred_objects.remove(key)
# Mark the object as indirect, and
# add it to the list of streams if it starts a stream
sobj.indirect = key
| (self, object_streams) |
724,065 | pdfrw.pdfreader | loadindirect | null | def loadindirect(self, key, PdfDict=PdfDict,
isinstance=isinstance):
result = self.indirect_objects.get(key)
if not isinstance(result, PdfIndirect):
return result
source = self.source
offset = int(self.source.obj_offsets.get(key, '0'))
if not offset:
source.warning("Did not find PDF object %s", key)
return None
# Read the object header and validate it
objnum, gennum = key
source.floc = offset
objid = source.multiple(3)
ok = len(objid) == 3
ok = ok and objid[0].isdigit() and int(objid[0]) == objnum
ok = ok and objid[1].isdigit() and int(objid[1]) == gennum
ok = ok and objid[2] == 'obj'
if not ok:
source.floc = offset
source.next()
objheader = '%d %d obj' % (objnum, gennum)
fdata = source.fdata
offset2 = (fdata.find('\n' + objheader) + 1 or
fdata.find('\r' + objheader) + 1)
if (not offset2 or
fdata.find(fdata[offset2 - 1] + objheader, offset2) > 0):
source.warning("Expected indirect object '%s'", objheader)
return None
source.warning("Indirect object %s found at incorrect "
"offset %d (expected offset %d)",
objheader, offset2, offset)
source.floc = offset2 + len(objheader)
# Read the object, and call special code if it starts
# an array or dictionary
obj = source.next()
func = self.special.get(obj)
if func is not None:
obj = func(source)
self.indirect_objects[key] = obj
self.deferred_objects.remove(key)
# Mark the object as indirect, and
# just return it if it is a simple object.
obj.indirect = key
tok = source.next()
if tok == 'endobj':
return obj
# Should be a stream. Either that or it's broken.
isdict = isinstance(obj, PdfDict)
if isdict and tok == 'stream':
self.readstream(obj, self.findstream(obj, tok, source), source)
return obj
# Houston, we have a problem, but let's see if it
# is easily fixable. Leaving out a space before endobj
# is apparently an easy mistake to make on generation
# (Because it won't be noticed unless you are specifically
# generating an indirect object that doesn't end with any
# sort of delimiter.) It is so common that things like
# okular just handle it.
if isinstance(obj, PdfObject) and obj.endswith('endobj'):
source.error('No space or delimiter before endobj')
obj = PdfObject(obj[:-6])
else:
source.error("Expected 'endobj'%s token",
isdict and " or 'stream'" or '')
obj = PdfObject('')
obj.indirect = key
self.indirect_objects[key] = obj
return obj
| (self, key, PdfDict=<class 'pdfrw.objects.pdfdict.PdfDict'>, isinstance=<built-in function isinstance>) |
724,066 | pdfrw.pdfreader | parse_xref_stream | Parse (one of) the cross-reference file section(s)
| def parse_xref_stream(self, source, int=int, range=range,
enumerate=enumerate, islice=itertools.islice,
defaultdict=collections.defaultdict,
hexlify=binascii.hexlify):
''' Parse (one of) the cross-reference file section(s)
'''
def readint(s, lengths):
offset = 0
for length in itertools.cycle(lengths):
next = offset + length
yield int(hexlify(s[offset:next]), 16) if length else None
offset = next
setdefault = source.obj_offsets.setdefault
next = source.next
# check for xref stream object
objid = source.multiple(3)
ok = len(objid) == 3
ok = ok and objid[0].isdigit()
ok = ok and objid[1] == 'obj'
ok = ok and objid[2] == '<<'
if not ok:
source.exception('Expected xref stream start')
obj = self.readdict(source)
if obj.Type != PdfName.XRef:
source.exception('Expected dict type of /XRef')
tok = next()
self.readstream(obj, self.findstream(obj, tok, source), source, True)
old_strm = obj.stream
if not uncompress([obj], True):
source.exception('Could not decompress Xref stream')
stream = obj.stream
# Fix for issue #76 -- goofy compressed xref stream
# that is NOT ACTUALLY COMPRESSED
stream = stream if stream is not old_strm else convert_store(old_strm)
num_pairs = obj.Index or PdfArray(['0', obj.Size])
num_pairs = [int(x) for x in num_pairs]
num_pairs = zip(num_pairs[0::2], num_pairs[1::2])
entry_sizes = [int(x) for x in obj.W]
if len(entry_sizes) != 3:
source.exception('Invalid entry size')
object_streams = defaultdict(list)
get = readint(stream, entry_sizes)
for objnum, size in num_pairs:
for cnt in range(size):
xtype, p1, p2 = islice(get, 3)
if xtype in (1, None):
if p1:
setdefault((objnum, p2 or 0), p1)
elif xtype == 2:
object_streams[p1].append((objnum, p2))
objnum += 1
obj.private.object_streams = object_streams
return obj
| (self, source, int=<class 'int'>, range=<class 'range'>, enumerate=<class 'enumerate'>, islice=<class 'itertools.islice'>, defaultdict=<class 'collections.defaultdict'>, hexlify=<built-in function hexlify>) |
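The /W array gives the byte width of each field in an xref-stream entry. A standalone sketch of the readint helper above, run on hypothetical bytes data with /W [1 2 1], shows how the fixed-width big-endian integers are pulled out:

import binascii
import itertools

def readint(data, widths):
    # Yield big-endian integers of the given byte widths, cycling over
    # the /W pattern; a zero width yields None (the field is defaulted).
    offset = 0
    for width in itertools.cycle(widths):
        end = offset + width
        yield int(binascii.hexlify(data[offset:end]), 16) if width else None
        offset = end

sample = bytes([1, 0x00, 0x11, 0x00,    # type 1 entry: offset 0x0011, generation 0
                2, 0x00, 0x05, 0x03])   # type 2 entry: object stream 5, index 3
print(list(itertools.islice(readint(sample, [1, 2, 1]), 6)))
# [1, 17, 0, 2, 5, 3]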
724,067 | pdfrw.pdfreader | parse_xref_table | Parse (one of) the cross-reference file section(s)
| def parse_xref_table(self, source, int=int, range=range):
''' Parse (one of) the cross-reference file section(s)
'''
setdefault = source.obj_offsets.setdefault
next = source.next
# plain xref table
start = source.floc
try:
while 1:
tok = next()
if tok == 'trailer':
return
startobj = int(tok)
for objnum in range(startobj, startobj + int(next())):
offset = int(next())
generation = int(next())
inuse = next()
if inuse == 'n':
if offset != 0:
setdefault((objnum, generation), offset)
elif inuse != 'f':
raise ValueError
except:
pass
try:
# Table formatted incorrectly.
# See if we can figure it out anyway.
end = source.fdata.rindex('trailer', start)
table = source.fdata[start:end].splitlines()
for line in table:
tokens = line.split()
if len(tokens) == 2:
objnum = int(tokens[0])
elif len(tokens) == 3:
offset, generation, inuse = (int(tokens[0]),
int(tokens[1]), tokens[2])
if offset != 0 and inuse == 'n':
setdefault((objnum, generation), offset)
objnum += 1
elif tokens:
log.error('Invalid line in xref table: %s' %
repr(line))
raise ValueError
log.warning('Badly formatted xref table')
source.floc = end
next()
except:
source.floc = start
source.exception('Invalid table format')
| (self, source, int=<class 'int'>, range=<class 'range'>) |
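For reference, the table format the happy path above expects is a subsection header ('first-object-number count') followed by one entry per object (10-digit offset, 5-digit generation, 'n' for in-use or 'f' for free); the offsets here are illustrative.

xref
0 3
0000000000 65535 f
0000000017 00000 n
0000000081 00000 n
trailer
<< /Size 3 /Root 1 0 R >>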
724,068 | pdfrw.pdfreader | parsexref | Parse (one of) the cross-reference file section(s)
| def parsexref(self, source):
''' Parse (one of) the cross-reference file section(s)
'''
next = source.next
try:
tok = next()
except StopIteration:
tok = ''
if tok.isdigit():
return self.parse_xref_stream(source), True
elif tok == 'xref':
self.parse_xref_table(source)
tok = next()
if tok != '<<':
source.exception('Expected "<<" starting catalog')
return self.readdict(source), False
else:
source.exception('Expected "xref" keyword or xref stream object')
| (self, source) |
724,071 | pdfrw.pdfreader | read_all | null | def read_all(self):
deferred = self.deferred_objects
prev = set()
while 1:
new = deferred - prev
if not new:
break
prev |= deferred
for key in new:
self.loadindirect(key)
| (self) |
724,072 | pdfrw.pdfreader | readarray | Found a [ token. Parse the tokens after that.
| def readarray(self, source, PdfArray=PdfArray):
''' Found a [ token. Parse the tokens after that.
'''
specialget = self.special.get
result = []
pop = result.pop
append = result.append
for value in source:
if value in ']R':
if value == ']':
break
generation = pop()
value = self.findindirect(pop(), generation)
else:
func = specialget(value)
if func is not None:
value = func(source)
append(value)
return PdfArray(result)
| (self, source, PdfArray=<class 'pdfrw.objects.pdfarray.PdfArray'>) |
724,073 | pdfrw.pdfreader | readdict | Found a << token. Parse the tokens after that.
| def readdict(self, source, PdfDict=PdfDict):
''' Found a << token. Parse the tokens after that.
'''
specialget = self.special.get
result = PdfDict()
next = source.next
tok = next()
while tok != '>>':
if not tok.startswith('/'):
source.error('Expected PDF /name object')
tok = next()
continue
key = tok
value = next()
func = specialget(value)
if func is not None:
value = func(source)
tok = next()
else:
tok = next()
if value.isdigit() and tok.isdigit():
tok2 = next()
if tok2 != 'R':
source.error('Expected "R" following two integers')
tok = tok2
continue
value = self.findindirect(value, tok)
tok = next()
result[key] = value
return result
| (self, source, PdfDict=<class 'pdfrw.objects.pdfdict.PdfDict'>) |
724,074 | pdfrw.pdfreader | readpages | null | def readpages(self, node):
pagename = PdfName.Page
pagesname = PdfName.Pages
catalogname = PdfName.Catalog
typename = PdfName.Type
kidname = PdfName.Kids
try:
result = []
stack = [node]
append = result.append
pop = stack.pop
while stack:
node = pop()
nodetype = node[typename]
if nodetype == pagename:
append(node)
elif nodetype == pagesname:
stack.extend(reversed(node[kidname]))
elif nodetype == catalogname:
stack.append(node[pagesname])
else:
log.error('Expected /Page or /Pages dictionary, got %s' %
repr(node))
return result
except (AttributeError, TypeError) as s:
log.error('Invalid page tree: %s' % s)
return []
| (self, node) |
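A minimal page tree of the shape this method walks, built from pdfrw primitives (purely illustrative; a real tree also carries /Parent links, /Count, and content streams):

from pdfrw import PdfArray, PdfDict, PdfName

leaf1 = PdfDict(Type=PdfName.Page)
leaf2 = PdfDict(Type=PdfName.Page)
branch = PdfDict(Type=PdfName.Pages, Kids=PdfArray([leaf1, leaf2]))
root = PdfDict(Type=PdfName.Catalog, Pages=branch)

# readpages(root) starts at the /Catalog node, descends through /Pages
# nodes via their /Kids arrays, and returns the /Page leaves in
# document order: [leaf1, leaf2].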
724,075 | pdfrw.pdfreader | readstream | null | def readstream(self, obj, startstream, source, exact_required=False,
streamending='endstream endobj'.split(), int=int):
fdata = source.fdata
length = int(obj.Length)
source.floc = target_endstream = startstream + length
endit = source.multiple(2)
obj._stream = fdata[startstream:target_endstream]
if endit == streamending:
return
if exact_required:
source.exception('Expected endstream endobj')
# The length attribute does not match the distance between the
# stream and endstream keywords.
# TODO: Extract maxstream from dictionary of object offsets
# and use rfind instead of find.
maxstream = len(fdata) - 20
endstream = fdata.find('endstream', startstream, maxstream)
source.floc = startstream
room = endstream - startstream
if endstream < 0:
source.error('Could not find endstream')
return
if (length == room + 1 and
fdata[startstream - 2:startstream] == '\r\n'):
source.warning(r"stream keyword terminated by \r without \n")
obj._stream = fdata[startstream - 1:target_endstream - 1]
return
source.floc = endstream
if length > room:
source.error('stream /Length attribute (%d) appears to '
'be too big (size %d) -- adjusting',
length, room)
obj.stream = fdata[startstream:endstream]
return
if fdata[target_endstream:endstream].rstrip():
source.error('stream /Length attribute (%d) appears to '
'be too small (size %d) -- adjusting',
length, room)
obj.stream = fdata[startstream:endstream]
return
endobj = fdata.find('endobj', endstream, maxstream)
if endobj < 0:
source.error('Could not find endobj after endstream')
return
if fdata[endstream:endobj].rstrip() != 'endstream':
source.error('Unexpected data between endstream and endobj')
return
source.error('Illegal endstream/endobj combination')
| (self, obj, startstream, source, exact_required=False, streamending=['endstream', 'endobj'], int=<class 'int'>) |
724,076 | pdfrw.pdfreader | uncompress | null | def uncompress(self):
self.read_all()
uncompress(self.indirect_objects.values())
| (self) |
724,078 | pdfrw.pdfwriter | PdfWriter | null | class PdfWriter(object):
_trailer = None
canonicalize = False
fname = None
def __init__(self, fname=None, version='1.3', compress=False, **kwargs):
"""
Parameters:
fname -- Output file name, or file-like binary object
with a write method
version -- PDF version to target. Currently only 1.3
supported.
compress -- True to do compression on output. Currently
compresses stream objects.
"""
# Legacy support: fname is new, was added in front
if fname is not None:
try:
float(fname)
except (ValueError, TypeError):
pass
else:
if version != '1.3':
assert compress == False
compress = version
version = fname
fname = None
self.fname = fname
self.version = version
self.compress = compress
if kwargs:
for name, value in iteritems(kwargs):
if name not in self.replaceable:
raise ValueError("Cannot set attribute %s "
"on PdfWriter instance" % name)
setattr(self, name, value)
self.pagearray = PdfArray()
self.killobj = {}
def addpage(self, page):
self._trailer = None
if page.Type != PdfName.Page:
raise PdfOutputError('Bad /Type: Expected %s, found %s'
% (PdfName.Page, page.Type))
inheritable = page.inheritable # searches for resources
self.pagearray.append(
IndirectPdfDict(
page,
Resources=inheritable.Resources,
MediaBox=inheritable.MediaBox,
CropBox=inheritable.CropBox,
Rotate=inheritable.Rotate,
)
)
# Add parents in the hierarchy to objects we
# don't want to output
killobj = self.killobj
obj, new_obj = page, self.pagearray[-1]
while obj is not None:
objid = id(obj)
if objid in killobj:
break
killobj[objid] = obj, new_obj
obj = obj.Parent
new_obj = None
return self
addPage = addpage # for compatibility with pyPdf
def addpages(self, pagelist):
for page in pagelist:
self.addpage(page)
return self
def _get_trailer(self):
trailer = self._trailer
if trailer is not None:
return trailer
if self.canonicalize:
self.make_canonical()
# Create the basic object structure of the PDF file
trailer = PdfDict(
Root=IndirectPdfDict(
Type=PdfName.Catalog,
Pages=IndirectPdfDict(
Type=PdfName.Pages,
Count=PdfObject(len(self.pagearray)),
Kids=self.pagearray
)
)
)
# Make all the pages point back to the page dictionary and
# ensure they are indirect references
pagedict = trailer.Root.Pages
for page in pagedict.Kids:
page.Parent = pagedict
page.indirect = True
self._trailer = trailer
return trailer
def _set_trailer(self, trailer):
self._trailer = trailer
trailer = property(_get_trailer, _set_trailer)
def write(self, fname=None, trailer=None, user_fmt=user_fmt,
disable_gc=True):
trailer = trailer or self.trailer
# Support fname for legacy applications
if (fname is not None) == (self.fname is not None):
raise PdfOutputError(
"PdfWriter fname must be specified exactly once")
fname = fname or self.fname
# Dump the data. We either have a filename or a preexisting
# file object.
preexisting = hasattr(fname, 'write')
f = preexisting and fname or open(fname, 'wb')
if disable_gc:
gc.disable()
try:
FormatObjects(f, trailer, self.version, self.compress,
self.killobj, user_fmt=user_fmt)
finally:
if not preexisting:
f.close()
if disable_gc:
gc.enable()
def make_canonical(self):
''' Canonicalizes a PDF. Assumes everything
is a Pdf object already.
'''
visited = set()
workitems = list(self.pagearray)
while workitems:
obj = workitems.pop()
objid = id(obj)
if objid in visited:
continue
visited.add(objid)
obj.indirect = False
if isinstance(obj, (PdfArray, PdfDict)):
obj.indirect = True
if isinstance(obj, PdfArray):
workitems += obj
else:
workitems += obj.values()
replaceable = set(vars())
| (fname=None, version='1.3', compress=False, **kwargs) |
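A short round-trip sketch tying the writer to the reader defined earlier; the file names are placeholders. addpages() copies each page dictionary (pulling inheritable /Resources, /MediaBox, etc. down onto the page), and write() serializes the assembled trailer.

from pdfrw import PdfReader, PdfWriter

pages = PdfReader('input.pdf').pages
writer = PdfWriter('output.pdf')
writer.addpages(pages)
writer.write()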
724,079 | pdfrw.pdfwriter | __init__ |
Parameters:
fname -- Output file name, or file-like binary object
with a write method
version -- PDF version to target. Currently only 1.3
supported.
compress -- True to do compression on output. Currently
compresses stream objects.
| def __init__(self, fname=None, version='1.3', compress=False, **kwargs):
"""
Parameters:
fname -- Output file name, or file-like binary object
with a write method
version -- PDF version to target. Currently only 1.3
supported.
compress -- True to do compression on output. Currently
compresses stream objects.
"""
# Legacy support: fname is new, was added in front
if fname is not None:
try:
float(fname)
except (ValueError, TypeError):
pass
else:
if version != '1.3':
assert compress == False
compress = version
version = fname
fname = None
self.fname = fname
self.version = version
self.compress = compress
if kwargs:
for name, value in iteritems(kwargs):
if name not in self.replaceable:
raise ValueError("Cannot set attribute %s "
"on PdfWriter instance" % name)
setattr(self, name, value)
self.pagearray = PdfArray()
self.killobj = {}
| (self, fname=None, version='1.3', compress=False, **kwargs) |
724,080 | pdfrw.pdfwriter | _get_trailer | null | def _get_trailer(self):
trailer = self._trailer
if trailer is not None:
return trailer
if self.canonicalize:
self.make_canonical()
# Create the basic object structure of the PDF file
trailer = PdfDict(
Root=IndirectPdfDict(
Type=PdfName.Catalog,
Pages=IndirectPdfDict(
Type=PdfName.Pages,
Count=PdfObject(len(self.pagearray)),
Kids=self.pagearray
)
)
)
# Make all the pages point back to the page dictionary and
# ensure they are indirect references
pagedict = trailer.Root.Pages
for page in pagedict.Kids:
page.Parent = pagedict
page.indirect = True
self._trailer = trailer
return trailer
| (self) |
724,081 | pdfrw.pdfwriter | _set_trailer | null | def _set_trailer(self, trailer):
self._trailer = trailer
| (self, trailer) |
724,082 | pdfrw.pdfwriter | addpage | null | def addpage(self, page):
self._trailer = None
if page.Type != PdfName.Page:
raise PdfOutputError('Bad /Type: Expected %s, found %s'
% (PdfName.Page, page.Type))
inheritable = page.inheritable # searches for resources
self.pagearray.append(
IndirectPdfDict(
page,
Resources=inheritable.Resources,
MediaBox=inheritable.MediaBox,
CropBox=inheritable.CropBox,
Rotate=inheritable.Rotate,
)
)
# Add parents in the hierarchy to objects we
# don't want to output
killobj = self.killobj
obj, new_obj = page, self.pagearray[-1]
while obj is not None:
objid = id(obj)
if objid in killobj:
break
killobj[objid] = obj, new_obj
obj = obj.Parent
new_obj = None
return self
| (self, page) |
724,084 | pdfrw.pdfwriter | addpages | null | def addpages(self, pagelist):
for page in pagelist:
self.addpage(page)
return self
| (self, pagelist) |
724,085 | pdfrw.pdfwriter | make_canonical | Canonicalizes a PDF. Assumes everything
is a Pdf object already.
| def make_canonical(self):
''' Canonicalizes a PDF. Assumes everything
is a Pdf object already.
'''
visited = set()
workitems = list(self.pagearray)
while workitems:
obj = workitems.pop()
objid = id(obj)
if objid in visited:
continue
visited.add(objid)
obj.indirect = False
if isinstance(obj, (PdfArray, PdfDict)):
obj.indirect = True
if isinstance(obj, PdfArray):
workitems += obj
else:
workitems += obj.values()
| (self) |
724,086 | pdfrw.pdfwriter | write | null | def write(self, fname=None, trailer=None, user_fmt=user_fmt,
disable_gc=True):
trailer = trailer or self.trailer
# Support fname for legacy applications
if (fname is not None) == (self.fname is not None):
raise PdfOutputError(
"PdfWriter fname must be specified exactly once")
fname = fname or self.fname
# Dump the data. We either have a filename or a preexisting
# file object.
preexisting = hasattr(fname, 'write')
f = preexisting and fname or open(fname, 'wb')
if disable_gc:
gc.disable()
try:
FormatObjects(f, trailer, self.version, self.compress,
self.killobj, user_fmt=user_fmt)
finally:
if not preexisting:
f.close()
if disable_gc:
gc.enable()
| (self, fname=None, trailer=None, user_fmt=<function user_fmt at 0x7ff9a47afd00>, disable_gc=True) |
724,087 | pdfrw.objects.pdfobject | PdfObject | A PdfObject is a textual representation of any PDF file object
other than an array, dict or string. It has an indirect attribute
which defaults to False.
| class PdfObject(str):
''' A PdfObject is a textual representation of any PDF file object
other than an array, dict or string. It has an indirect attribute
which defaults to False.
'''
indirect = False
| null |
724,088 | pdfrw.errors | PdfParseError | Error thrown by parser/tokenizer | class PdfParseError(PdfError):
"Error thrown by parser/tokenizer"
| (msg) |