index: int64 (values 0 to 731k)
package: string (lengths 2 to 98)
name: string (lengths 1 to 76)
docstring: string (lengths 0 to 281k)
code: string (lengths 4 to 1.07M)
signature: string (lengths 2 to 42.8k)
8,162
versioningit.core
do_format
Run the ``format`` step .. versionchanged:: 2.0.0 The ``version`` argument was renamed to ``base_version``. :raises MethodError: if the method does not return a `str`
def do_format( self, description: VCSDescription, base_version: str, next_version: str ) -> str: """ Run the ``format`` step .. versionchanged:: 2.0.0 The ``version`` argument was renamed to ``base_version``. :raises MethodError: if the method does not return a `str` """ new_version = self.format( description=description, base_version=base_version, next_version=next_version, ) if not isinstance(new_version, str): raise MethodError( f"format method returned {new_version!r} instead of a string" ) return new_version
(self, description: versioningit.core.VCSDescription, base_version: str, next_version: str) -> str
8,163
versioningit.core
do_next_version
Run the ``next-version`` step :raises MethodError: if the method does not return a `str`
def do_next_version(self, version: str, branch: Optional[str]) -> str: """ Run the ``next-version`` step :raises MethodError: if the method does not return a `str` """ next_version = self.next_version(version=version, branch=branch) if not isinstance(next_version, str): raise MethodError( f"next-version method returned {next_version!r} instead of a string" ) log.info("next-version returned version %s", next_version) warn_bad_version(next_version, "Calculated next version") return next_version
(self, version: str, branch: Optional[str]) -> str
8,164
versioningit.core
do_onbuild
.. versionadded:: 1.1.0 Run the ``onbuild`` step .. versionchanged:: 2.0.0 ``version`` argument replaced with ``template_fields`` .. versionchanged:: 3.0.0 ``build_dir`` argument replaced with ``file_provider``
def do_onbuild( self, file_provider: OnbuildFileProvider, is_source: bool, template_fields: dict[str, Any], ) -> None: """ .. versionadded:: 1.1.0 Run the ``onbuild`` step .. versionchanged:: 2.0.0 ``version`` argument replaced with ``template_fields`` .. versionchanged:: 3.0.0 ``build_dir`` argument replaced with ``file_provider`` """ if self.onbuild is not None: self.onbuild( file_provider=file_provider, is_source=is_source, template_fields=template_fields, ) else: log.info("'onbuild' step not configured; not doing anything")
(self, file_provider: versioningit.onbuild.OnbuildFileProvider, is_source: bool, template_fields: dict[str, typing.Any]) -> NoneType
8,165
versioningit.core
do_tag2version
Run the ``tag2version`` step :raises MethodError: if the method does not return a `str`
def do_tag2version(self, tag: str) -> str: """ Run the ``tag2version`` step :raises MethodError: if the method does not return a `str` """ version = self.tag2version(tag=tag) if not isinstance(version, str): raise MethodError( f"tag2version method returned {version!r} instead of a string" ) log.info("tag2version returned version %s", version) warn_bad_version(version, "Version extracted from tag") return version
(self, tag: str) -> str
8,166
versioningit.core
do_template_fields
.. versionadded:: 2.0.0 Run the ``template_fields`` step :raises MethodError: if the method does not return a `dict`
def do_template_fields( self, version: str, description: Optional[VCSDescription], base_version: Optional[str], next_version: Optional[str], ) -> dict: """ .. versionadded:: 2.0.0 Run the ``template_fields`` step :raises MethodError: if the method does not return a `dict` """ fields = self.template_fields( version=version, description=description, base_version=base_version, next_version=next_version, ) if not isinstance(fields, dict): raise MethodError( f"template-fields method returned {fields!r} instead of a dict" ) log.debug("Template fields available to `write` and `onbuild`: %r", fields) return fields
(self, version: str, description: Optional[versioningit.core.VCSDescription], base_version: Optional[str], next_version: Optional[str]) -> dict
8,167
versioningit.core
do_vcs
Run the ``vcs`` step :raises MethodError: if the method does not return a `VCSDescription`
def do_vcs(self) -> VCSDescription: """ Run the ``vcs`` step :raises MethodError: if the method does not return a `VCSDescription` """ description = self.vcs(project_dir=self.project_dir) if not isinstance(description, VCSDescription): raise MethodError( f"vcs method returned {description!r} instead of a VCSDescription" ) log.info("vcs returned tag %s", description.tag) log.debug("vcs state: %s", description.state) log.debug("vcs branch: %s", description.branch) log.debug("vcs fields: %r", description.fields) return description
(self) -> versioningit.core.VCSDescription
8,168
versioningit.core
do_write
Run the ``write`` step .. versionchanged:: 2.0.0 ``version`` argument replaced with ``template_fields``
def do_write(self, template_fields: dict[str, Any]) -> None: """ Run the ``write`` step .. versionchanged:: 2.0.0 ``version`` argument replaced with ``template_fields`` """ if self.write is not None: self.write(project_dir=self.project_dir, template_fields=template_fields) else: log.info("'write' step not configured; not writing anything")
(self, template_fields: dict[str, typing.Any]) -> NoneType
8,169
versioningit.core
get_version
Determine the version for the project. If ``write`` is true, then the file specified in the ``write`` subtable of the versioningit configuration, if any, will be updated. If ``fallback`` is true, then if ``project_dir`` is not under version control (or if the VCS executable is not installed), ``versioningit`` will assume that the directory is an unpacked sdist and will read the version from the :file:`PKG-INFO` file. .. versionchanged:: 2.0.0 ``write`` and ``fallback`` arguments added :raises NotVCSError: if ``fallback`` is false and ``project_dir`` is not under version control :raises NotSdistError: if ``fallback`` is true, ``project_dir`` is not under version control, and there is no :file:`PKG-INFO` file in ``project_dir`` :raises ConfigError: if any of the values in ``config`` are not of the correct type :raises MethodError: if a method returns a value of the wrong type
def get_version(self, write: bool = False, fallback: bool = True) -> str: """ Determine the version for the project. If ``write`` is true, then the file specified in the ``write`` subtable of the versioningit configuration, if any, will be updated. If ``fallback`` is true, then if ``project_dir`` is not under version control (or if the VCS executable is not installed), ``versioningit`` will assume that the directory is an unpacked sdist and will read the version from the :file:`PKG-INFO` file. .. versionchanged:: 2.0.0 ``write`` and ``fallback`` arguments added :raises NotVCSError: if ``fallback`` is false and ``project_dir`` is not under version control :raises NotSdistError: if ``fallback`` is true, ``project_dir`` is not under version control, and there is no :file:`PKG-INFO` file in ``project_dir`` :raises ConfigError: if any of the values in ``config`` are not of the correct type :raises MethodError: if a method returns a value of the wrong type """ return self.run(write=write, fallback=fallback).version
(self, write: bool = False, fallback: bool = True) -> str
8,170
versioningit.core
run
.. versionadded:: 2.0.0 Run all of the steps for the project — aside from "onbuild" and, optionally, "write" — and return an object containing the final version and intermediate values. If ``write`` is true, then the file specified in the ``write`` subtable of the versioningit configuration, if any, will be updated. If ``fallback`` is true, then if ``project_dir`` is not under version control (or if the VCS executable is not installed), ``versioningit`` will assume that the directory is an unpacked sdist and will read the version from the :file:`PKG-INFO` file, returning a `FallbackReport` instance instead of a `Report`. :raises NotVCSError: if ``fallback`` is false and ``project_dir`` is not under version control :raises NotSdistError: if ``fallback`` is true, ``project_dir`` is not under version control, and there is no :file:`PKG-INFO` file in ``project_dir`` :raises ConfigError: if any of the values in ``config`` are not of the correct type :raises MethodError: if a method returns a value of the wrong type
def run( self, write: bool = False, fallback: bool = True ) -> Report | FallbackReport: """ .. versionadded:: 2.0.0 Run all of the steps for the project — aside from "onbuild" and, optionally, "write" — and return an object containing the final version and intermediate values. If ``write`` is true, then the file specified in the ``write`` subtable of the versioningit configuration, if any, will be updated. If ``fallback`` is true, then if ``project_dir`` is not under version control (or if the VCS executable is not installed), ``versioningit`` will assume that the directory is an unpacked sdist and will read the version from the :file:`PKG-INFO` file, returning a `FallbackReport` instance instead of a `Report`. :raises NotVCSError: if ``fallback`` is false and ``project_dir`` is not under version control :raises NotSdistError: if ``fallback`` is true, ``project_dir`` is not under version control, and there is no :file:`PKG-INFO` file in ``project_dir`` :raises ConfigError: if any of the values in ``config`` are not of the correct type :raises MethodError: if a method returns a value of the wrong type """ description: Optional[VCSDescription] = None base_version: Optional[str] = None next_version: Optional[str] = None using_default_version: bool = False try: description = self.do_vcs() base_version = self.do_tag2version(description.tag) next_version = self.do_next_version(base_version, description.branch) if description.state == "exact": log.info("Tag is exact match; returning extracted version") version = base_version else: log.info("VCS state is %r; formatting version", description.state) version = self.do_format( description=description, base_version=base_version, next_version=next_version, ) log.info("Final version: %s", version) except Error as e: if ( isinstance(e, NotVCSError) and fallback and (is_sdist(self.project_dir) or self.default_version is None) ): log.info("Could not get VCS data from %s: %s", self.project_dir, str(e)) log.info("Falling back to reading from PKG-INFO") return FallbackReport( version=get_version_from_pkg_info(self.project_dir) ) if self.default_version is not None: log.error("%s: %s", type(e).__name__, str(e)) log.info("Falling back to default-version") version = self.default_version using_default_version = True else: raise except Exception: # pragma: no cover if self.default_version is not None: log.exception("An unexpected error occurred:") log.info("Falling back to default-version") version = self.default_version using_default_version = True else: raise warn_bad_version(version, "Final version") template_fields = self.do_template_fields( version=version, description=description, base_version=base_version, next_version=next_version, ) if write: self.do_write(template_fields) return Report( version=version, description=description, base_version=base_version, next_version=next_version, template_fields=template_fields, using_default_version=using_default_version, )
(self, write: bool = False, fallback: bool = True) -> versioningit.core.Report | versioningit.core.FallbackReport
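A minimal usage sketch for the record above, assuming an installed versioningit and a configured project in the current directory; `from_project_dir` appears in the `get_next_version` record below, and both classes live in `versioningit.core` per the signatures:

    from versioningit.core import Versioningit, FallbackReport

    vgit = Versioningit.from_project_dir(".")  # reads the project's versioningit config
    report = vgit.run(write=False, fallback=True)
    if isinstance(report, FallbackReport):
        print("version from PKG-INFO:", report.version)  # unpacked sdist
    else:
        print("final version:", report.version)
        print("template fields:", report.template_fields)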
8,174
versioningit.get_cmdclasses
get_cmdclasses
.. versionadded:: 1.1.0 Return a `dict` of custom setuptools `Command` classes, suitable for passing to the ``cmdclass`` argument of `setuptools.setup()`, that run the ``onbuild`` step for the project when building an sdist or wheel. Specifically, the `dict` contains a subclass of `setuptools.command.sdist.sdist` at the ``"sdist"`` key and a subclass of `setuptools.command.build_py.build_py` at the ``"build_py"`` key. A `dict` of alternative base classes can optionally be supplied; if the `dict` contains an ``"sdist"`` entry, that entry will be used as the base class for the customized ``sdist`` command, and likewise for ``"build_py"``. All other classes in the input `dict` are passed through unchanged.
def get_cmdclasses( bases: Optional[dict[str, type[Command]]] = None ) -> dict[str, type[Command]]: """ .. versionadded:: 1.1.0 Return a `dict` of custom setuptools `Command` classes, suitable for passing to the ``cmdclass`` argument of `setuptools.setup()`, that run the ``onbuild`` step for the project when building an sdist or wheel. Specifically, the `dict` contains a subclass of `setuptools.command.sdist.sdist` at the ``"sdist"`` key and a subclass of `setuptools.command.build_py.build_py` at the ``"build_py"`` key. A `dict` of alternative base classes can optionally be supplied; if the `dict` contains an ``"sdist"`` entry, that entry will be used as the base class for the customized ``sdist`` command, and likewise for ``"build_py"``. All other classes in the input `dict` are passed through unchanged. """ # Import setuptools here so there isn't a slowdown from importing it # unconditionally whenever versioningit is imported from setuptools.command.build_py import build_py from setuptools.command.sdist import sdist cmds = {} if bases is None else bases.copy() sdist_base = cmds.get("sdist", sdist) class VersioningitSdist(sdist_base): # type: ignore[valid-type,misc] def make_release_tree(self, base_dir: str, files: Any) -> None: super().make_release_tree(base_dir, files) init_logging() template_fields = get_template_fields_from_distribution(self.distribution) if template_fields is not None: PROJECT_ROOT = Path().resolve() log.debug("Running onbuild step; cwd=%s", PROJECT_ROOT) run_onbuild( project_dir=PROJECT_ROOT, build_dir=base_dir, is_source=True, template_fields=template_fields, ) else: log.debug( "Appear to be building from an sdist; not running onbuild step" ) cmds["sdist"] = VersioningitSdist build_py_base = cmds.get("build_py", build_py) class VersioningitBuildPy(build_py_base): # type: ignore[valid-type,misc] editable_mode: bool = False def run(self) -> None: super().run() init_logging() template_fields = get_template_fields_from_distribution(self.distribution) if not self.editable_mode and template_fields is not None: PROJECT_ROOT = Path().resolve() log.debug("Running onbuild step; cwd=%s", PROJECT_ROOT) run_onbuild( project_dir=PROJECT_ROOT, build_dir=self.build_lib, is_source=False, template_fields=template_fields, ) else: log.debug( "Appear to be building from an sdist; not running onbuild step" ) cmds["build_py"] = VersioningitBuildPy return cmds
(bases: 'Optional[dict[str, type[Command]]]' = None) -> 'dict[str, type[Command]]'
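As the docstring says, the returned `dict` is meant for `setuptools.setup()`; a short sketch of that wiring in a hypothetical setup.py, with all other project metadata assumed to live in pyproject.toml:

    from setuptools import setup
    from versioningit import get_cmdclasses

    # Run versioningit's onbuild step when building sdists and wheels
    setup(cmdclass=get_cmdclasses())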
8,175
versioningit.core
get_next_version
.. versionadded:: 0.3.0 Determine the next version after the current VCS-tagged version for ``project_dir``. If ``config`` is `None`, then ``project_dir`` must contain a :file:`pyproject.toml` file containing either a ``[tool.versioningit]`` table or a ``[tool.hatch.version]`` table with the ``source`` key set to ``"versioningit"``; if it does not, a `NotVersioningitError` is raised. If ``config`` is not `None`, then any :file:`pyproject.toml` file in ``project_dir`` will be ignored, and the configuration will be taken from ``config`` instead. See ":ref:`config_dict`". :raises NotVCSError: if ``project_dir`` is not under version control :raises NotVersioningitError: - if ``config`` is `None` and ``project_dir`` does not contain a :file:`pyproject.toml` file - if ``config`` is `None` and the :file:`pyproject.toml` file does not contain a versioningit configuration table :raises ConfigError: if any of the values in ``config`` are not of the correct type :raises MethodError: if a method returns a value of the wrong type
def get_next_version( project_dir: str | Path = os.curdir, config: Optional[dict] = None ) -> str: """ .. versionadded:: 0.3.0 Determine the next version after the current VCS-tagged version for ``project_dir``. If ``config`` is `None`, then ``project_dir`` must contain a :file:`pyproject.toml` file containing either a ``[tool.versioningit]`` table or a ``[tool.hatch.version]`` table with the ``source`` key set to ``"versioningit"``; if it does not, a `NotVersioningitError` is raised. If ``config`` is not `None`, then any :file:`pyproject.toml` file in ``project_dir`` will be ignored, and the configuration will be taken from ``config`` instead. See ":ref:`config_dict`". :raises NotVCSError: if ``project_dir`` is not under version control :raises NotVersioningitError: - if ``config`` is `None` and ``project_dir`` does not contain a :file:`pyproject.toml` file - if ``config`` is `None` and the :file:`pyproject.toml` file does not contain a versioningit configuration table :raises ConfigError: if any of the values in ``config`` are not of the correct type :raises MethodError: if a method returns a value of the wrong type """ vgit = Versioningit.from_project_dir(project_dir, config) description = vgit.do_vcs() tag_version = vgit.do_tag2version(description.tag) next_version = vgit.do_next_version(tag_version, description.branch) return next_version
(project_dir: str | pathlib.Path = '.', config: Optional[dict] = None) -> str
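A quick calling sketch, assuming the working directory is a tagged, VCS-tracked project with a versioningit configuration:

    from versioningit import get_next_version

    # The printed value depends on the configured next-version method,
    # e.g. a minor bump of the version extracted from the latest tag
    print(get_next_version())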
8,176
versioningit.core
get_template_fields_from_distribution
Extract the template fields (calculated by the "template-fields" step) that were stashed on the `setuptools.Distribution` by ``versioningit``'s setuptools hook, for passing to the "onbuild" step. If setuptools is building from an sdist instead of a repository, no template fields will have been calculated, and `None` will be returned, indicating that the "onbuild" step should not be run.
def get_template_fields_from_distribution( dist: Distribution, ) -> Optional[dict[str, Any]]: """ Extract the template fields (calculated by the "template-fields" step) that were stashed on the `setuptools.Distribution` by ``versioningit``'s setuptools hook, for passing to the "onbuild" step. If setuptools is building from an sdist instead of a repository, no template fields will have been calculated, and `None` will be returned, indicating that the "onbuild" step should not be run. """ return getattr(dist, "_versioningit_template_fields", None)
(dist: 'Distribution') -> 'Optional[dict[str, Any]]'
8,177
versioningit.core
get_version
Determine the version for the project at ``project_dir``. If ``config`` is `None`, then ``project_dir`` must contain a :file:`pyproject.toml` file containing either a ``[tool.versioningit]`` table or a ``[tool.hatch.version]`` table with the ``source`` key set to ``"versioningit"``; if it does not, a `NotVersioningitError` is raised. If ``config`` is not `None`, then any :file:`pyproject.toml` file in ``project_dir`` will be ignored, and the configuration will be taken from ``config`` instead. See ":ref:`config_dict`". If ``write`` is true, then the file specified in the ``write`` subtable of the versioningit configuration, if any, will be updated. If ``fallback`` is true, then if ``project_dir`` is not under version control (or if the VCS executable is not installed), ``versioningit`` will assume that the directory is an unpacked sdist and will read the version from the :file:`PKG-INFO` file. :raises NotVCSError: if ``fallback`` is false and ``project_dir`` is not under version control :raises NotSdistError: if ``fallback`` is true, ``project_dir`` is not under version control, and there is no :file:`PKG-INFO` file in ``project_dir`` :raises NotVersioningitError: - if ``config`` is `None` and ``project_dir`` does not contain a :file:`pyproject.toml` file - if ``config`` is `None` and the :file:`pyproject.toml` file does not contain a versioningit configuration table :raises ConfigError: if any of the values in ``config`` are not of the correct type :raises MethodError: if a method returns a value of the wrong type
def get_version( project_dir: str | Path = os.curdir, config: Optional[dict] = None, write: bool = False, fallback: bool = True, ) -> str: """ Determine the version for the project at ``project_dir``. If ``config`` is `None`, then ``project_dir`` must contain a :file:`pyproject.toml` file containing either a ``[tool.versioningit]`` table or a ``[tool.hatch.version]`` table with the ``source`` key set to ``"versioningit"``; if it does not, a `NotVersioningitError` is raised. If ``config`` is not `None`, then any :file:`pyproject.toml` file in ``project_dir`` will be ignored, and the configuration will be taken from ``config`` instead. See ":ref:`config_dict`". If ``write`` is true, then the file specified in the ``write`` subtable of the versioningit configuration, if any, will be updated. If ``fallback`` is true, then if ``project_dir`` is not under version control (or if the VCS executable is not installed), ``versioningit`` will assume that the directory is an unpacked sdist and will read the version from the :file:`PKG-INFO` file. :raises NotVCSError: if ``fallback`` is false and ``project_dir`` is not under version control :raises NotSdistError: if ``fallback`` is true, ``project_dir`` is not under version control, and there is no :file:`PKG-INFO` file in ``project_dir`` :raises NotVersioningitError: - if ``config`` is `None` and ``project_dir`` does not contain a :file:`pyproject.toml` file - if ``config`` is `None` and the :file:`pyproject.toml` file does not contain a versioningit configuration table :raises ConfigError: if any of the values in ``config`` are not of the correct type :raises MethodError: if a method returns a value of the wrong type """ vgit = Versioningit.from_project_dir(project_dir, config) return vgit.get_version(write=write, fallback=fallback)
(project_dir: str | pathlib.Path = '.', config: Optional[dict] = None, write: bool = False, fallback: bool = True) -> str
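A sketch of the ``config`` path described above, bypassing pyproject.toml; the table layout follows ":ref:`config_dict`", and the chosen method names are assumptions about the configured steps:

    from versioningit import get_version

    config = {"vcs": {"method": "git"}, "next-version": {"method": "minor"}}
    print(get_version(".", config=config, write=False, fallback=True))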
8,178
versioningit.core
get_version_from_pkg_info
Return the :mailheader:`Version` field from the :file:`PKG-INFO` file in ``project_dir`` :raises NotSdistError: if there is no :file:`PKG-INFO` file :raises ValueError: if the :file:`PKG-INFO` file does not contain a :mailheader:`Version` field
def get_version_from_pkg_info(project_dir: str | Path) -> str: """ Return the :mailheader:`Version` field from the :file:`PKG-INFO` file in ``project_dir`` :raises NotSdistError: if there is no :file:`PKG-INFO` file :raises ValueError: if the :file:`PKG-INFO` file does not contain a :mailheader:`Version` field """ try: return parse_version_from_metadata( Path(project_dir, "PKG-INFO").read_text(encoding="utf-8") ) except FileNotFoundError: raise NotSdistError(f"{project_dir} does not contain a PKG-INFO file")
(project_dir: str | pathlib.Path) -> str
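For illustration, reading the version out of an unpacked sdist (the path is a placeholder):

    from versioningit.core import get_version_from_pkg_info

    print(get_version_from_pkg_info("path/to/unpacked-sdist"))  # Version header of PKG-INFO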
8,182
versioningit.core
run_onbuild
.. versionadded:: 1.1.0 Run the ``onbuild`` step for the given setuptools project. This function is intended to be used by custom setuptools command classes that are used in place of ``versioningit``'s command classes but still need to be able to run the ``onbuild`` step. The ``template_fields`` value can be obtained by passing a command class's ``distribution`` attribute to `get_template_fields_from_distribution()`; if this returns `None`, then we are building from an sdist, and `run_onbuild()` should not be called. If ``config`` is `None`, then ``project_dir`` must contain a :file:`pyproject.toml` file containing a ``[tool.versioningit]`` table; if it does not, a `NotVersioningitError` is raised. If ``config`` is not `None`, then any :file:`pyproject.toml` file in ``project_dir`` will be ignored, and the configuration will be taken from ``config`` instead; see ":ref:`config_dict`". .. versionchanged:: 2.0.0 ``version`` argument replaced with ``template_fields`` :param build_dir: The directory containing the in-progress build :param is_source: Set to `True` if building an sdist or other artifact that preserves source paths, `False` if building a wheel or other artifact that uses installation paths :param template_fields: A `dict` of fields to be used when templating :raises NotVersioningitError: - if ``config`` is `None` and ``project_dir`` does not contain a :file:`pyproject.toml` file - if the :file:`pyproject.toml` file does not contain a ``[tool.versioningit]`` table :raises ConfigError: if any of the values in ``config`` are not of the correct type :raises MethodError: if a method returns a value of the wrong type
def run_onbuild( *, build_dir: str | Path, is_source: bool, template_fields: dict[str, Any], project_dir: str | Path = os.curdir, config: Optional[dict] = None, ) -> None: """ .. versionadded:: 1.1.0 Run the ``onbuild`` step for the given setuptools project. This function is intended to be used by custom setuptools command classes that are used in place of ``versioningit``'s command classes but still need to be able to run the ``onbuild`` step. The ``template_fields`` value can be obtained by passing a command class's ``distribution`` attribute to `get_template_fields_from_distribution()`; if this returns `None`, then we are building from an sdist, and `run_onbuild()` should not be called. If ``config`` is `None`, then ``project_dir`` must contain a :file:`pyproject.toml` file containing a ``[tool.versioningit]`` table; if it does not, a `NotVersioningitError` is raised. If ``config`` is not `None`, then any :file:`pyproject.toml` file in ``project_dir`` will be ignored, and the configuration will be taken from ``config`` instead; see ":ref:`config_dict`". .. versionchanged:: 2.0.0 ``version`` argument replaced with ``template_fields`` :param build_dir: The directory containing the in-progress build :param is_source: Set to `True` if building an sdist or other artifact that preserves source paths, `False` if building a wheel or other artifact that uses installation paths :param template_fields: A `dict` of fields to be used when templating :raises NotVersioningitError: - if ``config`` is `None` and ``project_dir`` does not contain a :file:`pyproject.toml` file - if the :file:`pyproject.toml` file does not contain a ``[tool.versioningit]`` table :raises ConfigError: if any of the values in ``config`` are not of the correct type :raises MethodError: if a method returns a value of the wrong type """ vgit = Versioningit.from_project_dir(project_dir, config) vgit.do_onbuild( file_provider=SetuptoolsFileProvider(build_dir=Path(build_dir)), is_source=is_source, template_fields=template_fields, )
(*, build_dir: str | pathlib.Path, is_source: bool, template_fields: dict[str, typing.Any], project_dir: str | pathlib.Path = '.', config: Optional[dict] = None) -> NoneType
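The docstring's intended workflow, sketched as a hypothetical custom ``build_py`` command (the class name is illustrative; the two helpers are the ones documented in this section):

    from setuptools.command.build_py import build_py
    from versioningit import get_template_fields_from_distribution, run_onbuild

    class OnbuildBuildPy(build_py):
        def run(self):
            super().run()
            fields = get_template_fields_from_distribution(self.distribution)
            if fields is not None:  # None means we are building from an sdist
                run_onbuild(
                    build_dir=self.build_lib,
                    is_source=False,
                    template_fields=fields,
                )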
8,184
sinlib.tokenizer
Tokenizer
null
class Tokenizer: def __init__(self): self.unknown_token = "<unk>" self.tokenized_chars = [] self.unique_chars = [] def __encode(self, text): processed_text = self.__process_text(text) encoded_text = [self.vocab_map.get(char, self.unknown_token_id) for char in processed_text] return encoded_text def __call__(self, text): return self.__encode(text) def decode(self, ids): return "".join([self.token_id_to_token_map.get(token,self.unknown_token) for token in ids]) def train(self, text_list): self.__train_character_level_tokenizer(text_list) def __len__(self): return len(self.vocab_map) @staticmethod def __process_text(t): return process_text(t) def __train_character_level_tokenizer(self, text_list): with concurrent.futures.ThreadPoolExecutor() as executor: results = list(executor.map(self.__process_text, text_list)) self.tokenized_chars = [char for sublist in results for char in sublist] self.unique_chars = set(self.tokenized_chars) self.vocab_map = dict(zip(self.unique_chars,range(len(self.unique_chars)))) self.vocab_map[self.unknown_token] = len(self.vocab_map) self.unknown_token_id = self.vocab_map[self.unknown_token] self.token_id_to_token_map = {value:key for key,value in self.vocab_map.items()}
()
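A hypothetical round trip with the tokenizer above (the Sinhala sample strings are arbitrary):

    from sinlib.tokenizer import Tokenizer

    tok = Tokenizer()
    tok.train(["සිංහල පෙළ", "තවත් පෙළක්"])  # builds vocab_map over the unique characters
    ids = tok("සිංහල")                      # __call__ delegates to the private __encode
    print(len(tok))                          # vocabulary size, including <unk>
    print(tok.decode(ids))                   # round-trips characters seen in training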
8,185
sinlib.tokenizer
__encode
null
def __encode(self, text): processed_text = self.__process_text(text) encoded_text = [self.vocab_map.get(char, self.unknown_token_id) for char in processed_text] return encoded_text
(self, text)
8,186
sinlib.tokenizer
__process_text
null
@staticmethod def __process_text(t): return process_text(t)
(t)
8,187
sinlib.tokenizer
__train_character_level_tokenizer
null
def __train_character_level_tokenizer(self, text_list): with concurrent.futures.ThreadPoolExecutor() as executor: results = list(executor.map(self.__process_text, text_list)) self.tokenized_chars = [char for sublist in results for char in sublist] self.unique_chars = set(self.tokenized_chars) self.vocab_map = dict(zip(self.unique_chars,range(len(self.unique_chars)))) self.vocab_map[self.unknown_token] = len(self.vocab_map) self.unknown_token_id = self.vocab_map[self.unknown_token] self.token_id_to_token_map = {value:key for key,value in self.vocab_map.items()}
(self, text_list)
8,188
sinlib.tokenizer
__call__
null
def __call__(self, text): return self.__encode(text)
(self, text)
8,189
sinlib.tokenizer
__init__
null
def __init__(self): self.unknown_token = "<unk>" self.tokenized_chars = [] self.unique_chars = []
(self)
8,190
sinlib.tokenizer
__len__
null
def __len__(self): return len(self.vocab_map)
(self)
8,191
sinlib.tokenizer
decode
null
def decode(self, ids): return "".join([self.token_id_to_token_map.get(token,self.unknown_token) for token in ids])
(self, ids)
8,192
sinlib.tokenizer
train
null
def train(self, text_list): self.__train_character_level_tokenizer(text_list)
(self, text_list)
8,196
feapder.core.spiders.air_spider
AirSpider
null
class AirSpider(BaseParser, TailThread): __custom_setting__ = {} def __init__(self, thread_count=None): """ A spider backed by an in-memory queue; distributed crawling is not supported :param thread_count: number of threads """ super(AirSpider, self).__init__() for key, value in self.__class__.__custom_setting__.items(): setattr(setting, key, value) if thread_count: setattr(setting, "SPIDER_THREAD_COUNT", thread_count) self._thread_count = setting.SPIDER_THREAD_COUNT self._memory_db = MemoryDB() self._parser_controls = [] self._item_buffer = ItemBuffer(redis_key=self.name) self._request_buffer = AirSpiderRequestBuffer( db=self._memory_db, dedup_name=self.name ) self._stop_spider = False metrics.init(**setting.METRICS_OTHER_ARGS) def distribute_task(self): for request in self.start_requests(): if not isinstance(request, Request): raise ValueError("Only yielding Request objects is supported") request.parser_name = request.parser_name or self.name self._request_buffer.put_request(request, ignore_max_size=False) def all_thread_is_done(self): for i in range(3): # check several times to reduce flukes: the stages are not concurrent, so a state that is False at one moment may well be True by the next check; a single check can easily hit such a transient # check parser_control state for parser_control in self._parser_controls: if not parser_control.is_not_task(): return False # check task queue state if not self._memory_db.empty(): return False # check item_buffer state if ( self._item_buffer.get_items_count() > 0 or self._item_buffer.is_adding_to_db() ): return False tools.delay_time(1) return True def run(self): self.start_callback() for i in range(self._thread_count): parser_control = AirSpiderParserControl( memory_db=self._memory_db, request_buffer=self._request_buffer, item_buffer=self._item_buffer, ) parser_control.add_parser(self) parser_control.start() self._parser_controls.append(parser_control) self._item_buffer.start() self.distribute_task() while True: try: if self._stop_spider or self.all_thread_is_done(): # stop parser_controls for parser_control in self._parser_controls: parser_control.stop() # close item_buffer self._item_buffer.stop() # close webdriver Request.render_downloader and Request.render_downloader.close_all() if self._stop_spider: log.info("Spider was terminated") else: log.info("No tasks left; spider finished") break except Exception as e: log.exception(e) tools.delay_time(1) # check spider state once per second self.end_callback() # so the thread can be started again self._started.clear() # shut down metrics metrics.close() def join(self, timeout=None): """ Override Thread.join """ if not self._started.is_set(): return super().join() def stop_spider(self): self._stop_spider = True
(thread_count=None)
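A minimal subclass sketch in feapder's documented style (URL and names are placeholders):

    import feapder

    class MySpider(feapder.AirSpider):
        def start_requests(self):
            yield feapder.Request("https://example.com")

        def parse(self, request, response):
            print(response.text[:80])

    if __name__ == "__main__":
        MySpider(thread_count=4).start()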
8,197
feapder.core.spiders.air_spider
__init__
A spider backed by an in-memory queue; distributed crawling is not supported :param thread_count: number of threads
def __init__(self, thread_count=None): """ A spider backed by an in-memory queue; distributed crawling is not supported :param thread_count: number of threads """ super(AirSpider, self).__init__() for key, value in self.__class__.__custom_setting__.items(): setattr(setting, key, value) if thread_count: setattr(setting, "SPIDER_THREAD_COUNT", thread_count) self._thread_count = setting.SPIDER_THREAD_COUNT self._memory_db = MemoryDB() self._parser_controls = [] self._item_buffer = ItemBuffer(redis_key=self.name) self._request_buffer = AirSpiderRequestBuffer( db=self._memory_db, dedup_name=self.name ) self._stop_spider = False metrics.init(**setting.METRICS_OTHER_ARGS)
(self, thread_count=None)
8,208
feapder.core.spiders.air_spider
all_thread_is_done
null
def all_thread_is_done(self): for i in range(3): # check several times to reduce flukes: the stages are not concurrent, so a state that is False at one moment may well be True by the next check; a single check can easily hit such a transient # check parser_control state for parser_control in self._parser_controls: if not parser_control.is_not_task(): return False # check task queue state if not self._memory_db.empty(): return False # check item_buffer state if ( self._item_buffer.get_items_count() > 0 or self._item_buffer.is_adding_to_db() ): return False tools.delay_time(1) return True
(self)
8,209
feapder.core.base_parser
close
null
def close(self): pass
(self)
8,210
feapder.core.spiders.air_spider
distribute_task
null
def distribute_task(self): for request in self.start_requests(): if not isinstance(request, Request): raise ValueError("Only yielding Request objects is supported") request.parser_name = request.parser_name or self.name self._request_buffer.put_request(request, ignore_max_size=False)
(self)
8,211
feapder.core.base_parser
download_midware
@summary: Download middleware; may modify some request parameters, or perform a custom download and then return request, response --------- @param request: --------- @result: return request / request, response
def download_midware(self, request: Request): """ @summary: Download middleware; may modify some request parameters, or perform a custom download and then return request, response --------- @param request: --------- @result: return request / request, response """ pass
(self, request: feapder.network.request.Request)
8,212
feapder.core.base_parser
end_callback
@summary: Callback invoked when the program ends --------- --------- @result: None
def end_callback(self): """ @summary: Callback invoked when the program ends --------- --------- @result: None """ pass
(self)
8,213
feapder.core.base_parser
exception_request
@summary: Request that raised an exception while being requested or while being parsed in the parser --------- @param request: @param response: @param e: the exception --------- @result: request / callback / None (the return value must be iterable)
def exception_request(self, request: Request, response: Response, e: Exception): """ @summary: Request that raised an exception while being requested or while being parsed in the parser --------- @param request: @param response: @param e: the exception --------- @result: request / callback / None (the return value must be iterable) """ pass
(self, request: feapder.network.request.Request, response: feapder.network.response.Response, e: Exception)
8,214
feapder.core.base_parser
failed_request
@summary: Request that has exceeded the maximum retry count; a modified request may be returned. If no request is returned, the incoming request is put into the Redis failed table as-is; otherwise the modified request is put into the failed table --------- @param request: @param response: @param e: the exception --------- @result: request / item / callback / None (the return value must be iterable)
def failed_request(self, request: Request, response: Response, e: Exception): """ @summary: Request that has exceeded the maximum retry count; a modified request may be returned. If no request is returned, the incoming request is put into the Redis failed table as-is; otherwise the modified request is put into the failed table --------- @param request: @param response: @param e: the exception --------- @result: request / item / callback / None (the return value must be iterable) """ pass
(self, request: feapder.network.request.Request, response: feapder.network.response.Response, e: Exception)
8,218
feapder.core.spiders.air_spider
join
Override Thread.join
def join(self, timeout=None): """ Override Thread.join """ if not self._started.is_set(): return super().join()
(self, timeout=None)
8,219
feapder.core.base_parser
parse
@summary: Default parse function --------- @param request: @param response: --------- @result:
def parse(self, request: Request, response: Response): """ @summary: Default parse function --------- @param request: @param response: --------- @result: """ pass
(self, request: feapder.network.request.Request, response: feapder.network.response.Response)
8,220
feapder.core.spiders.air_spider
run
null
def run(self): self.start_callback() for i in range(self._thread_count): parser_control = AirSpiderParserControl( memory_db=self._memory_db, request_buffer=self._request_buffer, item_buffer=self._item_buffer, ) parser_control.add_parser(self) parser_control.start() self._parser_controls.append(parser_control) self._item_buffer.start() self.distribute_task() while True: try: if self._stop_spider or self.all_thread_is_done(): # stop parser_controls for parser_control in self._parser_controls: parser_control.stop() # close item_buffer self._item_buffer.stop() # close webdriver Request.render_downloader and Request.render_downloader.close_all() if self._stop_spider: log.info("Spider was terminated") else: log.info("No tasks left; spider finished") break except Exception as e: log.exception(e) tools.delay_time(1) # check spider state once per second self.end_callback() # so the thread can be started again self._started.clear() # shut down metrics metrics.close()
(self)
8,223
feapder.utils.tail_thread
start
Works around the Python 3.12 error "RuntimeError: cannot join thread before it is started"
def start(self) -> None: """ Works around the Python 3.12 error "RuntimeError: cannot join thread before it is started" """ super().start() if sys.version_info.minor >= 12 and sys.version_info.major >= 3: for thread in threading.enumerate(): if ( thread.daemon or thread is threading.current_thread() or not thread.is_alive() ): continue thread.join()
(self) -> NoneType
8,224
feapder.core.base_parser
start_callback
@summary: Callback invoked when the program starts --------- --------- @result: None
def start_callback(self): """ @summary: Callback invoked when the program starts --------- --------- @result: None """ pass
(self)
8,225
feapder.core.base_parser
start_requests
@summary: Add the initial URLs --------- --------- @result: yield Request()
def start_requests(self): """ @summary: Add the initial URLs --------- --------- @result: yield Request() """ pass
(self)
8,226
feapder.core.spiders.air_spider
stop_spider
null
def stop_spider(self): self._stop_spider = True
(self)
8,227
feapder.core.base_parser
validate
@summary: Validation function; can be used to check whether the response is correct If the function raises an exception, the request is retried If it returns True or None, the parse function is entered If it returns False, the current request is discarded Different validation logic can be written for different callbacks via request.callback_name --------- @param request: @param response: --------- @result: True / None / False
def validate(self, request: Request, response: Response): """ @summary: Validation function; can be used to check whether the response is correct If the function raises an exception, the request is retried If it returns True or None, the parse function is entered If it returns False, the current request is discarded Different validation logic can be written for different callbacks via request.callback_name --------- @param request: @param response: --------- @result: True / None / False """ pass
(self, request: feapder.network.request.Request, response: feapder.network.response.Response)
8,228
feapder.utils.custom_argparse
ArgumentParser
null
class ArgumentParser(argparse.ArgumentParser): def __init__(self, *args, **kwargs): self.functions = {} super(ArgumentParser, self).__init__(*args, **kwargs) def add_argument(self, *args, **kwargs): function = kwargs.pop("function") if "function" in kwargs else None key = self._get_optional_kwargs(*args, **kwargs).get("dest") self.functions[key] = function return super(ArgumentParser, self).add_argument(*args, **kwargs) def start(self, args=None, namespace=None): args = self.parse_args(args=args, namespace=namespace) for key, value in vars(args).items(): # vars() returns a dict of the object's attributes and their values if value not in (None, False): if callable(self.functions[key]): if value != True: if isinstance(value, list) and len(value) == 1: value = value[0] self.functions[key](value) else: self.functions[key]() def run(self, args, values=None): if args in self.functions: if values: self.functions[args](values) else: self.functions[args]() else: raise Exception(f"No such method: {args}")
(*args, **kwargs)
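Given the class above, a small sketch of its extra ``function`` hook (the flag and handler are illustrative):

    from feapder.utils.custom_argparse import ArgumentParser

    parser = ArgumentParser(description="demo")
    parser.add_argument("--crawl", action="store_true", function=lambda: print("crawling"))
    parser.start()  # for each flag that was passed, dispatches to its bound function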
8,229
feapder.utils.custom_argparse
__init__
null
def __init__(self, *args, **kwargs): self.functions = {} super(ArgumentParser, self).__init__(*args, **kwargs)
(self, *args, **kwargs)
8,230
argparse
__repr__
null
def __repr__(self): type_name = type(self).__name__ arg_strings = [] star_args = {} for arg in self._get_args(): arg_strings.append(repr(arg)) for name, value in self._get_kwargs(): if name.isidentifier(): arg_strings.append('%s=%r' % (name, value)) else: star_args[name] = value if star_args: arg_strings.append('**%s' % repr(star_args)) return '%s(%s)' % (type_name, ', '.join(arg_strings))
(self)
8,231
argparse
_add_action
null
def _add_action(self, action): if action.option_strings: self._optionals._add_action(action) else: self._positionals._add_action(action) return action
(self, action)
8,232
argparse
_add_container_actions
null
def _add_container_actions(self, container): # collect groups by titles title_group_map = {} for group in self._action_groups: if group.title in title_group_map: msg = _('cannot merge actions - two groups are named %r') raise ValueError(msg % (group.title)) title_group_map[group.title] = group # map each action to its group group_map = {} for group in container._action_groups: # if a group with the title exists, use that, otherwise # create a new group matching the container's group if group.title not in title_group_map: title_group_map[group.title] = self.add_argument_group( title=group.title, description=group.description, conflict_handler=group.conflict_handler) # map the actions to their new group for action in group._group_actions: group_map[action] = title_group_map[group.title] # add container's mutually exclusive groups # NOTE: if add_mutually_exclusive_group ever gains title= and # description= then this code will need to be expanded as above for group in container._mutually_exclusive_groups: mutex_group = self.add_mutually_exclusive_group( required=group.required) # map the actions to their new mutex group for action in group._group_actions: group_map[action] = mutex_group # add all actions to this container or their group for action in container._actions: group_map.get(action, self)._add_action(action)
(self, container)
8,233
argparse
_check_conflict
null
def _check_conflict(self, action): # find all options that conflict with this option confl_optionals = [] for option_string in action.option_strings: if option_string in self._option_string_actions: confl_optional = self._option_string_actions[option_string] confl_optionals.append((option_string, confl_optional)) # resolve any conflicts if confl_optionals: conflict_handler = self._get_handler() conflict_handler(action, confl_optionals)
(self, action)
8,234
argparse
_check_value
null
def _check_value(self, action, value): # converted value must be one of the choices (if specified) if action.choices is not None and value not in action.choices: args = {'value': value, 'choices': ', '.join(map(repr, action.choices))} msg = _('invalid choice: %(value)r (choose from %(choices)s)') raise ArgumentError(action, msg % args)
(self, action, value)
8,235
argparse
_get_args
null
def _get_args(self): return []
(self)
8,236
argparse
_get_formatter
null
def _get_formatter(self): return self.formatter_class(prog=self.prog)
(self)
8,237
argparse
_get_handler
null
def _get_handler(self): # determine function from conflict handler string handler_func_name = '_handle_conflict_%s' % self.conflict_handler try: return getattr(self, handler_func_name) except AttributeError: msg = _('invalid conflict_resolution value: %r') raise ValueError(msg % self.conflict_handler)
(self)
8,238
argparse
_get_kwargs
null
def _get_kwargs(self): names = [ 'prog', 'usage', 'description', 'formatter_class', 'conflict_handler', 'add_help', ] return [(name, getattr(self, name)) for name in names]
(self)
8,239
argparse
_get_nargs_pattern
null
def _get_nargs_pattern(self, action): # in all examples below, we have to allow for '--' args # which are represented as '-' in the pattern nargs = action.nargs # the default (None) is assumed to be a single argument if nargs is None: nargs_pattern = '(-*A-*)' # allow zero or one arguments elif nargs == OPTIONAL: nargs_pattern = '(-*A?-*)' # allow zero or more arguments elif nargs == ZERO_OR_MORE: nargs_pattern = '(-*[A-]*)' # allow one or more arguments elif nargs == ONE_OR_MORE: nargs_pattern = '(-*A[A-]*)' # allow any number of options or arguments elif nargs == REMAINDER: nargs_pattern = '([-AO]*)' # allow one argument followed by any number of options or arguments elif nargs == PARSER: nargs_pattern = '(-*A[-AO]*)' # suppress action, like nargs=0 elif nargs == SUPPRESS: nargs_pattern = '(-*-*)' # all others should be integers else: nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) # if this is an optional action, -- is not allowed if action.option_strings: nargs_pattern = nargs_pattern.replace('-*', '') nargs_pattern = nargs_pattern.replace('-', '') # return the pattern return nargs_pattern
(self, action)
8,240
argparse
_get_option_tuples
null
def _get_option_tuples(self, option_string): result = [] # option strings starting with two prefix characters are only # split at the '=' chars = self.prefix_chars if option_string[0] in chars and option_string[1] in chars: if self.allow_abbrev: if '=' in option_string: option_prefix, explicit_arg = option_string.split('=', 1) else: option_prefix = option_string explicit_arg = None for option_string in self._option_string_actions: if option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # single character options can be concatenated with their arguments # but multiple character options always have to have their argument # separate elif option_string[0] in chars and option_string[1] not in chars: option_prefix = option_string explicit_arg = None short_option_prefix = option_string[:2] short_explicit_arg = option_string[2:] for option_string in self._option_string_actions: if option_string == short_option_prefix: action = self._option_string_actions[option_string] tup = action, option_string, short_explicit_arg result.append(tup) elif option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # shouldn't ever get here else: self.error(_('unexpected option string: %s') % option_string) # return the collected option tuples return result
(self, option_string)
8,241
argparse
_get_optional_actions
null
def _get_optional_actions(self): return [action for action in self._actions if action.option_strings]
(self)
8,242
argparse
_get_optional_kwargs
null
def _get_optional_kwargs(self, *args, **kwargs): # determine short and long option strings option_strings = [] long_option_strings = [] for option_string in args: # error on strings that don't start with an appropriate prefix if not option_string[0] in self.prefix_chars: args = {'option': option_string, 'prefix_chars': self.prefix_chars} msg = _('invalid option string %(option)r: ' 'must start with a character %(prefix_chars)r') raise ValueError(msg % args) # strings starting with two prefix characters are long options option_strings.append(option_string) if len(option_string) > 1 and option_string[1] in self.prefix_chars: long_option_strings.append(option_string) # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' dest = kwargs.pop('dest', None) if dest is None: if long_option_strings: dest_option_string = long_option_strings[0] else: dest_option_string = option_strings[0] dest = dest_option_string.lstrip(self.prefix_chars) if not dest: msg = _('dest= is required for options like %r') raise ValueError(msg % option_string) dest = dest.replace('-', '_') # return the updated keyword arguments return dict(kwargs, dest=dest, option_strings=option_strings)
(self, *args, **kwargs)
8,243
argparse
_get_positional_actions
null
def _get_positional_actions(self): return [action for action in self._actions if not action.option_strings]
(self)
8,244
argparse
_get_positional_kwargs
null
def _get_positional_kwargs(self, dest, **kwargs): # make sure required is not specified if 'required' in kwargs: msg = _("'required' is an invalid argument for positionals") raise TypeError(msg) # mark positional arguments as required if at least one is # always required if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: kwargs['required'] = True if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: kwargs['required'] = True # return the keyword arguments with no option strings return dict(kwargs, dest=dest, option_strings=[])
(self, dest, **kwargs)
8,245
argparse
_get_value
null
def _get_value(self, action, arg_string): type_func = self._registry_get('type', action.type, action.type) if not callable(type_func): msg = _('%r is not callable') raise ArgumentError(action, msg % type_func) # convert the value to the appropriate type try: result = type_func(arg_string) # ArgumentTypeErrors indicate errors except ArgumentTypeError: name = getattr(action.type, '__name__', repr(action.type)) msg = str(_sys.exc_info()[1]) raise ArgumentError(action, msg) # TypeErrors or ValueErrors also indicate errors except (TypeError, ValueError): name = getattr(action.type, '__name__', repr(action.type)) args = {'type': name, 'value': arg_string} msg = _('invalid %(type)s value: %(value)r') raise ArgumentError(action, msg % args) # return the converted value return result
(self, action, arg_string)
8,246
argparse
_get_values
null
def _get_values(self, action, arg_strings): # for everything but PARSER, REMAINDER args, strip out first '--' if action.nargs not in [PARSER, REMAINDER]: try: arg_strings.remove('--') except ValueError: pass # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: if action.option_strings: value = action.const else: value = action.default if isinstance(value, str): value = self._get_value(action, value) self._check_value(action, value) # when nargs='*' on a positional, if there were no command-line # args, use the default if it is anything other than None elif (not arg_strings and action.nargs == ZERO_OR_MORE and not action.option_strings): if action.default is not None: value = action.default else: value = arg_strings self._check_value(action, value) # single argument or optional argument produces a single value elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: arg_string, = arg_strings value = self._get_value(action, arg_string) self._check_value(action, value) # REMAINDER arguments convert all values, checking none elif action.nargs == REMAINDER: value = [self._get_value(action, v) for v in arg_strings] # PARSER arguments convert all values, but check only the first elif action.nargs == PARSER: value = [self._get_value(action, v) for v in arg_strings] self._check_value(action, value[0]) # SUPPRESS argument does not put anything in the namespace elif action.nargs == SUPPRESS: value = SUPPRESS # all other types of nargs produce a list else: value = [self._get_value(action, v) for v in arg_strings] for v in value: self._check_value(action, v) # return the converted value return value
(self, action, arg_strings)
8,247
argparse
_handle_conflict_error
null
def _handle_conflict_error(self, action, conflicting_actions): message = ngettext('conflicting option string: %s', 'conflicting option strings: %s', len(conflicting_actions)) conflict_string = ', '.join([option_string for option_string, action in conflicting_actions]) raise ArgumentError(action, message % conflict_string)
(self, action, conflicting_actions)
8,248
argparse
_handle_conflict_resolve
null
def _handle_conflict_resolve(self, action, conflicting_actions): # remove all conflicting options for option_string, action in conflicting_actions: # remove the conflicting option action.option_strings.remove(option_string) self._option_string_actions.pop(option_string, None) # if the option now has no option string, remove it from the # container holding it if not action.option_strings: action.container._remove_action(action)
(self, action, conflicting_actions)
8,249
argparse
_match_argument
null
def _match_argument(self, action, arg_strings_pattern): # match the pattern for this action to the arg strings nargs_pattern = self._get_nargs_pattern(action) match = _re.match(nargs_pattern, arg_strings_pattern) # raise an exception if we weren't able to find a match if match is None: nargs_errors = { None: _('expected one argument'), OPTIONAL: _('expected at most one argument'), ONE_OR_MORE: _('expected at least one argument'), } msg = nargs_errors.get(action.nargs) if msg is None: msg = ngettext('expected %s argument', 'expected %s arguments', action.nargs) % action.nargs raise ArgumentError(action, msg) # return the number of arguments matched return len(match.group(1))
(self, action, arg_strings_pattern)
8,250
argparse
_match_arguments_partial
null
def _match_arguments_partial(self, actions, arg_strings_pattern): # progressively shorten the actions list by slicing off the # final actions until we find a match result = [] for i in range(len(actions), 0, -1): actions_slice = actions[:i] pattern = ''.join([self._get_nargs_pattern(action) for action in actions_slice]) match = _re.match(pattern, arg_strings_pattern) if match is not None: result.extend([len(string) for string in match.groups()]) break # return the list of arg string counts return result
(self, actions, arg_strings_pattern)
8,251
argparse
_parse_known_args
null
def _parse_known_args(self, arg_strings, namespace): # replace arg strings that are file references if self.fromfile_prefix_chars is not None: arg_strings = self._read_args_from_files(arg_strings) # map all mutually exclusive arguments to the other arguments # they can't occur with action_conflicts = {} for mutex_group in self._mutually_exclusive_groups: group_actions = mutex_group._group_actions for i, mutex_action in enumerate(mutex_group._group_actions): conflicts = action_conflicts.setdefault(mutex_action, []) conflicts.extend(group_actions[:i]) conflicts.extend(group_actions[i + 1:]) # find all option indices, and determine the arg_string_pattern # which has an 'O' if there is an option at an index, # an 'A' if there is an argument, or a '-' if there is a '--' option_string_indices = {} arg_string_pattern_parts = [] arg_strings_iter = iter(arg_strings) for i, arg_string in enumerate(arg_strings_iter): # all args after -- are non-options if arg_string == '--': arg_string_pattern_parts.append('-') for arg_string in arg_strings_iter: arg_string_pattern_parts.append('A') # otherwise, add the arg to the arg strings # and note the index if it was an option else: option_tuple = self._parse_optional(arg_string) if option_tuple is None: pattern = 'A' else: option_string_indices[i] = option_tuple pattern = 'O' arg_string_pattern_parts.append(pattern) # join the pieces together to form the pattern arg_strings_pattern = ''.join(arg_string_pattern_parts) # converts arg strings to the appropriate and then takes the action seen_actions = set() seen_non_default_actions = set() def take_action(action, argument_strings, option_string=None): seen_actions.add(action) argument_values = self._get_values(action, argument_strings) # error if this argument is not allowed with other previously # seen arguments, assuming that actions that use the default # value don't really count as "present" if argument_values is not action.default: seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: msg = _('not allowed with argument %s') action_name = _get_action_name(conflict_action) raise ArgumentError(action, msg % action_name) # take the action if we didn't receive a SUPPRESS value # (e.g. from a default) if argument_values is not SUPPRESS: action(self, namespace, argument_values, option_string) # function to convert arg_strings into an optional action def consume_optional(start_index): # get the optional identified at this index option_tuple = option_string_indices[start_index] action, option_string, explicit_arg = option_tuple # identify additional optionals in the same arg string # (e.g. -xyz is the same as -x -y -z if no args are required) match_argument = self._match_argument action_tuples = [] while True: # if we found no optional action, skip it if action is None: extras.append(arg_strings[start_index]) return start_index + 1 # if there is an explicit argument, try to match the # optional's string arguments to only this if explicit_arg is not None: arg_count = match_argument(action, 'A') # if the action is a single-dash option and takes no # arguments, try to parse more single-dash options out # of the tail of the option string chars = self.prefix_chars if ( arg_count == 0 and option_string[1] not in chars and explicit_arg != '' ): action_tuples.append((action, [], option_string)) char = option_string[0] option_string = char + explicit_arg[0] new_explicit_arg = explicit_arg[1:] or None optionals_map = self._option_string_actions if option_string in optionals_map: action = optionals_map[option_string] explicit_arg = new_explicit_arg else: msg = _('ignored explicit argument %r') raise ArgumentError(action, msg % explicit_arg) # if the action expect exactly one argument, we've # successfully matched the option; exit the loop elif arg_count == 1: stop = start_index + 1 args = [explicit_arg] action_tuples.append((action, args, option_string)) break # error if a double-dash option did not use the # explicit argument else: msg = _('ignored explicit argument %r') raise ArgumentError(action, msg % explicit_arg) # if there is no explicit argument, try to match the # optional's string arguments with the following strings # if successful, exit the loop else: start = start_index + 1 selected_patterns = arg_strings_pattern[start:] arg_count = match_argument(action, selected_patterns) stop = start + arg_count args = arg_strings[start:stop] action_tuples.append((action, args, option_string)) break # add the Optional to the list and return the index at which # the Optional's string args stopped assert action_tuples for action, args, option_string in action_tuples: take_action(action, args, option_string) return stop # the list of Positionals left to be parsed; this is modified # by consume_positionals() positionals = self._get_positional_actions() # function to convert arg_strings into positional actions def consume_positionals(start_index): # match as many Positionals as possible match_partial = self._match_arguments_partial selected_pattern = arg_strings_pattern[start_index:] arg_counts = match_partial(positionals, selected_pattern) # slice off the appropriate arg strings for each Positional # and add the Positional and its args to the list for action, arg_count in zip(positionals, arg_counts): args = arg_strings[start_index: start_index + arg_count] start_index += arg_count take_action(action, args) # slice off the Positionals that we just parsed and return the # index at which the Positionals' string args stopped positionals[:] = positionals[len(arg_counts):] return start_index # consume Positionals and Optionals alternately, until we have # passed the last option string extras = [] start_index = 0 if option_string_indices: max_option_string_index = max(option_string_indices) else: max_option_string_index = -1 while start_index <= max_option_string_index: # consume any Positionals preceding the next option next_option_string_index = min([ index for index in option_string_indices if index >= start_index]) if start_index != next_option_string_index: positionals_end_index = consume_positionals(start_index) # only try to parse the next optional if we didn't consume # the option string during the positionals parsing if positionals_end_index > start_index: start_index = positionals_end_index continue else: start_index = positionals_end_index # if we consumed all the positionals we could and we're not # at the index of an option string, there were extra arguments if start_index not in option_string_indices: strings = arg_strings[start_index:next_option_string_index] extras.extend(strings) start_index = next_option_string_index # consume the next optional and any arguments for it start_index = consume_optional(start_index) # consume any positionals following the last Optional stop_index = consume_positionals(start_index) # if we didn't consume all the argument strings, there were extras extras.extend(arg_strings[stop_index:]) # make sure all required actions were present and also convert # action defaults which were not given as arguments required_actions = [] for action in self._actions: if action not in seen_actions: if action.required: required_actions.append(_get_action_name(action)) else: # Convert action default now instead of doing it before # parsing arguments to avoid calling convert functions # twice (which may fail) if the argument was given, but # only if it was defined already in the namespace if (action.default is not None and isinstance(action.default, str) and hasattr(namespace, action.dest) and action.default is getattr(namespace, action.dest)): setattr(namespace, action.dest, self._get_value(action, action.default)) if required_actions: self.error(_('the following arguments are required: %s') % ', '.join(required_actions)) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: if group.required: for action in group._group_actions: if action in seen_non_default_actions: break # if no actions were used, report the error else: names = [_get_action_name(action) for action in group._group_actions if action.help is not SUPPRESS] msg = _('one of the arguments %s is required') self.error(msg % ' '.join(names)) # return the updated namespace and the extra arguments return namespace, extras
(self, arg_strings, namespace)
8,252
argparse
_parse_optional
null
def _parse_optional(self, arg_string):
    # if it's an empty string, it was meant to be a positional
    if not arg_string:
        return None

    # if it doesn't start with a prefix, it was meant to be positional
    if not arg_string[0] in self.prefix_chars:
        return None

    # if the option string is present in the parser, return the action
    if arg_string in self._option_string_actions:
        action = self._option_string_actions[arg_string]
        return action, arg_string, None

    # if it's just a single character, it was meant to be positional
    if len(arg_string) == 1:
        return None

    # if the option string before the "=" is present, return the action
    if '=' in arg_string:
        option_string, explicit_arg = arg_string.split('=', 1)
        if option_string in self._option_string_actions:
            action = self._option_string_actions[option_string]
            return action, option_string, explicit_arg

    # search through all possible prefixes of the option string
    # and all actions in the parser for possible interpretations
    option_tuples = self._get_option_tuples(arg_string)

    # if multiple actions match, the option string was ambiguous
    if len(option_tuples) > 1:
        options = ', '.join([option_string
                             for action, option_string, explicit_arg
                             in option_tuples])
        args = {'option': arg_string, 'matches': options}
        msg = _('ambiguous option: %(option)s could match %(matches)s')
        self.error(msg % args)

    # if exactly one action matched, this segmentation is good,
    # so return the parsed action
    elif len(option_tuples) == 1:
        option_tuple, = option_tuples
        return option_tuple

    # if it was not found as an option, but it looks like a negative
    # number, it was meant to be positional
    # unless there are negative-number-like options
    if self._negative_number_matcher.match(arg_string):
        if not self._has_negative_number_optionals:
            return None

    # if it contains a space, it was meant to be a positional
    if ' ' in arg_string:
        return None

    # it was meant to be an optional but there is no such option
    # in this parser (though it might be a valid option in a subparser)
    return None, arg_string, None
(self, arg_string)
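A small usage sketch (not part of the source record) illustrating the lookup order _parse_optional implements: exact option strings, "--opt=value" splitting, and unambiguous prefix abbreviation. All names here are illustrative.

import argparse

parser = argparse.ArgumentParser(prog="demo")
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--value")

print(parser.parse_args(["--value=3"]))   # '=' is split off as the explicit argument
print(parser.parse_args(["--val", "3"]))  # an unambiguous prefix is accepted
print(parser.parse_args(["--verbose"]))   # exact option-string match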
8,253
argparse
_pop_action_class
null
def _pop_action_class(self, kwargs, default=None):
    action = kwargs.pop('action', default)
    return self._registry_get('action', action, action)
(self, kwargs, default=None)
8,254
argparse
_print_message
null
def _print_message(self, message, file=None):
    if message:
        if file is None:
            file = _sys.stderr
        file.write(message)
(self, message, file=None)
8,255
argparse
_read_args_from_files
null
def _read_args_from_files(self, arg_strings):
    # expand arguments referencing files
    new_arg_strings = []
    for arg_string in arg_strings:

        # for regular arguments, just add them back into the list
        if not arg_string or arg_string[0] not in self.fromfile_prefix_chars:
            new_arg_strings.append(arg_string)

        # replace arguments referencing files with the file content
        else:
            try:
                with open(arg_string[1:]) as args_file:
                    arg_strings = []
                    for arg_line in args_file.read().splitlines():
                        for arg in self.convert_arg_line_to_args(arg_line):
                            arg_strings.append(arg)
                    arg_strings = self._read_args_from_files(arg_strings)
                    new_arg_strings.extend(arg_strings)
            except OSError:
                err = _sys.exc_info()[1]
                self.error(str(err))

    # return the modified argument list
    return new_arg_strings
(self, arg_strings)
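A minimal sketch of the feature this method backs: when the parser is built with fromfile_prefix_chars, any "@file" argument is expanded by _read_args_from_files, one argument per line by default. The file path here is a throwaway temp file.

import argparse
import os
import tempfile

parser = argparse.ArgumentParser(fromfile_prefix_chars="@")
parser.add_argument("--name")

with tempfile.NamedTemporaryFile("w", suffix=".args", delete=False) as f:
    f.write("--name\nalice\n")  # one argument per line (default line conversion)
    path = f.name

print(parser.parse_args(["@" + path]))  # Namespace(name='alice')
os.remove(path)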
8,256
argparse
_registry_get
null
def _registry_get(self, registry_name, value, default=None):
    return self._registries[registry_name].get(value, default)
(self, registry_name, value, default=None)
8,257
argparse
_remove_action
null
def _remove_action(self, action):
    self._actions.remove(action)
(self, action)
8,258
feapder.utils.custom_argparse
add_argument
null
def add_argument(self, *args, **kwargs):
    function = kwargs.pop("function") if "function" in kwargs else None
    key = self._get_optional_kwargs(*args, **kwargs).get("dest")
    self.functions[key] = function
    return super(ArgumentParser, self).add_argument(*args, **kwargs)
(self, *args, **kwargs)
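A hypothetical usage sketch of feapder's ArgumentParser subclass, inferred from the code above and from run() later in this listing; it assumes the subclass's __init__ creates the `functions` dict the method relies on. The crawl function and flag name are made up for illustration.

from feapder.utils.custom_argparse import ArgumentParser

def crawl(days):
    # placeholder for real crawling logic
    print(f"crawling the last {days} days")

parser = ArgumentParser(description="spider entry point")
# each argument can carry a `function`; the dest ("crawl") becomes the dispatch key
parser.add_argument("--crawl", type=int, help="crawl N days", function=crawl)

parser.run("crawl", 7)  # looks up functions["crawl"] and calls crawl(7)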
8,259
argparse
add_argument_group
null
def add_argument_group(self, *args, **kwargs):
    group = _ArgumentGroup(self, *args, **kwargs)
    self._action_groups.append(group)
    return group
(self, *args, **kwargs)
8,260
argparse
add_mutually_exclusive_group
null
def add_mutually_exclusive_group(self, **kwargs):
    group = _MutuallyExclusiveGroup(self, **kwargs)
    self._mutually_exclusive_groups.append(group)
    return group
(self, **kwargs)
8,261
argparse
add_subparsers
null
def add_subparsers(self, **kwargs):
    if self._subparsers is not None:
        self.error(_('cannot have multiple subparser arguments'))

    # add the parser class to the arguments if it's not present
    kwargs.setdefault('parser_class', type(self))

    if 'title' in kwargs or 'description' in kwargs:
        title = _(kwargs.pop('title', 'subcommands'))
        description = _(kwargs.pop('description', None))
        self._subparsers = self.add_argument_group(title, description)
    else:
        self._subparsers = self._positionals

    # prog defaults to the usage message of this parser, skipping
    # optional arguments and with no "usage:" prefix
    if kwargs.get('prog') is None:
        formatter = self._get_formatter()
        positionals = self._get_positional_actions()
        groups = self._mutually_exclusive_groups
        formatter.add_usage(self.usage, positionals, groups, '')
        kwargs['prog'] = formatter.format_help().strip()

    # create the parsers action and add it to the positionals list
    parsers_class = self._pop_action_class(kwargs, 'parsers')
    action = parsers_class(option_strings=[], **kwargs)
    self._subparsers._add_action(action)

    # return the created parsers action
    return action
(self, **kwargs)
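A short sketch of the standard subcommand pattern this method enables; "add" and its operands are illustrative names.

import argparse

parser = argparse.ArgumentParser(prog="tool")
subparsers = parser.add_subparsers(dest="command", required=True)

add = subparsers.add_parser("add", help="add two numbers")
add.add_argument("x", type=int)
add.add_argument("y", type=int)

args = parser.parse_args(["add", "2", "3"])
print(args.command, args.x + args.y)  # add 5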
8,262
argparse
convert_arg_line_to_args
null
def convert_arg_line_to_args(self, arg_line):
    return [arg_line]
(self, arg_line)
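Since the default treats each line of an @-file as one argument, a common override (sketched here with an illustrative class name) splits lines on whitespace instead:

import argparse

class ShlexishArgumentParser(argparse.ArgumentParser):
    # allow several space-separated arguments per line in @-files
    def convert_arg_line_to_args(self, arg_line):
        return arg_line.split()

parser = ShlexishArgumentParser(fromfile_prefix_chars="@")
parser.add_argument("--name")
# a file containing the single line "--name alice" would now parse correctly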
8,263
argparse
error
error(message: string)

Prints a usage message incorporating the message to stderr and exits.

If you override this in a subclass, it should not return -- it should
either exit or raise an exception.
def error(self, message):
    """error(message: string)

    Prints a usage message incorporating the message to stderr and
    exits.

    If you override this in a subclass, it should not return -- it
    should either exit or raise an exception.
    """
    self.print_usage(_sys.stderr)
    args = {'prog': self.prog, 'message': message}
    self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
(self, message)
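A brief sketch of the override pattern the docstring describes: raise instead of exiting so callers can handle bad input themselves. The class name is illustrative.

import argparse

class NonExitingParser(argparse.ArgumentParser):
    # per the docstring: an override must not return normally
    def error(self, message):
        raise ValueError(message)

parser = NonExitingParser(prog="demo")
parser.add_argument("--n", type=int, required=True)
try:
    parser.parse_args([])
except ValueError as e:
    print("caught:", e)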
8,264
argparse
exit
null
def exit(self, status=0, message=None):
    if message:
        self._print_message(message, _sys.stderr)
    _sys.exit(status)
(self, status=0, message=None)
8,265
argparse
format_help
null
def format_help(self):
    formatter = self._get_formatter()

    # usage
    formatter.add_usage(self.usage, self._actions,
                        self._mutually_exclusive_groups)

    # description
    formatter.add_text(self.description)

    # positionals, optionals and user-defined groups
    for action_group in self._action_groups:
        formatter.start_section(action_group.title)
        formatter.add_text(action_group.description)
        formatter.add_arguments(action_group._group_actions)
        formatter.end_section()

    # epilog
    formatter.add_text(self.epilog)

    # determine help from format above
    return formatter.format_help()
(self)
8,266
argparse
format_usage
null
def format_usage(self):
    formatter = self._get_formatter()
    formatter.add_usage(self.usage, self._actions,
                        self._mutually_exclusive_groups)
    return formatter.format_help()
(self)
8,267
argparse
get_default
null
def get_default(self, dest):
    for action in self._actions:
        if action.dest == dest and action.default is not None:
            return action.default
    return self._defaults.get(dest, None)
(self, dest)
8,268
argparse
parse_args
null
def parse_args(self, args=None, namespace=None):
    args, argv = self.parse_known_args(args, namespace)
    if argv:
        msg = _('unrecognized arguments: %s')
        self.error(msg % ' '.join(argv))
    return args
(self, args=None, namespace=None)
8,269
argparse
parse_intermixed_args
null
def parse_intermixed_args(self, args=None, namespace=None):
    args, argv = self.parse_known_intermixed_args(args, namespace)
    if argv:
        msg = _('unrecognized arguments: %s')
        self.error(msg % ' '.join(argv))
    return args
(self, args=None, namespace=None)
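A quick sketch of what intermixed parsing buys you: positionals and optionals interleaved in any order, which plain parse_args would reject for some layouts.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--flag", action="store_true")
parser.add_argument("files", nargs="*")

args = parser.parse_intermixed_args(["a", "--flag", "b", "c"])
print(args.files, args.flag)  # ['a', 'b', 'c'] True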
8,270
argparse
parse_known_args
null
def parse_known_args(self, args=None, namespace=None):
    if args is None:
        # args default to the system args
        args = _sys.argv[1:]
    else:
        # make sure that args are mutable
        args = list(args)

    # default Namespace built from parser defaults
    if namespace is None:
        namespace = Namespace()

    # add any action defaults that aren't present
    for action in self._actions:
        if action.dest is not SUPPRESS:
            if not hasattr(namespace, action.dest):
                if action.default is not SUPPRESS:
                    setattr(namespace, action.dest, action.default)

    # add any parser defaults that aren't present
    for dest in self._defaults:
        if not hasattr(namespace, dest):
            setattr(namespace, dest, self._defaults[dest])

    # parse the arguments and exit if there are any errors
    if self.exit_on_error:
        try:
            namespace, args = self._parse_known_args(args, namespace)
        except ArgumentError:
            err = _sys.exc_info()[1]
            self.error(str(err))
    else:
        namespace, args = self._parse_known_args(args, namespace)

    if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
        args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
        delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
    return namespace, args
(self, args=None, namespace=None)
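A small sketch of the two-tuple contract: unknown strings are returned rather than treated as errors, so a second parser (or plugin) can consume them.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--known")

args, extras = parser.parse_known_args(["--known", "1", "--other", "2"])
print(args.known)  # '1'
print(extras)      # ['--other', '2'] -- left for another parser to consume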
8,271
argparse
parse_known_intermixed_args
null
def parse_known_intermixed_args(self, args=None, namespace=None):
    # returns a namespace and list of extras
    #
    # positionals can be freely intermixed with optionals.  optionals are
    # first parsed with all positional arguments deactivated.  The 'extras'
    # are then parsed.  If the parser definition is incompatible with the
    # intermixed assumptions (e.g. use of REMAINDER, subparsers) a
    # TypeError is raised.
    #
    # positionals are 'deactivated' by setting nargs and default to
    # SUPPRESS.  This blocks the addition of that positional to the
    # namespace

    positionals = self._get_positional_actions()
    a = [action for action in positionals
         if action.nargs in [PARSER, REMAINDER]]
    if a:
        raise TypeError('parse_intermixed_args: positional arg'
                        ' with nargs=%s' % a[0].nargs)

    if [action.dest for group in self._mutually_exclusive_groups
            for action in group._group_actions if action in positionals]:
        raise TypeError('parse_intermixed_args: positional in'
                        ' mutuallyExclusiveGroup')

    try:
        save_usage = self.usage
        try:
            if self.usage is None:
                # capture the full usage for use in error messages
                self.usage = self.format_usage()[7:]
            for action in positionals:
                # deactivate positionals
                action.save_nargs = action.nargs
                # action.nargs = 0
                action.nargs = SUPPRESS
                action.save_default = action.default
                action.default = SUPPRESS
            namespace, remaining_args = self.parse_known_args(args,
                                                              namespace)
            for action in positionals:
                # remove the empty positional values from namespace
                if (hasattr(namespace, action.dest)
                        and getattr(namespace, action.dest) == []):
                    from warnings import warn
                    warn('Do not expect %s in %s' % (action.dest, namespace))
                    delattr(namespace, action.dest)
        finally:
            # restore nargs and usage before exiting
            for action in positionals:
                action.nargs = action.save_nargs
                action.default = action.save_default
        optionals = self._get_optional_actions()
        try:
            # parse positionals.  optionals aren't normally required, but
            # they could be, so make sure they aren't.
            for action in optionals:
                action.save_required = action.required
                action.required = False
            for group in self._mutually_exclusive_groups:
                group.save_required = group.required
                group.required = False
            namespace, extras = self.parse_known_args(remaining_args,
                                                      namespace)
        finally:
            # restore parser values before exiting
            for action in optionals:
                action.required = action.save_required
            for group in self._mutually_exclusive_groups:
                group.required = group.save_required
    finally:
        self.usage = save_usage
    return namespace, extras
(self, args=None, namespace=None)
8,272
argparse
print_help
null
def print_help(self, file=None):
    if file is None:
        file = _sys.stdout
    self._print_message(self.format_help(), file)
(self, file=None)
8,273
argparse
print_usage
null
def print_usage(self, file=None):
    if file is None:
        file = _sys.stdout
    self._print_message(self.format_usage(), file)
(self, file=None)
8,274
argparse
register
null
def register(self, registry_name, value, object):
    registry = self._registries.setdefault(registry_name, {})
    registry[value] = object
(self, registry_name, value, object)
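register() is the mechanism behind names like 'store_true': they are entries in the 'action' registry, resolved via _registry_get above. The same registry lets you name custom types, as in this sketch (the 'hex' name is illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.register("type", "hex", lambda s: int(s, 16))
parser.add_argument("--addr", type="hex")  # the string is looked up in the registry
print(parser.parse_args(["--addr", "ff"]))  # Namespace(addr=255)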
8,275
feapder.utils.custom_argparse
run
null
def run(self, args, values=None):
    if args in self.functions:
        if values:
            self.functions[args](values)
        else:
            self.functions[args]()
    else:
        raise Exception(f"No such method: {args}")
(self, args, values=None)
8,276
argparse
set_defaults
null
def set_defaults(self, **kwargs):
    self._defaults.update(kwargs)

    # if these defaults match any existing arguments, replace
    # the previous default on the object with the new one
    for action in self._actions:
        if action.dest in kwargs:
            action.default = kwargs[action.dest]
(self, **kwargs)
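A short sketch pairing set_defaults with get_default from earlier in this listing: defaults set this way override per-argument defaults and may introduce dests with no matching argument.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--retries", type=int, default=3)
parser.set_defaults(retries=5, mode="fast")  # overrides the existing default

print(parser.get_default("retries"))  # 5
print(parser.parse_args([]))          # Namespace(retries=5, mode='fast')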
8,277
feapder.utils.custom_argparse
start
null
def start(self, args=None, namespace=None):
    args = self.parse_args(args=args, namespace=namespace)
    # vars() returns the object's attributes and their values as a dict
    for key, value in vars(args).items():
        if value not in (None, False):
            if callable(self.functions[key]):
                if value != True:
                    if isinstance(value, list) and len(value) == 1:
                        value = value[0]
                    self.functions[key](value)
                else:
                    self.functions[key]()
(self, args=None, namespace=None)
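A hypothetical end-to-end sketch of the dispatch loop above, under the same assumption as before (that __init__ sets up `self.functions`): start() parses argv and invokes the bound function for every argument that was actually supplied, passing the value unless the argument is a bare flag.

from feapder.utils.custom_argparse import ArgumentParser

def start_master():
    print("master started")

def start_worker(worker_id):
    print(f"worker {worker_id} started")

parser = ArgumentParser(description="spider control")
parser.add_argument("--master", action="store_true", function=start_master)
parser.add_argument("--worker", type=int, function=start_worker)

if __name__ == "__main__":
    # e.g. `python main.py --worker 3` would call start_worker(3)
    parser.start()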
8,278
feapder.core.base_parser
BaseParser
null
class BaseParser(object):
    def start_requests(self):
        """
        @summary: add the initial urls
        ---------
        ---------
        @result: yield Request()
        """

        pass

    def download_midware(self, request: Request):
        """
        @summary: download middleware; may modify request parameters,
            or perform a custom download and then return request, response
        ---------
        @param request:
        ---------
        @result: return request / request, response
        """

        pass

    def validate(self, request: Request, response: Response):
        """
        @summary: validation hook; can be used to check whether the response is correct
            If an exception is raised inside this function, the request is retried
            If it returns True or None, control passes to the parse function
            If it returns False, the current request is discarded
            Use request.callback_name to distinguish callbacks and write different validation logic
        ---------
        @param request:
        @param response:
        ---------
        @result: True / None / False
        """

        pass

    def parse(self, request: Request, response: Response):
        """
        @summary: default parse function
        ---------
        @param request:
        @param response:
        ---------
        @result:
        """

        pass

    def exception_request(self, request: Request, response: Response, e: Exception):
        """
        @summary: request whose download or parsing raised an exception
        ---------
        @param request:
        @param response:
        @param e: the exception
        ---------
        @result: request / callback / None (the return value must be iterable)
        """

        pass

    def failed_request(self, request: Request, response: Response, e: Exception):
        """
        @summary: request that exceeded the maximum retry count
            A modified request may be returned; if no request is returned, the
            incoming request goes straight into the Redis failed table,
            otherwise the modified request is stored there instead
        ---------
        @param request:
        @param response:
        @param e: the exception
        ---------
        @result: request / item / callback / None (the return value must be iterable)
        """

        pass

    def start_callback(self):
        """
        @summary: callback run when the program starts
        ---------
        ---------
        @result: None
        """

        pass

    def end_callback(self):
        """
        @summary: callback run when the program ends
        ---------
        ---------
        @result: None
        """

        pass

    @property
    def name(self):
        return self.__class__.__name__

    def close(self):
        pass
()
8,288
feapder.core.base_parser
BatchParser
@summary: batch spider template
---------
class BatchParser(TaskParser):
    """
    @summary: batch spider template
    ---------
    """

    def __init__(
        self, task_table, batch_record_table, task_state, date_format, mysqldb=None
    ):
        super(BatchParser, self).__init__(
            task_table=task_table, task_state=task_state, mysqldb=mysqldb
        )

        self._batch_record_table = batch_record_table  # batch record table in MySQL
        self._date_format = date_format  # batch date format

    @property
    def batch_date(self):
        """
        @summary: get the batch date
        ---------
        ---------
        @result:
        """
        batch_date = os.environ.get("batch_date")
        if not batch_date:
            sql = 'select date_format(batch_date, "{date_format}") from {batch_record_table} order by id desc limit 1'.format(
                date_format=self._date_format.replace(":%M", ":%i"),
                batch_record_table=self._batch_record_table,
            )
            batch_info = MysqlDB().find(sql)  # (('2018-08-19'),)
            if batch_info:
                os.environ["batch_date"] = batch_date = batch_info[0][0]
            else:
                log.error("start_monitor_task() must be run first")
                os._exit(137)  # exit code 35072, so a spider manager can restart the process

        return batch_date
(task_table, batch_record_table, task_state, date_format, mysqldb=None)
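A minimal sketch (not from the source) of the caching pattern batch_date uses: the value is resolved once, parked in an environment variable, and reread by every later call and by child parsers in the same process. Names here are placeholders.

import os

def cached_batch_date(fetch):
    """Return the cached batch date, computing it via `fetch` only once."""
    value = os.environ.get("batch_date")
    if not value:
        value = fetch()  # e.g. a MySQL lookup, as in the property above
        os.environ["batch_date"] = value
    return value

print(cached_batch_date(lambda: "2018-08-19"))  # computed
print(cached_batch_date(lambda: "never-called"))  # served from os.environ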
8,289
feapder.core.base_parser
__init__
null
def __init__(
    self, task_table, batch_record_table, task_state, date_format, mysqldb=None
):
    super(BatchParser, self).__init__(
        task_table=task_table, task_state=task_state, mysqldb=mysqldb
    )

    self._batch_record_table = batch_record_table  # batch record table in MySQL
    self._date_format = date_format  # batch date format
(self, task_table, batch_record_table, task_state, date_format, mysqldb=None)
8,290
feapder.core.base_parser
add_task
@summary: add tasks; called on every start_monitor run, before init_task
---------
---------
@result:
def add_task(self):
    """
    @summary: add tasks; called on every start_monitor run, before init_task
    ---------
    ---------
    @result:
    """
(self)
8,298
feapder.core.base_parser
start_requests
@summary:
---------
@param task: task info list
---------
@result:
def start_requests(self, task: PerfectDict):
    """
    @summary:
    ---------
    @param task: task info list
    ---------
    @result:
    """
(self, task: feapder.utils.perfect_dict.PerfectDict)
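A hypothetical override sketch: each MySQL task row arrives as a PerfectDict whose fields mirror the spider's configured task_keys. It assumes feapder re-exports BatchParser and Request at package level, and the task fields ("id", "url") are made up for illustration.

import feapder

class DetailParser(feapder.BatchParser):
    def start_requests(self, task):
        # task fields correspond to the task_keys configured on the spider,
        # e.g. task_keys=["id", "url"]
        yield feapder.Request(task.url, task_id=task.id)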
8,299
feapder.core.base_parser
update_task_state
@summary: update the task state in the task table; must be called explicitly
    when each task is done. May be overridden.
    Call it as: yield lambda: self.update_task_state(task_id, state)
---------
@param task_id:
@param state:
---------
@result:
def update_task_state(self, task_id, state=1, **kwargs):
    """
    @summary: update the task state in the task table; must be called
        explicitly when each task is done. May be overridden.
        Call it as: yield lambda: self.update_task_state(task_id, state)
    ---------
    @param task_id:
    @param state:
    ---------
    @result:
    """
    kwargs["id"] = task_id
    kwargs[self._task_state] = state

    sql = tools.make_update_sql(
        self._task_table, kwargs, condition="id = {task_id}".format(task_id=task_id)
    )

    if self._mysqldb.update(sql):
        log.debug("Setting state of task %s succeeded" % task_id)
    else:
        log.error("Setting state of task %s failed  sql=%s" % (task_id, sql))
(self, task_id, state=1, **kwargs)
8,300
feapder.core.base_parser
update_task_batch
Batch-update tasks. When called from several places, the updated fields must be identical.
Note: it must be written as yield update_task_batch(...), otherwise no update happens.
@param task_id:
@param state:
@param kwargs:
@return:
def update_task_batch(self, task_id, state=1, **kwargs):
    """
    Batch-update tasks. When called from several places, the updated fields must be identical.
    Note: it must be written as yield update_task_batch(...), otherwise no update happens.
    @param task_id:
    @param state:
    @param kwargs:
    @return:
    """
    kwargs["id"] = task_id
    kwargs[self._task_state] = state

    update_item = UpdateItem(**kwargs)
    update_item.table_name = self._task_table
    update_item.name_underline = self._task_table + "_item"

    return update_item
(self, task_id, state=1, **kwargs)
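A hypothetical usage sketch inside a parse method, per the note above: update_task_batch returns an UpdateItem, so it must be yielded to reach the item buffer. The request attribute and xpath expression are illustrative.

import feapder

class MyParser(feapder.BatchParser):
    def parse(self, request, response):
        title = response.xpath("//title/text()").extract_first()
        # state=1 marks the task done; extra kwargs update other columns
        yield self.update_task_batch(request.task_id, 1, title=title)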
8,303
feapder.core.spiders.batch_spider
BatchSpider
null
class BatchSpider(BatchParser, Scheduler):
    def __init__(
        self,
        task_table,
        batch_record_table,
        batch_name,
        batch_interval,
        task_keys,
        task_state="state",
        min_task_count=10000,
        check_task_interval=5,
        task_limit=10000,
        related_redis_key=None,
        related_batch_record=None,
        task_condition="",
        task_order_by="",
        redis_key=None,
        thread_count=None,
        begin_callback=None,
        end_callback=None,
        delete_keys=(),
        keep_alive=None,
        auto_start_next_batch=True,
        **kwargs,
    ):
        """
        @summary: batch spider
        Prerequisites
        1. A task table is required. It must contain an id column and a task state
           column such as `state`. If a parser_name column is provided, tasks are
           dispatched automatically to the matching parser, otherwise to every
           parser. Extend the table with whatever parameters the spider needs.
           Reference DDL:
            CREATE TABLE `table_name` (
              `id` int(11) NOT NULL AUTO_INCREMENT,
              `param` varchar(1000) DEFAULT NULL COMMENT 'parameters the spider needs for crawling',
              `state` int(11) DEFAULT NULL COMMENT 'task state',
              `parser_name` varchar(255) DEFAULT NULL COMMENT 'class name of the parser script for the task',
              PRIMARY KEY (`id`),
              UNIQUE KEY `nui` (`param`) USING BTREE
            ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

        2. A batch record table is required; it is created automatically if missing
        ---------
        @param task_table: task table in MySQL
        @param batch_record_table: batch record table in MySQL
        @param batch_name: name of the batch crawler
        @param batch_interval: batch interval in days; for one batch per hour, use 1/24
        @param task_keys: list of task columns to fetch; include parser_name if a parser is selected per task
        @param task_state: name of the task state column in the MySQL task table
        @param min_task_count: minimum number of tasks in Redis; below this, tasks are pulled from the MySQL task table
        @param check_task_interval: interval between checks for remaining tasks
        @param task_limit: number of tasks fetched from the database at a time
        @param redis_key: key prefix under which tasks etc. are stored in Redis
        @param thread_count: thread count; defaults to the value in the settings file
        @param begin_callback: callback when the spider starts
        @param end_callback: callback when the spider ends
        @param delete_keys: keys deleted on spider start; tuple/bool/string, regex supported.
            Commonly used to clear the task queue, otherwise a restart resumes from the last breakpoint
        @param keep_alive: whether the spider stays resident; defaults to no
        @param auto_start_next_batch: whether to start the next batch automatically once this
            batch ends and the next batch time has arrived; defaults to yes
        @param related_redis_key: task table (Redis) of a related spider. Note: avoid cycles such as A -> B & B -> A.
        @param related_batch_record: batch table (MySQL) of a related spider. Note: avoid cycles such as A -> B & B -> A.
            Configure either related_redis_key or related_batch_record; used so this spider
            does not finish while the related spiders are still running.
            If the related spider is a batch spider, prefer related_batch_record;
            if it is an ordinary spider without a batch table, use related_redis_key.
        @param task_condition: task condition, i.e. the clause after `where`,
            used to pick this spider's tasks out of a large task table
        @param task_order_by: ordering used when fetching tasks, e.g. id desc
        ---------
        @result:
        """
        Scheduler.__init__(
            self,
            redis_key=redis_key,
            thread_count=thread_count,
            begin_callback=begin_callback,
            end_callback=end_callback,
            delete_keys=delete_keys,
            keep_alive=keep_alive,
            auto_start_requests=False,
            batch_interval=batch_interval,
            task_table=task_table,
            **kwargs,
        )

        self._redisdb = RedisDB()
        self._mysqldb = MysqlDB()

        self._task_table = task_table  # task table in MySQL
        self._batch_record_table = batch_record_table  # batch record table in MySQL
        self._batch_name = batch_name  # name of the batch crawler
        self._task_keys = task_keys  # task columns to fetch

        self._task_state = task_state  # name of the state column in the MySQL task table
        self._min_task_count = min_task_count  # minimum number of tasks in Redis
        self._check_task_interval = check_task_interval
        self._task_limit = task_limit  # number of tasks fetched from MySQL at a time

        self._related_task_tables = [
            setting.TAB_REQUESTS.format(redis_key=redis_key)
        ]  # the spider's own task table also has to be checked for tasks
        if related_redis_key:
            self._related_task_tables.append(
                setting.TAB_REQUESTS.format(redis_key=related_redis_key)
            )

        self._related_batch_record = related_batch_record
        self._task_condition = task_condition
        self._task_condition_prefix_and = task_condition and " and {}".format(
            task_condition
        )
        self._task_condition_prefix_where = task_condition and " where {}".format(
            task_condition
        )
        self._task_order_by = task_order_by and " order by {}".format(task_order_by)
        self._auto_start_next_batch = auto_start_next_batch

        self._batch_date_cache = None
        if self._batch_interval >= 1:
            self._date_format = "%Y-%m-%d"
        elif self._batch_interval < 1 and self._batch_interval >= 1 / 24:
            self._date_format = "%Y-%m-%d %H"
        else:
            self._date_format = "%Y-%m-%d %H:%M"

        self._is_more_parsers = True  # multi-template spider

        # initialize the per-batch attributes
        self._spider_last_done_time = None  # time the done-task count was last taken
        self._spider_last_done_count = None  # the spider's most recent done-task count
        self._spider_deal_speed_cached = None

        self._batch_timeout = False  # whether the batch has timed out or is about to

        # reset tasks
        self.reset_task()

    def init_batch_property(self):
        """
        Attributes that have to be reset at the start of each batch
        @return:
        """
        self._spider_deal_speed_cached = None
        self._spider_last_done_time = None
        self._spider_last_done_count = None  # number of tasks already done when the spider starts
        self._batch_timeout = False

    def add_parser(self, parser, **kwargs):
        parser = parser(
            self._task_table,
            self._batch_record_table,
            self._task_state,
            self._date_format,
            self._mysqldb,
            **kwargs,
        )  # instantiate the parser
        self._parsers.append(parser)

    def start_monitor_task(self):
        """
        @summary: monitor task states
        ---------
        ---------
        @result:
        """
        if not self._parsers:  # not multi-template mode; register self as the template
            self._is_more_parsers = False
            self._parsers.append(self)

        elif len(self._parsers) <= 1:
            self._is_more_parsers = False

        self.create_batch_record_table()

        # add tasks
        for parser in self._parsers:
            parser.add_task()

        is_first_check = True
        while True:
            try:
                if self.check_batch(is_first_check):  # this batch is already done
                    if self._keep_alive:
                        is_first_check = True
                        log.info("All spider tasks are done; not exiting automatically, waiting for new tasks...")
                        time.sleep(self._check_task_interval)
                        continue
                    else:
                        break

                is_first_check = False

                # check whether Redis has tasks; if fewer than _min_task_count, pull from MySQL
                tab_requests = setting.TAB_REQUESTS.format(redis_key=self._redis_key)
                todo_task_count = self._redisdb.zget_count(tab_requests)

                tasks = []
                if todo_task_count < self._min_task_count:  # pull tasks from MySQL
                    # update the task state counts in the batch table
                    self.update_task_done_count()

                    log.info("Only %s tasks left in Redis; fetching more from MySQL" % todo_task_count)
                    tasks = self.get_todo_task_from_mysql()
                    if not tasks:  # state-0 tasks are done; check whether state-2 tasks were lost
                        if (
                            todo_task_count == 0
                        ):  # no pending tasks in Redis, so MySQL state-2 tasks are lost and must be redone
                            lose_task_count = self.get_lose_task_count()
                            if not lose_task_count:
                                time.sleep(self._check_task_interval)
                                continue
                            elif (
                                lose_task_count > self._task_limit * 5
                            ):  # too many lost tasks; reset them all, otherwise waiting for Redis to drain before each batch of lost tasks would be too slow
                                log.info("Resetting {} lost tasks to pending".format(lose_task_count))
                                # reset in-progress tasks to pending
                                if self.reset_lose_task_from_mysql():
                                    log.info("Resetting lost tasks succeeded")
                                else:
                                    log.info("Resetting lost tasks failed")

                                continue
                            else:  # few lost tasks; fetch them directly
                                log.info(
                                    "Fetching lost tasks: {} in total, taking {}".format(
                                        lose_task_count,
                                        self._task_limit
                                        if self._task_limit <= lose_task_count
                                        else lose_task_count,
                                    )
                                )
                                tasks = self.get_doing_task_from_mysql()

                    else:
                        log.info("Fetched %s pending tasks from MySQL" % len(tasks))

                else:
                    log.info("Redis still has %s backlogged tasks; not dispatching new ones for now" % todo_task_count)

                if not tasks:
                    if todo_task_count >= self._min_task_count:
                        # log.info('tasks in progress; %s left in Redis' % todo_task_count)
                        pass
                    else:
                        log.info("No pending tasks in MySQL; %s left in Redis" % todo_task_count)
                else:
                    # make start requests
                    self.distribute_task(tasks)
                    log.info("Added tasks to Redis successfully")

            except Exception as e:
                log.exception(e)

            time.sleep(self._check_task_interval)

    def create_batch_record_table(self):
        sql = (
            "select table_name from information_schema.tables where table_name like '%s'"
            % self._batch_record_table
        )
        tables_name = self._mysqldb.find(sql)
        if not tables_name:
            sql = """
                CREATE TABLE `{table_name}` (
                      `id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT,
                      `batch_date` {batch_date} DEFAULT NULL COMMENT 'batch date',
                      `total_count` int(11) DEFAULT NULL COMMENT 'total task count',
                      `done_count` int(11) DEFAULT NULL COMMENT 'done task count (1,-1)',
                      `fail_count` int(11) DEFAULT NULL COMMENT 'failed task count (-1)',
                      `interval` float(11) DEFAULT NULL COMMENT 'batch interval',
                      `interval_unit` varchar(20) DEFAULT NULL COMMENT 'batch interval unit: day, hour',
                      `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'batch start time',
                      `update_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'time this row was last updated',
                      `is_done` int(11) DEFAULT '0' COMMENT 'whether the batch is done: 0 no, 1 yes',
                      PRIMARY KEY (`id`)
                ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
            """.format(
                table_name=self._batch_record_table,
                batch_date="datetime",
            )

            self._mysqldb.execute(sql)

    def distribute_task(self, tasks):
        """
        @summary: dispatch tasks
        ---------
        @param tasks:
        ---------
        @result:
        """
        if self._is_more_parsers:  # multi-template spider; dispatch to the designated parser
            for task in tasks:
                for parser in self._parsers:  # find the parser matching the task
                    if parser.name in task:
                        task = PerfectDict(
                            _dict=dict(zip(self._task_keys, task)), _values=list(task)
                        )
                        requests = parser.start_requests(task)
                        if requests and not isinstance(requests, Iterable):
                            raise Exception(
                                "The return value of %s.%s must be iterable" % (parser.name, "start_requests")
                            )

                        result_type = 1
                        for request in requests or []:
                            if isinstance(request, Request):
                                request.parser_name = request.parser_name or parser.name
                                self._request_buffer.put_request(request)
                                result_type = 1

                            elif isinstance(request, Item):
                                self._item_buffer.put_item(request)
                                result_type = 2

                                if (
                                    self._item_buffer.get_items_count()
                                    >= setting.ITEM_MAX_CACHED_COUNT
                                ):
                                    self._item_buffer.flush()

                            elif callable(request):  # a callable request may be a function that updates the database
                                if result_type == 1:
                                    self._request_buffer.put_request(request)
                                else:
                                    self._item_buffer.put_item(request)

                                    if (
                                        self._item_buffer.get_items_count()
                                        >= setting.ITEM_MAX_CACHED_COUNT
                                    ):
                                        self._item_buffer.flush()
                            else:
                                raise TypeError(
                                    "start_requests yield result type error, expected Request, Item or a callback func, but got type: {}".format(
                                        type(requests)
                                    )
                                )

                        break

        else:  # the task has no dedicated parser, so dispatch it to every parser
            for task in tasks:
                for parser in self._parsers:
                    task = PerfectDict(
                        _dict=dict(zip(self._task_keys, task)), _values=list(task)
                    )
                    requests = parser.start_requests(task)
                    if requests and not isinstance(requests, Iterable):
                        raise Exception(
                            "The return value of %s.%s must be iterable" % (parser.name, "start_requests")
                        )

                    result_type = 1
                    for request in requests or []:
                        if isinstance(request, Request):
                            request.parser_name = request.parser_name or parser.name
                            self._request_buffer.put_request(request)
                            result_type = 1

                        elif isinstance(request, Item):
                            self._item_buffer.put_item(request)
                            result_type = 2

                            if (
                                self._item_buffer.get_items_count()
                                >= setting.ITEM_MAX_CACHED_COUNT
                            ):
                                self._item_buffer.flush()

                        elif callable(request):  # a callable request may be a function that updates the database
                            if result_type == 1:
                                self._request_buffer.put_request(request)
                            else:
                                self._item_buffer.put_item(request)

                                if (
                                    self._item_buffer.get_items_count()
                                    >= setting.ITEM_MAX_CACHED_COUNT
                                ):
                                    self._item_buffer.flush()

        self._request_buffer.flush()
        self._item_buffer.flush()

    def __get_task_state_count(self):
        sql = "select {state}, count(1) from {task_table}{task_condition} group by {state}".format(
            state=self._task_state,
            task_table=self._task_table,
            task_condition=self._task_condition_prefix_where,
        )
        task_state_count = self._mysqldb.find(sql)

        task_state = {
            "total_count": sum(count for state, count in task_state_count),
            "done_count": sum(
                count for state, count in task_state_count if state in (1, -1)
            ),
            "failed_count": sum(
                count for state, count in task_state_count if state == -1
            ),
        }

        return task_state

    def update_task_done_count(self):
        """
        @summary: update the task state counts in the batch table
        ---------
        ---------
        @result:
        """
        task_count = self.__get_task_state_count()

        # log.info('<%s> batch progress %s/%s' % (self._batch_name, done_task_count, total_task_count))

        # update the batch table
        sql = "update {} set done_count = {}, total_count = {}, fail_count = {}, update_time = CURRENT_TIME, is_done=0, `interval` = {}, interval_unit = '{}' where batch_date = '{}'".format(
            self._batch_record_table,
            task_count.get("done_count"),
            task_count.get("total_count"),
            task_count.get("failed_count"),
            self._batch_interval
            if self._batch_interval >= 1
            else self._batch_interval * 24,
            "day" if self._batch_interval >= 1 else "hour",
            self.batch_date,
        )
        self._mysqldb.update(sql)

    def update_is_done(self):
        sql = "update {} set is_done = 1, update_time = CURRENT_TIME where batch_date = '{}' and is_done = 0".format(
            self._batch_record_table, self.batch_date
        )
        self._mysqldb.update(sql)

    def get_todo_task_from_mysql(self):
        """
        @summary: fetch pending tasks
        ---------
        ---------
        @result:
        """
        # TODO fetch in batches, at most 1000000 per batch, to avoid excessive memory usage
        # query tasks
        task_keys = ", ".join([f"`{key}`" for key in self._task_keys])
        sql = "select %s from %s where %s = 0%s%s limit %s" % (
            task_keys,
            self._task_table,
            self._task_state,
            self._task_condition_prefix_and,
            self._task_order_by,
            self._task_limit,
        )
        tasks = self._mysqldb.find(sql)

        if tasks:
            # update task states
            for i in range(0, len(tasks), 10000):  # update 10000 per round
                task_ids = str(
                    tuple([task[0] for task in tasks[i : i + 10000]])
                ).replace(",)", ")")
                sql = "update %s set %s = 2 where id in %s" % (
                    self._task_table,
                    self._task_state,
                    task_ids,
                )
                self._mysqldb.update(sql)

        return tasks

    def get_doing_task_from_mysql(self):
        """
        @summary: fetch in-progress tasks
        ---------
        ---------
        @result:
        """
        # query tasks
        task_keys = ", ".join([f"`{key}`" for key in self._task_keys])
        sql = "select %s from %s where %s = 2%s%s limit %s" % (
            task_keys,
            self._task_table,
            self._task_state,
            self._task_condition_prefix_and,
            self._task_order_by,
            self._task_limit,
        )
        tasks = self._mysqldb.find(sql)

        return tasks

    def get_lose_task_count(self):
        sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count from {batch_record_table} order by id desc limit 1'.format(
            date_format=self._date_format.replace(":%M", ":%i"),
            batch_record_table=self._batch_record_table,
        )
        batch_info = self._mysqldb.find(sql)  # (('2018-08-19', 49686, 0),)
        batch_date, total_count, done_count = batch_info[0]
        return total_count - done_count

    def reset_lose_task_from_mysql(self):
        """
        @summary: reset lost tasks to pending
        ---------
        ---------
        @result:
        """
        sql = "update {table} set {state} = 0 where {state} = 2{task_condition}".format(
            table=self._task_table,
            state=self._task_state,
            task_condition=self._task_condition_prefix_and,
        )
        return self._mysqldb.update(sql)

    def get_deal_speed(self, total_count, done_count, last_batch_date):
        """
        Get the processing speed
        @param total_count: total task count
        @param done_count: done task count
        @param last_batch_date: batch datetime
        @return:
            deal_speed (tasks/hour), need_time (seconds), overflow_time (seconds)
            (a negative overflow_time means finishing that many seconds early)
            or None
        """
        now_date = datetime.datetime.now()
        if self._spider_last_done_count is None:
            self._spider_last_done_count = done_count
            self._spider_last_done_time = now_date
        elif done_count > self._spider_last_done_count:
            time_interval = (now_date - self._spider_last_done_time).total_seconds()
            deal_speed = (
                done_count - self._spider_last_done_count
            ) / time_interval  # tasks/second
            need_time = (total_count - done_count) / deal_speed  # in seconds
            overflow_time = (
                (now_date - last_batch_date).total_seconds()
                + need_time
                - datetime.timedelta(days=self._batch_interval).total_seconds()
            )  # overflow time in seconds
            calculate_speed_time = now_date.strftime("%Y-%m-%d %H:%M:%S")  # time the speed was measured
            deal_speed = int(deal_speed * 3600)  # tasks/hour

            # update the last done-task count and its timestamp
            self._spider_last_done_count = done_count
            self._spider_last_done_time = now_date

            self._spider_deal_speed_cached = (
                deal_speed,
                need_time,
                overflow_time,
                calculate_speed_time,
            )

        return self._spider_deal_speed_cached

    def init_task(self):
        """
        @summary: initialize the tasks in the task table; called when a new batch starts. May be overridden.
        ---------
        ---------
        @result:
        """
        sql = "update {task_table} set {state} = 0 where {state} != -1{task_condition}".format(
            task_table=self._task_table,
            state=self._task_state,
            task_condition=self._task_condition_prefix_and,
        )
        return self._mysqldb.update(sql)

    def check_batch(self, is_first_check=False):
        """
        @summary: check whether the batch is done
        ---------
        @param: is_first_check whether this is the first check; if so and the
            batch turns out to be done, no batch-done message is sent, because
            one was sent earlier
        ---------
        @result: True if done, otherwise False
        """
        sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count, is_done from {batch_record_table} order by id desc limit 1'.format(
            date_format=self._date_format.replace(":%M", ":%i"),
            batch_record_table=self._batch_record_table,
        )
        batch_info = self._mysqldb.find(sql)  # (('batch date', total, done, is batch done),)

        if batch_info:
            batch_date, total_count, done_count, is_done = batch_info[0]

            now_date = datetime.datetime.now()
            last_batch_date = datetime.datetime.strptime(batch_date, self._date_format)
            time_difference = now_date - last_batch_date

            if total_count == done_count and time_difference < datetime.timedelta(
                days=self._batch_interval
            ):  # still inside this batch; re-check the task table for new tasks
                # query the task table to see whether there really are no tasks
                # left, since the counts in batch_record may not be updated yet
                task_count = self.__get_task_state_count()

                total_count = task_count.get("total_count")
                done_count = task_count.get("done_count")

            if total_count == done_count:
                if not is_done:
                    # check whether the related spiders are done
                    related_spider_is_done = self.related_spider_is_done()
                    if related_spider_is_done is False:
                        msg = "<{}> batch not done yet, waiting for dependent spider {} to finish. Batch date {}, progress {}/{}".format(
                            self._batch_name,
                            self._related_batch_record or self._related_task_tables,
                            batch_date,
                            done_count,
                            total_count,
                        )
                        log.info(msg)
                        # check for a timeout; alert if exceeded
                        if time_difference >= datetime.timedelta(
                            days=self._batch_interval
                        ):  # already timed out
                            self.send_msg(
                                msg,
                                level="error",
                                message_prefix="<{}> batch not done yet, waiting for dependent spider {} to finish".format(
                                    self._batch_name,
                                    self._related_batch_record
                                    or self._related_task_tables,
                                ),
                            )
                            self._batch_timeout = True

                        return False

                    else:
                        self.update_is_done()

                msg = "<{}> batch done. Batch date {}, {} tasks processed".format(
                    self._batch_name, batch_date, done_count
                )
                log.info(msg)
                if not is_first_check:
                    if self._batch_timeout:  # a timeout alert was sent earlier; now done, send a recovery message
                        self._batch_timeout = False
                        self.send_msg(msg, level="error")
                    else:
                        self.send_msg(msg)

                # check whether the next batch is due
                if time_difference >= datetime.timedelta(days=self._batch_interval):
                    if not is_first_check and not self._auto_start_next_batch:
                        return True  # the next batch does not start, because auto-start is disabled

                    msg = "<{}> next batch starting".format(self._batch_name)
                    log.info(msg)
                    self.send_msg(msg)

                    # initialize the task table states
                    if self.init_task() != False:  # an update failure returns False, anything else True/None
                        # initialize attributes
                        self.init_batch_property()

                        is_success = (
                            self.record_batch()
                        )  # the insert may fail even though the task table was already reset; since the current time already belongs to the next batch, the batch check will not look at the task table, so the next run resets it again

                        if is_success:
                            # if workers are waiting for tasks, delay dispatching so their cached batch date has time to refresh
                            if self.have_alive_spider():
                                log.info(
                                    "New batch record inserted; spider processes are waiting for tasks. Dispatching starts in 1 minute so spider-side batch-date caches can refresh"
                                )
                                tools.delay_time(60)
                            else:
                                log.info("New batch record inserted successfully")

                            return False  # the next batch starts

                        else:
                            return True  # the next batch does not start; tasks are dispatched only after the new batch row is inserted successfully

                else:
                    log.info("<{}> next batch not due yet".format(self._batch_name))
                    if not is_first_check:
                        self.send_msg("<{}> next batch not due yet".format(self._batch_name))
                    return True

            else:
                if time_difference >= datetime.timedelta(
                    days=self._batch_interval
                ):  # already timed out
                    time_out = time_difference - datetime.timedelta(
                        days=self._batch_interval
                    )
                    time_out_pretty = tools.format_seconds(time_out.total_seconds())

                    msg = "<{}> batch timed out by {}. Batch date {}, progress {}/{}".format(
                        self._batch_name,
                        time_out_pretty,
                        batch_date,
                        done_count,
                        total_count,
                    )
                    if self._batch_interval >= 1:
                        msg += ", expected duration {} day(s)".format(self._batch_interval)
                    else:
                        msg += ", expected duration {} hour(s)".format(self._batch_interval * 24)

                    result = self.get_deal_speed(
                        total_count=total_count,
                        done_count=done_count,
                        last_batch_date=last_batch_date,
                    )
                    if result:
                        (
                            deal_speed,
                            need_time,
                            overflow_time,
                            calculate_speed_time,
                        ) = result
                        msg += ", processing speed measured at {}: about {} tasks/hour, estimated time remaining {}".format(
                            calculate_speed_time,
                            deal_speed,
                            tools.format_seconds(need_time),
                        )

                        if overflow_time > 0:
                            msg += ", this batch is expected to overrun by {} in total; please handle it promptly".format(
                                tools.format_seconds(overflow_time)
                            )

                    log.info(msg)
                    self.send_msg(
                        msg,
                        level="error",
                        message_prefix="<{}> batch timed out".format(self._batch_name),
                    )
                    self._batch_timeout = True

                else:  # not timed out
                    remaining_time = (
                        datetime.timedelta(days=self._batch_interval) - time_difference
                    )
                    remaining_time_pretty = tools.format_seconds(
                        remaining_time.total_seconds()
                    )

                    if self._batch_interval >= 1:
                        msg = "<{}> batch in progress. Batch date {}, progress {}/{}, expected duration {} day(s), {} remaining".format(
                            self._batch_name,
                            batch_date,
                            done_count,
                            total_count,
                            self._batch_interval,
                            remaining_time_pretty,
                        )
                    else:
                        msg = "<{}> batch in progress. Batch date {}, progress {}/{}, expected duration {} hour(s), {} remaining".format(
                            self._batch_name,
                            batch_date,
                            done_count,
                            total_count,
                            self._batch_interval * 24,
                            remaining_time_pretty,
                        )

                    result = self.get_deal_speed(
                        total_count=total_count,
                        done_count=done_count,
                        last_batch_date=last_batch_date,
                    )
                    if result:
                        (
                            deal_speed,
                            need_time,
                            overflow_time,
                            calculate_speed_time,
                        ) = result
                        msg += ", processing speed measured at {}: about {} tasks/hour, estimated time remaining {}".format(
                            calculate_speed_time,
                            deal_speed,
                            tools.format_seconds(need_time),
                        )

                        if overflow_time > 0:
                            msg += ", this batch may overrun by {}; please handle it promptly".format(
                                tools.format_seconds(overflow_time)
                            )
                            # send an alert
                            self.send_msg(
                                msg,
                                level="error",
                                message_prefix="<{}> batch may time out".format(self._batch_name),
                            )
                            self._batch_timeout = True

                        elif overflow_time < 0:
                            msg += ", this batch is expected to finish {} early".format(
                                tools.format_seconds(-overflow_time)
                            )

                    log.info(msg)

        else:
            # insert batch_date
            self.record_batch()

            # initialize the task table states; there may be code that generates tasks
            self.init_task()

        return False

    def related_spider_is_done(self):
        """
        Whether the related spiders have finished
        @return: True / False / None, where None means there is no related
            spider; can be judged from the spider's own total_count and done_count
        """
        for related_redis_task_table in self._related_task_tables:
            if self._redisdb.exists_key(related_redis_task_table):
                return False

        if self._related_batch_record:
            sql = "select is_done from {} order by id desc limit 1".format(
                self._related_batch_record
            )
            is_done = self._mysqldb.find(sql)
            is_done = is_done[0][0] if is_done else None

            if is_done is None:
                log.warning("The related batch table does not exist or has no batch info")
                return True

            if not is_done:
                return False

        return True

    def record_batch(self):
        """
        @summary: record the batch info (initialization)
        ---------
        ---------
        @result:
        """
        # query the total task count
        sql = "select count(1) from %s%s" % (
            self._task_table,
            self._task_condition_prefix_where,
        )
        total_task_count = self._mysqldb.find(sql)[0][0]

        batch_date = tools.get_current_date(self._date_format)

        sql = (
            "insert into %s (batch_date, done_count, total_count, `interval`, interval_unit, create_time) values ('%s', %s, %s, %s, '%s', CURRENT_TIME)"
            % (
                self._batch_record_table,
                batch_date,
                0,
                total_task_count,
                self._batch_interval
                if self._batch_interval >= 1
                else self._batch_interval * 24,
                "day" if self._batch_interval >= 1 else "hour",
            )
        )

        affect_count = self._mysqldb.add(sql)  # None / 0 / 1 (1 means success)
        if affect_count:
            # reset the batch date
            self._batch_date_cache = batch_date
            # refresh the os.environ.get('batch_date') used by self.batch_date,
            # otherwise the date stays on the previous batch
            os.environ["batch_date"] = self._batch_date_cache

            # spider begins
            self.spider_begin()
        else:
            log.error("Inserting the new batch failed")

        return affect_count

    # -------- batch-end logic ------------
    def task_is_done(self):
        """
        @summary: check whether the tasks are done, and update the batch date at
            the same time (must not crash, or the batch date stops updating)
        ---------
        ---------
        @result: True / False (done / not done)
        """
        is_done = False

        # check the task state in the batch record table
        sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count, is_done from {batch_record_table} order by id desc limit 1'.format(
            date_format=self._date_format.replace(":%M", ":%i"),
            batch_record_table=self._batch_record_table,
        )
        batch_info = self._mysqldb.find(sql)
        if batch_info is None:
            raise Exception("Querying batch info failed")

        if batch_info:
            self._batch_date_cache, total_count, done_count, is_done = batch_info[
                0
            ]  # refresh self._batch_date_cache, in case a new batch has started but it still holds the old batch date

            log.info(
                "<%s> batch date %s, progress %s/%s, done state %d"
                % (
                    self._batch_name,
                    self._batch_date_cache,
                    done_count,
                    total_count,
                    is_done,
                )
            )
            os.environ["batch_date"] = self._batch_date_cache  # refresh the batch date inside BatchParser

        if is_done:  # if the task table still has undone tasks, is_done becomes False
            # fairly expensive, so lock to prevent concurrent queries from multiple processes
            with RedisLock(key=self._spider_name) as lock:
                if lock.locked:
                    log.info("The batch table is marked done; checking the task table for unfinished tasks")
                    sql = "select 1 from %s where (%s = 0 or %s=2)%s limit 1" % (
                        self._task_table,
                        self._task_state,
                        self._task_state,
                        self._task_condition_prefix_and,
                    )
                    tasks = self._mysqldb.find(sql)  # [(1,)] / []
                    if tasks:
                        log.info("Unfinished tasks detected in the task table; waiting for dispatch")
                        is_done = False
                        # update is_done in batch_record to cut down on task-table queries
                        sql = 'update {batch_record_table} set is_done = 0 where batch_date = "{batch_date}"'.format(
                            batch_record_table=self._batch_record_table,
                            batch_date=self._batch_date_cache,
                        )
                        self._mysqldb.update(sql)
                    else:
                        log.info("All tasks in the task table are done; the spider ends")
                else:
                    log.info("The batch table is marked done; another spider process is checking the task table, so this process skips the check and keeps waiting")
                    is_done = False

        return is_done

    def run(self):
        """
        @summary: override run; check whether the MySQL tasks are done and stop when they are
        ---------
        ---------
        @result:
        """
        try:
            self.create_batch_record_table()

            if not self._parsers:  # not add_parser mode
                self._parsers.append(self)

            self._start()

            while True:
                try:
                    if self._stop_spider or (
                        self.task_is_done() and self.all_thread_is_done()
                    ):  # all Redis tasks are done and all MySQL tasks are done
                        # (all_thread_is_done is checked per thread to avoid
                        # updating task states before the work is finished,
                        # which would end the program early)
                        if not self._is_notify_end:
                            self.spider_end()
                            self._is_notify_end = True

                        if not self._keep_alive:
                            self._stop_all_thread()
                            break
                    else:
                        self._is_notify_end = False

                    self.check_task_status()
                except Exception as e:
                    log.exception(e)

                tools.delay_time(10)  # check the spider state every 10 seconds

        except Exception as e:
            msg = "<%s> main thread error, spider ends. exception: %s" % (
                self._batch_name,
                e,
            )
            log.error(msg)
            self.send_msg(
                msg,
                level="error",
                message_prefix="<{}> spider ended abnormally".format(self._batch_name),
            )

            os._exit(137)  # exit code 35072, so a spider manager can restart the process

    @classmethod
    def to_DebugBatchSpider(cls, *args, **kwargs):
        # DebugBatchSpider inherits from cls
        DebugBatchSpider.__bases__ = (cls,)
        DebugBatchSpider.__name__ = cls.__name__
        return DebugBatchSpider(*args, **kwargs)
(task_table, batch_record_table, batch_name, batch_interval, task_keys, task_state='state', min_task_count=10000, check_task_interval=5, task_limit=10000, related_redis_key=None, related_batch_record=None, task_condition='', task_order_by='', redis_key=None, thread_count=None, begin_callback=None, end_callback=None, delete_keys=(), keep_alive=None, auto_start_next_batch=True, **kwargs)
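A minimal configuration sketch drawn from the docstring above; table names, columns, and the redis_key are placeholders, and it assumes feapder re-exports BatchSpider at package level. The typical split is a master process seeding tasks and worker processes consuming them.

import feapder

class MyBatchSpider(feapder.BatchSpider):
    def start_requests(self, task):
        yield feapder.Request(task.url, task_id=task.id)

spider = MyBatchSpider(
    redis_key="feapder:list",          # Redis key prefix for the task queue
    task_table="list_task",            # MySQL task table (id, url, state)
    task_keys=["id", "url"],
    task_state="state",
    batch_record_table="list_batch_record",
    batch_name="list spider",
    batch_interval=7,                  # one batch every 7 days
)

# spider.start_monitor_task()  # master: seeds Redis from MySQL, tracks the batch
# spider.start()               # worker: consumes tasks and parses responses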
8,304
feapder.core.spiders.batch_spider
__get_task_state_count
null
def __get_task_state_count(self):
    sql = "select {state}, count(1) from {task_table}{task_condition} group by {state}".format(
        state=self._task_state,
        task_table=self._task_table,
        task_condition=self._task_condition_prefix_where,
    )
    task_state_count = self._mysqldb.find(sql)

    task_state = {
        "total_count": sum(count for state, count in task_state_count),
        "done_count": sum(
            count for state, count in task_state_count if state in (1, -1)
        ),
        "failed_count": sum(
            count for state, count in task_state_count if state == -1
        ),
    }

    return task_state
(self)