Create an L{IWorker} that does nothing but defer work, to be performed later. @return: a worker that will enqueue work to perform later, and a callable that will perform one element of that work. @rtype: 2-L{tuple} of (L{IWorker}, L{callable})
def createMemoryWorker():
    """
    Create an L{IWorker} that does nothing but defer work, to be performed
    later.

    @return: a worker that will enqueue work to perform later, and a callable
        that will perform one element of that work.
    @rtype: 2-L{tuple} of (L{IWorker}, L{callable})
    """

    def perform():
        if not worker._pending:
            return False
        if worker._pending[0] is NoMoreWork:
            return False
        worker._pending.pop(0)()
        return True

    worker = MemoryWorker()
    return (worker, perform)
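A minimal usage sketch, assuming Twisted's MemoryWorker (whose do() method appends work to the _pending list that perform() pops from above):

    worker, perform = createMemoryWorker()
    results = []
    worker.do(lambda: results.append("a"))  # enqueue work; nothing runs yet
    worker.do(lambda: results.append("b"))
    assert perform() is True   # performs the first queued item
    assert results == ["a"]
    assert perform() is True   # performs the second
    assert perform() is False  # queue is empty, nothing left to do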
Construct a L{Team} that spawns threads as a thread pool, with the given limiting function. @note: Future maintainers: while the public API for the eventual move to twisted.threads should look I{something} like this, and while this function is necessary to implement the API described by L{twisted.python.threadpool}, I am starting to think the idea of a hard upper limit on threadpool size is just bad (turning memory performance issues into correctness issues well before we run into memory pressure), and instead we should build something with reactor integration for slowly releasing idle threads when they're not needed and I{rate} limiting the creation of new threads rather than just hard-capping it. @param currentLimit: a callable that returns the current limit on the number of workers that the returned L{Team} should create; if it already has more workers than that value, no new workers will be created. @type currentLimit: 0-argument callable returning L{int} @param threadFactory: Factory that, when given a C{target} keyword argument, returns a L{threading.Thread} that will run that target. @type threadFactory: callable returning a L{threading.Thread} @return: a new L{Team}.
def pool(
    currentLimit: Callable[[], int], threadFactory: _ThreadFactory = Thread
) -> Team:
    """
    Construct a L{Team} that spawns threads as a thread pool, with the given
    limiting function.

    @note: Future maintainers: while the public API for the eventual move to
        twisted.threads should look I{something} like this, and while this
        function is necessary to implement the API described by
        L{twisted.python.threadpool}, I am starting to think the idea of a
        hard upper limit on threadpool size is just bad (turning memory
        performance issues into correctness issues well before we run into
        memory pressure), and instead we should build something with reactor
        integration for slowly releasing idle threads when they're not needed
        and I{rate} limiting the creation of new threads rather than just
        hard-capping it.

    @param currentLimit: a callable that returns the current limit on the
        number of workers that the returned L{Team} should create; if it
        already has more workers than that value, no new workers will be
        created.
    @type currentLimit: 0-argument callable returning L{int}

    @param threadFactory: Factory that, when given a C{target} keyword
        argument, returns a L{threading.Thread} that will run that target.
    @type threadFactory: callable returning a L{threading.Thread}

    @return: a new L{Team}.
    """

    def startThread(target: Callable[..., object]) -> None:
        return threadFactory(target=target).start()

    def limitedWorkerCreator() -> Optional[IWorker]:
        stats = team.statistics()
        if stats.busyWorkerCount + stats.idleWorkerCount >= currentLimit():
            return None
        return ThreadWorker(startThread, Queue())

    team = Team(
        coordinator=LockWorker(Lock(), LocalStorage()),
        createWorker=limitedWorkerCreator,
        logException=err,
    )
    return team
Find package information from pip freeze output. Match project name somewhat fuzzily (case sensitive; '-' matches '_', and vice versa). Return (normalized project name, installed version) if successful.
def get_installed_package_info(project: str) -> tuple[str, str] | None:
    """Find package information from pip freeze output.

    Match project name somewhat fuzzily (case sensitive;
    '-' matches '_', and vice versa).
    Return (normalized project name, installed version) if successful.
    """
    r = subprocess.run(["pip", "freeze"], capture_output=True, text=True, check=True)
    return search_pip_freeze_output(project, r.stdout)
Create a METADATA.toml file.
def create_metadata(project: str, stub_dir: str, version: str) -> None:
    """Create a METADATA.toml file."""
    # Escaped dot so only a literal "major.minor" prefix is accepted
    match = re.match(r"[0-9]+\.[0-9]+", version)
    if match is None:
        sys.exit(f"Error: Cannot parse version number: {version}")
    filename = os.path.join(stub_dir, "METADATA.toml")
    version = match.group(0)
    if os.path.exists(filename):
        return
    metadata = f'version = "{version}.*"\n'
    upstream_repo_url = asyncio.run(get_upstream_repo_url(project))
    if upstream_repo_url is None:
        warning = (
            f"\nCould not find a URL pointing to the source code for {project!r}.\n"
            f"Please add it as `upstream_repository` to `stubs/{project}/METADATA.toml`, if possible!\n"
        )
        print(termcolor.colored(warning, "red"))
    else:
        metadata += f'upstream_repository = "{upstream_repo_url}"\n'
    print(f"Writing {filename}")
    with open(filename, "w", encoding="UTF-8") as file:
        file.write(metadata)
Exclude stub_dir from strict pyright checks.
def add_pyright_exclusion(stub_dir: str) -> None:
    """Exclude stub_dir from strict pyright checks."""
    with open(PYRIGHT_CONFIG, encoding="UTF-8") as f:
        lines = f.readlines()

    i = 0
    while i < len(lines) and not lines[i].strip().startswith('"exclude": ['):
        i += 1
    assert i < len(lines), f"Error parsing {PYRIGHT_CONFIG}"
    while not lines[i].strip().startswith("]"):
        i += 1
    end = i

    # We assume that all third-party excludes must be at the end of the list.
    # This helps with skipping special entries, such as "stubs/**/@tests/test_cases".
    while lines[i - 1].strip().startswith('"stubs/'):
        i -= 1
    start = i

    before_third_party_excludes = lines[:start]
    third_party_excludes = lines[start:end]
    after_third_party_excludes = lines[end:]

    last_line = third_party_excludes[-1].rstrip()
    if not last_line.endswith(","):
        last_line += ","
    third_party_excludes[-1] = last_line + "\n"

    # Must use forward slash in the .json file
    line_to_add = f'        "{stub_dir}",\n'.replace("\\", "/")
    if line_to_add in third_party_excludes:
        print(f"{PYRIGHT_CONFIG} already up-to-date")
        return

    third_party_excludes.append(line_to_add)
    third_party_excludes.sort(key=str.lower)

    print(f"Updating {PYRIGHT_CONFIG}")
    with open(PYRIGHT_CONFIG, "w", encoding="UTF-8") as f:
        f.writelines(before_third_party_excludes)
        f.writelines(third_party_excludes)
        f.writelines(after_third_party_excludes)
Given the old specifier and an updated version, returns an updated specifier that has the specificity of the old specifier, but matches the updated version. For example: spec="1", version="1.2.3" -> "1.2.3" spec="1.0.1", version="1.2.3" -> "1.2.3" spec="1.*", version="1.2.3" -> "1.*" spec="1.*", version="2.3.4" -> "2.*" spec="1.1.*", version="1.2.3" -> "1.2.*" spec="1.1.1.*", version="1.2.3" -> "1.2.3.*"
def get_updated_version_spec(spec: str, version: packaging.version.Version) -> str:
    """
    Given the old specifier and an updated version, returns an updated specifier that has the
    specificity of the old specifier, but matches the updated version.

    For example:
    spec="1",       version="1.2.3" -> "1.2.3"
    spec="1.0.1",   version="1.2.3" -> "1.2.3"
    spec="1.*",     version="1.2.3" -> "1.*"
    spec="1.*",     version="2.3.4" -> "2.*"
    spec="1.1.*",   version="1.2.3" -> "1.2.*"
    spec="1.1.1.*", version="1.2.3" -> "1.2.3.*"
    """
    if not spec.endswith(".*"):
        return _check_spec(str(version), version)

    specificity = spec.count(".") if spec.removesuffix(".*") else 0
    rounded_version = version.base_version.split(".")[:specificity]
    rounded_version.extend(["0"] * (specificity - len(rounded_version)))

    return _check_spec(".".join(rounded_version) + ".*", version)
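A hedged usage sketch, assuming `_check_spec` (defined elsewhere in the same script) validates that the candidate spec actually matches the version and returns it:

    from packaging.version import Version

    get_updated_version_spec("1.1.*", Version("1.2.3"))    # -> "1.2.*"
    get_updated_version_spec("1.0.1", Version("1.2.3"))    # -> "1.2.3"
    get_updated_version_spec("1.1.1.*", Version("1.2.3"))  # -> "1.2.3.*"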
Check that given directory contains only valid Python files of a certain kind.
def assert_consistent_filetypes(
    directory: Path, *, kind: str, allowed: set[str], allow_nonidentifier_filenames: bool = False
) -> None:
    """Check that given directory contains only valid Python files of a certain kind."""
    allowed_paths = {Path(f) for f in allowed}
    contents = list(directory.iterdir())
    gitignore_spec = get_gitignore_spec()
    while contents:
        entry = contents.pop()
        if spec_matches_path(gitignore_spec, entry):
            continue
        if entry.relative_to(directory) in allowed_paths:
            # Note if a subdirectory is allowed, we will not check its contents
            continue
        if entry.is_file():
            if not allow_nonidentifier_filenames:
                assert entry.stem.isidentifier(), f'Files must be valid modules, got: "{entry}"'
            bad_filetype = f'Only {extension_descriptions[kind]!r} files allowed in the "{directory}" directory; got: {entry}'
            assert entry.suffix == kind, bad_filetype
        else:
            assert entry.name.isidentifier(), f"Directories must be valid packages, got: {entry}"
            contents.extend(entry.iterdir())
Check that the stdlib directory contains only the correct files.
def check_stdlib() -> None:
    """Check that the stdlib directory contains only the correct files."""
    assert_consistent_filetypes(Path("stdlib"), kind=".pyi", allowed={"_typeshed/README.md", "VERSIONS"})
Check that the stubs directory contains only the correct files.
def check_stubs() -> None:
    """Check that the stubs directory contains only the correct files."""
    gitignore_spec = get_gitignore_spec()
    for dist in Path("stubs").iterdir():
        if spec_matches_path(gitignore_spec, dist):
            continue
        assert dist.is_dir(), f"Only directories allowed in stubs, got {dist}"

        valid_dist_name = "^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$"  # courtesy of PEP 426
        assert re.fullmatch(
            valid_dist_name, dist.name, re.IGNORECASE
        ), f"Directory name must be a valid distribution name: {dist}"
        assert not dist.name.startswith("types-"), f"Directory name not allowed to start with 'types-': {dist}"

        allowed = {"METADATA.toml", "README", "README.md", "README.rst", "@tests"}
        assert_consistent_filetypes(dist, kind=".pyi", allowed=allowed)

        tests_dir = dist / "@tests"
        if tests_dir.exists() and tests_dir.is_dir():
            py_files_present = any(file.suffix == ".py" for file in tests_dir.iterdir())
            error_message = "Test-case files must be in an `@tests/test_cases/` directory, not in the `@tests/` directory"
            assert not py_files_present, error_message
Check whether all setuptools._distutils files are re-exported from distutils.
def check_distutils() -> None:
    """Check whether all setuptools._distutils files are re-exported from distutils."""

    def all_relative_paths_in_directory(path: Path) -> set[Path]:
        return {pyi.relative_to(path) for pyi in path.rglob("*.pyi")}

    all_setuptools_files = all_relative_paths_in_directory(Path("stubs", "setuptools", "setuptools", "_distutils"))
    all_distutils_files = all_relative_paths_in_directory(Path("stubs", "setuptools", "distutils"))
    assert all_setuptools_files and all_distutils_files, "Looks like this test might be out of date!"

    extra_files = all_setuptools_files - all_distutils_files
    joined = "\n".join(f" * {f}" for f in extra_files)
    assert not extra_files, f"Files missing from distutils:\n{joined}"
Check that the test_cases directory contains only the correct files.
def check_test_cases() -> None:
    """Check that the test_cases directory contains only the correct files."""
    for _, testcase_dir in get_all_testcase_directories():
        assert_consistent_filetypes(testcase_dir, kind=".py", allowed={"README.md"}, allow_nonidentifier_filenames=True)
        bad_test_case_filename = 'Files in a `test_cases` directory must have names starting with "check_"; got "{}"'
        for file in testcase_dir.rglob("*.py"):
            assert file.stem.startswith("check_"), bad_test_case_filename.format(file)
Check that there are no symlinks in the typeshed repository.
def check_no_symlinks() -> None:
    """Check that there are no symlinks in the typeshed repository."""
    files = [os.path.join(root, file) for root, _, files in os.walk(".") for file in files]
    no_symlink = "You cannot use symlinks in typeshed, please copy {} to its link."
    for file in files:
        _, ext = os.path.splitext(file)
        if ext == ".pyi" and os.path.islink(file):
            raise ValueError(no_symlink.format(file))
Check that the stdlib/VERSIONS file has the correct format.
def check_versions_file() -> None:
    """Check that the stdlib/VERSIONS file has the correct format."""
    versions = set[str]()
    with open("stdlib/VERSIONS", encoding="UTF-8") as f:
        data = f.read().splitlines()

    for line in data:
        line = strip_comments(line)
        if line == "":
            continue
        m = VERSIONS_RE.match(line)
        if not m:
            raise AssertionError(f"Bad line in VERSIONS: {line}")
        module = m.group(1)
        assert module not in versions, f"Duplicate module {module} in VERSIONS"
        versions.add(module)

    modules = _find_stdlib_modules()
    # Sub-modules don't need to be listed in VERSIONS.
    extra = {m.split(".")[0] for m in modules} - versions
    assert not extra, f"Modules not in versions: {extra}"
    extra = versions - modules
    assert not extra, f"Versions not in modules: {extra}"
Check that all METADATA.toml files are valid.
def check_metadata() -> None:
    """Check that all METADATA.toml files are valid."""
    for distribution in os.listdir("stubs"):
        # This function does various sanity checks for METADATA.toml files
        read_metadata(distribution)
Check that type checkers and linters are pinned to an exact version.
def check_requirement_pins() -> None:
    """Check that type checkers and linters are pinned to an exact version."""
    requirements = parse_requirements()
    for package in linters:
        assert package in requirements, f"type checker/linter '{package}' not found in {REQS_FILE}"
        spec = requirements[package].specifier
        assert len(spec) == 1, f"type checker/linter '{package}' has complex specifier in {REQS_FILE}"
        msg = f"type checker/linter '{package}' is not pinned to an exact version in {REQS_FILE}"
        assert str(spec).startswith("=="), msg
Helper function for argument-parsing
def valid_path(cmd_arg: str) -> Path:
    """Helper function for argument-parsing"""
    path = Path(cmd_arg)
    if not path.exists():
        raise argparse.ArgumentTypeError(f'"{path}" does not exist in typeshed!')
    if not (path in DIRECTORIES_TO_TEST or any(directory in path.parents for directory in DIRECTORIES_TO_TEST)):
        raise argparse.ArgumentTypeError('mypy_test.py only tests the stubs found in the "stdlib" and "stubs" directories')
    return path
Helper function for argument-parsing
def remove_dev_suffix(version: str) -> str:
    """Helper function for argument-parsing"""
    if version.endswith("-dev"):
        return version[: -len("-dev")]
    return version
Add all files in the package or module represented by 'module'.
def add_files(files: list[Path], module: Path, args: TestConfig) -> None:
    """Add all files in the package or module represented by 'module'."""
    if module.is_file() and module.suffix == ".pyi":
        if match(module, args):
            files.append(module)
    else:
        files.extend(sorted(file for file in module.rglob("*.pyi") if match(file, args)))
Test the stubs of a third-party distribution. Return a tuple, where the first element indicates mypy's return code and the second element is the number of checked files.
def test_third_party_distribution(
    distribution: str, args: TestConfig, venv_dir: Path | None, *, non_types_dependencies: bool
) -> TestResult:
    """Test the stubs of a third-party distribution.

    Return a tuple, where the first element indicates mypy's return code
    and the second element is the number of checked files.
    """
    files: list[Path] = []
    configurations: list[MypyDistConf] = []
    seen_dists: set[str] = set()
    add_third_party_files(distribution, files, args, configurations, seen_dists)

    if not files and args.filter:
        return TestResult(MypyResult.SUCCESS, 0)

    print(f"testing {distribution} ({len(files)} files)... ", end="", flush=True)

    if not files:
        print_error("no files found")
        sys.exit(1)

    mypypath = os.pathsep.join(str(Path("stubs", dist)) for dist in seen_dists)
    if args.verbose:
        print(colored(f"\nMYPYPATH={mypypath}", "blue"))

    result = run_mypy(
        args,
        configurations,
        files,
        venv_dir=venv_dir,
        mypypath=mypypath,
        testing_stdlib=False,
        non_types_dependencies=non_types_dependencies,
    )
    return TestResult(result, len(files))
Logic necessary for testing stubs with non-types dependencies in isolated environments.
def setup_virtual_environments(distributions: dict[str, PackageDependencies], args: TestConfig, tempdir: Path) -> None:
    """Logic necessary for testing stubs with non-types dependencies in isolated environments."""
    if not distributions:
        return  # hooray! Nothing to do

    # STAGE 1: Determine which (if any) stubs packages require virtual environments.
    # Group stubs packages according to their external-requirements sets
    external_requirements_to_distributions: defaultdict[frozenset[str], list[str]] = defaultdict(list)
    num_pkgs_with_external_reqs = 0

    for distribution_name, requirements in distributions.items():
        if requirements.external_pkgs:
            num_pkgs_with_external_reqs += 1
            external_requirements = frozenset(requirements.external_pkgs)
            external_requirements_to_distributions[external_requirements].append(distribution_name)
        else:
            _DISTRIBUTION_TO_VENV_MAPPING[distribution_name] = None

    # Exit early if there are no stubs packages that have non-types dependencies
    if num_pkgs_with_external_reqs == 0:
        if args.verbose:
            print(colored("No additional venvs are required to be set up", "blue"))
        return

    # STAGE 2: Setup a virtual environment for each unique set of external requirements
    requirements_sets_to_venvs: dict[frozenset[str], Path] = {}

    if args.verbose:
        num_venvs = len(external_requirements_to_distributions)
        msg = (
            f"Setting up {num_venvs} venv{'s' if num_venvs != 1 else ''} "
            f"for {num_pkgs_with_external_reqs} "
            f"distribution{'s' if num_pkgs_with_external_reqs != 1 else ''}... "
        )
        print(colored(msg, "blue"), end="", flush=True)

    venv_start_time = time.perf_counter()

    with concurrent.futures.ProcessPoolExecutor() as executor:
        venv_futures = [
            executor.submit(setup_venv_for_external_requirements_set, requirements_set, tempdir, args)
            for requirements_set in external_requirements_to_distributions
        ]
        for venv_future in concurrent.futures.as_completed(venv_futures):
            requirements_set, venv_dir = venv_future.result()
            requirements_sets_to_venvs[requirements_set] = venv_dir

    venv_elapsed_time = time.perf_counter() - venv_start_time

    if args.verbose:
        print(colored(f"took {venv_elapsed_time:.2f} seconds", "blue"))

    # STAGE 3: For each {virtual_environment: requirements_set} pairing,
    # `pip install` the requirements set into the virtual environment
    pip_start_time = time.perf_counter()

    # Limit workers to 10 at a time, since this makes network requests
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        pip_install_futures = [
            executor.submit(install_requirements_for_venv, venv_dir, args, requirements_set)
            for requirements_set, venv_dir in requirements_sets_to_venvs.items()
        ]
        concurrent.futures.wait(pip_install_futures)

    pip_elapsed_time = time.perf_counter() - pip_start_time

    if args.verbose:
        msg = f"Combined time for installing requirements across all venvs: {pip_elapsed_time:.2f} seconds"
        print(colored(msg, "blue"))

    # STAGE 4: Populate the _DISTRIBUTION_TO_VENV_MAPPING
    # so that we have a simple {distribution: venv_to_use} mapping to use for the rest of the test.
    for requirements_set, distribution_list in external_requirements_to_distributions.items():
        venv_to_use = requirements_sets_to_venvs[requirements_set]
        _DISTRIBUTION_TO_VENV_MAPPING.update(dict.fromkeys(distribution_list, venv_to_use))
Return an object describing the stubtest settings for a single stubs distribution.
def read_stubtest_settings(distribution: str) -> StubtestSettings:
    """Return an object describing the stubtest settings for a single stubs distribution."""
    with Path("stubs", distribution, "METADATA.toml").open("rb") as f:
        data: dict[str, object] = tomli.load(f).get("tool", {}).get("stubtest", {})

    skip: object = data.get("skip", False)
    apt_dependencies: object = data.get("apt_dependencies", [])
    brew_dependencies: object = data.get("brew_dependencies", [])
    choco_dependencies: object = data.get("choco_dependencies", [])
    extras: object = data.get("extras", [])
    ignore_missing_stub: object = data.get("ignore_missing_stub", False)
    specified_platforms: object = data.get("platforms", ["linux"])
    stubtest_requirements: object = data.get("stubtest_requirements", [])

    assert type(skip) is bool
    assert type(ignore_missing_stub) is bool

    # It doesn't work for type-narrowing if we use a for loop here...
    assert _is_list_of_strings(specified_platforms)
    assert _is_list_of_strings(apt_dependencies)
    assert _is_list_of_strings(brew_dependencies)
    assert _is_list_of_strings(choco_dependencies)
    assert _is_list_of_strings(extras)
    assert _is_list_of_strings(stubtest_requirements)

    unrecognised_platforms = set(specified_platforms) - _STUBTEST_PLATFORM_MAPPING.keys()
    assert not unrecognised_platforms, f"Unrecognised platforms specified for {distribution!r}: {unrecognised_platforms}"

    for platform, dep_key in _STUBTEST_PLATFORM_MAPPING.items():
        if platform not in specified_platforms:
            assert dep_key not in data, (
                f"Stubtest is not run on {platform} in CI for {distribution!r}, "
                f"but {dep_key!r} are specified in METADATA.toml"
            )

    return StubtestSettings(
        skip=skip,
        apt_dependencies=apt_dependencies,
        brew_dependencies=brew_dependencies,
        choco_dependencies=choco_dependencies,
        extras=extras,
        ignore_missing_stub=ignore_missing_stub,
        platforms=specified_platforms,
        stubtest_requirements=stubtest_requirements,
    )
Return an object describing the metadata of a stub as given in the METADATA.toml file. This function does some basic validation, but does no parsing, transforming or normalization of the metadata. Use `read_dependencies` if you need to parse the dependencies given in the `requires` field, for example.
def read_metadata(distribution: str) -> StubMetadata:
    """Return an object describing the metadata of a stub as given in the METADATA.toml file.

    This function does some basic validation,
    but does no parsing, transforming or normalization of the metadata.
    Use `read_dependencies` if you need to parse the dependencies
    given in the `requires` field, for example.
    """
    try:
        with Path("stubs", distribution, "METADATA.toml").open("rb") as f:
            data: dict[str, object] = tomli.load(f)
    except FileNotFoundError:
        raise NoSuchStubError(f"Typeshed has no stubs for {distribution!r}!") from None

    unknown_metadata_fields = data.keys() - _KNOWN_METADATA_FIELDS
    assert not unknown_metadata_fields, f"Unexpected keys in METADATA.toml for {distribution!r}: {unknown_metadata_fields}"

    assert "version" in data, f"Missing 'version' field in METADATA.toml for {distribution!r}"
    version = data["version"]
    assert isinstance(version, str)
    # Check that the version parses
    Version(version[:-2] if version.endswith(".*") else version)

    requires: object = data.get("requires", [])
    assert isinstance(requires, list)
    for req in requires:
        assert isinstance(req, str), f"Invalid requirement {req!r} for {distribution!r}"
        for space in " \t\n":
            assert space not in req, f"For consistency, requirement should not have whitespace: {req!r}"
        # Check that the requirement parses
        Requirement(req)

    extra_description: object = data.get("extra_description")
    assert isinstance(extra_description, (str, type(None)))

    if "stub_distribution" in data:
        stub_distribution = data["stub_distribution"]
        assert isinstance(stub_distribution, str)
        assert _DIST_NAME_RE.fullmatch(stub_distribution), f"Invalid 'stub_distribution' value for {distribution!r}"
    else:
        stub_distribution = f"types-{distribution}"

    upstream_repository: object = data.get("upstream_repository")
    assert isinstance(upstream_repository, (str, type(None)))
    if isinstance(upstream_repository, str):
        parsed_url = urllib.parse.urlsplit(upstream_repository)
        assert parsed_url.scheme == "https", f"{distribution}: URLs in the upstream_repository field should use https"
        no_www_please = (
            f"{distribution}: `World Wide Web` subdomain (`www.`) should be removed from URLs in the upstream_repository field"
        )
        assert not parsed_url.netloc.startswith("www."), no_www_please
        no_query_params_please = (
            f"{distribution}: Query params (`?`) should be removed from URLs in the upstream_repository field"
        )
        assert parsed_url.hostname in _QUERY_URL_ALLOWLIST or (not parsed_url.query), no_query_params_please
        no_fragments_please = f"{distribution}: Fragments (`#`) should be removed from URLs in the upstream_repository field"
        assert not parsed_url.fragment, no_fragments_please
        if parsed_url.netloc == "github.com":
            cleaned_url_path = parsed_url.path.strip("/")
            num_url_path_parts = len(Path(cleaned_url_path).parts)
            bad_github_url_msg = (
                f"Invalid upstream_repository for {distribution!r}: "
                "URLs for GitHub repositories always have two parts in their paths"
            )
            assert num_url_path_parts == 2, bad_github_url_msg

    obsolete_since: object = data.get("obsolete_since")
    assert isinstance(obsolete_since, (str, type(None)))
    no_longer_updated: object = data.get("no_longer_updated", False)
    assert type(no_longer_updated) is bool
    uploaded_to_pypi: object = data.get("upload", True)
    assert type(uploaded_to_pypi) is bool
    partial_stub: object = data.get("partial_stub", True)
    assert type(partial_stub) is bool

    requires_python_str: object = data.get("requires_python")
    oldest_supported_python = _get_oldest_supported_python()
    oldest_supported_python_specifier = Specifier(f">={oldest_supported_python}")
    if requires_python_str is None:
        requires_python = oldest_supported_python_specifier
    else:
        assert type(requires_python_str) is str
        requires_python = Specifier(requires_python_str)
        assert requires_python != oldest_supported_python_specifier, f'requires_python="{requires_python}" is redundant'
        # Check minimum Python version is not less than the oldest version of Python supported by typeshed
        assert oldest_supported_python_specifier.contains(
            requires_python.version
        ), f"'requires_python' contains versions lower than typeshed's oldest supported Python ({oldest_supported_python})"
        assert requires_python.operator == ">=", "'requires_python' should be a minimum version specifier, use '>=3.x'"

    empty_tools: dict[object, object] = {}
    tools_settings: object = data.get("tool", empty_tools)
    assert isinstance(tools_settings, dict)
    assert tools_settings.keys() <= _KNOWN_METADATA_TOOL_FIELDS.keys(), f"Unrecognised tool for {distribution!r}"
    for tool, tk in _KNOWN_METADATA_TOOL_FIELDS.items():
        settings_for_tool: object = tools_settings.get(tool, {})  # pyright: ignore[reportUnknownMemberType]
        assert isinstance(settings_for_tool, dict)
        for key in settings_for_tool:
            assert key in tk, f"Unrecognised {tool} key {key!r} for {distribution!r}"

    return StubMetadata(
        version=version,
        requires=requires,
        extra_description=extra_description,
        stub_distribution=stub_distribution,
        upstream_repository=upstream_repository,
        obsolete_since=obsolete_since,
        no_longer_updated=no_longer_updated,
        uploaded_to_pypi=uploaded_to_pypi,
        partial_stub=partial_stub,
        stubtest_settings=read_stubtest_settings(distribution),
        requires_python=requires_python,
    )
Read the dependencies listed in a METADATA.toml file for a stubs package. Once the dependencies have been read, determine which dependencies are typeshed-internal dependencies, and which dependencies are external (non-types) dependencies. For typeshed dependencies, translate the "dependency name" into the "package name"; for external dependencies, leave them as they are in the METADATA.toml file. Note that this function may consider things to be typeshed stubs even if they haven't yet been uploaded to PyPI. If a typeshed stub is removed, this function will consider it to be an external dependency.
def read_dependencies(distribution: str) -> PackageDependencies:
    """Read the dependencies listed in a METADATA.toml file for a stubs package.

    Once the dependencies have been read,
    determine which dependencies are typeshed-internal dependencies,
    and which dependencies are external (non-types) dependencies.
    For typeshed dependencies, translate the "dependency name" into the "package name";
    for external dependencies, leave them as they are in the METADATA.toml file.

    Note that this function may consider things to be typeshed stubs
    even if they haven't yet been uploaded to PyPI.
    If a typeshed stub is removed, this function will consider it to be an external dependency.
    """
    pypi_name_to_typeshed_name_mapping = get_pypi_name_to_typeshed_name_mapping()
    typeshed: list[str] = []
    external: list[str] = []
    for dependency in read_metadata(distribution).requires:
        maybe_typeshed_dependency = Requirement(dependency).name
        if maybe_typeshed_dependency in pypi_name_to_typeshed_name_mapping:
            typeshed.append(pypi_name_to_typeshed_name_mapping[maybe_typeshed_dependency])
        else:
            # convert to Requirement and then back to str
            # to make sure that the requirements all have a normalised string representation
            # (This will also catch any malformed requirements early)
            external.append(str(Requirement(dependency)))
    return PackageDependencies(tuple(typeshed), tuple(external))
Recursively gather dependencies for a single stubs package. For example, if the stubs for `caldav` declare a dependency on typeshed's stubs for `requests`, and the stubs for requests declare a dependency on typeshed's stubs for `urllib3`, `get_recursive_requirements("caldav")` will determine that the stubs for `caldav` have both `requests` and `urllib3` as typeshed-internal dependencies.
def get_recursive_requirements(package_name: str) -> PackageDependencies:
    """Recursively gather dependencies for a single stubs package.

    For example, if the stubs for `caldav`
    declare a dependency on typeshed's stubs for `requests`,
    and the stubs for requests declare a dependency on typeshed's stubs for `urllib3`,
    `get_recursive_requirements("caldav")` will determine
    that the stubs for `caldav` have both `requests` and `urllib3`
    as typeshed-internal dependencies.
    """
    typeshed: set[str] = set()
    external: set[str] = set()
    non_recursive_requirements = read_dependencies(package_name)
    typeshed.update(non_recursive_requirements.typeshed_pkgs)
    external.update(non_recursive_requirements.external_pkgs)
    for pkg in non_recursive_requirements.typeshed_pkgs:
        reqs = get_recursive_requirements(pkg)
        typeshed.update(reqs.typeshed_pkgs)
        external.update(reqs.external_pkgs)
    return PackageDependencies(tuple(sorted(typeshed)), tuple(sorted(external)))
Runs pytype, returning the stderr if any.
def run_pytype(*, filename: str, python_version: str, missing_modules: Iterable[str]) -> str | None:
    """Runs pytype, returning the stderr if any."""
    if python_version not in _LOADERS:
        options = pytype_config.Options.create("", parse_pyi=True, python_version=python_version)
        # For simplicity, pretends missing modules are part of the stdlib.
        missing_modules = tuple(os.path.join("stdlib", m) for m in missing_modules)
        loader = load_pytd.create_loader(options, missing_modules)
        _LOADERS[python_version] = (options, loader)
    options, loader = _LOADERS[python_version]
    stderr: str | None
    try:
        with pytype_config.verbosity_from(options):
            ast = loader.load_file(_get_module_name(filename), filename)
            loader.finish_and_verify_ast(ast)
    except Exception:
        stderr = traceback.format_exc()
    else:
        stderr = None
    return stderr
Converts a filename {subdir}/m.n/module/foo to module.foo.
def _get_module_name(filename: str) -> str:
    """Converts a filename {subdir}/m.n/module/foo to module.foo."""
    parts = _get_relative(filename).split(os.path.sep)
    if parts[0] == "stdlib":
        module_parts = parts[1:]
    else:
        assert parts[0] == "stubs"
        module_parts = parts[2:]
    return ".".join(module_parts).replace(".pyi", "").replace(".__init__", "")
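To illustrate the conversion, here is a small self-contained sketch; the hypothetical _demo_module_name mirrors the logic above for a path already made relative to the typeshed checkout, assuming POSIX path separators:

    import os

    def _demo_module_name(rel_path: str) -> str:
        # Same transformation as _get_module_name, minus the _get_relative() call.
        parts = rel_path.split(os.path.sep)
        module_parts = parts[1:] if parts[0] == "stdlib" else parts[2:]
        return ".".join(module_parts).replace(".pyi", "").replace(".__init__", "")

    assert _demo_module_name("stdlib/os/path.pyi") == "os.path"
    assert _demo_module_name("stubs/six/six/__init__.pyi") == "six"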
Determine all files to test, skipping any that are on the exclude list. Returns a sorted list of file paths.
def determine_files_to_test(*, paths: Sequence[str]) -> list[str]:
    """Determine all files to test, skipping any that are on the exclude list.

    Returns a sorted list of file paths.
    """
    filenames = find_stubs_in_paths(paths)
    ts = typeshed.Typeshed()
    skipped = set(ts.read_blacklist())
    files = []
    for f in sorted(filenames):
        rel = _get_relative(f)
        if rel in skipped:
            continue
        files.append(f)
    return files
Get names of modules that should be treated as missing. Some typeshed stubs depend on dependencies outside of typeshed. Since pytype isn't able to read such dependencies, we instead declare them as "missing" modules, so that no errors are reported for them. Similarly, pytype cannot parse files on its exclude list, so we also treat those as missing.
def get_missing_modules(files_to_test: Sequence[str]) -> Iterable[str]:
    """Get names of modules that should be treated as missing.

    Some typeshed stubs depend on dependencies outside of typeshed. Since pytype
    isn't able to read such dependencies, we instead declare them as "missing"
    modules, so that no errors are reported for them.

    Similarly, pytype cannot parse files on its exclude list, so we also treat
    those as missing.
    """
    stub_distributions = set()
    for fi in files_to_test:
        parts = fi.split(os.sep)
        try:
            idx = parts.index("stubs")
        except ValueError:
            continue
        stub_distributions.add(parts[idx + 1])

    missing_modules = set()
    for distribution in stub_distributions:
        for external_req in read_dependencies(distribution).external_pkgs:
            req_name = Requirement(external_req).name
            associated_packages = _get_pkgs_associated_with_requirement(req_name)
            missing_modules.update(associated_packages)

    test_dir = os.path.dirname(__file__)
    exclude_list = os.path.join(test_dir, "pytype_exclude_list.txt")
    with open(exclude_list) as f:
        excluded_files = f.readlines()
    for fi in excluded_files:
        if not fi.startswith("stubs/"):
            # Skips comments, empty lines, and stdlib files, which are in
            # the exclude list because pytype has its own version.
            continue
        unused_stubs_prefix, unused_pkg, mod_path = fi.split("/", 2)  # pyright: ignore [reportUnusedVariable]
        missing_modules.add(os.path.splitext(mod_path)[0])
    return missing_modules
Helper function for argument-parsing
def package_with_test_cases(package_name: str) -> PackageInfo:
    """Helper function for argument-parsing"""
    if package_name == "stdlib":
        return PackageInfo("stdlib", Path(TEST_CASES))
    test_case_dir = testcase_dir_from_package_name(package_name)
    if test_case_dir.is_dir():
        if not os.listdir(test_case_dir):
            raise argparse.ArgumentTypeError(f"{package_name!r} has a 'test_cases' directory but it is empty!")
        return PackageInfo(package_name, test_case_dir)
    raise argparse.ArgumentTypeError(f"No test cases found for {package_name!r}!")
Use wrapper scripts to run stubtest inside gdb. The wrapper script is used to pass the arguments to the gdb script.
def setup_gdb_stubtest_command(venv_dir: Path, stubtest_cmd: list[str]) -> bool:
    """
    Use wrapper scripts to run stubtest inside gdb.
    The wrapper script is used to pass the arguments to the gdb script.
    """
    if sys.platform == "win32":
        print_error("gdb is not supported on Windows")
        return False

    try:
        gdb_version = subprocess.check_output(["gdb", "--version"], text=True, stderr=subprocess.STDOUT)
    except FileNotFoundError:
        print_error("gdb is not installed")
        return False
    if "Python scripting is not supported in this copy of GDB" in gdb_version:
        print_error("Python scripting is not supported in this copy of GDB")
        return False

    gdb_script = venv_dir / "gdb_stubtest.py"
    wrapper_script = venv_dir / "gdb_wrapper.py"
    gdb_script_contents = dedent(
        f"""
        import json
        import os
        import site
        import sys
        import traceback
        from glob import glob

        # Add the venv site-packages to sys.path. gdb doesn't use the virtual environment.
        # Taken from https://github.com/pwndbg/pwndbg/blob/83d8d95b576b749e888f533ce927ad5a77fb957b/gdbinit.py#L37
        site_pkgs_path = glob(os.path.join({str(venv_dir)!r}, "lib/*/site-packages"))[0]
        site.addsitedir(site_pkgs_path)

        exit_code = 1
        try:
            # gdb wraps stdout and stderr without a .fileno
            # colored output in mypy tries to access .fileno()
            sys.stdout.fileno = sys.__stdout__.fileno
            sys.stderr.fileno = sys.__stderr__.fileno

            from mypy.stubtest import main

            sys.argv = json.loads(os.environ.get("STUBTEST_ARGS"))
            exit_code = main()
        except Exception:
            traceback.print_exc()
        finally:
            gdb.execute(f"quit {{exit_code}}")
        """
    )
    gdb_script.write_text(gdb_script_contents)

    wrapper_script_contents = dedent(
        f"""
        import json
        import os
        import subprocess
        import sys

        stubtest_env = os.environ | {{"STUBTEST_ARGS": json.dumps(sys.argv)}}
        gdb_cmd = [
            "gdb",
            "--quiet",
            "--nx",
            "--batch",
            "--command",
            {str(gdb_script)!r},
        ]
        r = subprocess.run(gdb_cmd, env=stubtest_env)
        sys.exit(r.returncode)
        """
    )
    wrapper_script.write_text(wrapper_script_contents)

    # replace "-m mypy.stubtest" in stubtest_cmd with the path to our wrapper script
    assert stubtest_cmd[1:3] == ["-m", "mypy.stubtest"]
    stubtest_cmd[1:3] = [str(wrapper_script)]
    return True
Perform some black magic in order to run stubtest inside uWSGI. We have to write the exit code from stubtest to a surrogate file because uwsgi --pyrun does not exit with the exitcode from the python script. We have a second wrapper script that passes the arguments along to the uWSGI script and retrieves the exit code from the file, so it behaves like running stubtest normally would. Both generated wrapper scripts are created inside `venv_dir`, which itself is a subdirectory inside a temporary directory, so both scripts will be cleaned up after this function has been executed.
def setup_uwsgi_stubtest_command(dist: Path, venv_dir: Path, stubtest_cmd: list[str]) -> bool:
    """Perform some black magic in order to run stubtest inside uWSGI.

    We have to write the exit code from stubtest to a surrogate file
    because uwsgi --pyrun does not exit with the exitcode from the
    python script. We have a second wrapper script that passes the
    arguments along to the uWSGI script and retrieves the exit code
    from the file, so it behaves like running stubtest normally would.

    Both generated wrapper scripts are created inside `venv_dir`,
    which itself is a subdirectory inside a temporary directory,
    so both scripts will be cleaned up after this function
    has been executed.
    """
    uwsgi_ini = dist / "@tests/uwsgi.ini"

    if sys.platform == "win32":
        print_error("uWSGI is not supported on Windows")
        return False

    uwsgi_script = venv_dir / "uwsgi_stubtest.py"
    wrapper_script = venv_dir / "uwsgi_wrapper.py"
    exit_code_surrogate = venv_dir / "exit_code"
    uwsgi_script_contents = dedent(
        f"""
        import json
        import os
        import sys
        from mypy.stubtest import main

        sys.argv = json.loads(os.environ.get("STUBTEST_ARGS"))
        exit_code = main()
        with open("{exit_code_surrogate}", mode="w") as fp:
            fp.write(str(exit_code))
        sys.exit(exit_code)
        """
    )
    uwsgi_script.write_text(uwsgi_script_contents)

    uwsgi_exe = venv_dir / "bin" / "uwsgi"

    # It would be nice to reliably separate uWSGI output from
    # the stubtest output, on linux it appears that stubtest
    # will always go to stdout and uWSGI to stderr, but on
    # MacOS they both go to stderr, for now we deal with the
    # bit of extra spam
    wrapper_script_contents = dedent(
        f"""
        import json
        import os
        import subprocess
        import sys

        stubtest_env = os.environ | {{"STUBTEST_ARGS": json.dumps(sys.argv)}}
        uwsgi_cmd = [
            "{uwsgi_exe}",
            "--ini",
            "{uwsgi_ini}",
            "--spooler",
            "{venv_dir}",
            "--pyrun",
            "{uwsgi_script}",
        ]
        subprocess.run(uwsgi_cmd, env=stubtest_env)
        with open("{exit_code_surrogate}", mode="r") as fp:
            sys.exit(int(fp.read()))
        """
    )
    wrapper_script.write_text(wrapper_script_contents)

    # replace "-m mypy.stubtest" in stubtest_cmd with the path to our wrapper script
    assert stubtest_cmd[1:3] == ["-m", "mypy.stubtest"]
    stubtest_cmd[1:3] = [str(wrapper_script)]
    return True
Print a row of * symbols across the screen. This can be useful to divide terminal output into separate sections.
def print_divider() -> None:
    """Print a row of * symbols across the screen.

    This can be useful to divide terminal output into separate sections.
    """
    print()
    print("*" * 70)
    print()
Return a dictionary of requirements from the requirements file.
def parse_requirements() -> Mapping[str, Requirement]:
    """Return a dictionary of requirements from the requirements file."""
    with open(REQS_FILE, encoding="UTF-8") as requirements_file:
        stripped_lines = map(strip_comments, requirements_file)
        requirements = map(Requirement, filter(None, stripped_lines))
        return {requirement.name: requirement for requirement in requirements}
See issue #9591
def check_search_with_AnyStr(pattern: re.Pattern[t.AnyStr], string: t.AnyStr) -> re.Match[t.AnyStr]:
    """See issue #9591"""
    match = pattern.search(string)
    if match is None:
        raise ValueError(f"'{string!r}' does not match {pattern!r}")
    return match
Return first n items of the iterable as a list
def take(n: int, iterable: Iterable[_T]) -> list[_T]:
    "Return first n items of the iterable as a list"
    return list(islice(iterable, n))
Prepend a single value in front of an iterator
def prepend(value: _T1, iterator: Iterable[_T2]) -> Iterator[_T1 | _T2]:
    "Prepend a single value in front of an iterator"
    # prepend(1, [2, 3, 4]) --> 1 2 3 4
    return chain([value], iterator)
Return function(0), function(1), ...
def tabulate(function: Callable[[int], _T], start: int = 0) -> Iterator[_T]:
    "Return function(0), function(1), ..."
    return map(function, count(start))
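Since tabulate() maps over count() and is therefore infinite, slice it before materializing; for example:

    from itertools import islice

    list(islice(tabulate(lambda n: n * n), 5))           # -> [0, 1, 4, 9, 16]
    list(islice(tabulate(lambda n: n * n, start=3), 3))  # -> [9, 16, 25]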
Repeat calls to func with specified arguments. Example: repeatfunc(random.random)
def repeatfunc(func: Callable[[Unpack[_Ts]], _T], times: int | None = None, *args: Unpack[_Ts]) -> Iterator[_T]:
    """Repeat calls to func with specified arguments.

    Example:  repeatfunc(random.random)
    """
    if times is None:
        return starmap(func, repeat(args))
    return starmap(func, repeat(args, times))
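With a bounded times the call is repeated exactly that often; with times=None it repeats forever, so slice the result:

    from itertools import islice
    import random

    list(repeatfunc(pow, 3, 2, 5))                      # -> [32, 32, 32]
    floats = list(islice(repeatfunc(random.random), 3))  # three fresh random floats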
Flatten one level of nesting
def flatten(list_of_lists: Iterable[Iterable[_T]]) -> Iterator[_T]:
    "Flatten one level of nesting"
    return chain.from_iterable(list_of_lists)
Returns the sequence elements n times
def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]:
    "Returns the sequence elements n times"
    return chain.from_iterable(repeat(tuple(iterable), n))
Return an iterator over the last n items
def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]:
    "Return an iterator over the last n items"
    # tail(3, 'ABCDEFG') --> E F G
    return iter(collections.deque(iterable, maxlen=n))
Advance the iterator n-steps ahead. If n is None, consume entirely.
def consume(iterator: Iterator[object], n: int | None = None) -> None:
    "Advance the iterator n-steps ahead. If n is None, consume entirely."
    # Use functions that consume iterators at C speed.
    if n is None:
        # feed the entire iterator into a zero-length deque
        collections.deque(iterator, maxlen=0)
    else:
        # advance to the empty slice starting at position n
        next(islice(iterator, n, n), None)
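A short usage example showing both modes:

    it = iter(range(10))
    consume(it, 3)        # discard the first three items at C speed
    assert next(it) == 3
    consume(it)           # exhaust whatever is left
    assert next(it, "empty") == "empty"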
Returns the nth item or a default value
def nth(iterable: Iterable[object], n: int, default: object = None) -> object:
    "Returns the nth item or a default value"
    return next(islice(iterable, n, None), default)
Given a predicate that returns True or False, count the True results.
def quantify(iterable: Iterable[object], pred: Callable[[Any], bool] = bool) -> int:
    "Given a predicate that returns True or False, count the True results."
    return sum(map(pred, iterable))
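For example:

    quantify([0, 1, "", "x", None])                 # -> 2 truthy values
    quantify(range(10), pred=lambda n: n % 2 == 1)  # -> 5 odd numbers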
Returns the first true value in the iterable. If no true value is found, returns *default* If *pred* is not None, returns the first item for which pred(item) is true.
def first_true(iterable: Iterable[object], default: object = False, pred: Callable[[Any], bool] | None = None) -> object:
    """Returns the first true value in the iterable.

    If no true value is found, returns *default*

    If *pred* is not None, returns the first item
    for which pred(item) is true.
    """
    # first_true([a,b,c], x) --> a or b or c or x
    # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
    return next(filter(pred, iterable), default)
Call a function repeatedly until an exception is raised. Converts a call-until-exception interface to an iterator interface. Like builtins.iter(func, sentinel) but uses an exception instead of a sentinel to end the loop. Examples: iter_except(functools.partial(heappop, h), IndexError) # priority queue iterator iter_except(d.popitem, KeyError) # non-blocking dict iterator iter_except(d.popleft, IndexError) # non-blocking deque iterator iter_except(q.get_nowait, Queue.Empty) # loop over a producer Queue iter_except(s.pop, KeyError) # non-blocking set iterator
def iter_except(
    func: Callable[[], object], exception: _ExceptionOrExceptionTuple, first: Callable[[], object] | None = None
) -> Iterator[object]:
    """Call a function repeatedly until an exception is raised.

    Converts a call-until-exception interface to an iterator interface.
    Like builtins.iter(func, sentinel) but uses an exception instead
    of a sentinel to end the loop.

    Examples:
        iter_except(functools.partial(heappop, h), IndexError)  # priority queue iterator
        iter_except(d.popitem, KeyError)                         # non-blocking dict iterator
        iter_except(d.popleft, IndexError)                       # non-blocking deque iterator
        iter_except(q.get_nowait, Queue.Empty)                   # loop over a producer Queue
        iter_except(s.pop, KeyError)                             # non-blocking set iterator
    """
    try:
        if first is not None:
            yield first()  # For database APIs needing an initial cast to db.first()
        while True:
            yield func()
    except exception:
        pass
roundrobin('ABC', 'D', 'EF') --> A D E B F C
def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]:
    "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
    # Recipe credited to George Sakkis
    num_active = len(iterables)
    nexts: Iterator[Callable[[], _T]] = cycle(iter(it).__next__ for it in iterables)
    while num_active:
        try:
            for next in nexts:
                yield next()
        except StopIteration:
            # Remove the iterator we just exhausted from the cycle.
            num_active -= 1
            nexts = cycle(islice(nexts, num_active))
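The recipe cycles over each iterable's bound __next__ method and shrinks the cycle whenever an input runs dry, so short inputs drop out without stalling the rest:

    list(roundrobin("ABC", "D", "EF"))  # -> ['A', 'D', 'E', 'B', 'F', 'C']
    list(roundrobin([1, 2], [], [3]))   # -> [1, 3, 2]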
Partition entries into false entries and true entries. If *pred* is slow, consider wrapping it with functools.lru_cache().
def partition(pred: Callable[[_T], bool], iterable: Iterable[_T]) -> tuple[Iterator[_T], Iterator[_T]]:
    """Partition entries into false entries and true entries.

    If *pred* is slow, consider wrapping it with functools.lru_cache().
    """
    # partition(is_odd, range(10)) --> 0 2 4 6 8   and  1 3 5 7 9
    t1, t2 = tee(iterable)
    return filterfalse(pred, t1), filter(pred, t2)
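Usage; note the false entries come first in the returned pair:

    evens, odds = partition(lambda n: n % 2 == 1, range(10))
    list(evens)  # -> [0, 2, 4, 6, 8]
    list(odds)   # -> [1, 3, 5, 7, 9]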
Return all contiguous non-empty subslices of a sequence
def subslices(seq: Sequence[_T]) -> Iterator[Sequence[_T]]:
    "Return all contiguous non-empty subslices of a sequence"
    # subslices('ABCD') --> A AB ABC ABCD B BC BCD C CD D
    slices = starmap(slice, combinations(range(len(seq) + 1), 2))
    return map(operator.getitem, repeat(seq), slices)
Variant of takewhile() that allows complete access to the remainder of the iterator. >>> it = iter('ABCdEfGhI') >>> all_upper, remainder = before_and_after(str.isupper, it) >>> ''.join(all_upper) 'ABC' >>> ''.join(remainder) # takewhile() would lose the 'd' 'dEfGhI' Note that the first iterator must be fully consumed before the second iterator can generate valid results.
def before_and_after(predicate: Callable[[_T], bool], it: Iterable[_T]) -> tuple[Iterator[_T], Iterator[_T]]:
    """Variant of takewhile() that allows complete
    access to the remainder of the iterator.

    >>> it = iter('ABCdEfGhI')
    >>> all_upper, remainder = before_and_after(str.isupper, it)
    >>> ''.join(all_upper)
    'ABC'
    >>> ''.join(remainder)     # takewhile() would lose the 'd'
    'dEfGhI'

    Note that the first iterator must be fully
    consumed before the second iterator can
    generate valid results.
    """
    it = iter(it)
    transition: list[_T] = []

    def true_iterator() -> Iterator[_T]:
        for elem in it:
            if predicate(elem):
                yield elem
            else:
                transition.append(elem)
                return

    def remainder_iterator() -> Iterator[_T]:
        yield from transition
        yield from it

    return true_iterator(), remainder_iterator()
List unique elements, preserving order. Remember all elements ever seen.
def unique_everseen(iterable: Iterable[_T], key: Callable[[_T], Hashable] | None = None) -> Iterator[_T]:
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBcCAD', str.lower) --> A B c D
    seen: set[Hashable] = set()
    if key is None:
        for element in filterfalse(seen.__contains__, iterable):
            seen.add(element)
            yield element
        # For order preserving deduplication,
        # a faster but non-lazy solution is:
        #     yield from dict.fromkeys(iterable)
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen.add(k)
                yield element
List unique elements, preserving order. Remember only the element just seen.
def unique_justseen(iterable: Iterable[_T], key: Callable[[_T], bool] | None = None) -> Iterator[_T]:
    "List unique elements, preserving order. Remember only the element just seen."
    # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
    # unique_justseen('ABBcCAD', str.lower) --> A B c A D
    g: groupby[_T | bool, _T] = groupby(iterable, key)
    return map(next, map(operator.itemgetter(1), g))
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
def powerset(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]:
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
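For example, all 2**3 subsets of a three-element input, ordered by size:

    list(powerset([1, 2, 3]))
    # -> [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]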
Compute the first derivative of a polynomial. f(x) = x³ -4x² -17x + 60 f'(x) = 3x² -8x -17
def polynomial_derivative(coefficients: Sequence[float]) -> list[float]:
    """Compute the first derivative of a polynomial.

    f(x)  =  x³ - 4x² - 17x + 60
    f'(x) = 3x² - 8x  - 17
    """
    # polynomial_derivative([1, -4, -17, 60]) -> [3, -8, -17]
    n = len(coefficients)
    powers = reversed(range(1, n))
    return list(map(operator.mul, coefficients, powers))
Equivalent to list(combinations(iterable, r))[index]
def nth_combination(iterable: Iterable[_T], r: int, index: int) -> tuple[_T, ...]:
    "Equivalent to list(combinations(iterable, r))[index]"
    pool = tuple(iterable)
    n = len(pool)
    c = math.comb(n, r)
    if index < 0:
        index += c
    if index < 0 or index >= c:
        raise IndexError
    result: list[_T] = []
    while r:
        c, n, r = c * r // n, n - 1, r - 1
        while index >= c:
            index -= c
            c, n = c * (n - r) // n, n - 1
        result.append(pool[-1 - n])
    return tuple(result)
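A usage check against the naive equivalent, which materializes every combination:

    from itertools import combinations

    nth_combination("ABCDE", 3, 4)     # -> ('A', 'C', 'E')
    list(combinations("ABCDE", 3))[4]  # same tuple, but builds all C(5, 3) = 10 of them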
Create an indexer for the given application. :param app: The name of the application to create an indexer for. :param docs: The help documents dir for the application. :param format: The format of the help documents. :param incremental: Whether to enable incremental updates. :param save_path: The path to save the indexer to. :return: The path the indexer was saved to.
def create_indexer(app: str, docs: str, format: str, incremental: bool, save_path: str):
    """
    Create an indexer for the given application.
    :param app: The name of the application to create an indexer for.
    :param docs: The help documents dir for the application.
    :param format: The format of the help documents.
    :param incremental: Whether to enable incremental updates.
    :param save_path: The path to save the indexer to.
    :return: The path the indexer was saved to.
    """
    if os.path.exists("./learner/records.json"):
        records = load_json_file("./learner/records.json")
    else:
        records = {}

    print_with_color("Loading documents from {docs}...".format(docs=docs), "cyan")
    loader = xml_loader.XMLLoader(docs)
    documents = loader.construct_document()

    print_with_color("Creating indexer for {num} documents for {app}...".format(num=len(documents), app=app), "yellow")

    if format == "xml":
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    else:
        raise ValueError("Invalid format: " + format)

    db = FAISS.from_documents(documents, embeddings)

    if incremental:
        if app in records:
            print_with_color("Merging with previous indexer...", "yellow")
            prev_db = FAISS.load_local(records[app], embeddings)
            db.merge_from(prev_db)

    db_file_path = os.path.join(save_path, app)
    db_file_path = os.path.abspath(db_file_path)
    db.save_local(db_file_path)

    records[app] = db_file_path
    save_json_file("./learner/records.json", records)

    print_with_color("Indexer for {app} created successfully. Save in {path}.".format(app=app, path=db_file_path), "green")

    return db_file_path
Main function.
def main():
    """
    Main function.
    """
    indexer.create_indexer(parsed_args.app, parsed_args.docs, parsed_args.format, parsed_args.incremental, parsed_args.save_path)
Print text with specified color using ANSI escape codes from Colorama library. :param text: The text to print. :param color: The color of the text (options: red, green, yellow, blue, magenta, cyan, white, black).
def print_with_color(text: str, color: str = ""):
    """
    Print text with specified color using ANSI escape codes from Colorama library.
    :param text: The text to print.
    :param color: The color of the text (options: red, green, yellow, blue, magenta, cyan, white, black).
    """
    color_mapping = {
        "red": Fore.RED,
        "green": Fore.GREEN,
        "yellow": Fore.YELLOW,
        "blue": Fore.BLUE,
        "magenta": Fore.MAGENTA,
        "cyan": Fore.CYAN,
        "white": Fore.WHITE,
        "black": Fore.BLACK,
    }

    selected_color = color_mapping.get(color.lower(), "")
    colored_text = selected_color + text + Style.RESET_ALL

    print(colored_text)
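Typical calls, assuming colorama's Fore and Style are imported as the function above requires:

    print_with_color("Indexer created successfully.", "green")
    print_with_color("Warning: falling back to defaults.", "yellow")
    print_with_color("No color requested")  # unknown or empty color prints plain text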
Find files with the given extension in the given directory. :param directory: The directory to search. :param extension: The extension to search for. :return: The list of matching files.
def find_files_with_extension(directory, extension):
    """
    Find files with the given extension in the given directory.
    :param directory: The directory to search.
    :param extension: The extension to search for.
    :return: The list of matching files.
    """
    matching_files = []
    for root, _, files in os.walk(directory):
        for file in files:
            if file.endswith(extension):
                path = os.path.join(root, file)
                path = os.path.realpath(path)
                matching_files.append(path)
    return matching_files
Find files with the given extensions in the given directory. :param directory: The directory to search. :param extensions: The list of extensions to search for. :return: The list of matching files.
def find_files_with_extension_list(directory, extensions):
    """
    Find files with the given extensions in the given directory.
    :param directory: The directory to search.
    :param extensions: The list of extensions to search for.
    :return: The list of matching files.
    """
    matching_files = []
    for root, _, files in os.walk(directory):
        for file in files:
            if file.endswith(tuple(extensions)):
                path = os.path.join(root, file)
                path = os.path.realpath(path)
                matching_files.append(path)
    return matching_files
Load a JSON file. :param file_path: The path to the file to load. :return: The loaded JSON data.
def load_json_file(file_path):
    """
    Load a JSON file.
    :param file_path: The path to the file to load.
    :return: The loaded JSON data.
    """
    with open(file_path, 'r') as file:
        data = json.load(file)
    return data
Save a JSON file. :param file_path: The path to the file to save. :param data: The data to save.
def save_json_file(file_path, data):
    """
    Save a JSON file.
    :param file_path: The path to the file to save.
    :param data: The data to save.
    """
    with open(file_path, 'w') as file:
        json.dump(data, file, indent=4)
Main function.
def main():
    """
    Main function.
    """
    session = flow.Session(parsed_args.task)

    step = 0
    status = session.get_status()
    round = session.get_round()

    # Start the task
    while status.upper() not in ["ALLFINISH", "ERROR", "MAX_STEP_REACHED"]:
        round = session.get_round()
        if status == "FINISH":
            session.set_new_round()
            status = session.get_status()
            if status == "ALLFINISH":
                if session.experience_asker():
                    session.experience_saver()
                break

        while status.upper() not in ["FINISH", "ERROR"] and step <= configs["MAX_STEP"]:
            session.process_application_selection()
            step = session.get_step()
            status = session.get_status()

            while status.upper() not in ["FINISH", "ERROR"] and step <= configs["MAX_STEP"]:
                session.process_action_selection()
                status = session.get_status()
                step = session.get_step()

                if status == "APP_SELECTION":
                    print_with_color(
                        "Step {step}: Switching to New Application".format(step=step), "magenta")
                    app_window = session.get_application_window()
                    app_window.minimize()
                    break

            if status == "FINISH":
                print_with_color("Task Completed.", "magenta")
                break

            if step > configs["MAX_STEP"]:
                print_with_color("Max step reached.", "magenta")
                status = "MAX_STEP_REACHED"
                break

        result = session.get_results()
        round = session.get_round()

        # Print the result
        if result != "":
            print_with_color("Result for round {round}:".format(
                round=round), "magenta")
            print_with_color("{result}".format(result=result), "yellow")

    # Print the total cost
    total_cost = session.get_cost()
    formatted_cost = '${:.2f}'.format(total_cost)
    print_with_color(f"Request total cost is {formatted_cost}", "yellow")

    return status
Load the configuration from a YAML file and environment variables. :param config_path: The path to the directory holding the YAML config files. Defaults to "ufo/config/". :return: Merged configuration from environment variables and YAML files.
def load_config(config_path="ufo/config/"):
    """
    Load the configuration from a YAML file and environment variables.

    :param config_path: The path to the directory holding the YAML config files. Defaults to "ufo/config/".
    :return: Merged configuration from environment variables and YAML files.
    """
    # Copy environment variables to avoid modifying them directly
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # Suppress TensorFlow warnings
    configs = dict(os.environ)

    path = config_path

    try:
        with open(path + "config.yaml", "r") as file:
            yaml_data = yaml.safe_load(file)
        # Update configs with YAML data
        if yaml_data:
            configs.update(yaml_data)
        with open(path + "config_dev.yaml", "r") as file:
            yaml_dev_data = yaml.safe_load(file)
        # Update configs with the dev YAML data
        # (check the dev data itself, not the base data loaded above)
        if yaml_dev_data:
            configs.update(yaml_dev_data)
    except FileNotFoundError:
        print_with_color(
            f"Warning: Config file not found at {config_path}. Using only environment variables.", "yellow")

    return optimize_configs(configs)
Get the list of offline indexers obtained from the learner. :return: The list of offline indexers.
def get_offline_learner_indexer_config(): """ Get the list of offline indexers obtained from the learner. :return: The list of offline indexers. """ # The fixed path of the offline indexer config file. file_path = "learner/records.json" if os.path.exists(file_path): with open(file_path, 'r') as file: records = json.load(file) else: records = {} return records
Get completion for the given messages. Args: messages (list): List of messages to be used for completion. agent (str, optional): Type of agent. Possible values are 'APP', 'ACTION' or 'BACKUP'. use_backup_engine (bool, optional): Flag indicating whether to use the backup engine or not. Returns: tuple: A tuple containing the completion response (str) and the cost (float).
def get_completion(messages, agent: str='APP', use_backup_engine: bool=True):
    """
    Get completion for the given messages.
    Args:
        messages (list): List of messages to be used for completion.
        agent (str, optional): Type of agent. Possible values are 'APP', 'ACTION' or 'BACKUP'.
        use_backup_engine (bool, optional): Flag indicating whether to use the backup engine or not.
    Returns:
        tuple: A tuple containing the completion response (str) and the cost (float).
    """
    if agent.lower() == "app":
        agent_type = "APP_AGENT"
    elif agent.lower() == "action":
        agent_type = "ACTION_AGENT"
    elif agent.lower() == "backup":
        agent_type = "BACKUP_AGENT"
    else:
        raise ValueError(f'Agent {agent} not supported')

    api_type = configs[agent_type]['API_TYPE']
    try:
        if api_type.lower() in ['openai', 'aoai', 'azure_ad']:
            from .openai import OpenAIService
            response, cost = OpenAIService(configs, agent_type=agent_type).chat_completion(messages)
            return response, cost
        else:
            raise ValueError(f'API_TYPE {api_type} not supported')
    except Exception as e:
        if use_backup_engine:
            print_with_color(f"The API request of {agent_type} failed: {e}.", "red")
            print_with_color("Switching to use the backup engine...", "yellow")
            return get_completion(messages, agent='backup', use_backup_engine=False)
        else:
            raise e
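A hedged usage sketch; it assumes configs['APP_AGENT'] points at a reachable OpenAI-style endpoint and that messages follow the chat-completion format.

# Hypothetical call; requires a configured APP_AGENT endpoint.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "List the next UI action."},
]
response, cost = get_completion(messages, agent="app", use_backup_engine=True)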
Get titles and control types of all the apps on the desktop. :param remove_empty: Whether to remove empty titles. :return: The titles and control types of all the apps on the desktop.
def get_desktop_app_info(remove_empty:bool=True) -> Tuple[List[str], List[str]]:
    """
    Get titles and control types of all the apps on the desktop.
    :param remove_empty: Whether to remove empty titles.
    :return: The titles and control types of all the apps on the desktop.
    """
    app_list = Desktop(backend=BACKEND).windows()
    app_titles = [app.window_text() for app in app_list]
    app_control_types = [app.element_info.control_type for app in app_list]

    if remove_empty:
        app_control_types = [app_control_types[i] for i, title in enumerate(app_titles) if title != ""]
        app_titles = [title for title in app_titles if title != ""]
    return app_titles, app_control_types
Get all the apps on the desktop, keyed by label, together with their control info. :param remove_empty: Whether to remove windows with empty titles. :param field_list: The fields of control info to retrieve. :return: The dictionary of desktop windows and the list of their control info.
def get_desktop_app_info_dict(remove_empty:bool=True, field_list:List[str]=["control_text", "control_type"]) -> Tuple[dict, List[dict]]:
    """
    Get all the apps on the desktop, keyed by label, together with their control info.
    :param remove_empty: Whether to remove windows with empty titles.
    :param field_list: The fields of control info to retrieve.
    :return: The dictionary of desktop windows and the list of their control info.
    """
    desktop_windows = Desktop(BACKEND).windows()
    if remove_empty:
        desktop_windows = [app for app in desktop_windows if app.window_text() != "" and app.element_info.class_name not in ["IME", "MSCTFIME UI", "TXGuiFoundation"]]
    desktop_windows_dict = dict(zip([str(i+1) for i in range(len(desktop_windows))], desktop_windows))
    desktop_windows_info = get_control_info_dict(desktop_windows_dict, field_list)
    return desktop_windows_dict, desktop_windows_info
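A hedged usage sketch; this only runs on Windows with pywinauto installed and at least one titled window open.

# Hypothetical usage; Windows-only.
windows_dict, windows_info = get_desktop_app_info_dict(remove_empty=True)
for info in windows_info:
    print(info["label"], info.get("control_text"), info.get("control_type"))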
Find control elements in descendants of the window. :param window: The window to find control elements. :param control_type_list: The control types to find. :param class_name_list: The class names to find. :param title_list: The titles to find. :param is_visible: Whether the control elements are visible. :param is_enabled: Whether the control elements are enabled. :param depth: The depth of the descendants to find. :return: The control elements found.
def find_control_elements_in_descendants(window, control_type_list:List[str]=[], class_name_list:List[str]=[], title_list:List[str]=[], is_visible:bool=True, is_enabled:bool=True, depth:int=0) -> List: """ Find control elements in descendants of the window. :param window: The window to find control elements. :param control_type_list: The control types to find. :param class_name_list: The class names to find. :param title_list: The titles to find. :param is_visible: Whether the control elements are visible. :param is_enabled: Whether the control elements are enabled. :param depth: The depth of the descendants to find. :return: The control elements found. """ control_elements = [] if len(control_type_list) == 0: control_elements += window.descendants() else: for control_type in control_type_list: if depth == 0: subcontrols = window.descendants(control_type=control_type) else: subcontrols = window.descendants(control_type=control_type, depth=depth) control_elements += subcontrols if is_visible: control_elements = [control for control in control_elements if control.is_visible()] if is_enabled: control_elements = [control for control in control_elements if control.is_enabled()] if len(title_list) > 0: control_elements = [control for control in control_elements if control.window_text() in title_list] if len(class_name_list) > 0: control_elements = [control for control in control_elements if control.element_info.class_name in class_name_list] return control_elements
Get control info of the window. :param window: The window to get control info of. :param field_list: The fields to get. :return: The control info of the window.
def get_control_info(window, field_list:List[str]=[]) -> dict:
    """
    Get control info of the window.
    :param window: The window to get control info of.
    :param field_list: The fields to get.
    :return: The control info of the window.
    """
    control_info = {}
    try:
        control_info["control_type"] = window.element_info.control_type
        control_info["control_id"] = window.element_info.control_id
        control_info["control_class"] = window.element_info.class_name
        control_info["control_name"] = window.element_info.name
        control_info["control_rect"] = window.element_info.rectangle
        control_info["control_text"] = window.element_info.name
        control_info["control_title"] = window.window_text()
    except Exception:
        return {}
    if len(field_list) > 0:
        control_info = {field: control_info[field] for field in field_list}
    return control_info
Get control info of a list of windows. :param window_list: The list of windows to get control info of. :param field_list: The fields to get. :return: The list of control info of the windows.
def get_control_info_batch(window_list:List, field_list:List[str]=[]) -> List:
    """
    Get control info of a list of windows.
    :param window_list: The list of windows to get control info of.
    :param field_list: The fields to get.
    :return: The list of control info of the windows.
    """
    control_info_list = []
    for window in window_list:
        control_info_list.append(get_control_info(window, field_list))
    return control_info_list
Get control info of a dict of windows, labeled by their keys. :param window_dict: The dict of windows to get control info of. :param field_list: The fields to get. :return: The list of control info of the windows.
def get_control_info_dict(window_dict:dict, field_list:List[str]=[]) -> List[dict]:
    """
    Get control info of a dict of windows, labeled by their keys.
    :param window_dict: The dict of windows to get control info of.
    :param field_list: The fields to get.
    :return: The list of control info of the windows.
    """
    control_info_list = []
    for key in window_dict.keys():
        window = window_dict[key]
        control_info = get_control_info(window, field_list)
        control_info["label"] = key
        control_info_list.append(control_info)
    return control_info_list
Replace \n with \\n. :param input_str: The string to replace. :return: The replaced string.
def replace_newline(input_str : str) -> str:
    """
    Replace \n with \\n.
    :param input_str: The string to replace.
    :return: The replaced string.
    """
    # Replace newline characters with the literal two-character sequence \n
    result_str = input_str.replace('\n', '\\n')

    # Collapse any doubled escape (\\n) down to \n so the output never
    # contains double-escaped newlines.
    if '\\\\n' in result_str:
        result_str = result_str.replace('\\\\n', '\\n')

    return result_str
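A quick self-check of the behavior above; the doubled backslash in the expected value is Python escaping for a single literal backslash.

# A real newline becomes the two characters backslash + n.
assert replace_newline("line1\nline2") == "line1\\nline2"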
Get the application name of the window. :param window: The window to get the application name. :return: The application name of the window. Empty string ("") if failed to get the name.
def get_application_name(window) -> str:
    """
    Get the application name of the window.
    :param window: The window to get the application name of.
    :return: The application name of the window. Empty string ("") if failed to get the name.
    """
    if window is None:
        return ""
    process_id = window.process_id()
    try:
        process = psutil.Process(process_id)
        return process.name()
    except psutil.NoSuchProcess:
        return ""
Capture a screenshot of the window. :param window_title: The title of the window. :param save_path: The path to save the screenshot. :param is_save: Whether to save the screenshot. :return: The screenshot.
def capture_screenshot(window_title:str, save_path:str, is_save:bool=True): """ Capture a screenshot of the window. :param window_title: The title of the window. :param save_path: The path to save the screenshot. :param is_save: Whether to save the screenshot. :return: The screenshot. """ app = Application(backend="uia").connect(title_re=window_title) window = app.top_window() screenshot = window.capture_as_image() if is_save: screenshot.save(save_path) return screenshot
Capture a screenshot across all screens. :param save_path: The path to save the screenshot. :return: The screenshot.
def capture_screenshot_multiscreen(save_path:str):
    """
    Capture a screenshot across all screens.
    :param save_path: The path to save the screenshot.
    :return: The screenshot.
    """
    screenshot = ImageGrab.grab(all_screens=True)
    screenshot.save(save_path)
    return screenshot
Draw a rectangle on the image. :param image: The image to draw on. :param coordinate: The coordinate of the rectangle. :param color: The color of the rectangle. :param width: The width of the rectangle. :return: The image with the rectangle.
def draw_rectangles(image, coordinate:tuple, color="red", width=3): """ Draw a rectangle on the image. :param image: The image to draw on. :param coordinate: The coordinate of the rectangle. :param color: The color of the rectangle. :param width: The width of the rectangle. :return: The image with the rectangle. """ draw = ImageDraw.Draw(image) draw.rectangle(coordinate, outline=color, width=width) return image
Capture a screenshot with rectangles around the controls. :param top_window: The top window. :param control_list: The list of the controls to annotate. :param save_path: The path to save the screenshot. :param color: The color of the rectangle. :param is_save: Whether to save the screenshot. :return: The screenshot with rectangles around the controls.
def capture_screenshot_controls(top_window, control_list: List, save_path:str, color="red", is_save:bool=True): """ Capture a screenshot with rectangles around the controls. :param top_window: The top window. :param control_list: The list of the controls to annotate. :param save_path: The path to save the screenshot. :param color: The color of the rectangle. :param is_save: Whether to save the screenshot. :return: The screenshot with rectangles around the controls. """ screenshot = top_window.capture_as_image() window_rect = top_window.rectangle() for control in control_list: if control: control_rect = control.rectangle() adjusted_rect = coordinate_adjusted(window_rect, control_rect) screenshot = draw_rectangles(screenshot, adjusted_rect, color=color) if is_save: screenshot.save(save_path) return screenshot
Adjust the coordinates of the control rectangle to the window rectangle. :param window_rect: The window rectangle. :param control_rect: The control rectangle. :return: The adjusted control rectangle.
def coordinate_adjusted(window_rect:RECT, control_rect:RECT): """ Adjust the coordinates of the control rectangle to the window rectangle. :param window_rect: The window rectangle. :param control_rect: The control rectangle. :return: The adjusted control rectangle. """ # (left, top, right, bottom) adjusted_rect = (control_rect.left - window_rect.left, control_rect.top - window_rect.top, control_rect.right - window_rect.left, control_rect.bottom - window_rect.top) return adjusted_rect
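Because coordinate_adjusted only reads the four rectangle fields, its arithmetic can be checked with a stub in place of pywinauto's RECT; the numbers are made up.

from collections import namedtuple

# Stub mimicking the four RECT fields used above (illustrative values).
FakeRect = namedtuple("FakeRect", ["left", "top", "right", "bottom"])
window = FakeRect(100, 100, 500, 400)
control = FakeRect(150, 120, 250, 160)
assert coordinate_adjusted(window, control) == (50, 20, 150, 60)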
Draw a rectangle around the control and label it. :param image: The image to draw on. :param coordinate: The coordinate of the control. :param label_text: The text label of the control. :param botton_margin: The margin of the button. :param border_width: The width of the border. :param font_size: The size of the font. :param font_color: The color of the font. :param border_color: The color of the border. :param button_color: The color of the button. :return: The image with the rectangle and label.
def draw_rectangles_controls(image, coordinate:tuple, label_text:str, botton_margin:int=5, border_width:int=2, font_size:int=25, font_color:str="#000000", border_color:str="#FF0000", button_color:str="#FFF68F"):
    """
    Draw a rectangle around the control and label it.
    :param image: The image to draw on.
    :param coordinate: The coordinate of the control.
    :param label_text: The text label of the control.
    :param botton_margin: The margin of the button.
    :param border_width: The width of the border.
    :param font_size: The size of the font.
    :param font_color: The color of the font.
    :param border_color: The color of the border.
    :param button_color: The color of the button.
    :return: The image with the rectangle and label.
    """
    _ = ImageDraw.Draw(image)
    font = ImageFont.truetype("arial.ttf", font_size)
    text_size = font.getbbox(label_text)

    # Set button size = text size + margins
    button_size = (text_size[2]+botton_margin, text_size[3]+botton_margin)

    # Create the label image with the button background color
    button_img = Image.new('RGBA', button_size, button_color)
    button_draw = ImageDraw.Draw(button_img)
    button_draw.text((botton_margin/2, botton_margin/2), label_text, font=font, fill=font_color)

    # Draw a border rectangle around the label
    ImageDraw.Draw(button_img).rectangle(
        [(0, 0), (button_size[0] - 1, button_size[1] - 1)],
        outline=border_color, width=border_width
    )

    # Paste the label onto the source image
    image.paste(button_img, (coordinate[0], coordinate[1]))
    return image
Annotate the controls of the window. :param window: The window to annotate. :param screenshot_save_path: The path to save the screenshot. :param annotated_screenshot_save_path: The path to save the annotated screenshot. :param control_list: The list of the controls to annotate. :param anntation_type: The type of the annotation, must be number or letter. :param color_diff: Whether to use color difference to annotate different control types. :param color_default: The default color of the annotation. :param is_save: Whether to save the screenshot and annotated screenshot. :return: The dictionary of the annotations, the raw screenshot, and the annotated screenshot.
def control_annotations(window, screenshot_save_path:str, annotated_screenshot_save_path:str, control_list:List, anntation_type:str="number", color_diff:bool=True, color_default:str="#FFF68F", is_save:bool=True):
    """
    Annotate the controls of the window.
    :param window: The window to annotate.
    :param screenshot_save_path: The path to save the screenshot.
    :param annotated_screenshot_save_path: The path to save the annotated screenshot.
    :param control_list: The list of the controls to annotate.
    :param anntation_type: The type of the annotation, must be number or letter.
    :param color_diff: Whether to use color difference to annotate different control types.
    :param color_default: The default color of the annotation.
    :param is_save: Whether to save the screenshot and annotated screenshot.
    :return: The dictionary of the annotations, the raw screenshot, and the annotated screenshot.
    """
    annotation_dict = {}
    window_rect = window.rectangle()
    screenshot = window.capture_as_image()
    screenshot_annotated = screenshot.copy()

    assert anntation_type in ["number", "letter"], "The annotation type must be number or letter."

    color_dict = configs["ANNOTATION_COLORS"]

    for i, control in enumerate(control_list):
        control_rect = control.rectangle()
        adjusted_rect = coordinate_adjusted(window_rect, control_rect)
        adjusted_coordinate = (adjusted_rect[0], adjusted_rect[1])
        if anntation_type == "number":
            label_text = str(i+1)
        elif anntation_type == "letter":
            label_text = number_to_letter(i)
        screenshot_annotated = draw_rectangles_controls(
            screenshot_annotated, adjusted_coordinate, label_text,
            button_color=color_dict.get(control.element_info.control_type, color_default) if color_diff else color_default)
        annotation_dict[label_text] = control

    if is_save:
        screenshot.save(screenshot_save_path)
        screenshot_annotated.save(annotated_screenshot_save_path)

    return annotation_dict, screenshot, screenshot_annotated
Concatenate two images horizontally. :param image1_path: The path of the first image. :param image2_path: The path of the second image. :param output_path: The path to save the concatenated image. :return: The concatenated image.
def concat_images_left_right(image1_path, image2_path, output_path): """ Concatenate two images horizontally. :param image1_path: The path of the first image. :param image2_path: The path of the second image. :param output_path: The path to save the concatenated image. :return: The concatenated image. """ # Open the images image1 = Image.open(image1_path) image2 = Image.open(image2_path) # Ensure both images have the same height min_height = min(image1.height, image2.height) image1 = image1.crop((0, 0, image1.width, min_height)) image2 = image2.crop((0, 0, image2.width, min_height)) # Concatenate images horizontally result = Image.new('RGB', (image1.width + image2.width, min_height)) result.paste(image1, (0, 0)) result.paste(image2, (image1.width, 0)) # Save the result result.save(output_path) return result
Print text with specified color using ANSI escape codes from Colorama library. :param text: The text to print. :param color: The color of the text (options: red, green, yellow, blue, magenta, cyan, white, black).
def print_with_color(text: str, color: str = ""): """ Print text with specified color using ANSI escape codes from Colorama library. :param text: The text to print. :param color: The color of the text (options: red, green, yellow, blue, magenta, cyan, white, black). """ color_mapping = { "red": Fore.RED, "green": Fore.GREEN, "yellow": Fore.YELLOW, "blue": Fore.BLUE, "magenta": Fore.MAGENTA, "cyan": Fore.CYAN, "white": Fore.WHITE, "black": Fore.BLACK } selected_color = color_mapping.get(color.lower(), "") colored_text = selected_color + text + Style.RESET_ALL print(colored_text)
Convert image to base64 string. :param image: The image to convert. :return: The base64 string.
def image_to_base64(image: Image.Image):
    """
    Convert image to base64 string.
    :param image: The image to convert.
    :return: The base64 string.
    """
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode("utf-8")
Encode an image file to base64 string. :param image_path: The path of the image file. :param mime_type: The mime type of the image. :return: The base64 string.
def encode_image_from_path(image_path: str, mime_type: Optional[str] = None) -> str: """ Encode an image file to base64 string. :param image_path: The path of the image file. :param mime_type: The mime type of the image. :return: The base64 string. """ import mimetypes file_name = os.path.basename(image_path) mime_type = mime_type if mime_type is not None else mimetypes.guess_type(file_name)[0] with open(image_path, "rb") as image_file: encoded_image = base64.b64encode(image_file.read()).decode('ascii') if mime_type is None or not mime_type.startswith("image/"): print("Warning: mime_type is not specified or not an image mime type. Defaulting to png.") mime_type = "image/png" image_url = f"data:{mime_type};base64," + encoded_image return image_url
Create a folder if it doesn't exist. :param folder_path: The path of the folder to create.
def create_folder(folder_path: str): """ Create a folder if it doesn't exist. :param folder_path: The path of the folder to create. """ if not os.path.exists(folder_path): os.makedirs(folder_path)
Convert number to letter. :param n: The number to convert. :return: The letter converted from the number.
def number_to_letter(n:int): """ Convert number to letter. :param n: The number to convert. :return: The letter converted from the number. """ if n < 0: return "Invalid input" result = "" while n >= 0: remainder = n % 26 result = chr(65 + remainder) + result # 65 is the ASCII code for 'A' n = n // 26 - 1 if n < 0: break return result
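A few self-checks of the base-26 conversion above:

assert number_to_letter(0) == "A"
assert number_to_letter(25) == "Z"
assert number_to_letter(26) == "AA"   # rolls over like spreadsheet columns
assert number_to_letter(-1) == "Invalid input"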
Check if the string can be correctly parsed as JSON. :param string: The string to check. :return: True if the string can be correctly parsed as JSON, False otherwise.
def check_json_format(string:str):
    """
    Check if the string can be correctly parsed as JSON.
    :param string: The string to check.
    :return: True if the string can be correctly parsed as JSON, False otherwise.
    """
    try:
        json.loads(string)
    except ValueError:
        return False
    return True
Ask for user input until the user enters either Y or N. :return: The user input.
def yes_or_no(): """ Ask for user input until the user enters either Y or N. :return: The user input. """ while True: user_input = input().upper() if user_input == 'Y': return True elif user_input == 'N': return False else: print("Invalid choice. Please enter either Y or N. Try again.")
Parse json string to json object. :param json_string: The json string to parse. :return: The json object.
def json_parser(json_string:str): """ Parse json string to json object. :param json_string: The json string to parse. :return: The json object. """ # Remove the ```json and ``` at the beginning and end of the string if exists. if json_string.startswith("```json"): json_string = json_string[7:-3] return json.loads(json_string)
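A quick check of the fence-stripping behavior above; the key is illustrative.

raw = '```json\n{"Status": "FINISH"}\n```'
assert json_parser(raw) == {"Status": "FINISH"}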
Generate a function call string. :param func: The function name. :param args: The arguments as a dictionary. :return: The function call string.
def generate_function_call(func, args): """ Generate a function call string. :param func: The function name. :param args: The arguments as a dictionary. :return: The function call string. """ # Format the arguments args_str = ', '.join(f'{k}={v!r}' for k, v in args.items()) # Return the function call string return f'{func}({args_str})'
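For example (the function name and arguments are illustrative):

call = generate_function_call("click_input", {"button": "left", "double": False})
assert call == "click_input(button='left', double=False)"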
Replace the literal sequence '\\n' with a real newline in the arguments. :param args: The arguments. :return: The arguments with '\\n' replaced by newlines.
def revise_line_breaks(args: dict):
    """
    Replace the literal sequence '\\n' with a real newline in the arguments.
    :param args: The arguments.
    :return: The arguments with '\\n' replaced by newlines.
    """
    # Replace the two-character sequence \n with an actual newline character
    for key in args.keys():
        if isinstance(args[key], str):
            args[key] = args[key].replace('\\n', '\n')

    return args
Add a keystroke time in microseconds to the global ringbuffer. Locates the event device (/dev/input/*) in the dict of ringbuffers and adds the KEY_DOWN time in microseconds to it. Then calls the check_for_attack function on the event device and the usb core device. Args: event_device_path: The path to the event device (/dev/input/*). key_down_time: The KEY_DOWN time in microseconds. keystroke: The actual key typed. device: A USB device (usb.core.Device).
def add_to_ring_buffer(event_device_path: Text, key_down_time: int,
                       keystroke: Text, device: usb.core.Device):
  """Add a keystroke time in microseconds to the global ringbuffer.

  Locates the event device (/dev/input/*) in the dict of ringbuffers and adds
  the KEY_DOWN time in microseconds to it. Then calls the check_for_attack
  function on the event device and the usb core device.

  Args:
    event_device_path: The path to the event device (/dev/input/*).
    key_down_time: The KEY_DOWN time in microseconds.
    keystroke: The actual key typed.
    device: A USB device (usb.core.Device).
  """
  with _event_devices_lock:
    if event_device_path not in _event_devices_timings:
      _event_devices_timings[event_device_path] = collections.deque(
          maxlen=KEYSTROKE_WINDOW)
      _event_devices_keystrokes[event_device_path] = collections.deque(
          maxlen=KEYSTROKE_WINDOW)
    _event_devices_timings[event_device_path].append(key_down_time)
    _event_devices_keystrokes[event_device_path].append(keystroke)

  check_for_attack(event_device_path, device)
Check local (user-based) allowlist for specifically allowed devices. UKIP users are able to specify USB devices they want to allow in a local file. This allowlist is checked, when a device is found attacking (timing threshold is exceeded) and whether that device is listed in here. If so, only the characters listed in the corresponding allowlist are allowed, the others are denied (in case of 'any' and 'none' all or no characters are allowed respectively). If the device is not listed in the allowlist, it is denied per default. Args: product_id: The required product ID to look up in the local allowlist. vendor_id: The required vendor ID to look up in the local allowlist. Raises: AllowlistFileError: When there were errors with the allowlist config file. Returns: A AllowlistConfigReturn object, with the following variations: 1) allowlist is a list with characters, device_present is true: the returned characters are not blocked by UKIP for the given device. 2) allowlist is an empty list, device_present is true: for the given device any character is allowed by UKIP. 3) allowlist is an empty list, device_present is false: for the given device no character is allowed by UKIP (either the device is not in the config file, or a user specifically marked that device with 'none' for the allowed characters).
def check_local_allowlist(product_id: Text, vendor_id: Text) -> AllowlistConfigReturn: """Check local (user-based) allowlist for specifically allowed devices. UKIP users are able to specify USB devices they want to allow in a local file. This allowlist is checked, when a device is found attacking (timing threshold is exceeded) and whether that device is listed in here. If so, only the characters listed in the corresponding allowlist are allowed, the others are denied (in case of 'any' and 'none' all or no characters are allowed respectively). If the device is not listed in the allowlist, it is denied per default. Args: product_id: The required product ID to look up in the local allowlist. vendor_id: The required vendor ID to look up in the local allowlist. Raises: AllowlistFileError: When there were errors with the allowlist config file. Returns: A AllowlistConfigReturn object, with the following variations: 1) allowlist is a list with characters, device_present is true: the returned characters are not blocked by UKIP for the given device. 2) allowlist is an empty list, device_present is true: for the given device any character is allowed by UKIP. 3) allowlist is an empty list, device_present is false: for the given device no character is allowed by UKIP (either the device is not in the config file, or a user specifically marked that device with 'none' for the allowed characters). """ device = '%s:%s' % (product_id, vendor_id) try: with open('/etc/ukip/allowlist', 'r') as f: for line in f: # Comments start with '#'. if line[0] == '#': continue # Ignore empty lines. if not line.strip(): continue try: (key, val) = line.split() int(key.split(':')[0], 16) int(key.split(':')[1], 16) allowlist = val.split(',') if key != device: continue if allowlist[0] == 'any': return AllowlistConfigReturn(allowlist=[], device_present=True) if allowlist[0] == 'none': return AllowlistConfigReturn(allowlist=[], device_present=False) # If all of the checks succeed, return the allowlist (but only if it # is an allowlist, and not a word). if len(allowlist[0]) == 1: return AllowlistConfigReturn( allowlist=val.split(','), device_present=True) except (ValueError, IndexError) as vi: raise AllowlistFileError( 'The format of the config file /etc/ukip/allowlist seems to be' ' incorrect: %s' % vi) # If the device wasn't found in the file, return False. return AllowlistConfigReturn(allowlist=[], device_present=False) except FileNotFoundError as fnfe: raise AllowlistFileError( 'The config file /etc/ukip/allowlist could not be found: %s' % fnfe)
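A hypothetical /etc/ukip/allowlist illustrating the format the parser above expects; the IDs and characters are made up. Note that the parser splits each entry into exactly two whitespace-separated tokens, so comments may only appear on their own lines.

# Hypothetical /etc/ukip/allowlist (IDs made up); format is
# <product_id>:<vendor_id> <comma-separated characters | any | none>
0x1a2b:0x3c4d any
0x5e6f:0x7a8b none
0x9c0d:0x1e2f a,b,c,1,2,3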
Check a ringbuffer of KEY_DOWN timings for attacks. Locates the event device (/dev/input/*) in the dict of ringbuffers and checks the correct ringbuffer for attacks (keystroke injection attack). In case of an attack, two actions can be taken, depending on the mode UKIP is running in. Those modes are specified in the UKIP_AVAILABLE_MODES enum. Args: event_device_path: The path to the event device (/dev/input/*). device: A USB device (usb.core.Device). Returns: False: If the check failed (not enough times, mode not set). None otherwise.
def check_for_attack(event_device_path: Text,
                     device: usb.core.Device) -> bool:
  """Check a ringbuffer of KEY_DOWN timings for attacks.

  Locates the event device (/dev/input/*) in the dict of ringbuffers and
  checks the correct ringbuffer for attacks (keystroke injection attack). In
  case of an attack, two actions can be taken, depending on the mode UKIP is
  running in. Those modes are specified in the UKIP_AVAILABLE_MODES enum.

  Args:
    event_device_path: The path to the event device (/dev/input/*).
    device: A USB device (usb.core.Device).

  Returns:
    False: If the check failed (not enough times, mode not set). None
    otherwise.
  """
  with _event_devices_lock:
    if len(_event_devices_timings[event_device_path]) < KEYSTROKE_WINDOW:
      return False

    # Count the number of adjacent keystroke pairs whose gap is below (or
    # equal to) the ABNORMAL_TYPING threshold.
    attack_counter = 0
    timings = list(_event_devices_timings[event_device_path])
    for older, newer in zip(timings, timings[1:]):
      if newer - older <= ABNORMAL_TYPING:
        attack_counter += 1

    # If all the timings in the ringbuffer are within the ABNORMAL_TYPING
    # timing.
    if attack_counter == KEYSTROKE_WINDOW - 1:
      if _UKIP_RUN_MODE == UKIP_AVAILABLE_MODES.MONITOR:
        enforce_monitor_mode(device, event_device_path)
      elif _UKIP_RUN_MODE == UKIP_AVAILABLE_MODES.HARDENING:
        enforce_hardening_mode(device, event_device_path)
      else:
        log.error('No run mode was specified for UKIP. Exiting...')
        return False
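A worked numeric example of the window check above, using assumed constants:

# Assumed constants: KEYSTROKE_WINDOW = 5, ABNORMAL_TYPING = 50000 (microseconds).
timings = [1000000, 1020000, 1040000, 1060000, 1080000]  # 20 ms apart
gaps = [newer - older for older, newer in zip(timings, timings[1:])]  # [20000] * 4
# All four adjacent gaps are <= 50000, so attack_counter == KEYSTROKE_WINDOW - 1
# and the buffer is flagged as a possible injection.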
Enforce the MONITOR mode on a given device. Information about devices that would have been blocked in HARDENING mode is logged to /dev/log. Args: device: A USB device (usb.core.Device). event_device_path: The path to the event device (/dev/input/*).
def enforce_monitor_mode(device: usb.core.Device, event_device_path: Text):
  """Enforce the MONITOR mode on a given device.

  Information about devices that would have been blocked in HARDENING mode is
  logged to /dev/log.

  Args:
    device: A USB device (usb.core.Device).
    event_device_path: The path to the event device (/dev/input/*).
  """
  log.warning(
      '[UKIP] The device %s with the vendor id %s and the product id'
      ' %s would have been blocked. The causing timings are: %s.',
      device.product if device.product else 'UNKNOWN', hex(device.idVendor),
      hex(device.idProduct), _event_devices_timings[event_device_path])
Enforce the HARDENING mode on a given device. When enforcing the HARDENING mode, a device gets removed from the operating system when the keystrokes exceed the typing speed threshold (ABNORMAL_TYPING). This is done by unbinding the drivers from every device interface. Before the device is removed, the allowlist is checked. If the product and vendor ids are in there, the function will return and the device will continue working (possibly with a reduced allowed character set, as described in the function check_local_allowlist). Args: device: A USB device (usb.core.Device). event_device_path: The path to the event device (/dev/input/*).
def enforce_hardening_mode(device: usb.core.Device, event_device_path: Text): """Enforce the HARDENING mode on a given device. When enforcing the HARDENING mode, a device gets removed from the operating system when the keystrokes exceed the typing speed threshold (ABNORMAL_TYPING). This is done by unbinding the drivers from every device interface. Before the device is removed, the allowlist is checked. If the product and vendor ids are in there, the function will return and the device will continue working (possibly with a reduced allowed character set, as described in the function check_local_allowlist). Args: device: A USB device (usb.core.Device). event_device_path: The path to the event device (/dev/input/*). """ product_id = hex(device.idProduct) vendor_id = hex(device.idVendor) local_allowlist = check_local_allowlist( hex(device.idProduct), hex(device.idVendor)) # Device is present in the allowlist and all characters are allowed. if local_allowlist.device_present and not local_allowlist.allowlist: return # Device is present and an allowlist is specified. elif local_allowlist.device_present and local_allowlist.allowlist: allowlist = local_allowlist.allowlist # Device is not in the allowlist or keyword is 'none'. # i.e.: not local_allowlist.device_present and not local_allowlist.allowlist else: allowlist = [] # If all typed characters are in the allowlist, return. Otherwise run through # the rest of the function. if not set(_event_devices_keystrokes[event_device_path]).difference( set(allowlist)): return pid_and_vid = '%s:%s' % (product_id, vendor_id) for config in device: for interface in range(config.bNumInterfaces): if device.is_kernel_driver_active(interface): try: device.detach_kernel_driver(interface) if device.product: log.warning( '[UKIP] The device %s with the vendor id %s and the ' 'product id %s was blocked. The causing timings were: ' '%s.', device.product, vendor_id, product_id, _event_devices_timings[event_device_path]) else: log.warning( '[UKIP] The device with the vendor id %s and the ' 'product id %s was blocked. The causing timings were: ' '%s.', vendor_id, product_id, _event_devices_timings[event_device_path]) except (IOError, OSError, ValueError, usb.core.USBError) as e: log.warning( 'There was an error in unbinding the interface for the USB device' ' %s: %s', pid_and_vid, e) # In case of an error we still need to continue to the next interface. continue # The device was removed, so clear the dicts. Most importantly, clear the # keystroke dict. del _event_devices_timings[event_device_path] del _event_devices_keystrokes[event_device_path] gc.collect()
Helper function to load the keycodes file into memory. Returns: The lowcodes and capscodes as dicts in a KeycodesReturn attribute. Raises: KeycodesFileError: If there is a problem with the keycodes file.
def load_keycodes_from_file() -> KeycodesReturn: """Helper function to load the keycodes file into memory. Returns: The lowcodes and capscodes as dicts in a KeycodesReturn attribute. Raises: KeycodesFileError: If there is a problem with the keycodes file. """ lowcodes = {} capscodes = {} try: with open('/etc/ukip/keycodes', 'r') as keycode_file: try: keycodes = json.load(keycode_file) except (OverflowError, ValueError, TypeError) as je: raise KeycodesFileError('The keycodes file could not be read: %s' % je) except FileNotFoundError as fnfe: raise KeycodesFileError( 'The keycode file /etc/ukip/keycodes could not be found: %s' % fnfe) if not keycodes.get('lowcodes') or not keycodes.get('capscodes'): log.error( 'The keycodes file is missing either the lowcodes or capscodes keyword.' ) return KeycodesReturn(lower_codes=lowcodes, capped_codes=capscodes) for keycode in keycodes['lowcodes']: for scancode, lowcode in keycode.items(): lowcodes[int(scancode)] = lowcode for keycode in keycodes['capscodes']: for scancode, capcode in keycode.items(): capscodes[int(scancode)] = capcode return KeycodesReturn(lower_codes=lowcodes, capped_codes=capscodes)
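A hypothetical excerpt of /etc/ukip/keycodes in the shape the loader above expects: lists of single-entry objects mapping scancode strings to characters. The mappings shown are illustrative.

{
  "lowcodes":  [{"2": "1"}, {"3": "2"}, {"16": "q"}],
  "capscodes": [{"2": "!"}, {"3": "@"}, {"16": "Q"}]
}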
Monitor a given USB device for occurring KEY_DOWN events. Creates a passive reading loop over a given event device and waits for KEY_DOWN events to occur. Then extracts the time of the event in microseconds and adds it to the ringbuffer. Args: device: The event device in (/dev/input/*). vendor_id: The vendor ID of the device. product_id: The product ID of the device. Raises: OSError: If the given USB device cannot be found or if the OS receives keyboard events, after the device was unbound. Both originate from the evdev lib. StopIteration: If the iteration of the usb device tree breaks.
def monitor_device_thread(device: pyudev.Device, vendor_id: int,
                          product_id: int) -> None:
  """Monitor a given USB device for occurring KEY_DOWN events.

  Creates a passive reading loop over a given event device and waits for
  KEY_DOWN events to occur. Then extracts the time of the event in
  microseconds and adds it to the ringbuffer.

  Args:
    device: The event device in (/dev/input/*).
    vendor_id: The vendor ID of the device.
    product_id: The product ID of the device.

  Raises:
    OSError: If the given USB device cannot be found or if the OS receives
      keyboard events, after the device was unbound. Both originate from the
      evdev lib.
    StopIteration: If the iteration of the usb device tree breaks.
  """
  keycodes = load_keycodes_from_file()
  lowcodes = keycodes.lower_codes
  capscodes = keycodes.capped_codes

  try:
    try:
      inputdevice = evdev.InputDevice(device.device_node)
      dev = usb.core.find(idVendor=vendor_id, idProduct=product_id)
    except (OSError, StopIteration) as mex:
      log.warning(
          'There was an error while starting the thread for device monitoring:'
          ' %s', mex)
      # Bail the function and with that, end the thread.
      return

    log.info(
        f'Start monitoring {device.device_node} with the VID {hex(vendor_id)} and the PID {hex(product_id)}'
    )

    try:
      # The default behaviour of evdev.InputDevice is a non-exclusive access,
      # so each reader gets a copy of each event.
      for event in inputdevice.read_loop():
        caps = False
        for led in inputdevice.leds(verbose=True):
          # Check if CapsLock is turned on.
          if 'LED_CAPSL' in led:
            caps = True

        # LShift or RShift is either pressed or held.
        if LSHIFT in inputdevice.active_keys(
        ) or RSHIFT in inputdevice.active_keys():
          caps = True

        if event.value == KEY_DOWN and event.type == evdev.ecodes.EV_KEY:
          # event.sec is seconds and event.usec microseconds, so the sum is
          # the event time in microseconds.
          keystroke_time = (event.sec * 1000000) + event.usec
          if caps:
            keystroke = capscodes.get(evdev.categorize(event).scancode)
          else:
            keystroke = lowcodes.get(evdev.categorize(event).scancode)
          add_to_ring_buffer(device.device_node, keystroke_time, keystroke,
                             dev)
    except OSError as ose:
      log.warning('Events found for unbound device: %s', ose)
  except Exception:
    log.exception('Error monitoring device.')