response | instruction |
---|---|
Parse command line arguments. | def parse_args(args: Optional[Sequence[str]] = None) -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--stdio", action="store_true")
parser.add_argument("--socket", type=int, default=None)
parser.add_argument("--pipe", type=str, default=None)
parser.add_argument("--clientProcessId", type=int, default=None)
return parser.parse_args(args) |
Convert bytes to string as needed. | def to_str(text) -> str:
"""Convert bytes to string as needed."""
return text.decode("utf-8") if isinstance(text, bytes) else text |
Creates JSON-RPC wrapper for the readable and writable streams. | def create_json_rpc(readable: BinaryIO, writable: BinaryIO) -> JsonRpc:
"""Creates JSON-RPC wrapper for the readable and writable streams."""
return JsonRpc(readable, writable) |
Gets an existing JSON-RPC connection or starts one and returns it. | def get_or_start_json_rpc(
workspace: str,
interpreter: Sequence[str],
cwd: str,
env: Optional[Dict[str, str]] = None,
) -> Union[JsonRpc, None]:
"""Gets an existing JSON-RPC connection or starts one and return it."""
res = _get_json_rpc(workspace)
if not res:
args = [*interpreter, RUNNER_SCRIPT]
_process_manager.start_process(workspace, args, cwd, env)
res = _get_json_rpc(workspace)
return res |
Uses JSON-RPC to execute a command. | def run_over_json_rpc(
workspace: str,
interpreter: Sequence[str],
module: str,
argv: Sequence[str],
use_stdin: bool,
cwd: str,
source: Optional[str] = None,
env: Optional[Dict[str, str]] = None,
) -> RpcRunResult:
"""Uses JSON-RPC to execute a command."""
rpc: Union[JsonRpc, None] = get_or_start_json_rpc(workspace, interpreter, cwd, env)
if not rpc:
raise Exception("Failed to run over JSON-RPC.")
msg_id = str(uuid.uuid4())
msg = {
"id": msg_id,
"method": "run",
"module": module,
"argv": argv,
"useStdin": use_stdin,
"cwd": cwd,
}
if source:
msg["source"] = source
rpc.send_data(msg)
data = rpc.receive_data()
if data["id"] != msg_id:
return RpcRunResult(
"", f"Invalid result for request: {json.dumps(msg, indent=4)}"
)
if "error" in data:
result = data["result"] if "result" in data else ""
error = data["error"]
if data.get("exception", False):
return RpcRunResult(result, "", error)
return RpcRunResult(result, error)
return RpcRunResult(data["result"], "") |
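A hedged sketch of the message shapes this helper exchanges with the runner process, inferred from the handling above; the concrete values are illustrative, not taken from the source. | # Request sent via rpc.send_data() (values are illustrative):
request = {
    "id": "<uuid4 string>",
    "method": "run",
    "module": "black",            # whatever TOOL_MODULE is for this server
    "argv": ["black", "-"],
    "useStdin": True,
    "cwd": "/path/to/workspace",
    "source": "x=1\n",            # only included when a source string is given
}
# Responses understood by the code above:
success = {"id": request["id"], "result": "x = 1\n"}
failure = {"id": request["id"], "result": "", "error": "...", "exception": False} |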
Shutdown all JSON-RPC processes. | def shutdown_json_rpc():
"""Shutdown all JSON-RPC processes."""
_process_manager.stop_all_processes() |
Add given path to `sys.path`. | def update_sys_path(path_to_add: str, strategy: str) -> None:
"""Add given path to `sys.path`."""
if path_to_add not in sys.path and os.path.isdir(path_to_add):
if strategy == "useBundled":
sys.path.insert(0, path_to_add)
else:
sys.path.append(path_to_add) |
Update PATH environment variable with the 'scripts' directory.
Windows: .venv/Scripts
Linux/MacOS: .venv/bin | def update_environ_path() -> None:
"""Update PATH environment variable with the 'scripts' directory.
Windows: .venv/Scripts
Linux/MacOS: .venv/bin
"""
scripts = sysconfig.get_path("scripts")
paths_variants = ["Path", "PATH"]
for var_name in paths_variants:
if var_name in os.environ:
paths = os.environ[var_name].split(os.pathsep)
if scripts not in paths:
paths.insert(0, scripts)
os.environ[var_name] = os.pathsep.join(paths)
break |
LSP handler for textDocument/formatting request. | def formatting(params: lsp.DocumentFormattingParams) -> list[lsp.TextEdit] | None:
"""LSP handler for textDocument/formatting request."""
document = LSP_SERVER.workspace.get_text_document(params.text_document.uri)
return _formatting_helper(document) |
LSP handler for textDocument/rangeFormatting request. | def range_formatting(
params: lsp.DocumentRangeFormattingParams,
) -> list[lsp.TextEdit] | None:
"""LSP handler for textDocument/rangeFormatting request."""
document = LSP_SERVER.workspace.get_text_document(params.text_document.uri)
settings = _get_settings_by_document(document)
version = VERSION_LOOKUP[settings["workspaceFS"]]
if version >= LINE_RANGES_MIN_VERSION:
return _formatting_helper(
document,
args=[
"--line-ranges",
f"{params.range.start.line + 1}-{params.range.end.line + 1}",
],
)
else:
log_warning(
"Black version earlier than 23.11.0 does not support range formatting. Formatting entire document."
)
return _formatting_helper(document) |
LSP handler for textDocument/rangesFormatting request. | def ranges_formatting(
params: lsp.DocumentRangesFormattingParams,
) -> list[lsp.TextEdit] | None:
"""LSP handler for textDocument/rangesFormatting request."""
document = LSP_SERVER.workspace.get_text_document(params.text_document.uri)
settings = _get_settings_by_document(document)
version = VERSION_LOOKUP[settings["workspaceFS"]]
if version >= LINE_RANGES_MIN_VERSION:
args = []
for r in params.ranges:
args += ["--line-ranges", f"{r.start.line + 1}-{r.end.line + 1}"]
return _formatting_helper(document, args=args)
else:
log_warning(
"Black version earlier than 23.11.0 does not support range formatting. Formatting entire document."
)
return _formatting_helper(document) |
Ensures that the code provided is python. | def is_python(code: str, file_path: str) -> bool:
"""Ensures that the code provided is python."""
try:
ast.parse(code, file_path)
except SyntaxError:
log_error(f"Syntax error in code: {traceback.format_exc()}")
return False
return True |
Gets or generates a file name to use with black when formatting. | def _get_filename_for_black(document: workspace.Document) -> str:
"""Gets or generates a file name to use with black when formatting."""
if document.uri.startswith("vscode-notebook-cell") and document.path.endswith(
".ipynb"
):
# Treat the cell like a python file
return document.path[:-6] + ".py"
return document.path |
Returns line endings used in the text. | def _get_line_endings(lines: list[str]) -> str:
"""Returns line endings used in the text."""
try:
if lines[0][-2:] == "\r\n":
return "\r\n"
return "\n"
except Exception: # pylint: disable=broad-except
return None |
Ensures that the edited text line endings match the document line endings. | def _match_line_endings(document: workspace.Document, text: str) -> str:
"""Ensures that the edited text line endings match the document line endings."""
expected = _get_line_endings(document.source.splitlines(keepends=True))
actual = _get_line_endings(text.splitlines(keepends=True))
if actual == expected or actual is None or expected is None:
return text
return text.replace(actual, expected) |
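A quick behaviour sketch for the two line-ending helpers above; plain strings stand in for the document source. | _get_line_endings("a\r\nb\r\n".splitlines(keepends=True))  # -> "\r\n"
_get_line_endings("a\nb\n".splitlines(keepends=True))  # -> "\n"
# _match_line_endings rewrites the formatter's "\n" output back to "\r\n" when the
# original document used CRLF, so the computed text edits apply cleanly. |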
Returns arguments used by black based on file extensions. | def _get_args_by_file_extension(document: workspace.Document) -> List[str]:
"""Returns arguments used by black based on file extensions."""
if document.uri.startswith("vscode-notebook-cell"):
return []
p = document.path.lower()
if p.endswith(".py"):
return []
elif p.endswith(".pyi"):
return ["--pyi"]
elif p.endswith(".ipynb"):
return ["--ipynb"]
return [] |
LSP handler for initialize request. | def initialize(params: lsp.InitializeParams) -> None:
"""LSP handler for initialize request."""
log_to_output(f"CWD Server: {os.getcwd()}")
GLOBAL_SETTINGS.update(**params.initialization_options.get("globalSettings", {}))
settings = params.initialization_options["settings"]
_update_workspace_settings(settings)
log_to_output(
f"Settings received on server:\r\n{json.dumps(settings, indent=4, ensure_ascii=False)}\r\n"
)
log_to_output(
f"Global settings received on server:\r\n{json.dumps(GLOBAL_SETTINGS, indent=4, ensure_ascii=False)}\r\n"
)
paths = "\r\n ".join(sys.path)
log_to_output(f"sys.path used to run Server:\r\n {paths}")
_update_workspace_settings_with_version_info(WORKSPACE_SETTINGS) |
Handle clean up on exit. | def on_exit(_params: Optional[Any] = None) -> None:
"""Handle clean up on exit."""
jsonrpc.shutdown_json_rpc() |
Handle clean up on shutdown. | def on_shutdown(_params: Optional[Any] = None) -> None:
"""Handle clean up on shutdown."""
jsonrpc.shutdown_json_rpc() |
Returns cwd for the given settings and document. | def get_cwd(settings: Dict[str, Any], document: Optional[workspace.Document]) -> str:
"""Returns cwd for the given settings and document."""
if settings["cwd"] == "${workspaceFolder}":
return settings["workspaceFS"]
if settings["cwd"] == "${fileDirname}":
if document is not None:
return os.fspath(pathlib.Path(document.path).parent)
return settings["workspaceFS"]
return settings["cwd"] |
Runs tool on the given document.
If use_stdin is true, the contents of the document are passed to the
tool via stdin. | def _run_tool_on_document(
document: workspace.Document,
use_stdin: bool = False,
extra_args: Sequence[str] = [],
) -> utils.RunResult | None:
"""Runs tool on the given document.
If use_stdin is true, the contents of the document are passed to the
tool via stdin.
"""
if utils.is_stdlib_file(document.path):
log_warning(f"Skipping standard library file: {document.path}")
return None
if not is_python(document.source, document.path):
log_warning(
f"Skipping non python code or code with syntax errors: {document.path}"
)
return None
# deep copy here to prevent accidentally updating global settings.
settings = copy.deepcopy(_get_settings_by_document(document))
code_workspace = settings["workspaceFS"]
cwd = get_cwd(settings, document)
use_path = False
use_rpc = False
if settings["path"]:
# 'path' setting takes priority over everything.
use_path = True
argv = settings["path"]
elif settings["interpreter"] and not utils.is_current_interpreter(
settings["interpreter"][0]
):
# If there is a different interpreter set use JSON-RPC to the subprocess
# running under that interpreter.
argv = [TOOL_MODULE]
use_rpc = True
else:
# if the interpreter is same as the interpreter running this
# process then run as module.
argv = [TOOL_MODULE]
argv += TOOL_ARGS + settings["args"] + extra_args
if use_stdin:
argv += ["-"]
if use_path:
# This mode is used when running executables.
log_to_output(" ".join(argv))
log_to_output(f"CWD Server: {cwd}")
result = utils.run_path(
argv=argv,
use_stdin=use_stdin,
cwd=cwd,
source=document.source.replace("\r\n", "\n"),
)
if result.stderr:
log_to_output(result.stderr)
elif use_rpc:
# This mode is used if the interpreter running this server is different from
# the interpreter used for running the tool.
log_to_output(" ".join(settings["interpreter"] + ["-m"] + argv))
log_to_output(f"CWD formatter: {cwd}")
result = jsonrpc.run_over_json_rpc(
workspace=code_workspace,
interpreter=settings["interpreter"],
module=TOOL_MODULE,
argv=argv,
use_stdin=use_stdin,
cwd=cwd,
source=document.source,
env={
"LS_IMPORT_STRATEGY": settings["importStrategy"],
},
)
result = _to_run_result_with_logging(result)
else:
# In this mode the tool is run as a module in the same process as the language server.
log_to_output(" ".join([sys.executable, "-m"] + argv))
log_to_output(f"CWD formatter: {cwd}")
# This is needed to preserve sys.path, in cases where the tool modifies
# sys.path and that might not work for this scenario next time around.
with utils.substitute_attr(sys, "path", [""] + sys.path[:]):
try:
result = utils.run_module(
module=TOOL_MODULE,
argv=argv,
use_stdin=use_stdin,
cwd=cwd,
source=document.source,
)
except Exception:
log_error(traceback.format_exc(chain=True))
raise
if result.stderr:
log_to_output(result.stderr)
return result |
Runs tool. | def _run_tool(extra_args: Sequence[str], settings: Dict[str, Any]) -> utils.RunResult:
"""Runs tool."""
code_workspace = settings["workspaceFS"]
cwd = get_cwd(settings, None)
use_path = False
use_rpc = False
if len(settings["path"]) > 0:
# 'path' setting takes priority over everything.
use_path = True
argv = settings["path"]
elif len(settings["interpreter"]) > 0 and not utils.is_current_interpreter(
settings["interpreter"][0]
):
# If there is a different interpreter set use JSON-RPC to the subprocess
# running under that interpreter.
argv = [TOOL_MODULE]
use_rpc = True
else:
# if the interpreter is same as the interpreter running this
# process then run as module.
argv = [TOOL_MODULE]
argv += extra_args
if use_path:
# This mode is used when running executables.
log_to_output(" ".join(argv))
log_to_output(f"CWD Server: {cwd}")
result = utils.run_path(argv=argv, use_stdin=True, cwd=cwd)
if result.stderr:
log_to_output(result.stderr)
elif use_rpc:
# This mode is used if the interpreter running this server is different from
# the interpreter used for running the tool.
log_to_output(" ".join(settings["interpreter"] + ["-m"] + argv))
log_to_output(f"CWD formatter: {cwd}")
result = jsonrpc.run_over_json_rpc(
workspace=code_workspace,
interpreter=settings["interpreter"],
module=TOOL_MODULE,
argv=argv,
use_stdin=True,
cwd=cwd,
env={
"LS_IMPORT_STRATEGY": settings["importStrategy"],
},
)
result = _to_run_result_with_logging(result)
else:
# In this mode the tool is run as a module in the same process as the language server.
log_to_output(" ".join([sys.executable, "-m"] + argv))
log_to_output(f"CWD formatter: {cwd}")
# This is needed to preserve sys.path, in cases where the tool modifies
# sys.path and that might not work for this scenario next time around.
with utils.substitute_attr(sys, "path", [""] + sys.path[:]):
try:
result = utils.run_module(
module=TOOL_MODULE, argv=argv, use_stdin=True, cwd=cwd
)
except Exception:
log_error(traceback.format_exc(chain=True))
raise
if result.stderr:
log_to_output(result.stderr)
if LSP_SERVER.lsp.trace == lsp.TraceValues.Verbose:
log_to_output(f"\r\n{result.stdout}\r\n")
return result |
Logs messages to Output > Black Formatter channel only. | def log_to_output(
message: str, msg_type: lsp.MessageType = lsp.MessageType.Log
) -> None:
"""Logs messages to Output > Black Formatter channel only."""
LSP_SERVER.show_message_log(message, msg_type) |
Logs messages with notification on error. | def log_error(message: str) -> None:
"""Logs messages with notification on error."""
LSP_SERVER.show_message_log(message, lsp.MessageType.Error)
if os.getenv("LS_SHOW_NOTIFICATION", "off") in ["onError", "onWarning", "always"]:
LSP_SERVER.show_message(message, lsp.MessageType.Error) |
Logs messages with notification on warning. | def log_warning(message: str) -> None:
"""Logs messages with notification on warning."""
LSP_SERVER.show_message_log(message, lsp.MessageType.Warning)
if os.getenv("LS_SHOW_NOTIFICATION", "off") in ["onWarning", "always"]:
LSP_SERVER.show_message(message, lsp.MessageType.Warning) |
Logs messages with notification. | def log_always(message: str) -> None:
"""Logs messages with notification."""
LSP_SERVER.show_message_log(message, lsp.MessageType.Info)
if os.getenv("LS_SHOW_NOTIFICATION", "off") in ["always"]:
LSP_SERVER.show_message(message, lsp.MessageType.Info) |
Ensures we always get a list | def as_list(content: Union[Any, List[Any], Tuple[Any]]) -> List[Any]:
"""Ensures we always get a list"""
if isinstance(content, (list, tuple)):
return list(content)
return [content] |
Returns paths from sysconfig.get_paths(). | def _get_sys_config_paths() -> List[str]:
"""Returns paths from sysconfig.get_paths()."""
return [
path
for group, path in sysconfig.get_paths().items()
if group not in ["data", "platdata", "scripts"]
] |
This is the extensions folder under ~/.vscode or ~/.vscode-server. | def _get_extensions_dir() -> List[str]:
"""This is the extensions folder under ~/.vscode or ~/.vscode-server."""
# The path here is calculated relative to the tool
# this is because users can launch VS Code with custom
# extensions folder using the --extensions-dir argument
path = pathlib.Path(__file__).parent.parent.parent.parent
# ^ bundled ^ extensions
# tool <extension>
if path.name == "extensions":
return [os.fspath(path)]
return [] |
Returns true if two paths are the same. | def is_same_path(file_path1: str, file_path2: str) -> bool:
"""Returns true if two paths are the same."""
return pathlib.Path(file_path1) == pathlib.Path(file_path2) |
Returns normalized path. | def normalize_path(file_path: str) -> str:
"""Returns normalized path."""
return str(pathlib.Path(file_path).resolve()) |
Returns true if the executable path is same as the current interpreter. | def is_current_interpreter(executable) -> bool:
"""Returns true if the executable path is same as the current interpreter."""
return is_same_path(executable, sys.executable) |
Return True if the file belongs to the standard library. | def is_stdlib_file(file_path: str) -> bool:
"""Return True if the file belongs to the standard library."""
normalized_path = str(pathlib.Path(file_path).resolve())
return any(normalized_path.startswith(path) for path in _stdlib_paths) |
Manage object attributes context when using runpy.run_module(). | def substitute_attr(obj: Any, attribute: str, new_value: Any):
"""Manage object attributes context when using runpy.run_module()."""
old_value = getattr(obj, attribute)
setattr(obj, attribute, new_value)
yield
setattr(obj, attribute, old_value) |
Redirect stdio streams to a custom stream. | def redirect_io(stream: str, new_stream):
"""Redirect stdio streams to a custom stream."""
old_stream = getattr(sys, stream)
setattr(sys, stream, new_stream)
yield
setattr(sys, stream, old_stream) |
Change working directory before running code. | def change_cwd(new_cwd):
"""Change working directory before running code."""
os.chdir(new_cwd)
yield
os.chdir(SERVER_CWD) |
Runs as a module. | def _run_module(
module: str, argv: Sequence[str], use_stdin: bool, source: str = None
) -> RunResult:
"""Runs as a module."""
str_output = CustomIO("<stdout>", encoding="utf-8")
str_error = CustomIO("<stderr>", encoding="utf-8")
try:
with substitute_attr(sys, "argv", argv):
with redirect_io("stdout", str_output):
with redirect_io("stderr", str_error):
if use_stdin and source is not None:
str_input = CustomIO("<stdin>", encoding="utf-8", newline="\n")
with redirect_io("stdin", str_input):
str_input.write(source)
str_input.seek(0)
runpy.run_module(module, run_name="__main__")
else:
runpy.run_module(module, run_name="__main__")
except SystemExit:
pass
return RunResult(str_output.get_value(), str_error.get_value()) |
Runs as a module. | def run_module(
module: str, argv: Sequence[str], use_stdin: bool, cwd: str, source: str = None
) -> RunResult:
"""Runs as a module."""
with CWD_LOCK:
if is_same_path(os.getcwd(), cwd):
return _run_module(module, argv, use_stdin, source)
with change_cwd(cwd):
return _run_module(module, argv, use_stdin, source) |
Runs as an executable. | def run_path(
argv: Sequence[str], use_stdin: bool, cwd: str, source: str = None
) -> RunResult:
"""Runs as an executable."""
if use_stdin:
with subprocess.Popen(
argv,
encoding="utf-8",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=cwd,
) as process:
return RunResult(*process.communicate(input=source))
else:
result = subprocess.run(
argv,
encoding="utf-8",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False,
cwd=cwd,
)
return RunResult(result.stdout, result.stderr) |
Run an API. | def run_api(
callback: Callable[[Sequence[str], CustomIO, CustomIO, CustomIO | None], None],
argv: Sequence[str],
use_stdin: bool,
cwd: str,
source: str = None,
) -> RunResult:
"""Run a API."""
with CWD_LOCK:
if is_same_path(os.getcwd(), cwd):
return _run_api(callback, argv, use_stdin, source)
with change_cwd(cwd):
return _run_api(callback, argv, use_stdin, source) |
Add given path to `sys.path`. | def update_sys_path(path_to_add: str) -> None:
"""Add given path to `sys.path`."""
if path_to_add not in sys.path and os.path.isdir(path_to_add):
sys.path.append(path_to_add) |
Test formatting a python file. | def test_formatting(sample: str):
"""Test formatting a python file."""
FORMATTED_TEST_FILE_PATH = constants.TEST_DATA / sample / "sample.py"
UNFORMATTED_TEST_FILE_PATH = constants.TEST_DATA / sample / "sample.unformatted"
contents = UNFORMATTED_TEST_FILE_PATH.read_text(encoding="utf-8")
actual = []
with utils.python_file(contents, UNFORMATTED_TEST_FILE_PATH.parent) as pf:
uri = utils.as_uri(str(pf))
with session.LspSession() as ls_session:
ls_session.initialize()
ls_session.notify_did_open(
{
"textDocument": {
"uri": uri,
"languageId": "python",
"version": 1,
"text": contents,
}
}
)
actual = ls_session.text_document_formatting(
{
"textDocument": {"uri": uri},
# `options` is not used by black
"options": {"tabSize": 4, "insertSpaces": True},
}
)
expected_text = FORMATTED_TEST_FILE_PATH.read_text(encoding="utf-8")
actual_text = utils.apply_text_edits(contents, utils.destructure_text_edits(actual))
assert_that(actual_text, is_(expected_text)) |
Test formatting a python notebook cell. | def test_formatting_cell():
"""Test formatting a python notebook cell."""
FORMATTED_TEST_FILE_PATH = constants.TEST_DATA / "sample2" / "sample.formatted"
UNFORMATTED_TEST_FILE_PATH = constants.TEST_DATA / "sample2" / "sample.unformatted"
contents = UNFORMATTED_TEST_FILE_PATH.read_text(encoding="utf-8")
actual = []
# generate a fake cell uri
uri = (
utils.as_uri(UNFORMATTED_TEST_FILE_PATH.parent / "sample.ipynb").replace(
"file:", "vscode-notebook-cell:"
)
+ "#C00001"
)
with session.LspSession() as ls_session:
ls_session.initialize()
ls_session.notify_did_open(
{
"textDocument": {
"uri": uri,
"languageId": "python",
"version": 1,
"text": contents,
}
}
)
actual = ls_session.text_document_formatting(
{
"textDocument": {"uri": uri},
# `options` is not used by black
"options": {"tabSize": 4, "insertSpaces": True},
}
)
expected_text = FORMATTED_TEST_FILE_PATH.read_text(encoding="utf-8")
actual_text = utils.apply_text_edits(contents, utils.destructure_text_edits(actual))
assert_that(actual_text, is_(expected_text)) |
Test skipping formatting when the file is in site-packages | def test_skipping_site_packages_files():
"""Test skipping formatting when the file is in site-packages"""
UNFORMATTED_TEST_FILE_PATH = constants.TEST_DATA / "sample1" / "sample.unformatted"
with session.LspSession() as ls_session:
# Use any stdlib path here
uri = utils.as_uri(pathlib.__file__)
ls_session.initialize()
ls_session.notify_did_open(
{
"textDocument": {
"uri": uri,
"languageId": "python",
"version": 1,
"text": UNFORMATTED_TEST_FILE_PATH.read_text(encoding="utf-8"),
}
}
)
actual = ls_session.text_document_formatting(
{
"textDocument": {"uri": uri},
# `options` is not used by black
"options": {"tabSize": 4, "insertSpaces": True},
}
)
expected = None
assert_that(actual, is_(expected)) |
Test range formatting of a python file. | def test_range_formatting(sample: str, ranges: str):
"""Test range formatting of a python file."""
FORMATTED_TEST_FILE_PATH = constants.TEST_DATA / sample / "sample.py"
UNFORMATTED_TEST_FILE_PATH = constants.TEST_DATA / sample / "sample.unformatted"
contents = UNFORMATTED_TEST_FILE_PATH.read_text(encoding="utf-8")
lines = contents.splitlines()
actual = []
with utils.python_file(contents, UNFORMATTED_TEST_FILE_PATH.parent) as pf:
uri = utils.as_uri(str(pf))
with session.LspSession() as ls_session:
ls_session.initialize()
ls_session.notify_did_open(
{
"textDocument": {
"uri": uri,
"languageId": "python",
"version": 1,
"text": contents,
}
}
)
if ranges == "single-range":
actual = ls_session.text_document_range_formatting(
{
"textDocument": {"uri": uri},
# `options` is not used by black
"options": {"tabSize": 4, "insertSpaces": True},
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": len(lines[0])},
},
}
)
else:
actual = ls_session.text_document_ranges_formatting(
{
"textDocument": {"uri": uri},
# `options` is not used by black
"options": {"tabSize": 4, "insertSpaces": True},
"ranges": [
{
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": len(lines[0])},
},
{
"start": {"line": 2, "character": 0},
"end": {"line": 2, "character": len(lines[2])},
},
],
}
)
expected_text = FORMATTED_TEST_FILE_PATH.read_text(encoding="utf-8")
actual_text = utils.apply_text_edits(contents, utils.destructure_text_edits(actual))
assert_that(actual_text, is_(expected_text)) |
Test formatting using a custom tool path setting. | def test_path():
"""Test formatting using a custom tool path setting."""
init_params = copy.deepcopy(defaults.VSCODE_DEFAULT_INITIALIZE)
init_params["initializationOptions"]["settings"][0]["path"] = [
sys.executable,
os.fspath(UTILS_PATH),
]
argv_callback_object = CallbackObject()
contents = TEST_FILE.read_text()
actual = []
with utils.python_file(contents, TEST_FILE.parent) as file:
uri = utils.as_uri(str(file))
with session.LspSession() as ls_session:
ls_session.set_notification_callback(
session.WINDOW_LOG_MESSAGE,
argv_callback_object.check_for_argv_duplication,
)
ls_session.initialize(init_params)
ls_session.notify_did_open(
{
"textDocument": {
"uri": uri,
"languageId": "python",
"version": 1,
"text": contents,
}
}
)
# Call this second time to detect arg duplication.
ls_session.notify_did_open(
{
"textDocument": {
"uri": uri,
"languageId": "python",
"version": 1,
"text": contents,
}
}
)
ls_session.text_document_formatting(
{
"textDocument": {"uri": uri},
# `options` is not used by black
"options": {"tabSize": 4, "insertSpaces": True},
}
)
actual = argv_callback_object.check_result()
assert_that(actual, is_(False)) |
Test formatting using a specific Python interpreter path. | def test_interpreter():
"""Test formatting using a specific Python interpreter path."""
init_params = copy.deepcopy(defaults.VSCODE_DEFAULT_INITIALIZE)
init_params["initializationOptions"]["settings"][0]["interpreter"] = ["python"]
argv_callback_object = CallbackObject()
contents = TEST_FILE.read_text()
actual = []
with utils.python_file(contents, TEST_FILE.parent) as file:
uri = utils.as_uri(str(file))
with session.LspSession() as ls_session:
ls_session.set_notification_callback(
session.WINDOW_LOG_MESSAGE,
argv_callback_object.check_for_argv_duplication,
)
ls_session.initialize(init_params)
ls_session.notify_did_open(
{
"textDocument": {
"uri": uri,
"languageId": "python",
"version": 1,
"text": contents,
}
}
)
# Call this second time to detect arg duplication.
ls_session.notify_did_open(
{
"textDocument": {
"uri": uri,
"languageId": "python",
"version": 1,
"text": contents,
}
}
)
actual = argv_callback_object.check_result()
assert_that(actual, is_(False)) |
Fixes 'file' uri or path case for easier testing on Windows. | def normalizecase(path: str) -> str:
"""Fixes 'file' uri or path case for easier testing on Windows."""
if platform.system() == "Windows":
return path.lower()
return path |
Return 'file' uri as string. | def as_uri(path: str) -> str:
"""Return 'file' uri as string."""
return normalizecase(pathlib.Path(path).as_uri()) |
Returns server info from package.json | def get_server_info_defaults():
"""Returns server info from package.json"""
package_json_path = PROJECT_ROOT / "package.json"
package_json = json.loads(package_json_path.read_text())
return package_json["serverInfo"] |
Returns initialization options from package.json | def get_initialization_options():
"""Returns initialization options from package.json"""
package_json_path = PROJECT_ROOT / "package.json"
package_json = json.loads(package_json_path.read_text())
server_info = package_json["serverInfo"]
server_id = f"{server_info['module']}-formatter"
properties = package_json["contributes"]["configuration"]["properties"]
setting = {}
for prop in properties:
name = prop[len(server_id) + 1 :]
value = properties[prop]["default"]
setting[name] = value
setting["workspace"] = as_uri(str(PROJECT_ROOT))
setting["interpreter"] = []
setting["cwd"] = str(PROJECT_ROOT)
return {"settings": [setting], "globalSettings": setting} |
Converts text edits from the language server to the format used by the test client. | def destructure_text_edits(text_edits: List[Any]) -> List[lsp.TextEdit]:
"""Converts text edits from the language server to the format used by the test client."""
converter = cv.get_converter()
return [converter.structure(text_edit, lsp.TextEdit) for text_edit in text_edits] |
Returns true if the class has a property that may be a Python keyword. | def is_keyword_class(cls: type) -> bool:
"""Returns true if the class has a property that may be a Python keyword."""
return any(cls is c for c in _KEYWORD_CLASSES) |
Returns true if the class or its properties require special handling. | def is_special_class(cls: type) -> bool:
"""Returns true if the class or its properties require special handling."""
return any(cls is c for c in _SPECIAL_CLASSES) |
Returns true if the class or its properties require special handling.
Example:
Consider RenameRegistrationOptions
* document_selector property:
When you set `document_selector` to None in Python it has to be preserved when
serializing it, since the serialized JSON value `{"document_selector": null}`
means use the Client's document selector. Omitting it might throw an error.
* prepare_provider property
This property does NOT need special handling, since omitting it or using
`{"prepare_provider": null}` in JSON has the same meaning. | def is_special_property(cls: type, property_name: str) -> bool:
"""Returns true if the class or its properties require special handling.
Example:
Consider RenameRegistrationOptions
* document_selector property:
When you set `document_selector` to None in Python it has to be preserved when
serializing it, since the serialized JSON value `{"document_selector": null}`
means use the Client's document selector. Omitting it might throw an error.
* prepare_provider property
This property does NOT need special handling, since omitting it or using
`{"prepare_provider": null}` in JSON has the same meaning.
"""
qualified_name = f"{cls.__name__}.{property_name}"
return qualified_name in _SPECIAL_PROPERTIES |
Returns message direction clientToServer, serverToClient or both. | def message_direction(method: str) -> str:
"""Returns message direction clientToServer, serverToClient or both."""
return _MESSAGE_DIRECTION[method] |
Convert a string to ascii. | def string_to_ascii(value):
"""
Convert a string to ascii.
"""
return str(anyascii(value)) |
Returns a string that can be used to identify the specified model.
The format is: `app_label.ModelName`
This can be reversed with the `resolve_model_string` function | def get_model_string(model):
"""
Returns a string that can be used to identify the specified model.
The format is: `app_label.ModelName`
This can be reversed with the `resolve_model_string` function
"""
return model._meta.app_label + "." + model.__name__ |
Resolve an 'app_label.model_name' string into an actual model class.
If a model class is passed in, just return that.
Raises a LookupError if a model cannot be found, or ValueError if passed
something that is neither a model nor a string. | def resolve_model_string(model_string, default_app=None):
"""
Resolve an 'app_label.model_name' string into an actual model class.
If a model class is passed in, just return that.
Raises a LookupError if a model cannot be found, or ValueError if passed
something that is neither a model nor a string.
"""
if isinstance(model_string, str):
try:
app_label, model_name = model_string.split(".")
except ValueError:
if default_app is not None:
# If we can't split, assume a model in current app
app_label = default_app
model_name = model_string
else:
raise ValueError(
"Can not resolve {!r} into a model. Model names "
"should be in the form app_label.model_name".format(model_string),
model_string,
)
return apps.get_model(app_label, model_name)
elif isinstance(model_string, type):
return model_string
else:
raise ValueError(f"Can not resolve {model_string!r} into a model", model_string) |
Escape `</script>` tags in 'text' so that it can be placed within a `<script>` block without
accidentally closing it. A '-' character will be inserted for each time it is escaped:
`<-/script>`, `<--/script>` etc. | def escape_script(text):
"""
Escape `</script>` tags in 'text' so that it can be placed within a `<script>` block without
accidentally closing it. A '-' character will be inserted for each time it is escaped:
`<-/script>`, `<--/script>` etc.
"""
warn(
"The `escape_script` hook is deprecated - use `template` elements instead.",
category=RemovedInWagtail70Warning,
)
return SCRIPT_RE.sub(r"<-\1/script>", text) |
Convert a string to ASCII exactly as Django's slugify does, with the exception
that any non-ASCII alphanumeric characters (that cannot be ASCIIfied under Unicode
normalisation) are escaped into codes like 'u0421' instead of being deleted entirely.
This ensures that the result of slugifying (for example - Cyrillic) text will not be an empty
string, and can thus be safely used as an identifier (albeit not a human-readable one). | def cautious_slugify(value):
"""
Convert a string to ASCII exactly as Django's slugify does, with the exception
that any non-ASCII alphanumeric characters (that cannot be ASCIIfied under Unicode
normalisation) are escaped into codes like 'u0421' instead of being deleted entirely.
This ensures that the result of slugifying (for example - Cyrillic) text will not be an empty
string, and can thus be safely used as an identifier (albeit not a human-readable one).
"""
value = force_str(value)
# Normalize the string to decomposed unicode form. This causes accented Latin
# characters to be split into 'base character' + 'accent modifier'; the latter will
# be stripped out by the regexp, resulting in an ASCII-clean character that doesn't
# need to be escaped
value = unicodedata.normalize("NFKD", value)
# Strip out characters that aren't letterlike, underscores or hyphens,
# using the same regexp that slugify uses. This ensures that non-ASCII non-letters
# (accent modifiers, fancy punctuation) get stripped rather than escaped
value = SLUGIFY_RE.sub("", value)
# Encode as ASCII, escaping non-ASCII characters with backslashreplace, then convert
# back to a unicode string (which is what slugify expects)
value = value.encode("ascii", "backslashreplace").decode("ascii")
# Pass to slugify to perform final conversion (whitespace stripping, applying
# mark_safe); this will also strip out the backslashes from the 'backslashreplace'
# conversion
return slugify(value) |
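A hedged behaviour sketch for `cautious_slugify`; the exact escape codes depend on the input characters. | cautious_slugify("Hello World!")  # -> "hello-world", same as Django's slugify
# Cyrillic input is escaped rather than stripped, so the result is non-empty,
# e.g. something along the lines of "u0421u0442u0430u043du0446u0438u044f":
cautious_slugify("Станция") |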
Convert a string to ASCII similar to Django's slugify, with cautious handling of
non-ASCII alphanumeric characters. See `cautious_slugify`.
Any inner whitespace, hyphens or dashes will be converted to underscores and
will be safe for Django template or filename usage. | def safe_snake_case(value):
"""
Convert a string to ASCII similar to Django's slugify, with cautious handling of
non-ASCII alphanumeric characters. See `cautious_slugify`.
Any inner whitespace, hyphens or dashes will be converted to underscores and
will be safe for Django template or filename usage.
"""
slugified_ascii_string = cautious_slugify(value)
snake_case_string = slugified_ascii_string.replace("-", "_")
return snake_case_string |
Return a human-readable label for a content type object, suitable for display in the admin
in place of the default 'wagtailcore | page' representation | def get_content_type_label(content_type):
"""
Return a human-readable label for a content type object, suitable for display in the admin
in place of the default 'wagtailcore | page' representation
"""
if content_type is None:
return _("Unknown content type")
model = content_type.model_class()
if model:
return str(capfirst(model._meta.verbose_name))
else:
# no corresponding model class found; fall back on the name field of the ContentType
return capfirst(content_type.model) |
Determine whether the callable `func` has a signature that accepts the keyword argument `kwarg` | def accepts_kwarg(func, kwarg):
"""
Determine whether the callable `func` has a signature that accepts the keyword argument `kwarg`
"""
signature = inspect.signature(func)
try:
signature.bind_partial(**{kwarg: None})
return True
except TypeError:
return False |
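A minimal usage sketch for `accepts_kwarg`, using a throwaway function. | def render(template, *, context=None):
    ...

accepts_kwarg(render, "context")  # True - bind_partial succeeds
accepts_kwarg(render, "request")  # False - bind_partial raises TypeError |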
Finds an available slug within the specified parent.
If the requested slug is not available, this adds a number on the end, for example:
- 'requested-slug'
- 'requested-slug-1'
- 'requested-slug-2'
And so on, until an available slug is found.
The `ignore_page_id` keyword argument is useful for when you are updating a page,
you can pass the page being updated here so the page's current slug is not
treated as in use by another page. | def find_available_slug(parent, requested_slug, ignore_page_id=None):
"""
Finds an available slug within the specified parent.
If the requested slug is not available, this adds a number on the end, for example:
- 'requested-slug'
- 'requested-slug-1'
- 'requested-slug-2'
And so on, until an available slug is found.
The `ignore_page_id` keyword argument is useful for when you are updating a page,
you can pass the page being updated here so the page's current slug is not
treated as in use by another page.
"""
pages = parent.get_children().filter(slug__startswith=requested_slug)
if ignore_page_id:
pages = pages.exclude(id=ignore_page_id)
existing_slugs = set(pages.values_list("slug", flat=True))
slug = requested_slug
number = 1
while slug in existing_slugs:
slug = requested_slug + "-" + str(number)
number += 1
return slug |
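A hedged usage sketch; `homepage` and `news_page` are hypothetical Page instances. | slug = find_available_slug(homepage, "news")
# -> "news" if unused under homepage, otherwise "news-1", "news-2", ...

# When updating an existing page, exclude it so its own current slug does not
# force an unnecessary suffix:
slug = find_available_slug(homepage, "news", ignore_page_id=news_page.id) |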
Cache of settings.WAGTAIL_CONTENT_LANGUAGES in a dictionary for easy lookups by key. | def get_content_languages():
"""
Cache of settings.WAGTAIL_CONTENT_LANGUAGES in a dictionary for easy lookups by key.
"""
content_languages = getattr(settings, "WAGTAIL_CONTENT_LANGUAGES", None)
languages = dict(settings.LANGUAGES)
if content_languages is None:
# Default to a single language based on LANGUAGE_CODE
default_language_code = get_supported_language_variant(settings.LANGUAGE_CODE)
try:
language_name = languages[default_language_code]
except KeyError:
# get_supported_language_variant on the 'null' translation backend (used for
# USE_I18N=False) returns settings.LANGUAGE_CODE unchanged without accounting for
# language variants (en-us versus en), so retry with the generic version.
default_language_code = default_language_code.split("-")[0]
try:
language_name = languages[default_language_code]
except KeyError:
# Can't extract a display name, so fall back on displaying LANGUAGE_CODE instead
language_name = settings.LANGUAGE_CODE
# Also need to tweak the languages dict to get around the check below
languages[default_language_code] = settings.LANGUAGE_CODE
content_languages = [
(default_language_code, language_name),
]
# Check that each content language is in LANGUAGES
for language_code, name in content_languages:
if language_code not in languages:
raise ImproperlyConfigured(
"The language {} is specified in WAGTAIL_CONTENT_LANGUAGES but not LANGUAGES. "
"WAGTAIL_CONTENT_LANGUAGES must be a subset of LANGUAGES.".format(
language_code
)
)
return dict(content_languages) |
Return the language code that's listed in supported languages, possibly
selecting a more generic variant. Raise LookupError if nothing is found.
If `strict` is False (the default), look for a country-specific variant
when neither the language code nor its generic variant is found.
lru_cache should have a maxsize to prevent from memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
This is equivalent to Django's `django.utils.translation.get_supported_language_variant`
but reads the `WAGTAIL_CONTENT_LANGUAGES` setting instead. | def get_supported_content_language_variant(lang_code, strict=False):
"""
Return the language code that's listed in supported languages, possibly
selecting a more generic variant. Raise LookupError if nothing is found.
If `strict` is False (the default), look for a country-specific variant
when neither the language code nor its generic variant is found.
lru_cache should have a maxsize to prevent from memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
This is equivalent to Django's `django.utils.translation.get_supported_language_variant`
but reads the `WAGTAIL_CONTENT_LANGUAGES` setting instead.
"""
if lang_code:
# If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]["fallback"])
except KeyError:
pass
generic_lang_code = lang_code.split("-")[0]
possible_lang_codes.append(generic_lang_code)
supported_lang_codes = get_content_languages()
for code in possible_lang_codes:
if code in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + "-"):
return supported_code
raise LookupError(lang_code) |
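A behaviour sketch assuming a hypothetical WAGTAIL_CONTENT_LANGUAGES of [("en", "English"), ("fr", "French")]. | get_supported_content_language_variant("fr-ca")  # -> "fr" (falls back to the generic variant)
get_supported_content_language_variant("en")  # -> "en"
get_supported_content_language_variant("de")  # raises LookupError - not configured |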
Cache of the locale id -> locale display name mapping | def get_locales_display_names() -> dict:
"""
Cache of the locale id -> locale display name mapping
"""
from wagtail.models import Locale # inlined to avoid circular imports
cached_map = cache.get("wagtail_locales_display_name")
if cached_map is None:
cached_map = {
locale.pk: locale.get_display_name() for locale in Locale.objects.all()
}
cache.set("wagtail_locales_display_name", cached_map)
return cached_map |
Clear cache when global WAGTAIL_CONTENT_LANGUAGES/LANGUAGES/LANGUAGE_CODE settings are changed | def reset_cache(**kwargs):
"""
Clear cache when global WAGTAIL_CONTENT_LANGUAGES/LANGUAGES/LANGUAGE_CODE settings are changed
"""
if kwargs["setting"] in ("WAGTAIL_CONTENT_LANGUAGES", "LANGUAGES", "LANGUAGE_CODE"):
get_content_languages.cache_clear()
get_supported_content_language_variant.cache_clear() |
Like getattr, but accepts a dotted path as the accessor to be followed to any depth.
At each step, the lookup on the object can be a dictionary lookup (foo['bar']) or an attribute
lookup (foo.bar), and if it results in a callable, will be called (provided we can do so with
no arguments, and it does not have an 'alters_data' property).
Modelled on the variable resolution logic in Django templates:
https://github.com/django/django/blob/f331eba6d576752dd79c4b37c41d981daa537fe6/django/template/base.py#L838 | def multigetattr(item, accessor):
"""
Like getattr, but accepts a dotted path as the accessor to be followed to any depth.
At each step, the lookup on the object can be a dictionary lookup (foo['bar']) or an attribute
lookup (foo.bar), and if it results in a callable, will be called (provided we can do so with
no arguments, and it does not have an 'alters_data' property).
Modelled on the variable resolution logic in Django templates:
https://github.com/django/django/blob/f331eba6d576752dd79c4b37c41d981daa537fe6/django/template/base.py#L838
"""
current = item
for bit in accessor.split("."):
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup on
# numpy < 1.9 and 1.9+ respectively
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try: # attribute lookup
current = getattr(current, bit)
except (TypeError, AttributeError):
# Reraise if the exception was raised by a @property
if bit in dir(current):
raise
try: # list-index lookup
current = current[int(bit)]
except (
IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError, # unsubscriptable object
):
raise AttributeError(
f"Failed lookup for key [{bit}] in {current!r}"
)
if callable(current):
if getattr(current, "alters_data", False):
raise SuspiciousOperation(f"Cannot call {current!r} from multigetattr")
# if calling without arguments is invalid, let the exception bubble up
current = current()
return current |
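An illustrative sketch of the lookup cascade; `Author` and the data below are made up for the example. | class Author:
    name = "Ursula"

    def initials(self):
        return "UKL"

article = {"author": Author(), "tags": ["fantasy", "scifi"]}

multigetattr(article, "author.name")  # dict lookup, then attribute lookup -> "Ursula"
multigetattr(article, "author.initials")  # callable result is called -> "UKL"
multigetattr(article, "tags.1")  # falls through to list-index lookup -> "scifi" |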
Return a simple ``HttpRequest`` instance that can be passed to
``Page.get_url()`` and other methods to benefit from improved performance
when no real ``HttpRequest`` instance is available.
If ``site`` is provided, the ``HttpRequest`` is made to look like it came
from that Wagtail ``Site``. | def get_dummy_request(*, path: str = "/", site: "Site" = None) -> HttpRequest:
"""
Return a simple ``HttpRequest`` instance that can be passed to
``Page.get_url()`` and other methods to benefit from improved performance
when no real ``HttpRequest`` instance is available.
If ``site`` is provided, the ``HttpRequest`` is made to look like it came
from that Wagtail ``Site``.
"""
server_port = 80
if site:
server_name = site.hostname
server_port = site.port
else:
server_name = settings.ALLOWED_HOSTS[0]
if server_name == "*":
server_name = "example.com"
# `SERVER_PORT` doesn't work when passed to the constructor
return RequestFactory(SERVER_NAME=server_name).get(path, SERVER_PORT=server_port) |
Safely use the MD5 hash algorithm with the given ``data`` and a flag
indicating if the purpose of the digest is for security or not.
On security-restricted systems (such as FIPS systems), insecure hashes
like MD5 are disabled by default. But passing ``usedforsecurity`` as
``False`` tells the underlying security implementation we're not trying
to use the digest for secure purposes and to please just go ahead and
allow it to happen. | def safe_md5(data=b"", usedforsecurity=True):
"""
Safely use the MD5 hash algorithm with the given ``data`` and a flag
indicating if the purpose of the digest is for security or not.
On security-restricted systems (such as FIPS systems), insecure hashes
like MD5 are disabled by default. But passing ``usedforsecurity`` as
``False`` tells the underlying security implementation we're not trying
to use the digest for secure purposes and to please just go ahead and
allow it to happen.
"""
# Although ``accepts_kwarg`` works great on Python 3.8+, on Python 3.7 it
# raises a ValueError, saying "no signature found for builtin". So, back
# to the try/except.
try:
return md5(data, usedforsecurity=usedforsecurity)
except TypeError:
return md5(data) |
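A usage sketch: hashing non-sensitive data (cache keys, ETags) while staying usable on FIPS-restricted systems. | digest = safe_md5(b"cache-key-material", usedforsecurity=False).hexdigest() |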
A modified version of `make_template_fragment_key` which varies on page and
site for use with `{% wagtailpagecache %}`. | def make_wagtail_template_fragment_key(fragment_name, page, site, vary_on=None):
"""
A modified version of `make_template_fragment_key` which varies on page and
site for use with `{% wagtailpagecache %}`.
"""
if vary_on is None:
vary_on = []
vary_on.extend([page.cache_key, site.id])
return make_template_fragment_key(fragment_name, vary_on) |
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook) | def register(hook_name, fn=None, order=0):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
def decorator(fn):
register(hook_name, fn, order=order)
return fn
return decorator
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append((fn, order)) |
Register hook for ``hook_name`` temporarily. This is useful for testing hooks.
Can be used as a decorator::
def my_hook(...):
pass
class TestMyHook(Testcase):
@hooks.register_temporarily('hook_name', my_hook)
def test_my_hook(self):
pass
or as a context manager::
def my_hook(...):
pass
with hooks.register_temporarily('hook_name', my_hook):
# Hook is registered here
# Hook is unregistered here
To register multiple hooks at the same time, pass in a list of 2-tuples:
def my_hook(...):
pass
def my_other_hook(...):
pass
with hooks.register_temporarily([
('hook_name', my_hook),
('hook_name', my_other_hook),
]):
# Hooks are registered here | def register_temporarily(hook_name_or_hooks, fn=None, *, order=0):
"""
Register hook for ``hook_name`` temporarily. This is useful for testing hooks.
Can be used as a decorator::
def my_hook(...):
pass
class TestMyHook(Testcase):
@hooks.register_temporarily('hook_name', my_hook)
def test_my_hook(self):
pass
or as a context manager::
def my_hook(...):
pass
with hooks.register_temporarily('hook_name', my_hook):
# Hook is registered here
# Hook is unregistered here
To register multiple hooks at the same time, pass in a list of 2-tuples:
def my_hook(...):
pass
def my_other_hook(...):
pass
with hooks.register_temporarily([
('hook_name', my_hook),
('hook_name', my_other_hook),
]):
# Hooks are registered here
"""
if not isinstance(hook_name_or_hooks, list) and fn is not None:
hooks = [(hook_name_or_hooks, fn)]
else:
hooks = hook_name_or_hooks
return TemporaryHook(hooks, order) |
Return the hook functions sorted by their order. | def get_hooks(hook_name):
"""Return the hook functions sorted by their order."""
search_for_hooks()
hooks = _hooks.get(hook_name, [])
hooks = sorted(hooks, key=itemgetter(1))
return [hook[0] for hook in hooks] |
A context manager that can be used to temporarily disable the reference index auto-update signal handlers.
For example:
with disable_reference_index_auto_update():
my_instance.save() # Reference index will not be updated by this save | def disable_reference_index_auto_update():
"""
A context manager that can be used to temporarily disable the reference index auto-update signal handlers.
For example:
with disable_reference_index_auto_update():
my_instance.save() # Reference index will not be updated by this save
"""
try:
reference_index_auto_update_disabled.value = True
yield
finally:
del reference_index_auto_update_disabled.value |
Allows a class to implement its adapting logic with a `js_args()` method on the class itself.
This just helps reduce the amount of code you have to write.
For example:
@adapter('wagtail.mywidget')
class MyWidget():
...
def js_args(self):
return [
self.foo,
]
Is equivalent to:
class MyWidget():
...
class MyWidgetAdapter(Adapter):
js_constructor = 'wagtail.mywidget'
def js_args(self, obj):
return [
self.foo,
] | def adapter(js_constructor, base=Adapter):
"""
Allows a class to implement its adapting logic with a `js_args()` method on the class itself.
This just helps reduce the amount of code you have to write.
For example:
@adapter('wagtail.mywidget')
class MyWidget():
...
def js_args(self):
return [
self.foo,
]
Is equivalent to:
class MyWidget():
...
class MyWidgetAdapter(Adapter):
js_constructor = 'wagtail.mywidget'
def js_args(self, obj):
return [
self.foo,
]
"""
def _wrapper(cls):
ClassAdapter = type(
cls.__name__ + "Adapter",
(base,),
{
"js_constructor": js_constructor,
"js_args": lambda self, obj: obj.js_args(),
},
)
register(ClassAdapter(), cls)
return cls
return _wrapper |
Handle a submission of PasswordViewRestrictionForm to grant view access over a
subtree that is protected by a PageViewRestriction | def authenticate_with_password(request, page_view_restriction_id, page_id):
"""
Handle a submission of PasswordViewRestrictionForm to grant view access over a
subtree that is protected by a PageViewRestriction
"""
restriction = get_object_or_404(PageViewRestriction, id=page_view_restriction_id)
page = get_object_or_404(Page, id=page_id).specific
if request.method == "POST":
form = PasswordViewRestrictionForm(request.POST, instance=restriction)
if form.is_valid():
return_url = form.cleaned_data["return_url"]
if not url_has_allowed_host_and_scheme(
return_url, request.get_host(), request.is_secure()
):
return_url = settings.LOGIN_REDIRECT_URL
restriction.mark_as_passed(request)
return redirect(return_url)
else:
form = PasswordViewRestrictionForm(instance=restriction)
action_url = reverse(
"wagtailcore_authenticate_with_password", args=[restriction.id, page.id]
)
return page.serve_password_required_response(request, form, action_url) |
Check whether there are any view restrictions on this page which are
not fulfilled by the given request object. If there are, return an
HttpResponse that will notify the user of that restriction (and possibly
include a password / login form that will allow them to proceed). If
there are no such restrictions, return None | def check_view_restrictions(page, request, serve_args, serve_kwargs):
"""
Check whether there are any view restrictions on this page which are
not fulfilled by the given request object. If there are, return an
HttpResponse that will notify the user of that restriction (and possibly
include a password / login form that will allow them to proceed). If
there are no such restrictions, return None
"""
for restriction in page.get_view_restrictions():
if not restriction.accept_request(request):
if restriction.restriction_type == PageViewRestriction.PASSWORD:
from wagtail.forms import PasswordViewRestrictionForm
form = PasswordViewRestrictionForm(
instance=restriction,
initial={"return_url": request.get_full_path()},
)
action_url = reverse(
"wagtailcore_authenticate_with_password",
args=[restriction.id, page.id],
)
return page.serve_password_required_response(request, form, action_url)
elif restriction.restriction_type in [
PageViewRestriction.LOGIN,
PageViewRestriction.GROUPS,
]:
return require_wagtail_login(next=request.get_full_path()) |
Generator for functions that can be used as entries in Whitelister.element_rules.
These functions accept a tag, and modify its attributes by looking each attribute
up in the 'allowed_attrs' dict defined here:
* if the lookup fails, drop the attribute
* if the lookup returns a callable, replace the attribute with the result of calling
it - for example `{'title': uppercase}` will replace 'title' with the result of
uppercasing the title. If the callable returns None, the attribute is dropped.
* if the lookup returns a truthy value, keep the attribute; if falsy, drop it | def attribute_rule(allowed_attrs):
"""
Generator for functions that can be used as entries in Whitelister.element_rules.
These functions accept a tag, and modify its attributes by looking each attribute
up in the 'allowed_attrs' dict defined here:
* if the lookup fails, drop the attribute
* if the lookup returns a callable, replace the attribute with the result of calling
it - for example `{'title': uppercase}` will replace 'title' with the result of
uppercasing the title. If the callable returns None, the attribute is dropped.
* if the lookup returns a truthy value, keep the attribute; if falsy, drop it
"""
def fn(tag):
for attr, val in list(tag.attrs.items()):
rule = allowed_attrs.get(attr)
if rule:
if callable(rule):
new_val = rule(val)
if new_val is None:
del tag[attr]
else:
tag[attr] = new_val
else:
# rule is not callable, just truthy - keep the attribute
pass
else:
# rule is falsy or absent - remove the attribute
del tag[attr]
return fn |
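An illustrative rule built with `attribute_rule`; the toy `check_url` validator below is made up for the example, and the tag is assumed to be a BeautifulSoup-style element. | def check_url(value):
    # toy validator: keep http(s) links, drop anything else
    return value if value.startswith(("http:", "https:")) else None

link_rule = attribute_rule({"href": check_url, "title": True})
# Applied to <a href="javascript:evil()" title="Hi" onclick="steal()">:
#  - "href" is dropped (check_url returns None for the javascript: URL)
#  - "title" is kept unchanged (truthy, non-callable rule)
#  - "onclick" is removed (no entry in allowed_attrs) |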
Retrieves non-abstract descendants of the given model class. If `inclusive` is set to
True, includes model_class | def get_concrete_descendants(model_class, inclusive=True):
"""Retrieves non-abstract descendants of the given model class. If `inclusive` is set to
True, includes model_class"""
subclasses = model_class.__subclasses__()
if subclasses:
for subclass in subclasses:
yield from get_concrete_descendants(subclass)
if inclusive and not model_class._meta.abstract:
yield model_class |
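An illustrative sketch with hypothetical models, assuming a configured Django project. | from django.db import models

class Animal(models.Model):
    class Meta:
        abstract = True
        app_label = "zoo"

class Dog(Animal):
    class Meta:
        app_label = "zoo"

class Cat(Animal):
    class Meta:
        app_label = "zoo"

list(get_concrete_descendants(Animal))  # -> [Dog, Cat]
# Animal itself is skipped because it is abstract, even though `inclusive` defaults to True. |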
Retrieve the global list of menu items for the page action menu,
which may then be customised on a per-request basis | def _get_base_page_action_menu_items():
"""
Retrieve the global list of menu items for the page action menu,
which may then be customised on a per-request basis
"""
global BASE_PAGE_ACTION_MENU_ITEMS
if BASE_PAGE_ACTION_MENU_ITEMS is None:
BASE_PAGE_ACTION_MENU_ITEMS = [
SaveDraftMenuItem(order=0),
UnpublishMenuItem(order=20),
PublishMenuItem(order=30),
CancelWorkflowMenuItem(order=40),
RestartWorkflowMenuItem(order=50),
SubmitForModerationMenuItem(order=60),
PageLockedMenuItem(order=10000),
]
for hook in hooks.get_hooks("register_page_action_menu_item"):
action_menu_item = hook()
if action_menu_item:
BASE_PAGE_ACTION_MENU_ITEMS.append(action_menu_item)
return BASE_PAGE_ACTION_MENU_ITEMS |
Return a standard 'permission denied' response | def permission_denied(request):
"""Return a standard 'permission denied' response"""
if request.headers.get("x-requested-with") == "XMLHttpRequest":
raise PermissionDenied
from wagtail.admin import messages
messages.error(request, _("Sorry, you do not have permission to access this area."))
return redirect("wagtailadmin_home") |
Given a test function that takes a user object and returns a boolean,
return a view decorator that denies access to the user if the test returns false. | def user_passes_test(test):
"""
Given a test function that takes a user object and returns a boolean,
return a view decorator that denies access to the user if the test returns false.
"""
def decorator(view_func):
# decorator takes the view function, and returns the view wrapped in
# a permission check
@wraps(view_func)
def wrapped_view_func(request, *args, **kwargs):
if test(request.user):
# permission check succeeds; run the view function as normal
return view_func(request, *args, **kwargs)
else:
# permission check failed
return permission_denied(request)
return wrapped_view_func
return decorator |
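Illustrative usage sketch (the test function and view are hypothetical):

def is_superuser(user):
    return user.is_superuser

@user_passes_test(is_superuser)
def secret_report(request):
    # only reached when the test passes; everyone else gets the permission_denied response
    ...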
Replacement for django.contrib.auth.decorators.permission_required which returns a
more meaningful 'permission denied' response than just redirecting to the login page.
(The latter doesn't work anyway because Wagtail doesn't define LOGIN_URL...) | def permission_required(permission_name):
"""
Replacement for django.contrib.auth.decorators.permission_required which returns a
more meaningful 'permission denied' response than just redirecting to the login page.
(The latter doesn't work anyway because Wagtail doesn't define LOGIN_URL...)
"""
def test(user):
return user.has_perm(permission_name)
# user_passes_test constructs a decorator function specific to the above test function
return user_passes_test(test) |
Decorator that accepts a list of permission names, and allows the user
to pass if they have *any* of the permissions in the list | def any_permission_required(*perms):
"""
Decorator that accepts a list of permission names, and allows the user
to pass if they have *any* of the permissions in the list
"""
def test(user):
for perm in perms:
if user.has_perm(perm):
return True
return False
return user_passes_test(test) |
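Illustrative usage sketch (the permission codenames are examples only):

@any_permission_required("wagtailimages.add_image", "wagtailimages.change_image")
def manage_images(request):
    # reached if the user holds at least one of the listed permissions
    ...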
Check if a user has any permission to add, edit, or otherwise manage any
page. | def user_has_any_page_permission(user):
"""
Check if a user has any permission to add, edit, or otherwise manage any
page.
"""
return page_permission_policy.user_has_any_permission(
user, {"add", "change", "publish", "bulk_delete", "lock", "unlock"}
) |
Check panels configuration uses `panels` when `edit_handler` not in use. | def check_panels_in_model(cls, context="model"):
"""Check panels configuration uses `panels` when `edit_handler` not in use."""
from wagtail.admin.panels import InlinePanel, PanelGroup
from wagtail.models import Page
errors = []
if hasattr(cls, "get_edit_handler"):
# must check the InlinePanel related models
edit_handler = cls.get_edit_handler()
for tab in edit_handler.children:
if isinstance(tab, PanelGroup):
inline_panel_children = [
panel for panel in tab.children if isinstance(panel, InlinePanel)
]
for inline_panel_child in inline_panel_children:
errors.extend(
check_panels_in_model(
inline_panel_child.db_field.related_model,
context="InlinePanel model",
)
)
if issubclass(cls, Page) or hasattr(cls, "edit_handler"):
# Pages do not need to be checked for standalone tabbed_panel usage
# if edit_handler is used on any model, assume config is correct
return errors
tabbed_panels = [
"content_panels",
"promote_panels",
"settings_panels",
]
for panel_name in tabbed_panels:
class_name = cls.__name__
if not hasattr(cls, panel_name):
continue
panel_name_short = panel_name.replace("_panels", "").title()
error_title = "{}.{} will have no effect on {} editing".format(
class_name, panel_name, context
)
if "InlinePanel" in context:
error_hint = """Ensure that {} uses `panels` instead of `{}`.
There are no tabs on non-Page model editing within InlinePanels.""".format(
class_name, panel_name
)
else:
error_hint = """Ensure that {} uses `panels` instead of `{}`\
or set up an `edit_handler` if you want a tabbed editing interface.
There are no default tabs on non-Page models so there will be no \
{} tab for the {} to render in.""".format(
class_name, panel_name, panel_name_short, panel_name
)
error = Warning(error_title, hint=error_hint, obj=cls, id="wagtailadmin.W002")
errors.append(error)
return errors |
If L10N is enabled, check if WAGTAIL_* formats are compatible with Django input formats.
See https://docs.djangoproject.com/en/stable/topics/i18n/formatting/#creating-custom-format-files
See https://docs.wagtail.org/en/stable/reference/settings.html#wagtail-date-format-wagtail-datetime-format-wagtail-time-format | def datetime_format_check(app_configs, **kwargs):
"""
If L10N is enabled, check if WAGTAIL_* formats are compatible with Django input formats.
See https://docs.djangoproject.com/en/stable/topics/i18n/formatting/#creating-custom-format-files
See https://docs.wagtail.org/en/stable/reference/settings.html#wagtail-date-format-wagtail-datetime-format-wagtail-time-format
"""
from django.conf import settings
from django.utils import formats, translation
errors = []
if not getattr(settings, "USE_L10N", False):
return errors
formats.FORMAT_SETTINGS = formats.FORMAT_SETTINGS.union(
[
"WAGTAIL_DATE_FORMAT",
"WAGTAIL_DATETIME_FORMAT",
"WAGTAIL_TIME_FORMAT",
]
)
for code, label in settings.LANGUAGES:
with translation.override(code):
for wagtail_format, django_formats in [
("WAGTAIL_DATE_FORMAT", "DATE_INPUT_FORMATS"),
("WAGTAIL_DATETIME_FORMAT", "DATETIME_INPUT_FORMATS"),
("WAGTAIL_TIME_FORMAT", "TIME_INPUT_FORMATS"),
]:
wagtail_format_value = getattr(settings, wagtail_format, None)
django_formats_value = getattr(settings, django_formats, None)
if wagtail_format_value is None:
# Skip the iteration if wagtail_format is not present
continue
input_format = formats.get_format_lazy(wagtail_format_value)
input_formats = formats.get_format_lazy(django_formats_value)
if str(input_format) not in str(input_formats):
errors.append(
Error(
"Configuration error",
hint=f"{wagtail_format} {input_format} must be in {django_formats} for language {label} ({code}).",
)
)
return errors |
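An illustrative settings sketch that would satisfy this check (the concrete format strings are examples; the point is that each WAGTAIL_* value must appear in the matching Django *_INPUT_FORMATS for every language in LANGUAGES):

USE_L10N = True
WAGTAIL_DATE_FORMAT = "%d.%m.%Y"
DATE_INPUT_FORMATS = ["%d.%m.%Y", "%Y-%m-%d"]
WAGTAIL_TIME_FORMAT = "%H:%M"
TIME_INPUT_FORMATS = ["%H:%M", "%H:%M:%S"]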
Register the comparison class to be used when rendering revision comparisons for a given
database field type. | def register_comparison_class(
field_class, to=None, comparison_class=None, exact_class=False
):
"""
    Register the comparison class to be used when rendering revision comparisons for a given
    database field type.
"""
if comparison_class is None:
raise ImproperlyConfigured(
"register_comparison_class must be passed a 'comparison_class' keyword argument"
)
if to and field_class != models.ForeignKey:
raise ImproperlyConfigured(
"The 'to' argument on register_comparison_class is only valid for ForeignKey fields"
)
comparison_class_registry.register(
field_class, to=to, value=comparison_class, exact_class=exact_class
) |
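Illustrative registration sketch (MyTextComparison is a hypothetical comparison class implementing the interface the revision comparison view expects):

from django.db import models

register_comparison_class(models.TextField, comparison_class=MyTextComparison)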
Performs a diffing algorithm on two pieces of text. Returns
a string of HTML containing the content of both texts with
<span> tags inserted indicating where the differences are. | def diff_text(a, b):
"""
Performs a diffing algorithm on two pieces of text. Returns
a string of HTML containing the content of both texts with
<span> tags inserted indicating where the differences are.
"""
def tokenise(text):
"""
Tokenises a string by splitting it into individual characters
and grouping the alphanumeric ones together.
This means that punctuation, whitespace, CJK characters, etc
become separate tokens and words/numbers are merged together
to form bigger tokens.
This makes the output of the diff easier to read as words are
not broken up.
"""
tokens = []
current_token = ""
for c in text or "":
if c.isalnum():
current_token += c
else:
if current_token:
tokens.append(current_token)
current_token = ""
tokens.append(c)
if current_token:
tokens.append(current_token)
return tokens
a_tok = tokenise(a)
b_tok = tokenise(b)
sm = difflib.SequenceMatcher(lambda t: len(t) <= 4, a_tok, b_tok)
changes = []
for op, i1, i2, j1, j2 in sm.get_opcodes():
if op == "replace":
for token in a_tok[i1:i2]:
changes.append(("deletion", token))
for token in b_tok[j1:j2]:
changes.append(("addition", token))
elif op == "delete":
for token in a_tok[i1:i2]:
changes.append(("deletion", token))
elif op == "insert":
for token in b_tok[j1:j2]:
changes.append(("addition", token))
elif op == "equal":
for token in a_tok[i1:i2]:
changes.append(("equal", token))
# Merge adjacent changes which have the same type. This just cleans up the HTML a bit
merged_changes = []
current_value = []
current_change_type = None
for change_type, value in changes:
if change_type != current_change_type:
if current_change_type is not None:
merged_changes.append((current_change_type, "".join(current_value)))
current_value = []
current_change_type = change_type
current_value.append(value)
if current_value:
merged_changes.append((current_change_type, "".join(current_value)))
return TextDiff(merged_changes) |
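Illustrative usage sketch:

diff = diff_text("The quick brown fox jumps", "The quick red fox jumps")
# diff is a TextDiff instance; its merged changes come out roughly as
# [("equal", "The quick "), ("deletion", "brown"), ("addition", "red"), ("equal", " fox jumps")]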
Given a python datetime format string, attempts to convert it to
the nearest PHP datetime format string possible. | def to_datetimepicker_format(python_format_string):
"""
Given a python datetime format string, attempts to convert it to
the nearest PHP datetime format string possible.
"""
python2PHP = {
"%a": "D",
"%A": "l",
"%b": "M",
"%B": "F",
"%c": "",
"%d": "d",
"%H": "H",
"%I": "h",
"%j": "z",
"%m": "m",
"%M": "i",
"%p": "A",
"%S": "s",
"%U": "",
"%w": "w",
"%W": "W",
"%x": "",
"%X": "",
"%y": "y",
"%Y": "Y",
"%Z": "e",
}
php_format_string = python_format_string
for py, php in python2PHP.items():
php_format_string = php_format_string.replace(py, php)
return php_format_string |
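Illustrative usage sketch:

to_datetimepicker_format("%d %b %Y %H:%M")
# -> "d M Y H:i", the PHP-style string expected by the datetime picker widget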
Wrapper around Django's EmailMultiAlternatives, mirroring django.core.mail.send_mail(),
with custom from_email handling and a special Auto-Submitted header. | def send_mail(subject, message, recipient_list, from_email=None, **kwargs):
"""
    Wrapper around Django's EmailMultiAlternatives, mirroring django.core.mail.send_mail(),
    with custom from_email handling and a special Auto-Submitted header.
"""
if not from_email:
if hasattr(settings, "WAGTAILADMIN_NOTIFICATION_FROM_EMAIL"):
from_email = settings.WAGTAILADMIN_NOTIFICATION_FROM_EMAIL
elif hasattr(settings, "DEFAULT_FROM_EMAIL"):
from_email = settings.DEFAULT_FROM_EMAIL
else:
# We are no longer using the term `webmaster` except in this case, where we continue to match Django's default: https://github.com/django/django/blob/stable/3.2.x/django/conf/global_settings.py#L223
from_email = "webmaster@localhost"
connection = kwargs.get("connection", False) or get_connection(
username=kwargs.get("auth_user", None),
password=kwargs.get("auth_password", None),
fail_silently=kwargs.get("fail_silently", None),
)
multi_alt_kwargs = {
"connection": connection,
"headers": {
"Auto-Submitted": "auto-generated",
},
"bcc": kwargs.get("bcc", None),
"cc": kwargs.get("cc", None),
"reply_to": kwargs.get("reply_to", None),
}
mail = EmailMultiAlternatives(
subject, message, from_email, recipient_list, **multi_alt_kwargs
)
html_message = kwargs.get("html_message", None)
if html_message:
mail.attach_alternative(html_message, "text/html")
return mail.send() |
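Illustrative usage sketch (the addresses are placeholders):

send_mail(
    "Page awaiting moderation",
    "A page has been submitted for moderation.",
    ["editor@example.com"],
    html_message="<p>A page has been submitted for moderation.</p>",
)
# sends a multipart message with the Auto-Submitted: auto-generated header,
# resolving the from address from settings when none is given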
"
Render a response consisting of an HTML chunk and a JS onload chunk
in the format required by the modal-workflow framework. | def render_modal_workflow(
request, html_template, js_template=None, template_vars=None, json_data=None
):
""" "
Render a response consisting of an HTML chunk and a JS onload chunk
in the format required by the modal-workflow framework.
"""
if js_template:
raise TypeError(
"Passing a js_template argument to render_modal_workflow is no longer supported"
)
# construct response as JSON
response = {}
if html_template:
response["html"] = render_to_string(
html_template, template_vars or {}, request=request
)
if json_data:
response.update(json_data)
return JsonResponse(response) |
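Illustrative usage sketch inside an admin view (the template path and step name are hypothetical):

def choose_item(request):
    return render_modal_workflow(
        request,
        "myapp/chooser/choose.html",
        None,
        {"items": []},
        json_data={"step": "choose"},
    )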
Returns a queryset of pages that link to a particular object | def get_object_usage(obj):
"""Returns a queryset of pages that link to a particular object"""
pages = Page.objects.none()
# get all the relation objects for obj
relations = [
f
for f in type(obj)._meta.get_fields(include_hidden=True)
if (f.one_to_many or f.one_to_one) and f.auto_created
]
for relation in relations:
related_model = relation.related_model
# if the relation is between obj and a page, get the page
if issubclass(related_model, Page):
pages |= Page.objects.filter(
id__in=related_model._base_manager.filter(
**{relation.field.name: obj.id}
).values_list("id", flat=True)
)
else:
# if the relation is between obj and an object that has a page as a
# property, return the page
for f in related_model._meta.fields:
if isinstance(f, ParentalKey) and issubclass(
f.remote_field.model, Page
):
pages |= Page.objects.filter(
id__in=related_model._base_manager.filter(
**{relation.field.name: obj.id}
).values_list(f.attname, flat=True)
)
return pages |
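Illustrative usage sketch (assumes the wagtailimages app; any model referenced from pages works the same way):

from wagtail.images import get_image_model

image = get_image_model().objects.first()
pages_using_image = get_object_usage(image)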
Return a queryset of the most frequently used tags used on this model class | def popular_tags_for_model(model, count=10):
"""Return a queryset of the most frequently used tags used on this model class"""
content_type = ContentType.objects.get_for_model(model)
return (
Tag.objects.filter(taggit_taggeditem_items__content_type=content_type)
.annotate(item_count=Count("taggit_taggeditem_items"))
.order_by("-item_count")[:count]
) |
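Illustrative usage sketch (again assuming the wagtailimages app):

from wagtail.images import get_image_model

top_tags = popular_tags_for_model(get_image_model(), count=5)
# the five most frequently used image tags, ordered by usage count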
Gets the base URL for the wagtail admin site. This is set in `settings.WAGTAILADMIN_BASE_URL`. | def get_admin_base_url():
"""
Gets the base URL for the wagtail admin site. This is set in `settings.WAGTAILADMIN_BASE_URL`.
"""
return getattr(settings, "WAGTAILADMIN_BASE_URL", None) |
Helper function to get the latest string representation of an object.
Draft changes are saved as revisions instead of immediately reflected to the
instance, so this function utilises the latest revision's object_str
attribute if available. | def get_latest_str(obj):
"""
Helper function to get the latest string representation of an object.
Draft changes are saved as revisions instead of immediately reflected to the
instance, so this function utilises the latest revision's object_str
attribute if available.
"""
from wagtail.models import DraftStateMixin, Page
if isinstance(obj, Page):
return obj.specific_deferred.get_admin_display_title()
if isinstance(obj, DraftStateMixin) and obj.latest_revision:
return obj.latest_revision.object_str
return str(obj) |
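Illustrative usage sketch:

from wagtail.models import Page

page = Page.objects.live().first()
label = get_latest_str(page)
# for pages this is the admin display title; other DraftStateMixin models fall
# back to the latest revision's object_str, then plain str(obj)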