response
stringlengths
1
33.1k
instruction
stringlengths
22
582k
Complete attributes of an object.
def attr_complete(prefix, ctx, filter_func):
    """Complete attributes of an object.

    Parameters
    ----------
    prefix : str
        The text being completed; matched against RE_ATTR to split it into
        an object expression and a (possibly empty) attribute fragment.
    ctx : dict
        Namespace in which the object expression is (safely) evaluated.
    filter_func : callable
        ``filter_func(option, attr)`` -> bool; selects candidate attribute
        names that match the typed fragment.

    Returns
    -------
    set of str
        Full replacement strings (prefix with the attribute part completed).
    """
    attrs = set()
    m = RE_ATTR.match(prefix)
    if m is None:
        # prefix does not look like "<expr>.<attr>" at all
        return attrs
    # NOTE(review): groups 1 and 3 are presumably (expression, attribute
    # fragment) -- confirm against RE_ATTR's definition in the full file.
    expr, attr = m.group(1, 3)
    # trim the expression back to a balanced sub-expression so it can be
    # evaluated on its own
    expr = xt.subexpr_from_unbalanced(expr, "(", ")")
    expr = xt.subexpr_from_unbalanced(expr, "[", "]")
    expr = xt.subexpr_from_unbalanced(expr, "{", "}")
    val, _ctx = _safe_eval(expr, ctx)
    if val is None and _ctx is None:
        # expression could not be evaluated safely
        return attrs
    if len(attr) == 0:
        # no fragment typed yet: offer all public attributes
        opts = [o for o in dir(val) if not o.startswith("_")]
    else:
        opts = [o for o in dir(val) if filter_func(o, attr)]
    prelen = len(prefix)
    for opt in opts:
        # check whether these options actually work (e.g., disallow 7.imag)
        _expr = f"{expr}.{opt}"
        _val_, _ctx_ = _safe_eval(_expr, _ctx)
        if _val_ is None and _ctx_ is None:
            continue
        a = getattr(val, opt)
        if XSH.env["COMPLETIONS_BRACKETS"]:
            # append an opening bracket hint for callables and containers
            if callable(a):
                rpl = opt + "("
            elif isinstance(a, (cabc.Sequence, cabc.Mapping)):
                rpl = opt + "["
            else:
                rpl = opt
        else:
            rpl = opt
        # note that prefix[:prelen-len(attr)] != prefix[:-len(attr)]
        # when len(attr) == 0.
        comp = prefix[: prelen - len(attr)] + rpl
        attrs.add(comp)
    return attrs
Completes a python function (or other callable) call by completing argument and keyword argument names.
def python_signature_complete(prefix, line, end, ctx, filter_func):
    """Complete argument and keyword-argument names of a python function
    (or other callable) call.

    Parameters
    ----------
    prefix : str
        The fragment being completed (matched against parameter names).
    line : str
        The full input line.
    end : int
        Cursor position within ``line``.
    ctx : dict
        Namespace in which the callable expression is evaluated.
    filter_func : callable
        ``filter_func(name, prefix)`` -> bool.
    """
    front = line[:end]
    # only relevant when the cursor sits inside an unclosed call
    if xt.is_balanced(front, "(", ")"):
        return set()
    funcname = xt.subexpr_before_unbalanced(front, "(", ")")
    val, _ctx = _safe_eval(funcname, ctx)
    if val is None:
        return set()
    try:
        sig = inspect.signature(val)
    except (ValueError, TypeError):
        # builtins and some C callables have no introspectable signature
        return set()
    return {name + "=" for name in sig.parameters if filter_func(name, prefix)}
Return an appropriate filtering function for completions, given the value of $CASE_SENSITIVE_COMPLETIONS
def get_filter_function():
    """Return the completion-filtering predicate appropriate for the
    current value of $CASE_SENSITIVE_COMPLETIONS.
    """
    case_sensitive = XSH.env.get("CASE_SENSITIVE_COMPLETIONS")
    return _filter_normal if case_sensitive else _filter_ignorecase
Re-wrap the string s so that each line is no more than max_length characters long, padding all lines but the first on the left with the string left_pad.
def justify(s, max_length, left_pad=0):
    """Re-wrap the string ``s`` so that each line is no more than
    ``max_length`` characters long, indenting every line but the first
    by ``left_pad`` spaces.
    """
    indent = " " * left_pad
    wrapped = textwrap.wrap(s, width=max_length, subsequent_indent=indent)
    return "\n".join(wrapped)
The ``__init__`` parameters' default values (excluding ``self`` and ``value``).
def RICH_COMPLETION_DEFAULTS():
    """The ``__init__`` parameters' default values (excluding ``self`` and ``value``)."""
    params = inspect.signature(RichCompletion.__init__).parameters
    excluded = ("self", "value")
    return [(name, p.default) for name, p in params.items() if name not in excluded]
Decorator for a contextual completer This is used to mark completers that want to use the parsed completion context. See ``xonsh/parsers/completion_context.py``. ``func`` receives a single CompletionContext object.
def contextual_completer(func: ContextualCompleter):
    """Mark ``func`` as a contextual completer.

    Contextual completers receive the parsed completion context (a single
    ``CompletionContext`` object).  See ``xonsh/parsers/completion_context.py``.
    """
    func.contextual = True  # type: ignore
    return func
like ``contextual_completer``, but will only run when completing a command and will directly receive the ``CommandContext`` object
def contextual_command_completer(func: tp.Callable[[CommandContext], CompleterResult]):
    """Like ``contextual_completer``, but the wrapped completer only runs
    while completing a command and receives the ``CommandContext`` directly.
    """

    @contextual_completer
    @wraps(func)
    def _completer(context: CompletionContext) -> CompleterResult:
        command_ctx = context.command
        if command_ctx is None:
            return None
        return func(command_ctx)

    return _completer
like ``contextual_command_completer``, but will only run when completing the ``cmd`` command
def contextual_command_completer_for(cmd: str):
    """Like ``contextual_command_completer``, but the wrapped completer only
    runs while completing the ``cmd`` command."""

    def decor(func: tp.Callable[[CommandContext], CompleterResult]):
        @contextual_completer
        @wraps(func)
        def _completer(context: CompletionContext) -> CompleterResult:
            command_ctx = context.command
            if command_ctx is not None and command_ctx.completing_command(cmd):
                return func(command_ctx)
            return None

        return _completer

    return decor
Decorator for a non-exclusive completer This is used to mark completers that will be collected with other completer's results.
def non_exclusive_completer(func):
    """Mark a completer as non-exclusive.

    Results from non-exclusive completers are merged with other completers'
    results instead of replacing them.
    """
    func.non_exclusive = True  # type: ignore
    return func
Helper function to complete commands such as ``pip``,``django-admin``,... that use bash's ``complete``
def comp_based_completer(ctx: CommandContext, start_index=0, **env: str):
    """Helper to complete commands such as ``pip``, ``django-admin``, ...
    that implement completion via bash's ``complete`` protocol.

    Parameters
    ----------
    ctx
        The command context being completed.
    start_index
        Index of the first word that belongs to the completed command.
    **env
        Extra environment variables for the completion subprocess.
    """
    words = [arg.value for arg in ctx.args]
    if ctx.prefix:
        words.append(ctx.prefix)
    yield from complete_from_sub_proc(
        *words[: start_index + 1],
        sep=shlex.split,
        COMP_WORDS=os.linesep.join(words[start_index:]) + os.linesep,
        COMP_CWORD=str(ctx.arg_index - start_index),
        **env,
    )
for backward compatibility
def _remove_completer(args):
    """Deprecated wrapper kept for backward compatibility; delegates to
    ``remove_completer`` with the first argument."""
    return remove_completer(args[0])
Return all callable names in the current context
def complete_func_name_choices(xsh, **_):
    """Yield the names of all callables in the current context."""
    for name, obj in xsh.ctx.items():
        if callable(obj):
            yield name
Compute possible positions for the new completer
def complete_completer_pos_choices(xsh, **_):
    """Yield the possible positions for a newly-registered completer."""
    yield from {"start", "end"}
    # relative positions: after (">") or before ("<") an existing completer
    for name in xsh.completers.keys():
        yield from (">" + name, "<" + name)
Add a new completer to xonsh Parameters ---------- name unique name to use in the listing (run "completer list" to see the current completers in order) func the name of a completer function to use. This should be a function that takes a Completion Context object and marked with the ``xonsh.completers.tools.contextual_completer`` decorator. It should return a set of valid completions for the given prefix. If this completer should not be used in a given context, it should return an empty set or None. For more information see https://xon.sh/tutorial_completers.html#writing-a-new-completer. pos position into the list of completers at which the new completer should be added. It can be one of the following values: * "start" indicates that the completer should be added to the start of the list of completers (it should be run before all other exclusive completers) * "end" indicates that the completer should be added to the end of the list of completers (it should be run after all others) * ">KEY", where KEY is a pre-existing name, indicates that this should be added after the completer named KEY * "<KEY", where KEY is a pre-existing name, indicates that this should be added before the completer named KEY
def _register_completer(
    name: str,
    func: xcli.Annotated[str, xcli.Arg(completer=complete_func_name_choices)],
    pos: xcli.Annotated[
        str, xcli.Arg(completer=complete_completer_pos_choices, nargs="?")
    ] = "start",
    _stack=None,
):
    """Add a new completer to xonsh

    Parameters
    ----------
    name
        unique name to use in the listing (run "completer list" to see the
        current completers in order)

    func
        the name of a completer function to use.  This should be a function
        that takes a Completion Context object and marked with the
        ``xonsh.completers.tools.contextual_completer`` decorator.
        It should return a set of valid completions
        for the given prefix.  If this completer should not be used in a given
        context, it should return an empty set or None.

        For more information see https://xon.sh/tutorial_completers.html#writing-a-new-completer.

    pos
        position into the list of completers at which the new
        completer should be added.  It can be one of the following values:
        * "start" indicates that the completer should be added to the start of
          the list of completers (it should be run before all other exclusive completers)
        * "end" indicates that the completer should be added to the end of the
          list of completers (it should be run after all others)
        * ">KEY", where KEY is a pre-existing name, indicates that this should
          be added after the completer named KEY
        * "<KEY", where KEY is a pre-existing name, indicates that this should
          be added before the completer named KEY
    """
    err = None
    # ``func`` arrives as a *name*; resolve it to the actual callable below.
    func_name = func
    xsh = XSH
    if name in xsh.completers:
        err = f"The name {name} is already a registered completer function."
    else:
        if func_name in xsh.ctx:
            # prefer a function defined in the interactive context
            func = xsh.ctx[func_name]
            if not callable(func):
                err = f"{func_name} is not callable"
        else:
            # fall back to searching the caller's stack frames for the name
            for frame_info in _stack:
                frame = frame_info[0]
                if func_name in frame.f_locals:
                    func = frame.f_locals[func_name]
                    break
                elif func_name in frame.f_globals:
                    func = frame.f_globals[func_name]
                    break
            else:
                # for-else: no frame provided the name
                err = f"No such function: {func_name}"
    if err is None:
        _add_one_completer(name, func, pos)
    else:
        # CLI-style error return: (stdout, stderr, returncode)
        return None, err + "\n", 1
Complete any alias that has ``xonsh_complete`` attribute. The said attribute should be a function. The current command context is passed to it.
def complete_aliases(command: CommandContext):
    """Complete any alias that has an ``xonsh_complete`` attribute.

    That attribute must be a function; the current command context is
    passed to it.
    """
    if not command.args:
        return
    cmd = command.args[0].value
    if cmd not in XSH.aliases:
        # only complete aliases
        return
    alias = XSH.aliases.get(cmd)  # type: ignore
    completer = getattr(alias, "xonsh_complete", None)
    if not completer:
        return
    if command.suffix:
        # completing in a middle of a word
        # (e.g. "completer some<TAB>thing")
        return
    return completer(command=command, alias=alias), False
Return number of units and list of history files to remove to get under the limit. Parameters: ----------- hsize (int): units of history, # of commands in this case. files ((mod_ts, num_commands, path, fsize)[]): history files, sorted oldest first. Returns: -------- hsize_removed (int): units of history to be removed rm_files ((mod_ts, num_commands, path, fsize)[]): list of files to remove.
def _xhj_gc_commands_to_rmfiles(hsize, files): """Return number of units and list of history files to remove to get under the limit, Parameters: ----------- hsize (int): units of history, # of commands in this case. files ((mod_ts, num_commands, path)[], fsize): history files, sorted oldest first. Returns: -------- hsize_removed (int): units of history to be removed rm_files ((mod_ts, num_commands, path, fsize)[]): list of files to remove. """ n = 0 ncmds = 0 for _, fcmds, _, _ in reversed(files): # `files` comes in with empty files included (now), don't need special handling to gc them here. if ncmds + fcmds > hsize: break ncmds += fcmds n += 1 cmds_removed = 0 files_removed = files[:-n] for _, fcmds, _, _ in files_removed: cmds_removed += fcmds return cmds_removed, files_removed
Return the number and list of history files to remove to get under the file limit.
def _xhj_gc_files_to_rmfiles(hsize, files): """Return the number and list of history files to remove to get under the file limit.""" rmfiles = files[:-hsize] if len(files) > hsize else [] return len(rmfiles), rmfiles
Return excess duration and list of history files to remove to get under the age limit.
def _xhj_gc_seconds_to_rmfiles(hsize, files): """Return excess duration and list of history files to remove to get under the age limit.""" now = time.time() n = 0 for ts, _, _, _ in files: if (now - ts) < hsize: break n += 1 rmfiles = files[:n] size_over = now - hsize - rmfiles[0][0] if n > 0 else 0 return size_over, rmfiles
Return the history files to remove to get under the byte limit.
def _xhj_gc_bytes_to_rmfiles(hsize, files): """Return the history files to remove to get under the byte limit.""" n = 0 nbytes = 0 for _, _, _, fsize in reversed(files): if nbytes + fsize > hsize: break nbytes += fsize n += 1 bytes_removed = 0 files_removed = files[:-n] for _, _, _, fsize in files_removed: bytes_removed += fsize return bytes_removed, files_removed
Find and return the history files. Optionally sort files by modify time.
def _xhj_get_history_files(sort=True, newest_first=False):
    """Find and return the JSON history files, optionally sorted by
    modification time.
    """
    data_dirs = [
        _xhj_get_data_dir(),
        XSH.env.get("XONSH_DATA_DIR"),  # backwards compatibility, remove in the future
    ]
    files = []
    for data_dir in data_dirs:
        data_dir = xt.expanduser_abs_path(data_dir)
        try:
            entries = os.listdir(data_dir)
        except OSError:
            if XSH.env.get("XONSH_DEBUG"):
                xt.print_exception(
                    f"Could not collect xonsh history json files from {data_dir}"
                )
            continue
        files.extend(
            os.path.join(data_dir, entry)
            for entry in entries
            if entry.startswith("xonsh-") and entry.endswith(".json")
        )
    if sort:
        files.sort(key=os.path.getmtime, reverse=newest_first)

    # a custom history file is always surfaced first
    custom_history_file = XSH.env.get("XONSH_HISTORY_FILE", None)
    if custom_history_file:
        custom_history_file = xt.expanduser_abs_path(custom_history_file)
        if custom_history_file not in files:
            files.insert(0, custom_history_file)
    return files
Construct the history backend object.
def construct_history(backend=None, **kwargs) -> "History":
    """Construct and return the history backend object.

    ``backend`` may be a registered backend name, a History subclass, or an
    already-constructed History instance (returned as-is).
    """
    env = XSH.env
    backend = backend or env.get("XONSH_HISTORY_BACKEND", "json")
    if isinstance(backend, str) and backend in HISTORY_BACKENDS:
        history_cls = HISTORY_BACKENDS[backend]
    elif xt.is_class(backend):
        history_cls = backend
    elif isinstance(backend, History):
        return backend
    else:
        print(
            f"Unknown history backend: {backend}. Using JSON version",
            file=sys.stderr,
        )
        history_cls = JsonHistory
    return history_cls(**kwargs)
Returns history items of current session.
def _xh_session_parser(hist=None, newest_first=False, **kwargs): """Returns history items of current session.""" if hist is None: hist = XSH.history return hist.items()
Returns all history items.
def _xh_all_parser(hist=None, newest_first=False, **kwargs): """Returns all history items.""" if hist is None: hist = XSH.history return hist.all_items(newest_first=newest_first)
Return the path of the history file from the value of the envvar HISTFILE.
def _xh_find_histfile_var(file_list, default=None):
    """Return the path of the history file, parsed from a ``HISTFILE=``
    assignment found in any of ``file_list`` (e.g. shell rc files), or
    ``default`` when no valid assignment is found.
    """
    for candidate in file_list:
        candidate = xt.expanduser_abs_path(candidate)
        if not os.path.isfile(candidate):
            continue
        with open(candidate) as rc_file:
            for line in rc_file:
                if not line.startswith("HISTFILE="):
                    continue
                hist_file = line.split("=", 1)[1].strip("'\"\n")
                hist_file = xt.expanduser_abs_path(hist_file)
                if os.path.isfile(hist_file):
                    return hist_file
    # no rc file declared a usable HISTFILE; fall back to the default
    if default:
        default = xt.expanduser_abs_path(default)
        if os.path.isfile(default):
            return default
Yield commands from bash history file
def _xh_bash_hist_parser(location=None, **kwargs): """Yield commands from bash history file""" if location is None: location = _xh_find_histfile_var( [os.path.join("~", ".bashrc"), os.path.join("~", ".bash_profile")], os.path.join("~", ".bash_history"), ) if location: with open(location, errors="backslashreplace") as bash_hist: for ind, line in enumerate(bash_hist): yield {"inp": line.rstrip(), "ts": 0.0, "ind": ind} else: print("No bash history file", file=sys.stderr)
Yield commands from zsh history file
def _xh_zsh_hist_parser(location=None, **kwargs): """Yield commands from zsh history file""" if location is None: location = _xh_find_histfile_var( [os.path.join("~", ".zshrc"), os.path.join("~", ".zprofile")], os.path.join("~", ".zsh_history"), ) if location: with open(location, errors="backslashreplace") as zsh_hist: for ind, line in enumerate(zsh_hist): if line.startswith(":"): try: start_time, command = line.split(";", 1) except ValueError: # Invalid history entry continue try: start_time = float(start_time.split(":")[1]) except ValueError: start_time = 0.0 yield {"inp": command.rstrip(), "ts": start_time, "ind": ind} else: yield {"inp": line.rstrip(), "ts": 0.0, "ind": ind} else: print("No zsh history file found", file=sys.stderr)
Yield only the commands between start and end time.
def _xh_filter_ts(commands, start_time, end_time): """Yield only the commands between start and end time.""" for cmd in commands: if start_time <= cmd["ts"] < end_time: yield cmd
Get the requested portion of shell history. Parameters ---------- session: {'session', 'all', 'xonsh', 'bash', 'zsh'} The history session to get. slices : list of slice-like objects, optional Get only portions of history. start_time, end_time: float, optional Filter commands by timestamp. location: string, optional The history file location (bash or zsh) Returns ------- generator A filtered list of commands
def _xh_get_history(
    session="session",
    *,
    slices=None,
    datetime_format=None,
    start_time=None,
    end_time=None,
    location=None,
):
    """Get the requested portion of shell history.

    Parameters
    ----------
    session: {'session', 'all', 'xonsh', 'bash', 'zsh'}
        The history session to get.
    slices : list of slice-like objects, optional
        Get only portions of history.
    start_time, end_time: float, optional
        Filter commands by timestamp.
    location: string, optional
        The history file location (bash or zsh)

    Returns
    -------
    generator
       A filtered list of commands
    """
    cmds = []
    for i, item in enumerate(_XH_HISTORY_SESSIONS[session](location=location)):
        item["ind"] = i
        cmds.append(item)
    if slices:
        # transform/check all slices
        cmds = xt.get_portions(cmds, [xt.ensure_slice(s) for s in slices])
    if start_time or end_time:
        if start_time is None:
            start_time = 0.0
        else:
            start_time = xt.ensure_timestamp(start_time, datetime_format)
        if end_time is None:
            end_time = float("inf")
        else:
            end_time = xt.ensure_timestamp(end_time, datetime_format)
        cmds = _xh_filter_ts(cmds, start_time, end_time)
    return cmds
Create Table for history items. Columns: info - JSON formatted, reserved for future extension. frequency - in case of HISTCONTROL=erasedups, it tracks the frequency of the inputs. helps in sorting autocompletion
def _xh_sqlite_create_history_table(cursor):
    """Create Table for history items.

    Columns:
        info - JSON formatted, reserved for future extension.
        frequency - in case of HISTCONTROL=erasedups,
        it tracks the frequency of the inputs. helps in sorting autocompletion
    """
    # Run the DDL at most once per session: the flag is cached as an
    # attribute on XH_SQLITE_CACHE and set at the end of this block.
    if not getattr(XH_SQLITE_CACHE, XH_SQLITE_CREATED_SQL_TBL, False):
        cursor.execute(
            f"""
            CREATE TABLE IF NOT EXISTS {XH_SQLITE_TABLE_NAME}
                (inp TEXT,
                 rtn INTEGER,
                 tsb REAL,
                 tse REAL,
                 sessionid TEXT,
                 out TEXT,
                 info TEXT,
                 frequency INTEGER default 1,
                 cwd TEXT
                )
            """
        )

        # add frequency column if not exists for backward compatibility
        # (OperationalError means the column is already there)
        try:
            cursor.execute(
                "ALTER TABLE "
                + XH_SQLITE_TABLE_NAME
                + " ADD COLUMN frequency INTEGER default 1"
            )
        except sqlite3.OperationalError:
            pass

        # add path column if not exists for backward compatibility
        try:
            cursor.execute(
                "ALTER TABLE " + XH_SQLITE_TABLE_NAME + " ADD COLUMN cwd TEXT"
            )
        except sqlite3.OperationalError:
            pass

        # add index on inp. since we query when erasedups is True
        cursor.execute(
            f"""\
            CREATE INDEX IF NOT EXISTS idx_inp_history
            ON {XH_SQLITE_TABLE_NAME}(inp);"""
        )

        # mark that this function ran for this session
        setattr(XH_SQLITE_CACHE, XH_SQLITE_CREATED_SQL_TBL, True)
handy function to run insert query
def _sql_insert(cursor, values):
    # type: (sqlite3.Cursor, dict) -> None
    """Insert one row into the history table; ``values`` maps column
    names to the values for that row."""
    columns = ", ".join(values)
    placeholders = ", ".join("?" * len(values))
    statement = f"INSERT INTO {XH_SQLITE_TABLE_NAME} ({columns}) VALUES ({placeholders});"
    cursor.execute(statement, tuple(values.values()))
Wipe the current session's entries from the database.
def xh_sqlite_wipe_session(sessionid=None, filename=None):
    """Wipe the current session's entries from the database."""
    with _xh_sqlite_get_conn(filename=filename) as conn:
        cursor = conn.cursor()
        _xh_sqlite_create_history_table(cursor)
        cursor.execute(
            "DELETE FROM xonsh_history WHERE sessionid = ?", (str(sessionid),)
        )
Deletes entries from the database where the input matches a pattern.
def xh_sqlite_delete_input_matching(pattern, filename=None):
    """Delete entries from the database whose input matches ``pattern``.

    Parameters
    ----------
    pattern
        A compiled regex; ``pattern.match(inp)`` selects rows to delete.
    filename : str, optional
        Database file to operate on (defaults to the standard history db).
    """
    with _xh_sqlite_get_conn(filename=filename) as conn:
        c = conn.cursor()
        _xh_sqlite_create_history_table(c)
        for inp, *_ in _xh_sqlite_get_records(c):
            if pattern.match(inp):
                # Use a bound parameter rather than interpolating the input
                # into the SQL text: history entries containing quotes
                # previously broke the statement (and were SQL-injectable).
                c.execute("DELETE FROM xonsh_history WHERE inp = ?", (inp,))
Utility for converting an object to an iterable. Parameters ---------- iterable_or_scalar : anything Returns ------- l : iterable If `obj` was None, return the empty tuple. If `obj` was not iterable returns a 1-tuple containing `obj`. Otherwise return `obj` Notes ----- Although string types are iterable in Python, we are treating them as not iterable in this method. Thus, as_iterable(string) returns (string, ) Examples --------- >>> as_iterable(1) (1,) >>> as_iterable([1, 2, 3]) [1, 2, 3] >>> as_iterable("my string") ("my string", )
def as_iterable(iterable_or_scalar):
    """Utility for converting an object to an iterable.

    Parameters
    ----------
    iterable_or_scalar : anything

    Returns
    -------
    l : iterable
        If `obj` was None, return the empty tuple.
        If `obj` was not iterable returns a 1-tuple containing `obj`.
        Otherwise return `obj`

    Notes
    -----
    Although string types are iterable in Python, we are treating them as not
    iterable in this method.  Thus, as_iterable(string) returns (string, )

    Examples
    --------
    >>> as_iterable(1)
    (1,)
    >>> as_iterable([1, 2, 3])
    [1, 2, 3]
    >>> as_iterable("my string")
    ('my string',)
    """
    if iterable_or_scalar is None:
        return ()
    if isinstance(iterable_or_scalar, (str, bytes)):
        # deliberately treat strings/bytes as scalars
        return (iterable_or_scalar,)
    if hasattr(iterable_or_scalar, "__iter__"):
        return iterable_or_scalar
    return (iterable_or_scalar,)
Remove a directory, even if it has read-only files (Windows). Git creates read-only files that must be removed on teardown. See https://stackoverflow.com/questions/2656322 for more info. Parameters ---------- dirname : str Directory to be removed force : bool If True force removal, defaults to False
def rmtree(dirname, force=False):
    """Remove a directory, even if it has read-only files (Windows).
    Git creates read-only files that must be removed on teardown.  See
    https://stackoverflow.com/questions/2656322 for more info.

    Parameters
    ----------
    dirname : str
        Directory to be removed
    force : bool
        If True force removal, defaults to False
    """
    if sys.platform == "win32":
        subproc_uncaptured(["rmdir", "/S/Q", dirname])
        return
    flags = "-r" + ("f" if force else "")
    subproc_uncaptured(["rm", flags, dirname])
Drop in replacement for ``subprocess.run`` like functionality
def run(cmd, cwd=None, check=False):
    """Drop in replacement for ``subprocess.run`` like functionality"""
    env = XSH.env
    if cwd is None:
        with env.swap(RAISE_SUBPROC_ERROR=check):
            return subproc_captured_hiddenobject(cmd)
    # temporarily switch directory for the duration of the subprocess
    with indir(cwd), env.swap(RAISE_SUBPROC_ERROR=check):
        return subproc_captured_hiddenobject(cmd)
Drop in replacement for ``subprocess.check_call`` like functionality
def check_call(cmd, cwd=None):
    """Drop in replacement for ``subprocess.check_call`` like functionality"""
    proc = run(cmd, cwd=cwd, check=True)
    return proc.returncode
Drop in replacement for ``subprocess.check_output`` like functionality
def check_output(cmd, cwd=None):
    """Drop in replacement for ``subprocess.check_output`` like functionality"""
    env = XSH.env
    if cwd is None:
        with env.swap(RAISE_SUBPROC_ERROR=True):
            captured = subproc_captured_stdout(cmd)
    else:
        with indir(cwd), env.swap(RAISE_SUBPROC_ERROR=True):
            captured = subproc_captured_stdout(cmd)
    # match subprocess.check_output, which returns bytes
    return captured.encode("utf-8")
Ensures that x is an AST node with elements.
def ensure_has_elts(x, lineno=None, col_offset=None):
    """Ensure that ``x`` is an AST node with an ``elts`` attribute,
    wrapping it in a ``Tuple`` when it is not."""
    if has_elts(x):
        return x
    if not isinstance(x, Iterable):
        x = [x]
    # default the location to that of the first element
    lineno = x[0].lineno if lineno is None else lineno
    col_offset = x[0].col_offset if col_offset is None else col_offset
    return ast.Tuple(elts=x, ctx=ast.Load(), lineno=lineno, col_offset=col_offset)
Creates the AST node for an empty list.
def empty_list(lineno=None, col=None):
    """Create the AST node for an empty list literal."""
    node = ast.List(elts=[], ctx=ast.Load(), lineno=lineno, col_offset=col)
    return node
Creates the AST node for a binary operation.
def binop(x, op, y, lineno=None, col=None):
    """Create the AST node for the binary operation ``x <op> y``,
    defaulting the location to that of ``x``."""
    if lineno is None:
        lineno = x.lineno
    if col is None:
        col = x.col_offset
    return ast.BinOp(left=x, op=op, right=y, lineno=lineno, col_offset=col)
Creates the AST node for calling the 'splitlines' attribute of an object, nominally a string.
def call_split_lines(x, lineno=None, col=None):
    """Create the AST node for calling ``x.splitlines()``, where ``x`` is
    nominally a string-valued node."""
    attr_node = ast.Attribute(
        value=x, attr="splitlines", ctx=ast.Load(), lineno=lineno, col_offset=col
    )
    # starargs/kwargs are legacy Call fields; kept to match the rest of
    # this module's node construction
    return ast.Call(
        func=attr_node,
        args=[],
        keywords=[],
        starargs=None,
        kwargs=None,
        lineno=lineno,
        col_offset=col,
    )
Creates the AST node for the following expression:: [x] if isinstance(x, str) else x Somewhat useful.
def ensure_list_from_str_or_list(x, lineno=None, col=None):
    """Create the AST node for the following expression::

        [x] if isinstance(x, str) else x

    Somewhat useful.
    """
    isinstance_call = ast.Call(
        func=ast.Name(id="isinstance", ctx=ast.Load(), lineno=lineno, col_offset=col),
        args=[x, ast.Name(id="str", ctx=ast.Load(), lineno=lineno, col_offset=col)],
        keywords=[],
        starargs=None,
        kwargs=None,
        lineno=lineno,
        col_offset=col,
    )
    listified = ast.List(elts=[x], ctx=ast.Load(), lineno=lineno, col_offset=col)
    return ast.IfExp(
        test=isinstance_call,
        body=listified,
        orelse=x,
        lineno=lineno,
        col_offset=col,
    )
Creates the AST node for calling the __xonsh__.help() function.
def xonsh_help(x, lineno=None, col=None):
    """Create the AST node for calling ``__xonsh__.help(x)``."""
    return xonsh_call("__xonsh__.help", [x], lineno=lineno, col=col)
Creates the AST node for calling the __xonsh__.superhelp() function.
def xonsh_superhelp(x, lineno=None, col=None):
    """Create the AST node for calling ``__xonsh__.superhelp(x)``."""
    return xonsh_call("__xonsh__.superhelp", [x], lineno=lineno, col=col)
Recursively sets ctx to ast.Load()
def load_ctx(x):
    """Recursively set ``ctx`` to ``ast.Load()`` on ``x`` and any nested
    tuple/list/starred elements."""
    if hasattr(x, "ctx"):
        x.ctx = ast.Load()
        if isinstance(x, (ast.Tuple, ast.List)):
            for elt in x.elts:
                load_ctx(elt)
        elif isinstance(x, ast.Starred):
            load_ctx(x.value)
Recursively sets ctx to ast.Store()
def store_ctx(x):
    """Recursively set ``ctx`` to ``ast.Store()`` on ``x`` and any nested
    tuple/list/starred elements."""
    if hasattr(x, "ctx"):
        x.ctx = ast.Store()
        if isinstance(x, (ast.Tuple, ast.List)):
            for elt in x.elts:
                store_ctx(elt)
        elif isinstance(x, ast.Starred):
            store_ctx(x.value)
Recursively sets ctx to ast.Del()
def del_ctx(x):
    """Recursively set ``ctx`` to ``ast.Del()`` on ``x`` and any nested
    tuple/list/starred elements."""
    if hasattr(x, "ctx"):
        x.ctx = ast.Del()
        if isinstance(x, (ast.Tuple, ast.List)):
            for elt in x.elts:
                del_ctx(elt)
        elif isinstance(x, ast.Starred):
            del_ctx(x.value)
Extracts the line and column number for a node that may have an opening parenthesis, brace, or bracket.
def lopen_loc(x):
    """Extract the (line, column) of a node, preferring the recorded
    position of its opening parenthesis, brace, or bracket when present."""
    if hasattr(x, "_lopen_lineno"):
        lineno = x._lopen_lineno
    else:
        lineno = x.lineno
    if hasattr(x, "_lopen_col"):
        col = x._lopen_col
    else:
        col = x.col_offset
    return lineno, col
Returns True if a node has literal '*' for globbing.
def hasglobstar(x):
    """Return True if a node (or list of nodes) contains a literal ``*``
    for globbing."""
    if ast.is_const_str(x):
        return "*" in x.value
    if isinstance(x, list):
        return any(hasglobstar(elt) for elt in x)
    return False
Returns (line_continuation, replacement, diff). Diff is the diff in length for each replacement.
def LINE_CONT_REPLACEMENT_DIFF():
    """Return ``(line_continuation, replacement, diff)``; ``diff`` is the
    length change introduced by each replacement.
    """
    line_cont = get_line_continuation()
    # " \\" (interactive windows) is replaced by a space so adjacent words
    # stay separated; any other continuation is dropped entirely
    replacement = " " if line_cont == " \\" else ""
    line_cont += "\n"
    return line_cont, replacement, len(replacement) - len(line_cont)
If ``x`` represents a value that can be assigned to, return ``None``. Otherwise, return a string describing the object. For use in generating meaningful syntax errors.
def _not_assignable(x, augassign=False):
    """
    If ``x`` represents a value that can be assigned to, return ``None``.
    Otherwise, return a string describing the object.  For use in
    generating meaningful syntax errors.

    Parameters
    ----------
    x : AST node
        The candidate assignment target.
    augassign : bool
        True when checking an augmented assignment (``+=`` etc.), in which
        case tuple/list targets are themselves disallowed.
    """
    # NOTE: order matters -- the augassign tuple/list case must come before
    # the recursive tuple/list case below.
    if augassign and isinstance(x, (ast.Tuple, ast.List)):
        return "literal"
    elif isinstance(x, (ast.Tuple, ast.List)):
        if len(x.elts) == 0:
            return "()"
        # a tuple/list target is assignable iff every element is
        for i in x.elts:
            res = _not_assignable(i)
            if res is not None:
                return res
    elif any(
        [
            isinstance(x, (ast.Set, ast.Dict)),
            xast.is_const_num(x),
            xast.is_const_str(x),
            xast.is_const_bytes(x),
        ]
    ):
        return "literal"
    elif isinstance(x, ast.Call):
        return "function call"
    elif isinstance(x, ast.Lambda):
        return "lambda"
    elif isinstance(x, (ast.BoolOp, ast.BinOp, ast.UnaryOp)):
        return "operator"
    elif isinstance(x, ast.IfExp):
        return "conditional expression"
    elif isinstance(x, ast.ListComp):
        return "list comprehension"
    elif isinstance(x, ast.DictComp):
        return "dictionary comprehension"
    elif isinstance(x, ast.SetComp):
        return "set comprehension"
    elif isinstance(x, ast.GeneratorExp):
        return "generator expression"
    elif isinstance(x, ast.Compare):
        return "comparison"
    elif isinstance(x, ast.Name) and x.id in _all_keywords:
        return "keyword"
    elif xast.is_const_name(x):
        return "keyword"
    # falls through (returns None) for assignable targets such as
    # Name, Attribute, Subscript, and Starred
\s+
def t_CPP_WS(t):
    # ply-style lexer rule: the raw string below is the token's regex and
    # must not be edited.  Matches any run of whitespace.
    r'\s+'
    # keep the lexer's line counter in sync with newlines consumed
    t.lexer.lineno += t.value.count("\n")
    return t
(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)
def CPP_INTEGER(t):
    # ply-style lexer rule (defined without the usual ``t_`` prefix --
    # presumably aliased elsewhere in the full file; confirm).  The raw
    # string below is the token regex: hex (0x/0X) or decimal integer with
    # optional C suffixes (u/U, l/L, ul/lu combinations).
    r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
    return t
\"([^\\\n]|(\\(.|\n)))*?\"
def t_CPP_STRING(t):
    # ply-style lexer rule; regex matches a C string literal, allowing
    # escape sequences (including escaped newlines), non-greedy to the
    # closing quote.
    r'\"([^\\\n]|(\\(.|\n)))*?\"'
    # escaped newlines inside the literal still advance the line counter
    t.lexer.lineno += t.value.count("\n")
    return t
(L)?\'([^\\\n]|(\\(.|\n)))*?\'
def t_CPP_CHAR(t):
    # ply-style lexer rule; regex matches a C character literal with an
    # optional wide-char ``L`` prefix and escape sequences.
    r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
    # escaped newlines inside the literal still advance the line counter
    t.lexer.lineno += t.value.count("\n")
    return t
(/\*(.|\n)*?\*/)
def t_CPP_COMMENT1(t):
    # ply-style lexer rule: C block comment /* ... */ (non-greedy, may
    # span multiple lines).
    r'(/\*(.|\n)*?\*/)'
    ncr = t.value.count("\n")
    t.lexer.lineno += ncr
    # replace with one space or a number of '\n' so downstream tokenizing
    # sees the comment as whitespace but line numbers stay correct
    t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
    return t
(//.*?(\n|$))
def t_CPP_COMMENT2(t): r'(//.*?(\n|$))' # replace with '/n' t.type = 'CPP_WS'; t.value = '\n' return t
/\*(.|\n)*?\*/
def t_COMMENT(t):
    # ply-style lexer rule: C block comment /* ... */, possibly spanning
    # multiple lines; keep the line counter in sync.
    r'/\*(.|\n)*?\*/'
    t.lexer.lineno += t.value.count('\n')
    return t
//.*\n
def t_CPPCOMMENT(t):
    # ply-style lexer rule: C++ line comment terminated by a newline.
    # NOTE(review): the pattern requires a trailing '\n', so a // comment on
    # the last line of input without a final newline will not match --
    # confirm this is intended.
    r'//.*\n'
    t.lexer.lineno += 1
    return t
Attempts to read lines without throwing an error.
def safe_readlines(handle, hint=-1):
    """Attempt to read lines from ``handle``; return ``[]`` on OSError."""
    try:
        return handle.readlines(hint)
    except OSError:
        return []
Attempts to find if the handle is readable without throwing an error.
def safe_readable(handle):
    """Report whether ``handle`` is readable, swallowing OSError and
    ValueError (e.g. from closed streams)."""
    try:
        return handle.readable()
    except (OSError, ValueError):
        return False
Sends SIGCONT to a process if possible.
def resume_process(p):
    """Send SIGCONT to a process, when the platform supports it."""
    if not hasattr(p, "send_signal"):
        return
    # SIGCONT is only meaningful on real POSIX (not MSYS/Cygwin)
    if not xp.ON_POSIX or xp.ON_MSYS or xp.ON_CYGWIN:
        return
    try:
        p.send_signal(signal.SIGCONT)
    except PermissionError:
        pass
Determines whether a file descriptor is still writable by trying to write an empty string and seeing if it fails.
def still_writable(fd):
    """Determine whether file descriptor ``fd`` is still writable by
    attempting a zero-length write."""
    try:
        os.write(fd, b"")
    except OSError:
        return False
    return True
Attempts to safely flush a file handle, returns success bool.
def safe_flush(handle):
    """Attempt to flush a file handle; return whether it succeeded."""
    try:
        handle.flush()
    except OSError:
        return False
    return True
Proxies may return a variety of outputs. This handles them generally. Parameters ---------- r : tuple, str, int, or None Return from proxy function stdout : file-like Current stdout stream stdout : file-like Current stderr stream Returns ------- cmd_result : int The return code of the proxy
def parse_proxy_return(r, stdout, stderr):
    """Normalize the return value of an alias/proxy function into a return code.

    Parameters
    ----------
    r : tuple, str, int, or None
        Return from proxy function
    stdout : file-like
        Current stdout stream
    stderr : file-like
        Current stderr stream

    Returns
    -------
    cmd_result : int
        The return code of the proxy
    """
    if r is None:
        return 0
    # strings are written to stdout (checked before Sequence: str is one)
    if isinstance(r, str):
        stdout.write(r)
        stdout.flush()
        return 0
    # a bare int (or bool) is the return code itself
    if isinstance(r, int):
        return r
    if isinstance(r, cabc.Sequence):
        # (out, err, rc) with trailing elements optional; None entries skipped
        out = r[0] if len(r) > 0 else None
        err = r[1] if len(r) > 1 else None
        if out is not None:
            stdout.write(str(out))
            stdout.flush()
        if err is not None:
            stderr.write(str(err))
            stderr.flush()
        if len(r) > 2 and isinstance(r[2], int):
            return r[2]
        return 0
    # for the random object: stringify it onto stdout
    stdout.write(str(r))
    stdout.flush()
    return 0
Calls a proxy function which takes no parameters.
def proxy_zero(f, args, stdin, stdout, stderr, spec, stack):
    """Invoke an alias callable *f* that accepts no arguments; all other
    parameters are part of the uniform proxy signature and are ignored."""
    return f()
Calls a proxy function which takes one parameter: args
def proxy_one(f, args, stdin, stdout, stderr, spec, stack):
    """Invoke an alias callable *f* that accepts one parameter: ``args``."""
    return f(args)
Calls a proxy function which takes two parameters: args and stdin.
def proxy_two(f, args, stdin, stdout, stderr, spec, stack):
    """Invoke an alias callable *f* that accepts two parameters:
    ``args`` and ``stdin``."""
    return f(args, stdin)
Calls a proxy function which takes three parameters: args, stdin, stdout.
def proxy_three(f, args, stdin, stdout, stderr, spec, stack):
    """Invoke an alias callable *f* that accepts three parameters:
    ``args``, ``stdin``, and ``stdout``."""
    return f(args, stdin, stdout)
Calls a proxy function which takes four parameters: args, stdin, stdout, and stderr.
def proxy_four(f, args, stdin, stdout, stderr, spec, stack):
    """Invoke an alias callable *f* that accepts four parameters:
    ``args``, ``stdin``, ``stdout``, and ``stderr``."""
    return f(args, stdin, stdout, stderr)
Calls a proxy function which takes five parameters: args, stdin, stdout, stderr, and spec.
def proxy_five(f, args, stdin, stdout, stderr, spec, stack):
    """Invoke an alias callable *f* that accepts five parameters:
    ``args``, ``stdin``, ``stdout``, ``stderr``, and ``spec``."""
    return f(args, stdin, stdout, stderr, spec)
Dispatches the appropriate proxy function based on the number of args.
def partial_proxy(f):
    """Dispatches the appropriate proxy function based on the number of args.

    Inspects *f*'s signature and counts the positional parameters (plus any
    keyword-only parameters whose names appear in ``xt.ALIAS_KWARG_NAMES``),
    then wraps *f* in the matching ``PROXIES[n]`` adapter so every alias can
    be called with the full 7-argument proxy signature.
    """
    numargs = 0
    for name, param in inspect.signature(f).parameters.items():
        # handle *args/**kwargs signature: treat as accepting everything
        if param.kind in {param.VAR_KEYWORD, param.VAR_POSITIONAL}:
            numargs = 6
            break
        if (
            param.kind == param.POSITIONAL_ONLY
            or param.kind == param.POSITIONAL_OR_KEYWORD
        ):
            numargs += 1
        elif name in xt.ALIAS_KWARG_NAMES and param.kind == param.KEYWORD_ONLY:
            # recognized alias keyword (e.g. stdin/stdout/...) counts too
            numargs += 1
    if numargs < 6:
        return functools.partial(PROXIES[numargs], f)
    elif numargs == 6:
        # don't need to partial.
        return f
    else:
        e = "Expected proxy with 6 or fewer arguments for {}, not {}"
        raise xt.XonshError(e.format(", ".join(xt.ALIAS_KWARG_NAMES), numargs))
Reads 1 kb of data from a file descriptor into a queue. If this ends or fails, it flags the calling reader object as closed.
def populate_fd_queue(reader, fd, queue):
    """Pump up-to-1KB chunks read from *fd* into *queue*.

    Loops until EOF or a read error, at which point *reader* is flagged as
    closed and the function returns.
    """
    while True:
        try:
            chunk = os.read(fd, 1024)
        except OSError:
            reader.closed = True
            return
        if not chunk:
            # EOF
            reader.closed = True
            return
        queue.put(chunk)
Reads bytes from the file descriptor and copies them into a buffer. The reads happen in parallel using the pread() syscall; which is only available on POSIX systems. If the read fails for any reason, the reader is flagged as closed.
def populate_buffer(reader, fd, buffer, chunksize):
    """Copy the contents of *fd* into *buffer* using positional pread() calls.

    Reads *chunksize* bytes at a time at monotonically increasing offsets;
    pread() is POSIX-only.  On EOF or any read failure, *reader* is flagged
    as closed and the copy stops.
    """
    offset = 0
    while True:
        try:
            chunk = os.pread(fd, chunksize, offset)
        except OSError:
            reader.closed = True
            return
        if not chunk:
            # EOF
            reader.closed = True
            return
        buffer.write(chunk)
        offset += len(chunk)
Reads bytes from the file descriptor and puts lines into the queue. The reads happened in parallel, using xonsh.winutils.read_console_output_character(), and is thus only available on windows. If the read fails for any reason, the reader is flagged as closed.
def populate_console(reader, fd, buffer, chunksize, queue, expandsize=None):
    """Reads bytes from the file descriptor and puts lines into the queue.
    The reads happen in parallel using
    xonsh.winutils.read_console_output_character(), and this is thus only
    available on Windows.  If the read fails for any reason, the reader is
    flagged as closed.

    Parameters: *reader* holds ``closed`` and ``timeout`` state; *fd* is the
    console descriptor; *buffer* is the ctypes read buffer; *chunksize* the
    per-read character count; *queue* receives complete lines as bytes.
    """
    # OK, so this function is super annoying because Windows stores its
    # buffers as a 2D regular, dense array -- without trailing newlines.
    # Meanwhile, we want to add *lines* to the queue. Also, as is typical
    # with parallel reads, the entire buffer that you ask for may not be
    # filled. Thus we have to deal with the full generality.
    #   1. reads may end in the middle of a line
    #   2. excess whitespace at the end of a line may not be real, unless
    #   3. you haven't read to the end of the line yet!
    # So there are alignment issues everywhere.  Also, Windows will
    # automatically read past the current cursor position, even though there
    # is presumably nothing to see there.
    #
    # These chunked reads basically need to happen like this because,
    #   a. The default buffer size is HUGE for the console (90k lines x 120
    #      cols) and so we can't just read in everything at the end and see
    #      what we care about without a noticeable performance hit.
    #   b. Even with this huge size, it is still possible to write more lines
    #      than this, so we should scroll along with the console.
    # Unfortunately, because we do not have control over the terminal
    # emulator, it is not possible to compute how far back we should set the
    # beginning read position because we don't know how many characters have
    # been popped off the top of the buffer. If we did somehow know this
    # number we could do something like the following:
    #
    #    new_offset = (y*cols) + x
    #    if new_offset == max_offset:
    #        new_offset -= scrolled_offset
    #        x = new_offset%cols
    #        y = new_offset//cols
    #        continue
    #
    # So this method is imperfect and only works as long as the screen has
    # room to expand to.  Thus the trick here is to expand the screen size
    # when we get close enough to the end of the screen. There remain some
    # async issues related to not being able to set the cursor position,
    # but they just affect the alignment / capture of the output of the
    # first command run after a screen resize.
    if expandsize is None:
        expandsize = 100 * chunksize
    x, y, cols, rows = posize = xli.winutils.get_position_size(fd)
    pre_x = pre_y = -1
    orig_posize = posize
    offset = (cols * y) + x
    max_offset = (rows - 1) * cols
    # I believe that there is a bug in PTK that if we reset the
    # cursor position, the cursor on the next prompt is accidentally on
    # the next line.  If this is fixed, uncomment the following line.
    # if max_offset < offset + expandsize:
    #     rows, max_offset, orig_posize = _expand_console_buffer(
    #         cols, max_offset, expandsize,
    #         orig_posize, fd)
    #     winutils.set_console_cursor_position(x, y, fd=fd)
    while True:
        posize = xli.winutils.get_position_size(fd)
        offset = (cols * y) + x
        if ((posize[1], posize[0]) <= (y, x) and posize[2:] == (cols, rows)) or (
            pre_x == x and pre_y == y
        ):
            # already at or ahead of the current cursor position.
            if reader.closed:
                break
            else:
                time.sleep(reader.timeout)
                continue
        elif max_offset <= offset + expandsize:
            # close to the end of the screen buffer: grow it and retry
            ecb = _expand_console_buffer(cols, max_offset, expandsize, orig_posize, fd)
            rows, max_offset, orig_posize = ecb
            continue
        elif posize[2:] == (cols, rows):
            # cursor updated but screen size is the same.
            pass
        else:
            # screen size changed, which is offset preserving
            orig_posize = posize
            cols, rows = posize[2:]
            x = offset % cols
            y = offset // cols
            pre_x = pre_y = -1
            max_offset = (rows - 1) * cols
            continue
        try:
            buf = xli.winutils.read_console_output_character(
                x=x, y=y, fd=fd, buf=buffer, bufsize=chunksize, raw=True
            )
        except OSError:
            reader.closed = True
            break
        # cursor position and offset
        if not reader.closed:
            buf = buf.rstrip()
        nread = len(buf)
        if nread == 0:
            time.sleep(reader.timeout)
            continue
        cur_x, cur_y = posize[0], posize[1]
        cur_offset = (cols * cur_y) + cur_x
        beg_offset = (cols * y) + x
        end_offset = beg_offset + nread
        if end_offset > cur_offset and cur_offset != max_offset:
            # drop characters read past the live cursor position
            buf = buf[: cur_offset - end_offset]
        # convert to lines
        xshift = cols - x
        yshift = (nread // cols) + (1 if nread % cols > 0 else 0)
        lines = [buf[:xshift]]
        lines += [
            buf[l * cols + xshift : (l + 1) * cols + xshift] for l in range(yshift)  # noqa
        ]
        lines = [line for line in lines if line]
        if not lines:
            time.sleep(reader.timeout)
            continue
        # put lines in the queue
        nl = b"\n"
        for line in lines[:-1]:
            queue.put(line.rstrip() + nl)
        if len(lines[-1]) == xshift:
            # the last line filled its row completely, so it is a full line
            queue.put(lines[-1].rstrip() + nl)
        else:
            queue.put(lines[-1])
        # update x and y locations
        if (beg_offset + len(buf)) % cols == 0:
            new_offset = beg_offset + len(buf)
        else:
            new_offset = beg_offset + len(buf.rstrip())
        pre_x = x
        pre_y = y
        x = new_offset % cols
        y = new_offset // cols
        time.sleep(reader.timeout)
Closes a file handle in the safest way possible, and potentially storing the result.
def safe_fdclose(handle, cache=None):
    """Close *handle* as safely as possible, optionally memoizing the result.

    ``cache`` maps handles to a success flag; a handle already recorded as
    successfully closed is skipped.  ``None``, the standard streams, and
    file descriptors 0-2 (and -1) are never closed.
    """
    if cache is not None and cache.get(handle, False):
        return  # previously closed successfully -- nothing to do
    ok = True
    skip = (
        handle is None
        or handle is sys.stdin
        or handle is sys.stdout
        or handle is sys.stderr
        or (isinstance(handle, int) and handle < 3)
    )
    if not skip:
        try:
            if isinstance(handle, int):
                os.close(handle)
            else:
                handle.close()
        except OSError:
            ok = False
    if cache is not None:
        cache[handle] = ok
App execution aliases behave strangely on Windows and Python. Here we try to detect if a file is an app execution alias.
def is_app_execution_alias(fname):
    """Detect whether *fname* is a Windows "app execution alias".

    App execution aliases behave strangely with os.stat().  On Python 3.8+
    the reparse tag identifies them directly; otherwise fall back to the
    heuristic "stat() fails but the name appears in its parent directory".
    """
    path = pathlib.Path(fname)
    try:
        return path.stat().st_reparse_tag == stat.IO_REPARSE_TAG_APPEXECLINK
    except (AttributeError, OSError):
        # st_reparse_tag / IO_REPARSE_TAG_APPEXECLINK are Python 3.8+ and
        # Windows-only; os.stat(app_exec_alias) throws OSError on <= 3.7
        return not os.path.exists(path) and path.name in os.listdir(path.parent)
Given the name of a script outside the path, returns a list representing an appropriate subprocess command to execute the script or None if the argument is not readable or not a script. Raises PermissionError if the script is not executable.
def get_script_subproc_command(fname, args): """Given the name of a script outside the path, returns a list representing an appropriate subprocess command to execute the script or None if the argument is not readable or not a script. Raises PermissionError if the script is not executable. """ # make sure file is executable if not os.access(fname, os.X_OK): if not xp.ON_CYGWIN: raise PermissionError # explicitly look at all PATH entries for cmd w_path = os.getenv("PATH").split(":") w_fpath = list(map(lambda p: p + os.sep + fname, w_path)) if not any(list(map(lambda c: os.access(c, os.X_OK), w_fpath))): raise PermissionError if xp.ON_POSIX and not os.access(fname, os.R_OK): # on some systems, some important programs (e.g. sudo) will have # execute permissions but not read/write permissions. This enables # things with the SUID set to be run. Needs to come before _is_binary() # is called, because that function tries to read the file. return None elif _is_binary(fname): # if the file is a binary, we should call it directly return None if xp.ON_WINDOWS: # Windows can execute various filetypes directly # as given in PATHEXT _, ext = os.path.splitext(fname) if ext.upper() in XSH.env.get("PATHEXT"): return [fname] + args # find interpreter with open(fname, "rb") as f: first_line = f.readline().decode().strip() m = RE_SHEBANG.match(first_line) # xonsh is the default interpreter if m is None: interp = ["xonsh"] else: interp = m.group(1).strip() if len(interp) > 0: interp = shlex.split(interp) else: interp = ["xonsh"] if xp.ON_WINDOWS: o = [] for i in interp: o.extend(_un_shebang(i)) interp = o return interp + [fname] + args
Safely attempts to open a file for xonsh subprocs.
def safe_open(fname, mode, buffering=-1): """Safely attempts to open a file in for xonsh subprocs.""" # file descriptors try: return open(fname, mode, buffering=buffering) except PermissionError as ex: raise xt.XonshError(f"xonsh: {fname}: permission denied") from ex except FileNotFoundError as ex: raise xt.XonshError(f"xonsh: {fname}: no such file or directory") from ex except Exception as ex: raise xt.XonshError(f"xonsh: {fname}: unable to open file") from ex
Safely attempts to close an object.
def safe_close(x):
    """Close *x* if it is an open IOBase object, ignoring any error.

    Non-file objects and already-closed files are silently skipped.
    """
    if not isinstance(x, io.IOBase) or x.closed:
        return
    try:
        x.close()
    except Exception:
        pass
returns origin, mode, destination tuple
def _parse_redirects(r, loc=None):
    """returns origin, mode, destination tuple

    Splits a redirection token like ``2>``, ``out>all`` or ``>&1`` via
    ``_REDIR_REGEX`` and validates the combination; raises ``xt.XonshError``
    for forms that make no sense (e.g. a read with an origin or dest).
    """
    orig, mode, dest = _REDIR_REGEX.match(r).groups()
    # redirect to fd, e.g. ">&2"
    if dest.startswith("&"):
        try:
            dest = int(dest[1:])
            if loc is None:
                loc, dest = dest, ""  # NOQA
            else:
                e = f"Unrecognized redirection command: {r}"
                raise xt.XonshError(e)
        except (ValueError, xt.XonshError):
            raise
        except Exception:
            # non-integer "&..." suffix: fall through and treat literally
            pass
    mode = _MODES.get(mode, None)
    if mode == "r" and (len(orig) > 0 or len(dest) > 0):
        raise xt.XonshError(f"Unrecognized redirection command: {r}")
    elif mode in _WRITE_MODES and len(dest) > 0:
        raise xt.XonshError(f"Unrecognized redirection command: {r}")
    return orig, mode, dest
Returns stdin, stdout, stderr tuple of redirections.
def _redirect_streams(r, loc=None):
    """Returns stdin, stdout, stderr tuple of redirections.

    *r* is the redirection token and *loc* the target filename (when any);
    unset streams come back as None.
    """
    stdin = stdout = stderr = None
    no_ampersand = r.replace("&", "")
    # special case of redirecting stderr to stdout
    if no_ampersand in _E2O_MAP:
        stderr = subprocess.STDOUT
        return stdin, stdout, stderr
    elif no_ampersand in _O2E_MAP:
        stdout = 2  # using 2 as a flag, rather than using a file object
        return stdin, stdout, stderr
    # get streams
    orig, mode, dest = _parse_redirects(r)
    if mode == "r":
        stdin = safe_open(loc, mode)
    elif mode in _WRITE_MODES:
        if orig in _REDIR_ALL:
            # both streams share a single file object
            stdout = stderr = safe_open(loc, mode)
        elif orig in _REDIR_OUT:
            stdout = safe_open(loc, mode)
        elif orig in _REDIR_ERR:
            stderr = safe_open(loc, mode)
        else:
            raise xt.XonshError(f"Unrecognized redirection command: {r}")
    else:
        raise xt.XonshError(f"Unrecognized redirection command: {r}")
    return stdin, stdout, stderr
Transforms a command like ['ls', ('>', '/dev/null')] into ['ls', '>', '/dev/null'].
def _flatten_cmd_redirects(cmd): """Transforms a command like ['ls', ('>', '/dev/null')] into ['ls', '>', '/dev/null'].""" new_cmd = [] for c in cmd: if isinstance(c, tuple): new_cmd.extend(c) else: new_cmd.append(c) return new_cmd
Pauses a signal, as needed.
def default_signal_pauser(n, f):
    """Pauses a signal, as needed.

    Installed as a signal handler: ``n`` is the signal number and ``f`` the
    current stack frame (both unused); the process simply sleeps until the
    next signal arrives.
    """
    signal.pause()
Default subprocess preexec function for when there is no existing pipeline group.
def no_pg_xonsh_preexec_fn():
    """Default subprocess preexec function for when there is no existing
    pipeline group.

    Runs in the child between fork and exec: makes the child the leader of
    a new process group and installs the default SIGTSTP pauser so the job
    can be stopped/continued.
    """
    os.setpgrp()
    signal.signal(signal.SIGTSTP, default_signal_pauser)
Makes sure that a pipe file descriptor's properties are reasonable.
def _safe_pipe_properties(fd, use_tty=False):
    """Makes sure that a pipe file descriptor's properties are reasonable.

    Only acts when ``use_tty`` is True: disables the PTY's \\n -> \\r\\n
    translation and copies the current terminal's window size onto *fd*.
    """
    if not use_tty:
        return
    # due to some weird, long standing issue in Python, PTYs come out
    # replacing newline \n with \r\n. This causes issues for raw unix
    # protocols, like git and ssh, which expect unix line endings.
    # see https://mail.python.org/pipermail/python-list/2013-June/650460.html
    # for more details and the following solution.
    props = xli.termios.tcgetattr(fd)
    props[1] = props[1] & (~xli.termios.ONLCR) | xli.termios.ONLRET
    xli.termios.tcsetattr(fd, xli.termios.TCSANOW, props)
    # newly created PTYs have a standard size (24x80); set the size to the
    # same size as the current terminal
    winsize = None
    if sys.stdin.isatty():
        winsize = xli.fcntl.ioctl(sys.stdin.fileno(), xli.termios.TIOCGWINSZ, b"0000")
    elif sys.stdout.isatty():
        winsize = xli.fcntl.ioctl(sys.stdout.fileno(), xli.termios.TIOCGWINSZ, b"0000")
    elif sys.stderr.isatty():
        winsize = xli.fcntl.ioctl(sys.stderr.fileno(), xli.termios.TIOCGWINSZ, b"0000")
    if winsize is not None:
        xli.fcntl.ioctl(fd, xli.termios.TIOCSWINSZ, winsize)
Converts a list of cmds to a list of SubprocSpec objects that are ready to be executed.
def cmds_to_specs(cmds, captured=False, envs=None): """Converts a list of cmds to a list of SubprocSpec objects that are ready to be executed. """ # first build the subprocs independently and separate from the redirects i = 0 specs = [] redirects = [] for i, cmd in enumerate(cmds): if isinstance(cmd, str): redirects.append(cmd) else: env = envs[i] if envs is not None else None spec = SubprocSpec.build(cmd, captured=captured, env=env) spec.pipeline_index = i specs.append(spec) i += 1 # now modify the subprocs based on the redirects. for i, redirect in enumerate(redirects): if redirect == "|": # these should remain integer file descriptors, and not Python # file objects since they connect processes. r, w = os.pipe() specs[i].stdout = w specs[i + 1].stdin = r elif redirect == "&" and i == len(redirects) - 1: specs[i].background = True else: raise xt.XonshError(f"unrecognized redirect {redirect!r}") # Apply boundary conditions if not XSH.env.get("XONSH_CAPTURE_ALWAYS"): # Make sure sub-specs are always captured. # I.e. ![some_alias | grep x] $(some_alias) specs_to_capture = specs if captured in STDOUT_CAPTURE_KINDS else specs[:-1] for spec in specs_to_capture: if spec.env is None: spec.env = {"XONSH_CAPTURE_ALWAYS": True} else: spec.env.setdefault("XONSH_CAPTURE_ALWAYS", True) _update_last_spec(specs[-1]) return specs
Runs a subprocess, in its many forms. This takes a list of 'commands,' which may be a list of command line arguments or a string, representing a special connecting character. For example:: $ ls | grep wakka is represented by the following cmds:: [['ls'], '|', ['grep', 'wakka']] Lastly, the captured argument affects only the last real command.
def run_subproc(cmds, captured=False, envs=None):
    """Runs a subprocess, in its many forms. This takes a list of 'commands,'
    which may be a list of command line arguments or a string, representing
    a special connecting character.  For example::

        $ ls | grep wakka

    is represented by the following cmds::

        [['ls'], '|', ['grep', 'wakka']]

    Lastly, the captured argument affects only the last real command.
    """
    specs = cmds_to_specs(cmds, captured=captured, envs=envs)
    # optional subprocess tracing, controlled by $XONSH_TRACE_SUBPROC
    if tr := XSH.env.get("XONSH_TRACE_SUBPROC", False):
        tracer = XSH.env.get("XONSH_TRACE_SUBPROC_FUNC", None)
        if callable(tracer):
            tracer(cmds, captured=captured)
        else:
            r = {"cmds": cmds, "captured": captured}
            print(f"Trace run_subproc({repr(r)})", file=sys.stderr)
            if tr == 2:
                # level 2: also dump each spec's resolved details
                for i, s in enumerate(specs):
                    pcls = s.cls.__module__ + "." + s.cls.__name__
                    pcmd = (
                        [s.args[0].__name__] + s.args[1:]
                        if callable(s.args[0])
                        else s.args
                    )
                    p = {
                        "cmd": pcmd,
                        "cls": pcls,
                        "alias": s.alias_name,
                        "bin": s.binary_loc,
                        "thread": s.threadable,
                        "bg": s.background,
                    }
                    p = {k: v for k, v in p.items() if v is not None}
                    print(f"{i}: {repr(p)}", file=sys.stderr)
    cmds = [
        _flatten_cmd_redirects(cmd) if isinstance(cmd, list) else cmd for cmd in cmds
    ]
    if _should_set_title():
        # context manager updates the command information that gets
        # accessed by CurrentJobField when setting the terminal's title
        with XSH.env["PROMPT_FIELDS"]["current_job"].update_current_cmds(cmds):
            # remove current_job from prompt level cache
            XSH.env["PROMPT_FIELDS"].reset_key("current_job")
            # The terminal's title needs to be set before starting the
            # subprocess to avoid accidentally answering interactive questions
            # from commands such as `rm -i` (see #1436)
            XSH.shell.settitle()
            # run the subprocess
            return _run_specs(specs, cmds)
    else:
        return _run_specs(specs, cmds)
Join the tokens Parameters ---------- container: ParsedTokens parsed tokens holder Returns ------- str process the tokens and finally return the prompt string
def prompt_tokens_formatter_default(container: ParsedTokens) -> str:
    """
    Join the tokens

    Parameters
    ----------
    container: ParsedTokens
        parsed tokens holder

    Returns
    -------
    str
        process the tokens and finally return the prompt string
    """
    pieces = [tok.value for tok in container.tokens]
    return "".join(pieces)
Creates a new instance of the default prompt.
def default_prompt():
    """Creates a new instance of the default prompt.

    Returns a prompt template string whose fields (``{user}``, ``{cwd}``,
    ...) are expanded later; the variant chosen depends on the platform's
    ANSI-color capabilities.
    """
    if xp.ON_CYGWIN or xp.ON_MSYS:
        dp = (
            "{env_name}"
            "{BOLD_GREEN}{user}@{hostname}"
            "{BOLD_BLUE} {cwd} {prompt_end}{RESET} "
        )
    elif xp.ON_WINDOWS and not xp.win_ansi_support():
        # plain Windows console without ANSI escape support
        dp = (
            "{env_name}"
            "{BOLD_INTENSE_GREEN}{user}@{hostname}{BOLD_INTENSE_CYAN} "
            "{cwd}{branch_color}{curr_branch: {}}{RESET} "
            "{BOLD_INTENSE_CYAN}{prompt_end}{RESET} "
        )
    else:
        dp = (
            "{env_name}"
            "{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} "
            "{cwd}{branch_color}{curr_branch: {}}{RESET} "
            "{RED}{last_return_code_if_nonzero:[{BOLD_INTENSE_RED}{}{RED}] }{RESET}"
            "{BOLD_BLUE}{prompt_end}{RESET} "
        )
    return dp
Returns the filler text for the prompt in multiline scenarios.
def multiline_prompt(curr=""):
    """Returns the filler text for the prompt in multiline scenarios.

    Repeats $MULTILINE_PROMPT so that the filler visually lines up with the
    last line of the real prompt *curr* (color escapes excluded from the
    width computation), then re-appends the prompt's trailing whitespace.
    """
    line = curr.rsplit("\n", 1)[1] if "\n" in curr else curr
    line = RE_HIDDEN.sub("", line)  # gets rid of colors
    # most prompts end in whitespace, head is the part before that.
    head = line.rstrip()
    headlen = len(head)
    # tail is the trailing whitespace
    tail = line if headlen == 0 else line.rsplit(head[-1], 1)[1]
    # now to construct the actual string
    dots = XSH.env.get("MULTILINE_PROMPT")
    dots = dots() if callable(dots) else dots
    if dots is None or len(dots) == 0:
        return ""
    tokstr = xt.format_color(dots, hide=True)
    baselen = 0
    basetoks = []
    # split the filler into (color-escape, text) token pairs; \001/\002
    # bracket the invisible escape sequences
    for x in tokstr.split("\001"):
        pre, sep, post = x.partition("\002")
        if len(sep) == 0:
            basetoks.append(("", pre))
            baselen += len(pre)
        else:
            basetoks.append(("\001" + pre + "\002", post))
            baselen += len(post)
    if baselen == 0:
        return xt.format_color("{RESET}" + tail, hide=True)
    # whole repetitions of the filler, then a partial one to reach headlen
    toks = basetoks * (headlen // baselen)
    n = headlen % baselen
    count = 0
    for tok in basetoks:
        slen = len(tok[1])
        newcount = slen + count
        if slen == 0:
            continue
        elif newcount <= n:
            toks.append(tok)
        else:
            toks.append((tok[0], tok[1][: n - count]))
        count = newcount
        if n <= count:
            break
    toks.append((xt.format_color("{RESET}", hide=True), tail))
    rtn = "".join(itertools.chain.from_iterable(toks))
    return rtn
Returns whether or not the string is a valid template.
def is_template_string(template, PROMPT_FIELDS=None):
    """Returns whether or not the string is a valid template.

    Valid means: it parses as a format string and every field name it
    references is a known prompt field (from *PROMPT_FIELDS* or, when that
    is None, from ``$PROMPT_FIELDS``).
    """
    template = template() if callable(template) else template
    try:
        included_names = {i[1] for i in xt.FORMATTER.parse(template)}
    except ValueError:
        # malformed format string
        return False
    included_names.discard(None)
    if PROMPT_FIELDS is None:
        fmtter = XSH.env.get("PROMPT_FIELDS", PROMPT_FIELDS)
    else:
        fmtter = PROMPT_FIELDS
    known_names = set(fmtter.keys())
    return included_names <= known_names
Formats a value from a template string {val!conv:spec}. The spec is applied as a format string itself, but if the value is None, the result will be empty. The purpose of this is to allow optional parts in a prompt string. For example, if the prompt contains '{current_job:{} | }', and 'current_job' returns 'sleep', the result is 'sleep | ', and if 'current_job' returns None, the result is ''.
def _format_value(val, spec, conv) -> str:
    """Formats a value from a template string {val!conv:spec}. The spec is
    applied as a format string itself, but if the value is None, the result
    will be empty.  The purpose of this is to allow optional parts in a
    prompt string. For example, if the prompt contains '{current_job:{} | }',
    and 'current_job' returns 'sleep', the result is 'sleep | ', and if
    'current_job' returns None, the result is ''.
    """
    # None (directly, or inside an unset BasePromptField) renders as nothing
    if val is None or (isinstance(val, BasePromptField) and val.value is None):
        return ""
    val = xt.FORMATTER.convert_field(val, conv)
    if spec:
        val = xt.FORMATTER.format(spec, val)
    if not isinstance(val, str):
        val = format(val)
    return val
Return the compact current working directory. It respects the environment variable DYNAMIC_CWD_WIDTH.
def _dynamically_collapsed_pwd():
    """Return the compact current working directory.  It respects the
    environment variable DYNAMIC_CWD_WIDTH.

    Intermediate path components are shortened (with the elision character)
    to fit the target width; the final component is always kept whole if at
    all possible.
    """
    original_path = _replace_home_cwd()
    target_width, units = XSH.env["DYNAMIC_CWD_WIDTH"]
    elision_char = XSH.env["DYNAMIC_CWD_ELISION_CHAR"]
    if target_width == float("inf"):
        return original_path
    if units == "%":
        # percentage of the current terminal width
        cols, _ = shutil.get_terminal_size()
        target_width = (cols * target_width) // 100
    sep = xt.get_sep()
    pwd = original_path.split(sep)
    last = pwd.pop()
    remaining_space = target_width - len(last)
    # Reserve space for separators
    remaining_space_for_text = remaining_space - len(pwd)
    parts = []
    for i in range(len(pwd)):
        part = pwd[i]
        # give each remaining component an equal share, at least 1 char
        part_len = int(
            min(len(part), max(1, remaining_space_for_text // (len(pwd) - i)))
        )
        remaining_space_for_text -= part_len
        if len(part) > part_len:
            reduced_part = part[0 : part_len - len(elision_char)] + elision_char
            parts.append(reduced_part)
        else:
            parts.append(part)
    parts.append(last)
    full = sep.join(parts)
    truncature_char = elision_char if elision_char else "..."
    # If even displaying one letter per dir we are too long
    if len(full) > target_width:
        # We truncate the left most part
        full = truncature_char + full[int(-target_width) + len(truncature_char) :]
        # if there is not even a single separator we still
        # want to display at least the beginning of the directory
        if full.find(sep) == -1:
            full = (truncature_char + sep + last)[
                0 : int(target_width) - len(truncature_char)
            ] + truncature_char
    return full
Find current environment name from available sources. If ``$VIRTUAL_ENV`` is set, it is determined from the prompt setting in ``<venv>/pyvenv.cfg`` or from the folder name of the environment. Otherwise - if it is set - from ``$CONDA_DEFAULT_ENV``.
def find_env_name() -> Optional[str]:
    """Find current environment name from available sources.

    If ``$VIRTUAL_ENV`` is set, it is determined from the prompt setting in
    ``<venv>/pyvenv.cfg`` or from the folder name of the environment.
    Otherwise - if it is set - from ``$CONDA_DEFAULT_ENV``.
    """
    venv = XSH.env.get("VIRTUAL_ENV")
    if venv:
        name = _determine_env_name(venv)
        if name:
            return name
    conda = XSH.env.get("CONDA_DEFAULT_ENV")
    return conda if conda else None
Build env_name based on different sources. Respect order of precedence. Name from VIRTUAL_ENV_PROMPT will be used as-is. Names from other sources are surrounded with ``{env_prefix}`` and ``{env_postfix}`` fields.
def env_name() -> str:
    """Build env_name based on different sources. Respect order of precedence.

    Name from VIRTUAL_ENV_PROMPT will be used as-is.
    Names from other sources are surrounded with ``{env_prefix}`` and
    ``{env_postfix}`` fields.  Returns '' when prompting is disabled via
    ``$VIRTUAL_ENV_DISABLE_PROMPT`` or no environment is active.
    """
    if XSH.env.get("VIRTUAL_ENV_DISABLE_PROMPT"):
        return ""
    virtual_env_prompt = XSH.env.get("VIRTUAL_ENV_PROMPT")
    if virtual_env_prompt:
        return virtual_env_prompt
    found_envname = find_env_name()
    return _surround_env_name(found_envname) if found_envname else ""
Use prompt setting from pyvenv.cfg or basename of virtual_env. Tries to be resilient to subtle changes in whitespace and quoting in the configuration file format as it adheres to no clear standard.
def _determine_env_name(virtual_env: str) -> str: """Use prompt setting from pyvenv.cfg or basename of virtual_env. Tries to be resilient to subtle changes in whitespace and quoting in the configuration file format as it adheres to no clear standard. """ venv_path = Path(virtual_env) pyvenv_cfg = venv_path / "pyvenv.cfg" if pyvenv_cfg.is_file(): match = re.search(r"prompt\s*=\s*(.*)", pyvenv_cfg.read_text()) if match: return match.group(1).strip().lstrip("'\"").rstrip("'\"") return venv_path.name
This prints an escape sequence that tells VTE terminals the hostname and pwd. This should not be needed in most cases, but sometimes is for certain Linux terminals that do not read the PWD from the environment on startup. Note that this does not return a string, it simply prints and flushes the escape sequence to stdout directly.
def vte_new_tab_cwd() -> None:
    """This prints an escape sequence that tells VTE terminals the hostname
    and pwd. This should not be needed in most cases, but sometimes is for
    certain Linux terminals that do not read the PWD from the environment
    on startup. Note that this does not return a string, it simply prints
    and flushes the escape sequence to stdout directly.
    """
    env = XSH.env
    # OSC 7 sequence: ESC ] 7 ; file://<host><path> BEL
    t = "\033]7;file://{}{}\007"
    s = t.format(env.get("HOSTNAME"), env.get("PWD"))
    print(s, end="", flush=True)
Get git-stash count
def get_stash_count(gitdir: str):
    """Return the number of entries in git's stash reflog, or 0 when the
    file is absent/unreadable."""
    stash_log = os.path.join(gitdir, "logs/refs/stash")
    with contextlib.suppress(OSError):
        with open(stash_log) as f:
            # one reflog line per stash entry
            return sum(1 for _ in f)
    return 0
get the current git operation e.g. MERGE/REBASE...
def get_operations(gitdir: str):
    """Yield names of git operations in progress (e.g. MERGE/REBASE...),
    detected by marker files/dirs inside *gitdir*."""
    markers = (
        ("rebase-merge", "REBASE"),
        ("rebase-apply", "AM/REBASE"),
        ("MERGE_HEAD", "MERGING"),
        ("CHERRY_PICK_HEAD", "CHERRY-PICKING"),
        ("REVERT_HEAD", "REVERTING"),
        ("BISECT_LOG", "BISECTING"),
    )
    for marker, operation in markers:
        if os.path.exists(os.path.join(gitdir, marker)):
            yield operation