index | package | name | docstring | code | signature |
---|---|---|---|---|---|
23,603 | panphon.featuretable | match_pattern_seq | Implements limited pattern matching. Matches just in case pattern is
the same length (in segments) as the constituent and each of the
segments in the pattern is a featural subset of the corresponding
segment in the word.
Args:
pat (list): pattern consisting of a list of feature dicts, e.g.
[{'voi': 1}]
const (list): a sequence of Unicode IPA strings consisting of zero
or more segments.
normalize (bool): whether to pre-normalize the segments
Returns:
bool: `True` if `const` matches `pat`
| def match_pattern_seq(self, pat, const, normalize=True):
"""Implements limited pattern matching. Matches just in case pattern is
the same length (in segments) as the constituent and each of the
segments in the pattern is a featural subset of the corresponding
segment in the word.
Args:
pat (list): pattern consisting of a list of feature dicts, e.g.
[{'voi': 1}]
const (list): a sequence of Unicode IPA strings consisting of zero
or more segments.
normalize (bool): whether to pre-normalize the segments
Returns:
bool: `True` if `const` matches `pat`
"""
segs = [self.fts(s, normalize) for s in const]
if len(pat) != len(segs):
return False
else:
return all([s >= p for (s, p) in zip(segs, pat)])
| (self, pat, const, normalize=True) |
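A minimal usage sketch for `match_pattern_seq` (the IPA segments and expected results are illustrative, assuming a standard `panphon` install):

import panphon
ft = panphon.FeatureTable()
# [b] is voiced, so it satisfies the one-segment pattern [{'voi': 1}]
print(ft.match_pattern_seq([{'voi': 1}], ['b']))  # True
# [p] is voiceless, so the same pattern fails
print(ft.match_pattern_seq([{'voi': 1}], ['p']))  # False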
23,604 | panphon.featuretable | normalize | null | @staticmethod
def normalize(data):
return unicodedata.normalize('NFD', data)
| (data) |
23,605 | panphon.featuretable | seg_known | Return True if `segment` is in the segment <=> features database
Args:
segment (unicode): consonant or vowel
normalize (bool): whether to pre-normalize the segment
Returns:
bool: True, if `segment` is in the database
| def seg_known(self, segment, normalize=True):
"""Return True if `segment` is in segment <=> features database
Args:
segment (unicode): consonant or vowel
normalize (bool): whether to pre-normalize the segment
Returns:
bool: True, if `segment` is in the database
"""
if normalize:
segment = FeatureTable.normalize(segment)
return segment in self.seg_dict
| (self, segment, normalize=True) |
23,606 | panphon.featuretable | segment_to_vector | Given a Unicode IPA segment, return a list of feature specifications
in canonical order.
Args:
seg (unicode): IPA consonant or vowel
normalize: whether to pre-normalize the segment
Returns:
list: feature specifications ('+'/'-'/'0') in the order from
`FeatureTable.names`
| def segment_to_vector(self, seg, normalize=True):
"""Given a Unicode IPA segment, return a list of feature specificiations
in canonical order.
Args:
seg (unicode): IPA consonant or vowel
normalize: whether to pre-normalize the segment
Returns:
list: feature specifications ('+'/'-'/'0') in the order from
`FeatureTable.names`
"""
return self.fts(seg, normalize).strings()
| (self, seg, normalize=True) |
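For example (illustrative, assuming the default feature set):

import panphon
ft = panphon.FeatureTable()
vec = ft.segment_to_vector('t')
# One '+'/'-'/'0' entry per feature, ordered as in ft.names
assert len(vec) == len(ft.names)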
23,607 | panphon.featuretable | segs_safe | Return a list of segments (as strings) from a word
Characters that are not valid segments are included in the list as
individual characters.
Args:
word (unicode): word as an IPA string
normalize (bool): whether to pre-normalize the word
Returns:
list: list of Unicode IPA strings corresponding to segments in
`word`
| def segs_safe(self, word, normalize=True):
"""Return a list of segments (as strings) from a word
Characters that are not valid segments are included in the list as
individual characters.
Args:
word (unicode): word as an IPA string
normalize (bool): whether to pre-normalize the word
Returns:
list: list of Unicode IPA strings corresponding to segments in
`word`
"""
if normalize:
word = FeatureTable.normalize(word)
return self._segs(word, include_invalid=True, normalize=normalize)
| (self, word, normalize=True) |
23,608 | panphon.featuretable | validate_word | Returns True if `word` consists exhaustively of valid IPA segments
Args:
word (unicode): input word as Unicode IPA string
normalize (bool): whether to pre-normalize the word
Returns:
bool: True if `word` can be divided exhaustively into IPA segments
that exist in the database
| def validate_word(self, word, normalize=True):
"""Returns True if `word` consists exhaustively of valid IPA segments
Args:
word (unicode): input word as Unicode IPA string
normalize (bool): whether to pre-normalize the word
Returns:
bool: True if `word` can be divided exhaustively into IPA segments
that exist in the database
"""
return not self._segs(word, include_valid=False, include_invalid=True, normalize=normalize)
| (self, word, normalize=True) |
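A short sketch contrasting `segs_safe` and `validate_word` (the '9' is a deliberately invalid segment; outputs are illustrative):

import panphon
ft = panphon.FeatureTable()
print(ft.segs_safe('pʰon9'))      # invalid characters pass through individually, e.g. [..., '9']
print(ft.validate_word('pʰon9'))  # False: '9' is not in the segment database
print(ft.validate_word('pʰon'))   # True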
23,609 | panphon.featuretable | word_array | Return an ndarray of the features named in `ft_names` for the segments in `word`
Args:
ft_names (list): strings naming subset of features in self.names
word (unicode): word to be analyzed
normalize (bool): whether to pre-normalize the word
Returns:
ndarray: segments in rows, features in columns as [-1, 0, 1]
| def word_array(self, ft_names, word, normalize=True):
"""Return a nparray of features namd in ft_name for the segments in word
Args:
ft_names (list): strings naming subset of features in self.names
word (unicode): word to be analyzed
normalize (bool): whether to pre-normalize the word
Returns:
ndarray: segments in rows, features in columns as [-1, 0, 1]
"""
return numpy.array([s.numeric(ft_names) for s in self.word_fts(word, normalize)])
| (self, ft_names, word, normalize=True) |
23,610 | panphon.featuretable | word_fts | Return a list of Segment objects corresponding to the segments in
word.
Args:
word (unicode): word consisting of IPA segments
normalize (bool): whether to pre-normalize the word
Returns:
list: list of Segment objects corresponding to word
| def word_fts(self, word, normalize=True):
"""Return a list of Segment objects corresponding to the segments in
word.
Args:
word (unicode): word consisting of IPA segments
normalize (bool): whether to pre-normalize the word
Returns:
list: list of Segment objects corresponding to word
"""
return [self.fts(ipa, False) for ipa in self.ipa_segs(word, normalize)]
| (self, word, normalize=True) |
23,611 | panphon.featuretable | word_to_vector_list | Return a list of feature vectors, given a Unicode IPA word.
Args:
word (unicode): string in IPA (or X-SAMPA, provided `xsampa` is True)
numeric (bool): if True, return features as numeric values instead
of strings
xsampa (bool): whether the word is in X-SAMPA instead of IPA
normalize: whether to pre-normalize the word (applies to IPA only)
Returns:
list: a list of lists of '+'/'-'/'0' or 1/-1/0
| def word_to_vector_list(self, word, numeric=False, xsampa=False, normalize=True):
"""Return a list of feature vectors, given a Unicode IPA word.
Args:
word (unicode): string in IPA (or X-SAMPA, provided `xsampa` is True)
numeric (bool): if True, return features as numeric values instead
of strings
xsampa (bool): whether the word is in X-SAMPA instead of IPA
normalize: whether to pre-normalize the word (applies to IPA only)
Returns:
list: a list of lists of '+'/'-'/'0' or 1/-1/0
"""
if xsampa:
word = self.xsampa.convert(word)
segs = self.word_fts(word, normalize or xsampa)
if numeric:
tensor = [x.numeric() for x in segs]
else:
tensor = [x.strings() for x in segs]
return tensor
| (self, word, numeric=False, xsampa=False, normalize=True) |
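For instance (illustrative):

import panphon
ft = panphon.FeatureTable()
vectors = ft.word_to_vector_list('pa', numeric=True)
# One numeric vector (1/-1/0 per feature) for each of the two segments
assert len(vectors) == 2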
23,615 | panphon._panphon | pat | Given a string `p` with feature matrices (features grouped with square
brackets into segments), return a list of sets of (value, feature) tuples.
Args:
p (str): list of feature matrices as strings
Return:
list: list of sets of (value, feature) tuples
| def pat(p):
"""Given a string `p` with feature matrices (features grouped with square
brackets into segments), return a list of sets of (value, feature) tuples.
Args:
p (str): list of feature matrices as strings
Return:
list: list of sets of (value, feature) tuples
"""
pattern = []
for matrix in [m.group(0) for m in MT_REGEX.finditer(p)]:
segment = set([m.groups() for m in FT_REGEX.finditer(matrix)])
pattern.append(segment)
return pattern
| (p) |
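A sketch of the expected behavior (the exact tuple ordering inside each set depends on the module's regexes):

from panphon._panphon import pat
print(pat('[+voi -son][+syl]'))
# e.g. [{('+', 'voi'), ('-', 'son')}, {('+', 'syl')}]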
23,618 | iso8601.iso8601 | FixedOffset | null | def FixedOffset(
offset_hours: float, offset_minutes: float, name: str
) -> datetime.timezone:
return datetime.timezone(
datetime.timedelta(hours=offset_hours, minutes=offset_minutes), name
)
| (offset_hours: float, offset_minutes: float, name: str) -> datetime.timezone |
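For example:

import datetime
from iso8601.iso8601 import FixedOffset
tz = FixedOffset(5, 30, '+05:30')
assert tz.utcoffset(None) == datetime.timedelta(hours=5, minutes=30)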
23,619 | iso8601.iso8601 | ParseError | Raised when there is a problem parsing a date string | class ParseError(ValueError):
"""Raised when there is a problem parsing a date string"""
| null |
23,620 | iso8601.iso8601 | is_iso8601 | Check if a string matches an ISO 8601 format.
:param datestring: The string to check for validity
:returns: True if the string matches an ISO 8601 format, False otherwise
| def is_iso8601(datestring: str) -> bool:
"""Check if a string matches an ISO 8601 format.
:param datestring: The string to check for validity
:returns: True if the string matches an ISO 8601 format, False otherwise
"""
try:
m = ISO8601_REGEX.match(datestring)
return bool(m)
except Exception as e:
raise ParseError(e)
| (datestring: str) -> bool |
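For example:

from iso8601.iso8601 import is_iso8601
assert is_iso8601('2023-01-15T10:30:00Z')
assert not is_iso8601('not a date')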
23,622 | iso8601.iso8601 | parse_date | Parses ISO 8601 dates into datetime objects
The timezone is parsed from the date string. However it is quite common to
have dates without a timezone (not strictly correct). In this case the
default timezone specified in default_timezone is used. This is UTC by
default.
:param datestring: The date to parse as a string
:param default_timezone: A datetime tzinfo instance to use when no timezone
is specified in the datestring. If this is set to
None then a naive datetime object is returned.
:returns: A datetime.datetime instance
:raises: ParseError when there is a problem parsing the date or
constructing the datetime instance.
| def parse_date(
datestring: str, default_timezone: typing.Optional[datetime.timezone] = UTC
) -> datetime.datetime:
"""Parses ISO 8601 dates into datetime objects
The timezone is parsed from the date string. However it is quite common to
have dates without a timezone (not strictly correct). In this case the
default timezone specified in default_timezone is used. This is UTC by
default.
:param datestring: The date to parse as a string
:param default_timezone: A datetime tzinfo instance to use when no timezone
is specified in the datestring. If this is set to
None then a naive datetime object is returned.
:returns: A datetime.datetime instance
:raises: ParseError when there is a problem parsing the date or
constructing the datetime instance.
"""
try:
m = ISO8601_REGEX.match(datestring)
except Exception as e:
raise ParseError(e)
if not m:
raise ParseError(f"Unable to parse date string {datestring!r}")
# Drop any Nones from the regex matches
# TODO: check if there's a way to omit results in regexes
groups: typing.Dict[str, str] = {
k: v for k, v in m.groupdict().items() if v is not None
}
try:
return datetime.datetime(
year=int(groups.get("year", 0)),
month=int(groups.get("month", groups.get("monthdash", 1))),
day=int(groups.get("day", groups.get("daydash", 1))),
hour=int(groups.get("hour", 0)),
minute=int(groups.get("minute", 0)),
second=int(groups.get("second", 0)),
microsecond=int(
Decimal(f"0.{groups.get('second_fraction', 0)}") * Decimal("1000000.0")
),
tzinfo=parse_timezone(groups, default_timezone=default_timezone),
)
except Exception as e:
raise ParseError(e)
| (datestring: str, default_timezone: Optional[datetime.timezone] = datetime.timezone.utc) -> datetime.datetime |
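Typical usage:

import iso8601
dt = iso8601.parse_date('2007-01-25T12:00:00Z')
# datetime.datetime(2007, 1, 25, 12, 0, tzinfo=datetime.timezone.utc)
naive = iso8601.parse_date('2007-01-25T12:00:00', default_timezone=None)
assert naive.tzinfo is None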
23,623 | docutils.parsers.rst | Directive |
Base class for reStructuredText directives.
The following attributes may be set by subclasses. They are
interpreted by the directive parser (which runs the directive
class):
- `required_arguments`: The number of required arguments (default:
0).
- `optional_arguments`: The number of optional arguments (default:
0).
- `final_argument_whitespace`: A boolean, indicating if the final
argument may contain whitespace (default: False).
- `option_spec`: A dictionary, mapping known option names to
conversion functions such as `int` or `float` (default: {}, no
options). Several conversion functions are defined in the
directives/__init__.py module.
Option conversion functions take a single parameter, the option
argument (a string or ``None``), validate it and/or convert it
to the appropriate form. Conversion functions may raise
`ValueError` and `TypeError` exceptions.
- `has_content`: A boolean; True if content is allowed. Client
code must handle the case where content is required but not
supplied (an empty content list will be supplied).
Arguments are normally single whitespace-separated words. The
final argument may contain whitespace and/or newlines if
`final_argument_whitespace` is True.
If the form of the arguments is more complex, specify only one
argument (either required or optional) and set
`final_argument_whitespace` to True; the client code must do any
context-sensitive parsing.
When a directive implementation is being run, the directive class
is instantiated, and the `run()` method is executed. During
instantiation, the following instance variables are set:
- ``name`` is the directive type or name (string).
- ``arguments`` is the list of positional arguments (strings).
- ``options`` is a dictionary mapping option names (strings) to
values (type depends on option conversion functions; see
`option_spec` above).
- ``content`` is a list of strings, the directive content line by line.
- ``lineno`` is the absolute line number of the first line
of the directive.
- ``content_offset`` is the line offset of the first line
of the content from the beginning of the current input.
Used when initiating a nested parse.
- ``block_text`` is a string containing the entire directive.
- ``state`` is the state which called the directive function.
- ``state_machine`` is the state machine which controls the state
which called the directive function.
- ``reporter`` is the state machine's `reporter` instance.
Directive functions return a list of nodes which will be inserted
into the document tree at the point where the directive was
encountered. This can be an empty list if there is nothing to
insert.
For ordinary directives, the list must contain body elements or
structural elements. Some directives are intended specifically
for substitution definitions, and must return a list of `Text`
nodes and/or inline elements (suitable for inline insertion, in
place of the substitution reference). Such directives must verify
substitution definition context, typically using code like this::
if not isinstance(state, states.SubstitutionDef):
error = self.reporter.error(
'Invalid context: the "%s" directive can only be used '
'within a substitution definition.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
| class Directive:
"""
Base class for reStructuredText directives.
The following attributes may be set by subclasses. They are
interpreted by the directive parser (which runs the directive
class):
- `required_arguments`: The number of required arguments (default:
0).
- `optional_arguments`: The number of optional arguments (default:
0).
- `final_argument_whitespace`: A boolean, indicating if the final
argument may contain whitespace (default: False).
- `option_spec`: A dictionary, mapping known option names to
conversion functions such as `int` or `float` (default: {}, no
options). Several conversion functions are defined in the
directives/__init__.py module.
Option conversion functions take a single parameter, the option
argument (a string or ``None``), validate it and/or convert it
to the appropriate form. Conversion functions may raise
`ValueError` and `TypeError` exceptions.
- `has_content`: A boolean; True if content is allowed. Client
code must handle the case where content is required but not
supplied (an empty content list will be supplied).
Arguments are normally single whitespace-separated words. The
final argument may contain whitespace and/or newlines if
`final_argument_whitespace` is True.
If the form of the arguments is more complex, specify only one
argument (either required or optional) and set
`final_argument_whitespace` to True; the client code must do any
context-sensitive parsing.
When a directive implementation is being run, the directive class
is instantiated, and the `run()` method is executed. During
instantiation, the following instance variables are set:
- ``name`` is the directive type or name (string).
- ``arguments`` is the list of positional arguments (strings).
- ``options`` is a dictionary mapping option names (strings) to
values (type depends on option conversion functions; see
`option_spec` above).
- ``content`` is a list of strings, the directive content line by line.
- ``lineno`` is the absolute line number of the first line
of the directive.
- ``content_offset`` is the line offset of the first line
of the content from the beginning of the current input.
Used when initiating a nested parse.
- ``block_text`` is a string containing the entire directive.
- ``state`` is the state which called the directive function.
- ``state_machine`` is the state machine which controls the state
which called the directive function.
- ``reporter`` is the state machine's `reporter` instance.
Directive functions return a list of nodes which will be inserted
into the document tree at the point where the directive was
encountered. This can be an empty list if there is nothing to
insert.
For ordinary directives, the list must contain body elements or
structural elements. Some directives are intended specifically
for substitution definitions, and must return a list of `Text`
nodes and/or inline elements (suitable for inline insertion, in
place of the substitution reference). Such directives must verify
substitution definition context, typically using code like this::
if not isinstance(state, states.SubstitutionDef):
error = self.reporter.error(
'Invalid context: the "%s" directive can only be used '
'within a substitution definition.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
"""
# There is a "Creating reStructuredText Directives" how-to at
# <https://docutils.sourceforge.io/docs/howto/rst-directives.html>. If you
# update this docstring, please update the how-to as well.
required_arguments = 0
"""Number of required directive arguments."""
optional_arguments = 0
"""Number of optional arguments after the required arguments."""
final_argument_whitespace = False
"""May the final argument contain whitespace?"""
option_spec = None
"""Mapping of option names to validator functions."""
has_content = False
"""May the directive have content?"""
def __init__(self, name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
self.name = name
self.arguments = arguments
self.options = options
self.content = content
self.lineno = lineno
self.content_offset = content_offset
self.block_text = block_text
self.state = state
self.state_machine = state_machine
self.reporter = state_machine.reporter
def run(self):
raise NotImplementedError('Must override run() in subclass.')
# Directive errors:
def directive_error(self, level, message):
"""
Return a DirectiveError suitable for being thrown as an exception.
Call "raise self.directive_error(level, message)" from within
a directive implementation to return one single system message
at level `level`, which automatically gets the directive block
and the line number added.
Preferably use the `debug`, `info`, `warning`, `error`, or `severe`
wrapper methods, e.g. ``self.error(message)`` to generate an
ERROR-level directive error.
"""
return DirectiveError(level, message)
def debug(self, message):
return self.directive_error(0, message)
def info(self, message):
return self.directive_error(1, message)
def warning(self, message):
return self.directive_error(2, message)
def error(self, message):
return self.directive_error(3, message)
def severe(self, message):
return self.directive_error(4, message)
# Convenience methods:
def assert_has_content(self):
"""
Throw an ERROR-level DirectiveError if the directive doesn't
have contents.
"""
if not self.content:
raise self.error('Content block expected for the "%s" directive; '
'none found.' % self.name)
def add_name(self, node):
"""Append self.options['name'] to node['names'] if it exists.
Also normalize the name string and register it as explicit target.
"""
if 'name' in self.options:
name = nodes.fully_normalize_name(self.options.pop('name'))
if 'name' in node:
del node['name']
node['names'].append(name)
self.state.document.note_explicit_target(node, node)
| (name, arguments, options, content, lineno, content_offset, block_text, state, state_machine) |
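A minimal, hypothetical subclass illustrating the class attributes and the `run()` contract described above (the directive name and behavior are invented for illustration):

from docutils import nodes
from docutils.parsers.rst import Directive, directives

class Shout(Directive):  # hypothetical example directive
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    has_content = True

    def run(self):
        self.assert_has_content()
        # Return a list of nodes to insert at the directive's location.
        text = '\n'.join(self.content).upper()
        return [nodes.paragraph(text=text)]

directives.register_directive('shout', Shout)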
23,624 | docutils.parsers.rst | __init__ | null | def __init__(self, name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
self.name = name
self.arguments = arguments
self.options = options
self.content = content
self.lineno = lineno
self.content_offset = content_offset
self.block_text = block_text
self.state = state
self.state_machine = state_machine
self.reporter = state_machine.reporter
| (self, name, arguments, options, content, lineno, content_offset, block_text, state, state_machine) |
23,625 | docutils.parsers.rst | add_name | Append self.options['name'] to node['names'] if it exists.
Also normalize the name string and register it as explicit target.
| def add_name(self, node):
"""Append self.options['name'] to node['names'] if it exists.
Also normalize the name string and register it as explicit target.
"""
if 'name' in self.options:
name = nodes.fully_normalize_name(self.options.pop('name'))
if 'name' in node:
del node['name']
node['names'].append(name)
self.state.document.note_explicit_target(node, node)
| (self, node) |
23,626 | docutils.parsers.rst | assert_has_content |
Throw an ERROR-level DirectiveError if the directive doesn't
have contents.
| def assert_has_content(self):
"""
Throw an ERROR-level DirectiveError if the directive doesn't
have contents.
"""
if not self.content:
raise self.error('Content block expected for the "%s" directive; '
'none found.' % self.name)
| (self) |
23,627 | docutils.parsers.rst | debug | null | def debug(self, message):
return self.directive_error(0, message)
| (self, message) |
23,628 | docutils.parsers.rst | directive_error |
Return a DirectiveError suitable for being thrown as an exception.
Call "raise self.directive_error(level, message)" from within
a directive implementation to return one single system message
at level `level`, which automatically gets the directive block
and the line number added.
Preferably use the `debug`, `info`, `warning`, `error`, or `severe`
wrapper methods, e.g. ``self.error(message)`` to generate an
ERROR-level directive error.
| def directive_error(self, level, message):
"""
Return a DirectiveError suitable for being thrown as an exception.
Call "raise self.directive_error(level, message)" from within
a directive implementation to return one single system message
at level `level`, which automatically gets the directive block
and the line number added.
Preferably use the `debug`, `info`, `warning`, `error`, or `severe`
wrapper methods, e.g. ``self.error(message)`` to generate an
ERROR-level directive error.
"""
return DirectiveError(level, message)
| (self, level, message) |
23,629 | docutils.parsers.rst | error | null | def error(self, message):
return self.directive_error(3, message)
| (self, message) |
23,630 | docutils.parsers.rst | info | null | def info(self, message):
return self.directive_error(1, message)
| (self, message) |
23,631 | docutils.parsers.rst | run | null | def run(self):
raise NotImplementedError('Must override run() in subclass.')
| (self) |
23,632 | docutils.parsers.rst | severe | null | def severe(self, message):
return self.directive_error(4, message)
| (self, message) |
23,633 | docutils.parsers.rst | warning | null | def warning(self, message):
return self.directive_error(2, message)
| (self, message) |
23,701 | sphinx_thebe | ThebeButton | Specify a button to activate thebe on the page
Arguments
---------
text : str (optional)
If provided, the button text to display
Content
-------
None
| class ThebeButton(Directive):
"""Specify a button to activate thebe on the page
Arguments
---------
text : str (optional)
If provided, the button text to display
Content
-------
None
"""
optional_arguments = 1
final_argument_whitespace = True
has_content = False
def run(self):
kwargs = {"text": self.arguments[0]} if self.arguments else {}
return [ThebeButtonNode(**kwargs)]
| (name, arguments, options, content, lineno, content_offset, block_text, state, state_machine) |
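In a Sphinx source file (with `sphinx_thebe` enabled, which registers this class as `thebe-button` in `setup()` below), the directive is invoked as:

.. thebe-button:: Launch interactive mode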
23,709 | sphinx_thebe | run | null | def run(self):
kwargs = {"text": self.arguments[0]} if self.arguments else {}
return [ThebeButtonNode(**kwargs)]
| (self) |
23,712 | sphinx_thebe | ThebeButtonNode | Appended to the doctree by the ThebeButton directive
Renders as a button to enable thebe on the page.
If no ThebeButton directive is found in the document but thebe
is enabled, the node is added at the bottom of the document.
| class ThebeButtonNode(nodes.Element):
"""Appended to the doctree by the ThebeButton directive
Renders as a button to enable thebe on the page.
If no ThebeButton directive is found in the document but thebe
is enabled, the node is added at the bottom of the document.
"""
def __init__(self, rawsource="", *children, text="Run code", **attributes):
super().__init__("", text=text)
def html(self):
text = self["text"]
return (
'<button title="{text}" class="thebelab-button thebe-launch-button" '
'onclick="initThebe()">{text}</button>'.format(text=text)
)
| (rawsource='', *children, text='Run code', **attributes) |
23,719 | sphinx_thebe | __init__ | null | def __init__(self, rawsource="", *children, text="Run code", **attributes):
super().__init__("", text=text)
| (self, rawsource='', *children, text='Run code', **attributes) |
23,722 | docutils.nodes | __repr__ | null | def __repr__(self):
data = ''
for c in self.children:
data += c.shortrepr()
if len(data) > 60:
data = data[:56] + ' ...'
break
if self['names']:
return '<%s "%s": %s>' % (self.__class__.__name__,
'; '.join(self['names']), data)
else:
return '<%s: %s>' % (self.__class__.__name__, data)
| (self) |
23,735 | sphinx.util.nodes | _copy_except__document | Monkey-patch ``nodes.Element.copy`` to not copy the ``_document``
attribute.
xref: https://github.com/sphinx-doc/sphinx/issues/11116#issuecomment-1376767086
| def _copy_except__document(el: Element) -> Element:
"""Monkey-patch ```nodes.Element.copy``` to not copy the ``_document``
attribute.
xref: https://github.com/sphinx-doc/sphinx/issues/11116#issuecomment-1376767086
"""
newnode = object.__new__(el.__class__)
# set in Element.__init__()
newnode.children = []
newnode.rawsource = el.rawsource
newnode.tagname = el.tagname
# copied in Element.copy()
newnode.attributes = {k: (v
if k not in {'ids', 'classes', 'names', 'dupnames', 'backrefs'}
else v[:])
for k, v in el.attributes.items()}
newnode.line = el.line
newnode.source = el.source
return newnode
| (el: 'Element') -> 'Element' |
23,740 | sphinx.util.nodes | _deepcopy | Monkey-patch ``nodes.Element.deepcopy`` for speed. | def _deepcopy(el: Element) -> Element:
"""Monkey-patch ```nodes.Element.deepcopy``` for speed."""
newnode = el.copy()
newnode.children = [child.deepcopy() for child in el.children]
for child in newnode.children:
child.parent = newnode
if el.document:
child.document = el.document
if child.source is None:
child.source = el.document.current_source
if child.line is None:
child.line = el.document.current_line
return newnode
| (el: 'Element') -> 'Element' |
23,752 | sphinx_thebe | html | null | def html(self):
text = self["text"]
return (
'<button title="{text}" class="thebelab-button thebe-launch-button" '
'onclick="initThebe()">{text}</button>'.format(text=text)
)
| (self) |
23,780 | sphinx_thebe | _bool | null | def _bool(b):
if isinstance(b, bool):
return b
else:
return b in ["true", "True"]
| (b) |
23,781 | sphinx_thebe | _do_load_thebe | Decide whether to load thebe based on the page's context. | def _do_load_thebe(doctree, config_thebe):
"""Decide whether to load thebe based on the page's context."""
# No doctree means there's no page content at all
if not doctree:
return False
# If we aren't properly configured
if not config_thebe:
logger.warning(
"[sphinx-thebe]: Didn't find `thebe_config` in conf.py, add to use thebe"
)
return False
return True
| (doctree, config_thebe) |
23,782 | sphinx_thebe | _split_repo_url | Split a repository URL into an org / repo combination. | def _split_repo_url(url):
"""Split a repository URL into an org / repo combination."""
if "github.com/" in url:
end = url.split("github.com/")[-1]
org, repo = end.split("/")[:2]
else:
logger.warning(
f"[sphinx-thebe]: Currently Thebe repositories must be on GitHub, got {url}"
)
org = repo = None
return org, repo
| (url) |
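For example:

org, repo = _split_repo_url('https://github.com/executablebooks/sphinx-thebe')
# ('executablebooks', 'sphinx-thebe'); non-GitHub URLs log a warning and return (None, None)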
23,784 | textwrap | dedent | Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace.
Entirely blank lines are normalized to a newline character.
| def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\\thello" are
considered to have no common leading whitespace.
Entirely blank lines are normalized to a newline character.
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Find the largest common whitespace between current line and previous
# winner.
else:
for i, (x, y) in enumerate(zip(margin, indent)):
if x != y:
margin = margin[:i]
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
| (text) |
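For example:

from textwrap import dedent
s = dedent('''\
    first line
        indented relative to the margin
''')
assert s == 'first line\n    indented relative to the margin\n'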
23,786 | sphinx_thebe | init_thebe_core | Add scripts to configure thebe, and optionally add thebe itself.
By default, defer loading the `thebe` JS bundle until bootstrap is called
in order to speed up page load times.
| def init_thebe_core(app, env, docnames):
"""Add scripts to configure thebe, and optionally add thebe itself.
By default, defer loading the `thebe` JS bundle until bootstrap is called
in order to speed up page load times.
"""
config_thebe = app.config["thebe_config"]
# Add configuration variables
THEBE_JS_URL = f"https://unpkg.com/thebe@{THEBE_VERSION}/lib/index.js"
thebe_config_lines = [
f"const THEBE_JS_URL = \"{ THEBE_JS_URL }\"",
f"const thebe_selector = \"{ app.config.thebe_config['selector'] }\"",
f"const thebe_selector_input = \"{ app.config.thebe_config['selector_input'] }\"",
f"const thebe_selector_output = \"{ app.config.thebe_config['selector_output'] }\""
]
app.add_js_file(None, body='; '.join(thebe_config_lines))
app.add_js_file(filename="sphinx-thebe.js", **{"async": "async"})
if config_thebe.get("always_load") is True:
# If we've got `always load` on, then load thebe on every page.
app.add_js_file(THEBE_JS_URL, **{"async": "async"})
| (app, env, docnames) |
23,787 | sphinx_thebe | init_thebe_default_config | Create a default config for fields that aren't given by the user. | def init_thebe_default_config(app, env, docnames):
"""Create a default config for fields that aren't given by the user."""
thebe_config = app.config.thebe_config
defaults = {
"always_load": False,
"selector": ".thebe,.cell",
"selector_input": "pre",
"selector_output": ".output, .cell_output",
}
for key, val in defaults.items():
if key not in thebe_config:
thebe_config[key] = val
# Standardize types for certain values
BOOL_KEYS = ["always_load"]
for key in BOOL_KEYS:
thebe_config[key] = _bool(thebe_config[key])
| (app, env, docnames) |
23,792 | sphinx_thebe | setup | null | def setup(app):
logger.verbose("Adding copy buttons to code blocks...")
# Add our static path
app.connect("builder-inited", st_static_path)
# Set default values for the configuration
app.connect("env-before-read-docs", init_thebe_default_config)
# Load the JS/CSS assets for thebe if needed
app.connect("env-before-read-docs", init_thebe_core)
# Update the doctree with thebe-specific information if needed
app.connect("doctree-resolved", update_thebe_context)
# configuration for this tool
app.add_config_value("thebe_config", {}, "html")
# override=True in case Jupyter Sphinx has already been loaded
app.add_directive("thebe-button", ThebeButton, override=True)
# Add relevant code to headers
app.add_css_file("sphinx-thebe.css")
# ThebeButtonNode is the button that activates thebe
# and is only rendered for the HTML builder
app.add_node(
ThebeButtonNode,
html=(visit_element_html, None),
latex=(skip, None),
texinfo=(skip, None),
text=(skip, None),
man=(skip, None),
override=True,
)
return {
"version": __version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| (app) |
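A typical `conf.py` configuration exercising this setup (the repository values are illustrative; the keys match those read by `init_thebe_default_config` and `update_thebe_context`):

# conf.py (illustrative values)
extensions = ['sphinx_thebe']
thebe_config = {
    'repository_url': 'https://github.com/binder-examples/jupyter-stacks-datascience',
    'repository_branch': 'master',
    'always_load': True,
}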
23,793 | sphinx_thebe | skip | null | def skip(self, node):
raise nodes.SkipNode
| (self, node) |
23,794 | sphinx_thebe | st_static_path | null | def st_static_path(app):
static_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "_static"))
app.config.html_static_path.append(static_path)
| (app) |
23,795 | sphinx_thebe | update_thebe_context | Add thebe config nodes to this doctree using page-dependent information. | def update_thebe_context(app, doctree, docname):
"""Add thebe config nodes to this doctree using page-dependent information."""
config_thebe = app.config["thebe_config"]
# Skip modifying the doctree if we don't need to load thebe
if not _do_load_thebe(doctree, config_thebe):
return
# Thebe configuration
if config_thebe is True:
config_thebe = {}
if not isinstance(config_thebe, dict):
raise ValueError(
"thebe configuration must be `True` or a dictionary for configuration."
)
codemirror_theme = config_thebe.get("codemirror-theme", "abcdef")
# Choose the kernel we'll use
meta = app.env.metadata.get(docname, {})
kernel_name = meta.get("thebe-kernel")
if kernel_name is None:
if meta.get("kernelspec"):
if isinstance(meta.get("kernelspec"), str):
kernel_name = json.loads(meta["kernelspec"]).get("name")
else:
kernel_name = meta["kernelspec"].get("name")
else:
kernel_name = "python3"
# Codemirror syntax
cm_language = kernel_name
if "python" in cm_language:
cm_language = "python"
elif cm_language == "ir":
cm_language = "r"
# Create the URL for the kernel request
repo_url = config_thebe.get(
"repository_url",
"https://github.com/binder-examples/jupyter-stacks-datascience",
)
branch = config_thebe.get("repository_branch", "master")
path_to_docs = config_thebe.get("path_to_docs", ".").strip("/") + "/"
org, repo = _split_repo_url(repo_url)
# Update the doctree with some nodes for the thebe configuration
thebe_html_config = f"""
<script type="text/x-thebe-config">
{{
requestKernel: true,
binderOptions: {{
repo: "{org}/{repo}",
ref: "{branch}",
}},
codeMirrorConfig: {{
theme: "{codemirror_theme}",
mode: "{cm_language}"
}},
kernelOptions: {{
name: "{kernel_name}",
path: "{path_to_docs}{str(Path(docname).parent)}"
}},
predefinedOutput: true
}}
</script>
"""
# Append to the docutils doctree so it makes it into the build outputs
doctree.append(nodes.raw(text=thebe_html_config, format="html"))
doctree.append(
nodes.raw(text=f"<script>kernelName = '{kernel_name}'</script>", format="html")
)
| (app, doctree, docname) |
23,796 | sphinx_thebe | visit_element_html | null | def visit_element_html(self, node):
self.body.append(node.html())
raise nodes.SkipNode
| (self, node) |
23,798 | missingno.missingno | bar |
A bar chart visualization of the nullity of the given DataFrame.
:param df: The input DataFrame.
:param log: Whether or not to display a logarithmic plot. Defaults to False (linear).
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
:param n: The cap on the number of columns to include in the filtered DataFrame.
:param p: The cap on the percentage fill of the columns in the filtered DataFrame.
:param sort: The column sort order to apply. Can be "ascending", "descending", or None.
:param figsize: The size of the figure to display.
:param fontsize: The figure's font size. This defaults to 16.
:param labels: Whether or not to display the column names. May need to be turned off on particularly large
displays. Defaults to True.
:param label_rotation: What angle to rotate the text labels to. Defaults to 45 degrees.
:param color: The color of the filled columns. Defaults to `'dimgray'`.
:param orientation: The way the bar plot is oriented. Defaults to vertical if there are 50 or fewer
columns and horizontal if there are more.
:return: The plot axis.
| def bar(
df, figsize=None, fontsize=16, labels=None, label_rotation=45, log=False, color='dimgray',
filter=None, n=0, p=0, sort=None, ax=None, orientation=None
):
"""
A bar chart visualization of the nullity of the given DataFrame.
:param df: The input DataFrame.
:param log: Whether or not to display a logarithmic plot. Defaults to False (linear).
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
:param n: The cap on the number of columns to include in the filtered DataFrame.
:param p: The cap on the percentage fill of the columns in the filtered DataFrame.
:param sort: The column sort order to apply. Can be "ascending", "descending", or None.
:param figsize: The size of the figure to display.
:param fontsize: The figure's font size. This defaults to 16.
:param labels: Whether or not to display the column names. May need to be turned off on particularly large
displays. Defaults to True.
:param label_rotation: What angle to rotate the text labels to. Defaults to 45 degrees.
:param color: The color of the filled columns. Defaults to `'dimgray'`.
:param orientation: The way the bar plot is oriented. Defaults to vertical if there are 50 or fewer
columns and horizontal if there are more.
:return: The plot axis.
"""
df = nullity_filter(df, filter=filter, n=n, p=p)
df = nullity_sort(df, sort=sort, axis='rows')
nullity_counts = len(df) - df.isnull().sum()
if orientation is None:
if len(df.columns) > 50:
orientation = 'left'
else:
orientation = 'bottom'
if ax is None:
ax1 = plt.gca()
if figsize is None:
if len(df.columns) <= 50 or orientation == 'top' or orientation == 'bottom':
figsize = (25, 10)
else:
figsize = (25, (25 + len(df.columns) - 50) * 0.5)
else:
ax1 = ax
figsize = None # for behavioral consistency with other plot types, re-use the given size
plot_args = {'figsize': figsize, 'fontsize': fontsize, 'log': log, 'color': color, 'ax': ax1}
if orientation == 'bottom':
(nullity_counts / len(df)).plot.bar(**plot_args)
else:
(nullity_counts / len(df)).plot.barh(**plot_args)
axes = [ax1]
# Start appending elements, starting with a modified bottom x axis.
if labels or (labels is None and len(df.columns) <= 50):
ax1.set_xticklabels(
ax1.get_xticklabels(), rotation=label_rotation, ha='right', fontsize=fontsize
)
# Create the numerical ticks.
ax2 = ax1.twinx()
axes.append(ax2)
if not log:
ax1.set_ylim([0, 1])
ax2.set_yticks(ax1.get_yticks())
ax2.set_yticklabels([int(n * len(df)) for n in ax1.get_yticks()], fontsize=fontsize)
else:
# For some reason when a logarithmic plot is specified `ax1` always contains two more ticks than actually
# appears in the plot. The fix is to ignore the first and last entries. Also note that when a log scale
# is used, we have to make it match the `ax1` layout ourselves.
ax2.set_yscale('log')
ax2.set_ylim(ax1.get_ylim())
ax2.set_yticklabels([int(n * len(df)) for n in ax1.get_yticks()], fontsize=fontsize)
# Create the third axis, which displays columnar totals above the rest of the plot.
ax3 = ax1.twiny()
axes.append(ax3)
ax3.set_xticks(ax1.get_xticks())
ax3.set_xlim(ax1.get_xlim())
ax3.set_xticklabels(
nullity_counts.values, fontsize=fontsize, rotation=label_rotation, ha='left'
)
else:
# Create the numerical ticks.
ax2 = ax1.twinx()
axes.append(ax2)
if not log:
# Width
ax1.set_xlim([0, 1])
# Bottom
ax2.set_xticks(ax1.get_xticks())
ax2.set_xticklabels([int(n * len(df)) for n in ax1.get_xticks()], fontsize=fontsize)
# Right
ax2.set_yticks(ax1.get_yticks())
ax2.set_yticklabels(nullity_counts.values, fontsize=fontsize, ha='left')
else:
# For some reason when a logarithmic plot is specified `ax1` always contains two more ticks than actually
# appears in the plot. The fix is to ignore the first and last entries. Also note that when a log scale
# is used, we have to make it match the `ax1` layout ourselves.
ax1.set_xscale('log')
ax1.set_xlim(ax1.get_xlim())
# Bottom
ax2.set_xticks(ax1.get_xticks())
ax2.set_xticklabels([int(n * len(df)) for n in ax1.get_xticks()], fontsize=fontsize)
# Right
ax2.set_yticks(ax1.get_yticks())
ax2.set_yticklabels(nullity_counts.values, fontsize=fontsize, ha='left')
# Create the third axis, which displays columnar totals above the rest of the plot.
ax3 = ax1.twiny()
axes.append(ax3)
ax3.set_yticks(ax1.get_yticks())
if log:
ax3.set_xscale('log')
ax3.set_xlim(ax1.get_xlim())
ax3.set_ylim(ax1.get_ylim())
ax3.grid(False)
for ax in axes:
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
return ax1
| (df, figsize=None, fontsize=16, labels=None, label_rotation=45, log=False, color='dimgray', filter=None, n=0, p=0, sort=None, ax=None, orientation=None) |
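A quick sketch with synthetic data (the missingness pattern is random, for illustration only):

import numpy as np
import pandas as pd
import missingno as msno

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.random((100, 4)), columns=list('abcd')).mask(lambda d: d < 0.2)
ax = msno.bar(df)  # one bar per column, height = fraction of non-null values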
23,799 | missingno.missingno | dendrogram |
Fits a `scipy` hierarchical clustering algorithm to the given DataFrame's variables and visualizes the results as
a `scipy` dendrogram.
The default vertical display will fit up to 50 columns. If more than 50 columns are specified and orientation is
left unspecified the dendrogram will automatically swap to a horizontal display to fit the additional variables.
:param df: The DataFrame whose completeness is being dendrogrammed.
:param method: The distance measure being used for clustering. This is a parameter that is passed to
`scipy.hierarchy`.
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
:param n: The cap on the number of columns to include in the filtered DataFrame.
:param p: The cap on the percentage fill of the columns in the filtered DataFrame.
:param figsize: The size of the figure to display. This is a `matplotlib` parameter which defaults to `(25, 10)`.
:param fontsize: The figure's font size.
:param orientation: The way the dendrogram is oriented. Defaults to top-down if there are less than or equal to 50
columns and left-right if there are more.
:param label_rotation: What angle to rotate the text labels to. Defaults to 45 degrees.
:return: The plot axis.
| def dendrogram(
df, method='average', filter=None, n=0, p=0, orientation=None, figsize=None, fontsize=16,
label_rotation=45, ax=None
):
"""
Fits a `scipy` hierarchical clustering algorithm to the given DataFrame's variables and visualizes the results as
a `scipy` dendrogram.
The default vertical display will fit up to 50 columns. If more than 50 columns are specified and orientation is
left unspecified the dendrogram will automatically swap to a horizontal display to fit the additional variables.
:param df: The DataFrame whose completeness is being dendrogrammed.
:param method: The distance measure being used for clustering. This is a parameter that is passed to
`scipy.hierarchy`.
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
:param n: The cap on the number of columns to include in the filtered DataFrame.
:param p: The cap on the percentage fill of the columns in the filtered DataFrame.
:param figsize: The size of the figure to display. This is a `matplotlib` parameter which defaults to `(25, 10)`.
:param fontsize: The figure's font size.
:param orientation: The way the dendrogram is oriented. Defaults to top-down if there are less than or equal to 50
columns and left-right if there are more.
:param label_rotation: What angle to rotate the text labels to. Defaults to 45 degrees.
:return: The plot axis.
"""
if not figsize:
if len(df.columns) <= 50 or orientation == 'top' or orientation == 'bottom':
figsize = (25, 10)
else:
figsize = (25, (25 + len(df.columns) - 50) * 0.5)
if ax is None:
plt.figure(figsize=figsize)
ax0 = plt.gca()
else:
ax0 = ax
df = nullity_filter(df, filter=filter, n=n, p=p)
# Link the hierarchical output matrix, figure out orientation, construct base dendrogram.
x = np.transpose(df.isnull().astype(int).values)
z = hierarchy.linkage(x, method)
if not orientation:
if len(df.columns) > 50:
orientation = 'left'
else:
orientation = 'bottom'
hierarchy.dendrogram(
z,
orientation=orientation,
labels=df.columns.tolist(),
distance_sort='descending',
link_color_func=lambda c: 'black',
leaf_font_size=fontsize,
ax=ax0
)
# Remove extraneous default visual elements.
ax0.set_aspect('auto')
ax0.grid(visible=False)
if orientation == 'bottom':
ax0.xaxis.tick_top()
ax0.xaxis.set_ticks_position('none')
ax0.yaxis.set_ticks_position('none')
ax0.spines['top'].set_visible(False)
ax0.spines['right'].set_visible(False)
ax0.spines['bottom'].set_visible(False)
ax0.spines['left'].set_visible(False)
ax0.patch.set_visible(False)
# Set up the categorical axis labels and draw.
if orientation == 'bottom':
ax0.set_xticklabels(ax0.xaxis.get_majorticklabels(), rotation=label_rotation, ha='left')
elif orientation == 'top':
ax0.set_xticklabels(ax0.xaxis.get_majorticklabels(), rotation=label_rotation, ha='right')
if orientation == 'bottom' or orientation == 'top':
ax0.tick_params(axis='y', labelsize=int(fontsize / 16 * 20))
else:
ax0.tick_params(axis='x', labelsize=int(fontsize / 16 * 20))
return ax0
| (df, method='average', filter=None, n=0, p=0, orientation=None, figsize=None, fontsize=16, label_rotation=45, ax=None) |
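For example, on synthetic data (illustrative):

import numpy as np
import pandas as pd
import missingno as msno

rng = np.random.default_rng(1)
df = pd.DataFrame(rng.random((100, 6)), columns=list('abcdef')).mask(lambda d: d < 0.3)
ax = msno.dendrogram(df)  # columns with similar nullity patterns cluster together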
23,800 | missingno.missingno | heatmap |
Presents a `seaborn` heatmap visualization of nullity correlation in the given DataFrame.
Note that this visualization has no special support for large datasets. For those, try the dendrogram instead.
:param df: The DataFrame whose completeness is being heatmapped.
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default). See
`nullity_filter()` for more information.
:param n: The cap on the number of columns to include in the filtered DataFrame. See `nullity_filter()` for
more information.
:param p: The cap on the percentage fill of the columns in the filtered DataFrame. See `nullity_filter()` for
more information.
:param sort: The column sort order to apply. Can be "ascending", "descending", or None.
:param figsize: The size of the figure to display. This is a `matplotlib` parameter which defaults to (20, 12).
:param fontsize: The figure's font size.
:param labels: Whether or not to label each matrix entry with its correlation (default is True).
:param label_rotation: What angle to rotate the text labels to. Defaults to 45 degrees.
:param cmap: What `matplotlib` colormap to use. Defaults to `RdBu`.
:param vmin: The normalized colormap threshold. Defaults to -1, i.e. the bottom of the color scale.
:param vmax: The normalized colormap threshold. Defaults to 1, i.e. the top of the color scale.
:return: The plot axis.
| def heatmap(
df, filter=None, n=0, p=0, sort=None, figsize=(20, 12), fontsize=16, labels=True,
label_rotation=45, cmap='RdBu', vmin=-1, vmax=1, cbar=True, ax=None
):
"""
Presents a `seaborn` heatmap visualization of nullity correlation in the given DataFrame.
Note that this visualization has no special support for large datasets. For those, try the dendrogram instead.
:param df: The DataFrame whose completeness is being heatmapped.
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default). See
`nullity_filter()` for more information.
:param n: The cap on the number of columns to include in the filtered DataFrame. See `nullity_filter()` for
more information.
:param p: The cap on the percentage fill of the columns in the filtered DataFrame. See `nullity_filter()` for
more information.
:param sort: The column sort order to apply. Can be "ascending", "descending", or None.
:param figsize: The size of the figure to display. This is a `matplotlib` parameter which defaults to (20, 12).
:param fontsize: The figure's font size.
:param labels: Whether or not to label each matrix entry with its correlation (default is True).
:param label_rotation: What angle to rotate the text labels to. Defaults to 45 degrees.
:param cmap: What `matplotlib` colormap to use. Defaults to `RdBu`.
:param vmin: The normalized colormap threshold. Defaults to -1, i.e. the bottom of the color scale.
:param vmax: The normalized colormap threshold. Defaults to 1, i.e. the top of the color scale.
:return: The plot axis.
"""
# Apply filters and sorts, set up the figure.
df = nullity_filter(df, filter=filter, n=n, p=p)
df = nullity_sort(df, sort=sort, axis='rows')
if ax is None:
plt.figure(figsize=figsize)
ax0 = plt.gca()
else:
ax0 = ax
# Remove completely filled or completely empty variables.
df = df.iloc[:, [i for i, n in enumerate(np.var(df.isnull(), axis='rows')) if n > 0]]
# Create and mask the correlation matrix. Construct the base heatmap.
corr_mat = df.isnull().corr()
mask = np.zeros_like(corr_mat)
mask[np.triu_indices_from(mask)] = True
if labels:
sns.heatmap(corr_mat, mask=mask, cmap=cmap, ax=ax0, cbar=cbar,
annot=True, annot_kws={'size': fontsize - 2},
vmin=vmin, vmax=vmax)
else:
sns.heatmap(corr_mat, mask=mask, cmap=cmap, ax=ax0, cbar=cbar,
vmin=vmin, vmax=vmax)
# Apply visual corrections and modifications.
ax0.xaxis.tick_bottom()
ax0.set_xticklabels(
ax0.xaxis.get_majorticklabels(), rotation=label_rotation, ha='right', fontsize=fontsize
)
ax0.set_yticklabels(ax0.yaxis.get_majorticklabels(), rotation=0, fontsize=fontsize)
ax0.xaxis.set_ticks_position('none')
ax0.yaxis.set_ticks_position('none')
ax0.patch.set_visible(False)
for text in ax0.texts:
t = float(text.get_text())
if 0.95 <= t < 1:
text.set_text('<1')
elif -1 < t <= -0.95:
text.set_text('>-1')
elif t == 1:
text.set_text('1')
elif t == -1:
text.set_text('-1')
elif -0.05 < t < 0.05:
text.set_text('')
else:
text.set_text(round(t, 1))
return ax0
| (df, filter=None, n=0, p=0, sort=None, figsize=(20, 12), fontsize=16, labels=True, label_rotation=45, cmap='RdBu', vmin=-1, vmax=1, cbar=True, ax=None) |
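For example, on synthetic data (illustrative):

import numpy as np
import pandas as pd
import missingno as msno

rng = np.random.default_rng(2)
df = pd.DataFrame(rng.random((200, 5)), columns=list('abcde')).mask(lambda d: d < 0.25)
ax = msno.heatmap(df)  # pairwise nullity correlations between columns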
23,801 | missingno.missingno | matrix |
A matrix visualization of the nullity of the given DataFrame.
:param df: The `DataFrame` being mapped.
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
:param n: The max number of columns to include in the filtered DataFrame.
:param p: The max percentage fill of the columns in the filtered DataFrame.
:param sort: The row sort order to apply. Can be "ascending", "descending", or None.
:param figsize: The size of the figure to display.
:param fontsize: The figure's font size. Defaults to 16.
:param labels: Whether or not to display the column names. Defaults to the underlying data labels when there are
50 columns or less, and no labels when there are more than 50 columns.
:param label_rotation: What angle to rotate the text labels to. Defaults to 45 degrees.
:param sparkline: Whether or not to display the sparkline. Defaults to True.
:param width_ratios: The ratio of the width of the matrix to the width of the sparkline. Defaults to `(15, 1)`.
Does nothing if `sparkline=False`.
:param color: The color of the filled columns. Default is `(0.25, 0.25, 0.25)`.
:return: The plot axis.
| def matrix(
df, filter=None, n=0, p=0, sort=None, figsize=(25, 10), width_ratios=(15, 1),
color=(0.25, 0.25, 0.25), fontsize=16, labels=None, label_rotation=45, sparkline=True,
freq=None, ax=None
):
"""
A matrix visualization of the nullity of the given DataFrame.
:param df: The `DataFrame` being mapped.
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
:param n: The max number of columns to include in the filtered DataFrame.
:param p: The max percentage fill of the columns in the filtered DataFrame.
:param sort: The row sort order to apply. Can be "ascending", "descending", or None.
:param figsize: The size of the figure to display.
:param fontsize: The figure's font size. Defaults to 16.
:param labels: Whether or not to display the column names. Defaults to the underlying data labels when there are
50 columns or less, and no labels when there are more than 50 columns.
:param label_rotation: What angle to rotate the text labels to. Defaults to 45 degrees.
:param sparkline: Whether or not to display the sparkline. Defaults to True.
:param width_ratios: The ratio of the width of the matrix to the width of the sparkline. Defaults to `(15, 1)`.
Does nothing if `sparkline=False`.
:param color: The color of the filled columns. Default is `(0.25, 0.25, 0.25)`.
:return: The plot axis.
"""
df = nullity_filter(df, filter=filter, n=n, p=p)
df = nullity_sort(df, sort=sort, axis='columns')
height = df.shape[0]
width = df.shape[1]
# z is the color-mask array; g is a (height x width x 3) RGB array. Apply the z color-mask to set the RGB of each pixel.
z = df.notnull().values
g = np.zeros((height, width, 3), dtype=np.float32)
g[z < 0.5] = [1, 1, 1]
g[z > 0.5] = color
# Set up the matplotlib grid layout: a single subplot if there is no sparkline, a left-right split if there is one.
if ax is None:
plt.figure(figsize=figsize)
if sparkline:
gs = gridspec.GridSpec(1, 2, width_ratios=width_ratios)
gs.update(wspace=0.08)
ax1 = plt.subplot(gs[1])
else:
gs = gridspec.GridSpec(1, 1)
ax0 = plt.subplot(gs[0])
else:
if sparkline is not False:
warnings.warn(
"Plotting a sparkline on an existing axis is not currently supported. "
"To remove this warning, set sparkline=False."
)
sparkline = False
ax0 = ax
# Create the nullity plot.
ax0.imshow(g, interpolation='none')
# Remove extraneous default visual elements.
ax0.set_aspect('auto')
ax0.grid(visible=False)
ax0.xaxis.tick_top()
ax0.xaxis.set_ticks_position('none')
ax0.yaxis.set_ticks_position('none')
ax0.spines['top'].set_visible(False)
ax0.spines['right'].set_visible(False)
ax0.spines['bottom'].set_visible(False)
ax0.spines['left'].set_visible(False)
# Set up and rotate the column ticks. The labels argument is set to None by default. If the user specifies it in
# the argument, respect that specification. Otherwise display for <= 50 columns and do not display for > 50.
if labels or (labels is None and len(df.columns) <= 50):
ha = 'left'
ax0.set_xticks(list(range(0, width)))
ax0.set_xticklabels(list(df.columns), rotation=label_rotation, ha=ha, fontsize=fontsize)
else:
ax0.set_xticks([])
# Adds Timestamps ticks if freq is not None, else set up the two top-bottom row ticks.
if freq:
ts_list = []
if type(df.index) == pd.PeriodIndex:
ts_array = pd.date_range(df.index.to_timestamp().date[0],
df.index.to_timestamp().date[-1],
freq=freq).values
ts_ticks = pd.date_range(df.index.to_timestamp().date[0],
df.index.to_timestamp().date[-1],
freq=freq).map(lambda t:
t.strftime('%Y-%m-%d'))
elif type(df.index) == pd.DatetimeIndex:
ts_array = pd.date_range(df.index[0], df.index[-1],
freq=freq).values
ts_ticks = pd.date_range(df.index[0], df.index[-1],
freq=freq).map(lambda t:
t.strftime('%Y-%m-%d'))
else:
raise KeyError('Dataframe index must be PeriodIndex or DatetimeIndex.')
try:
for value in ts_array:
ts_list.append(df.index.get_loc(value))
except KeyError:
raise KeyError('Could not divide time index into desired frequency.')
ax0.set_yticks(ts_list)
ax0.set_yticklabels(ts_ticks, fontsize=int(fontsize / 16 * 20), rotation=0)
else:
ax0.set_yticks([0, df.shape[0] - 1])
ax0.set_yticklabels([1, df.shape[0]], fontsize=int(fontsize / 16 * 20), rotation=0)
# Create the inter-column vertical grid.
    in_between_points = [x + 0.5 for x in range(0, width - 1)]
    for in_between_point in in_between_points:
        ax0.axvline(in_between_point, linestyle='-', color='white')
if sparkline:
# Calculate row-wise completeness for the sparkline.
completeness_srs = df.notnull().astype(bool).sum(axis=1)
x_domain = list(range(0, height))
y_range = list(reversed(completeness_srs.values))
min_completeness = min(y_range)
max_completeness = max(y_range)
min_completeness_index = y_range.index(min_completeness)
max_completeness_index = y_range.index(max_completeness)
# Set up the sparkline, remove the border element.
ax1.grid(visible=False)
ax1.set_aspect('auto')
# GH 25
if int(mpl.__version__[0]) <= 1:
ax1.set_axis_bgcolor((1, 1, 1))
else:
ax1.set_facecolor((1, 1, 1))
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.spines['left'].set_visible(False)
ax1.set_ymargin(0)
# Plot sparkline---plot is sideways so the x and y axis are reversed.
ax1.plot(y_range, x_domain, color=color)
if labels:
# Figure out what case to display the label in: mixed, upper, lower.
label = 'Data Completeness'
if str(df.columns[0]).islower():
label = label.lower()
if str(df.columns[0]).isupper():
label = label.upper()
# Set up and rotate the sparkline label.
ha = 'left'
ax1.set_xticks([min_completeness + (max_completeness - min_completeness) / 2])
ax1.set_xticklabels([label], rotation=label_rotation, ha=ha, fontsize=fontsize)
ax1.xaxis.tick_top()
ax1.set_yticks([])
else:
ax1.set_xticks([])
ax1.set_yticks([])
# Add maximum and minimum labels, circles.
ax1.annotate(max_completeness,
xy=(max_completeness, max_completeness_index),
xytext=(max_completeness + 2, max_completeness_index),
fontsize=int(fontsize / 16 * 14),
va='center',
ha='left')
ax1.annotate(min_completeness,
xy=(min_completeness, min_completeness_index),
xytext=(min_completeness - 2, min_completeness_index),
fontsize=int(fontsize / 16 * 14),
va='center',
ha='right')
ax1.set_xlim([min_completeness - 2, max_completeness + 2]) # Otherwise the circles are cut off.
ax1.plot([min_completeness], [min_completeness_index], '.', color=color, markersize=10.0)
ax1.plot([max_completeness], [max_completeness_index], '.', color=color, markersize=10.0)
# Remove tick mark (only works after plotting).
ax1.xaxis.set_ticks_position('none')
return ax0
| (df, filter=None, n=0, p=0, sort=None, figsize=(25, 10), width_ratios=(15, 1), color=(0.25, 0.25, 0.25), fontsize=16, labels=None, label_rotation=45, sparkline=True, freq=None, ax=None) |
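A minimal usage sketch for the matrix plot above, assuming the function is exposed as missingno.matrix (the DataFrame here is illustrative):

import numpy as np
import pandas as pd
import missingno as msno

# Toy frame with scattered missing values.
df = pd.DataFrame({
    "a": [1.0, np.nan, 3.0, 4.0],
    "b": [np.nan, np.nan, 1.0, 2.0],
    "c": [1.0, 2.0, 3.0, 4.0],
})
ax = msno.matrix(df, sort="descending", sparkline=True)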
23,803 | missingno.utils | nullity_filter |
Filters a DataFrame according to its nullity, using some combination of 'top' and 'bottom' numerical and
percentage values. Percentages and numerical thresholds can be specified simultaneously: for example,
to get a DataFrame with columns of at least 75% completeness but with no more than 5 columns, use
`nullity_filter(df, filter='top', p=.75, n=5)`.
:param df: The DataFrame whose columns are being filtered.
:param filter: The orientation of the filter being applied to the DataFrame. One of, "top", "bottom",
or None (default). The filter will simply return the DataFrame if you leave the filter argument unspecified or
as None.
:param p: A completeness ratio cut-off. If non-zero the filter will limit the DataFrame to columns with at least p
completeness. Input should be in the range [0, 1].
:param n: A numerical cut-off. If non-zero no more than this number of columns will be returned.
:return: The nullity-filtered `DataFrame`.
| def nullity_filter(df, filter=None, p=0, n=0):
"""
Filters a DataFrame according to its nullity, using some combination of 'top' and 'bottom' numerical and
percentage values. Percentages and numerical thresholds can be specified simultaneously: for example,
to get a DataFrame with columns of at least 75% completeness but with no more than 5 columns, use
`nullity_filter(df, filter='top', p=.75, n=5)`.
:param df: The DataFrame whose columns are being filtered.
:param filter: The orientation of the filter being applied to the DataFrame. One of, "top", "bottom",
or None (default). The filter will simply return the DataFrame if you leave the filter argument unspecified or
as None.
:param p: A completeness ratio cut-off. If non-zero the filter will limit the DataFrame to columns with at least p
completeness. Input should be in the range [0, 1].
:param n: A numerical cut-off. If non-zero no more than this number of columns will be returned.
:return: The nullity-filtered `DataFrame`.
"""
if filter == 'top':
if p:
df = df.iloc[:, [c >= p for c in df.count(axis='rows').values / len(df)]]
if n:
df = df.iloc[:, np.sort(np.argsort(df.count(axis='rows').values)[-n:])]
elif filter == 'bottom':
if p:
df = df.iloc[:, [c <= p for c in df.count(axis='rows').values / len(df)]]
if n:
df = df.iloc[:, np.sort(np.argsort(df.count(axis='rows').values)[:n])]
return df
| (df, filter=None, p=0, n=0) |
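A brief sketch of the filter semantics on a toy frame (column names are illustrative):

import pandas as pd
from missingno.utils import nullity_filter

df = pd.DataFrame({
    "full": [1, 2, 3, 4],
    "half": [1, None, 3, None],
    "empty": [None] * 4,
})
# Keep columns that are at least 75% complete, capped at 2 columns.
top = nullity_filter(df, filter="top", p=0.75, n=2)
print(list(top.columns))  # expected: ['full'] (only one column passes p=0.75)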
23,804 | missingno.utils | nullity_sort |
Sorts a DataFrame according to its nullity, in either ascending or descending order.
:param df: The DataFrame object being sorted.
:param sort: The sorting method: either "ascending", "descending", or None (default).
:param axis: The axis to sort along: "columns" (default) reorders the rows by their nullity, "rows" reorders the columns.
:return: The nullity-sorted DataFrame.
| def nullity_sort(df, sort=None, axis='columns'):
"""
Sorts a DataFrame according to its nullity, in either ascending or descending order.
:param df: The DataFrame object being sorted.
    :param sort: The sorting method: either "ascending", "descending", or None (default).
    :param axis: The axis to sort along: "columns" (default) reorders the rows by their nullity, "rows" reorders the columns.
:return: The nullity-sorted DataFrame.
"""
if sort is None:
return df
elif sort not in ['ascending', 'descending']:
raise ValueError('The "sort" parameter must be set to "ascending" or "descending".')
if axis not in ['rows', 'columns']:
raise ValueError('The "axis" parameter must be set to "rows" or "columns".')
if axis == 'columns':
if sort == 'ascending':
return df.iloc[np.argsort(df.count(axis='columns').values), :]
elif sort == 'descending':
return df.iloc[np.flipud(np.argsort(df.count(axis='columns').values)), :]
elif axis == 'rows':
if sort == 'ascending':
return df.iloc[:, np.argsort(df.count(axis='rows').values)]
elif sort == 'descending':
return df.iloc[:, np.flipud(np.argsort(df.count(axis='rows').values))]
| (df, sort=None, axis='columns') |
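A matching sketch for the sorter; with axis='columns' (the default) it reorders the rows by how many non-null values each row has:

import pandas as pd
from missingno.utils import nullity_sort

df = pd.DataFrame({"a": [1, None, 3], "b": [None, None, 3]})
# Most complete rows first.
sorted_df = nullity_sort(df, sort="descending", axis="columns")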
23,820 | kagglehub.auth | login | Prompt the user for their Kaggle username and API key and save them globally. | def login(validate_credentials: bool = True) -> None: # noqa: FBT002, FBT001
"""Prompt the user for their Kaggle username and API key and save them globally."""
if _is_in_notebook():
_notebook_login(validate_credentials)
return
else:
username = input("Enter your Kaggle username: ")
api_key = input("Enter your Kaggle API key: ")
set_kaggle_credentials(username=username, api_key=api_key)
if not validate_credentials:
return
_validate_credentials_helper()
| (validate_credentials: bool = True) -> NoneType |
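A usage sketch; outside a notebook this prompts on stdin, inside a notebook it falls back to the widget-based flow:

import kagglehub

kagglehub.login()  # prompts for username and API key, then validates them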
23,821 | kagglehub.models | model_download | Download model files.
Args:
handle: (string) the model handle.
path: (string) Optional path to a file within the model bundle.
force_download: (bool) Optional flag to force download a model, even if it's cached.
Returns:
A string representing the path to the requested model files.
| def model_download(handle: str, path: Optional[str] = None, *, force_download: Optional[bool] = False) -> str:
"""Download model files.
Args:
handle: (string) the model handle.
path: (string) Optional path to a file within the model bundle.
force_download: (bool) Optional flag to force download a model, even if it's cached.
Returns:
A string representing the path to the requested model files.
"""
h = parse_model_handle(handle)
return registry.model_resolver(h, path, force_download=force_download)
| (handle: str, path: Optional[str] = None, *, force_download: Optional[bool] = False) -> str |
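A download sketch; the handle below follows the owner/model/framework/variation pattern and is illustrative:

import kagglehub

path = kagglehub.model_download("google/bert/tensorFlow2/answer-equivalence-bem")
print("Model files downloaded to:", path)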
23,822 | kagglehub.models | model_upload | Upload model files.
Args:
handle: (string) the model handle.
local_model_dir: (string) path to a file in a local directory.
license_name: (string) model license.
version_notes: (string) Optional to write to model versions.
| def model_upload(
handle: str, local_model_dir: str, license_name: Optional[str] = None, version_notes: str = ""
) -> None:
"""Upload model files.
Args:
handle: (string) the model handle.
local_model_dir: (string) path to a file in a local directory.
license_name: (string) model license.
version_notes: (string) Optional to write to model versions.
"""
# parse slug
h = parse_model_handle(handle)
if h.is_versioned():
is_versioned_exception = "The model handle should not include the version"
raise ValueError(is_versioned_exception)
# Create the model if it doesn't already exist
create_model_if_missing(h.owner, h.model)
# Upload the model files to GCS
tokens = upload_files_and_directories(local_model_dir, "model")
# Create a model instance if it doesn't exist, and create a new instance version if an instance exists
create_model_instance_or_version(h, tokens, license_name, version_notes)
| (handle: str, local_model_dir: str, license_name: Optional[str] = None, version_notes: str = '') -> NoneType |
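An upload sketch; the handle, directory, and license string are placeholders:

import kagglehub

kagglehub.model_upload(
    "my-user/my-model/pyTorch/base",  # hypothetical unversioned handle
    local_model_dir="./export",       # hypothetical local directory
    license_name="Apache 2.0",
    version_notes="initial upload",
)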
23,827 | kagglehub.auth | whoami |
Return a dictionary with the username of the authenticated Kaggle user or raise an error if unauthenticated.
| def whoami() -> dict:
"""
Return a dictionary with the username of the authenticated Kaggle user or raise an error if unauthenticated.
"""
try:
credentials = get_kaggle_credentials()
if credentials and credentials.username:
return {"username": credentials.username}
else:
raise UnauthenticatedError()
except Exception as e:
raise UnauthenticatedError() from e
| () -> dict |
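A check that pairs naturally with login(); it raises UnauthenticatedError when no credentials are saved:

import kagglehub

print(kagglehub.whoami())  # e.g. {'username': 'my-user'}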
23,832 | pytoolconfig.pytoolconfig | PyToolConfig | Python Tool Configuration Aggregator. | class PyToolConfig(Generic[DataclassT]):
"""Python Tool Configuration Aggregator."""
sources: list[Source]
tool: str
working_directory: Path
model: type[DataclassT]
fall_through: bool = False
arg_parser: ArgumentParser | None = None
_config_fields: dict[str, ConfigField]
def __init__( # noqa: PLR0913
self,
tool: str,
working_directory: Path,
model: type[DataclassT],
arg_parser: ArgumentParser | None = None,
custom_sources: Sequence[Source] | None = None,
global_config: bool = False,
global_sources: Sequence[Source] | None = None,
fall_through: bool = False,
*args: Any,
**kwargs: Any,
) -> None:
"""Initialize the configuration object.
:param tool: name of the tool to use.
:param working_directory: working directory in use.
:param model: Model of configuration.
        :param arg_parser: Argument Parser.
:param custom_sources: Custom sources
:param global_config: Enable global configuration
:param global_sources: Custom global sources
:param fall_through: Configuration options should fall through between sources.
:param args: Passed to constructor for PyProject
:param kwargs: Passed to constructor for PyProject
"""
assert is_dataclass(model)
self.model = model
self._config_fields = _gather_config_fields(model)
self.tool = tool
self.sources = [PyProject(working_directory, tool, *args, **kwargs)]
if custom_sources:
self.sources.extend(custom_sources)
if global_config:
self.sources.append(PyTool(tool))
if global_sources:
self.sources.extend(global_sources)
self.arg_parser = arg_parser
self.fall_through = fall_through
self._setup_arg_parser()
def parse(self, args: list[str] | None = None) -> DataclassT:
"""Parse the configuration.
:param args: any additional command line overwrites.
"""
configuration = self._parse_sources()
assert isinstance(self.sources[0], PyProject)
universal: UniversalConfig = self.sources[0].universalconfig()
if self.arg_parser:
if args is None:
args = []
parsed = self.arg_parser.parse_args(args)
for name, value in parsed._get_kwargs():
setattr(configuration, name, value)
for name, field in self._config_fields.items():
if field.universal_config:
universal_value = vars(universal)[field.universal_config.name]
if universal_value is not None:
setattr(
configuration,
name,
universal_value,
)
return configuration
def _setup_arg_parser(self) -> None:
if self.arg_parser:
for name, field in self._config_fields.items():
if field.command_line:
flags = field.command_line
self.arg_parser.add_argument(
*flags,
type=field._type,
help=field.description,
default=SUPPRESS,
metavar=name,
dest=name,
)
def _parse_sources(self) -> DataclassT:
configuration = self.model()
if self.fall_through:
for source in reversed(self.sources):
parsed = source.parse()
if parsed is not None:
configuration = _recursive_merge(configuration, parsed)
else:
for source in self.sources:
parsed = source.parse()
if parsed:
return _dict_to_dataclass(self.model, parsed)
return configuration
| (tool: str, working_directory: pathlib.Path, model: type[~DataclassT], arg_parser: argparse.ArgumentParser | None = None, custom_sources: 'Sequence[Source] | None' = None, global_config: 'bool' = False, global_sources: 'Sequence[Source] | None' = None, fall_through: bool = False, *args: 'Any', **kwargs: 'Any') -> 'None' |
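A minimal end-to-end sketch of wiring a model dataclass through PyToolConfig; the tool name and field are illustrative, and the working directory may or may not contain a pyproject.toml:

from dataclasses import dataclass
from pathlib import Path

from pytoolconfig import PyToolConfig, field

@dataclass
class MyConfig:
    line_length: int = field(default=88, description="Maximum line length.")

config = PyToolConfig("mytool", Path.cwd(), MyConfig).parse()
print(config.line_length)  # 88 unless overridden in [tool.mytool]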
23,833 | pytoolconfig.pytoolconfig | __init__ | Initialize the configuration object.
:param tool: name of the tool to use.
:param working_directory: working directory in use.
:param model: Model of configuration.
:param arg_parser: Argument Parser.
:param custom_sources: Custom sources
:param global_config: Enable global configuration
:param global_sources: Custom global sources
:param fall_through: Configuration options should fall through between sources.
:param args: Passed to constructor for PyProject
:param kwargs: Passed to constructor for PyProject
| def __init__( # noqa: PLR0913
self,
tool: str,
working_directory: Path,
model: type[DataclassT],
arg_parser: ArgumentParser | None = None,
custom_sources: Sequence[Source] | None = None,
global_config: bool = False,
global_sources: Sequence[Source] | None = None,
fall_through: bool = False,
*args: Any,
**kwargs: Any,
) -> None:
"""Initialize the configuration object.
:param tool: name of the tool to use.
:param working_directory: working directory in use.
:param model: Model of configuration.
    :param arg_parser: Argument Parser.
:param custom_sources: Custom sources
:param global_config: Enable global configuration
:param global_sources: Custom global sources
:param fall_through: Configuration options should fall through between sources.
:param args: Passed to constructor for PyProject
:param kwargs: Passed to constructor for PyProject
"""
assert is_dataclass(model)
self.model = model
self._config_fields = _gather_config_fields(model)
self.tool = tool
self.sources = [PyProject(working_directory, tool, *args, **kwargs)]
if custom_sources:
self.sources.extend(custom_sources)
if global_config:
self.sources.append(PyTool(tool))
if global_sources:
self.sources.extend(global_sources)
self.arg_parser = arg_parser
self.fall_through = fall_through
self._setup_arg_parser()
| (self, tool: str, working_directory: pathlib.Path, model: type[~DataclassT], arg_parser: Optional[argparse.ArgumentParser] = None, custom_sources: Optional[Sequence[pytoolconfig.sources.source.Source]] = None, global_config: bool = False, global_sources: Optional[Sequence[pytoolconfig.sources.source.Source]] = None, fall_through: bool = False, *args: Any, **kwargs: Any) -> NoneType |
23,834 | pytoolconfig.pytoolconfig | _parse_sources | null | def _parse_sources(self) -> DataclassT:
configuration = self.model()
if self.fall_through:
for source in reversed(self.sources):
parsed = source.parse()
if parsed is not None:
configuration = _recursive_merge(configuration, parsed)
else:
for source in self.sources:
parsed = source.parse()
if parsed:
return _dict_to_dataclass(self.model, parsed)
return configuration
| (self) -> ~DataclassT |
23,835 | pytoolconfig.pytoolconfig | _setup_arg_parser | null | def _setup_arg_parser(self) -> None:
if self.arg_parser:
for name, field in self._config_fields.items():
if field.command_line:
flags = field.command_line
self.arg_parser.add_argument(
*flags,
type=field._type,
help=field.description,
default=SUPPRESS,
metavar=name,
dest=name,
)
| (self) -> NoneType |
23,836 | pytoolconfig.pytoolconfig | parse | Parse the configuration.
:param args: any additional command line overwrites.
| def parse(self, args: list[str] | None = None) -> DataclassT:
"""Parse the configuration.
:param args: any additional command line overwrites.
"""
configuration = self._parse_sources()
assert isinstance(self.sources[0], PyProject)
universal: UniversalConfig = self.sources[0].universalconfig()
if self.arg_parser:
if args is None:
args = []
parsed = self.arg_parser.parse_args(args)
for name, value in parsed._get_kwargs():
setattr(configuration, name, value)
for name, field in self._config_fields.items():
if field.universal_config:
universal_value = vars(universal)[field.universal_config.name]
if universal_value is not None:
setattr(
configuration,
name,
universal_value,
)
return configuration
| (self, args: Optional[list[str]] = None) -> ~DataclassT |
23,837 | pytoolconfig.types | UniversalKey | See universal config documentation. | class UniversalKey(Enum):
"""See universal config documentation."""
formatter = auto()
max_line_length = auto()
min_py_version = auto()
max_py_version = auto()
dependencies = auto()
optional_dependencies = auto()
version = auto()
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
23,839 | pytoolconfig.fields | field | Create a dataclass field with metadata. | def field( # noqa: PLR0913
default: T | _MISSINGTYPE = _MISSINGTYPE.MISSING,
description: str | None = None,
command_line: tuple[str] | None = None,
universal_config: UniversalKey | None = None,
default_factory: Callable[[], T] | _MISSINGTYPE = _MISSINGTYPE.MISSING,
init: bool = True,
) -> T:
"""Create a dataclass field with metadata."""
metadata = {
_METADATA_KEY: ConfigField(
description=description,
universal_config=universal_config,
command_line=command_line,
_default=default,
),
}
if default_factory is not MISSING:
metadata[_METADATA_KEY]._default = default_factory()
return dataclasses.field(
default_factory=default_factory,
metadata=metadata,
init=init,
)
assert default is not MISSING
return dataclasses.field(default=default, metadata=metadata, init=init)
| (default: Union[~T, pytoolconfig.fields._MISSINGTYPE] = <_MISSINGTYPE.MISSING: 1>, description: Optional[str] = None, command_line: Optional[tuple[str]] = None, universal_config: Optional[pytoolconfig.types.UniversalKey] = None, default_factory: Union[Callable[[], ~T], pytoolconfig.fields._MISSINGTYPE] = <_MISSINGTYPE.MISSING: 1>, init: bool = True) -> ~T |
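A sketch of both default styles accepted by field(), including a universal-config binding (the model and field names are illustrative):

from dataclasses import dataclass

from pytoolconfig import field
from pytoolconfig.types import UniversalKey

@dataclass
class Model:
    # Bound to the shared max_line_length key when a source provides it.
    max_line_length: int = field(default=79, universal_config=UniversalKey.max_line_length)
    # Mutable defaults go through default_factory, as in plain dataclasses.
    targets: list = field(default_factory=list, description="Paths to check.")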
23,851 | astropy | astronomical_constants |
The version of astronomical constants to use.
| class astronomical_constants(base_constants_version):
"""
The version of astronomical constants to use.
"""
# Maintainers: update when new constants are added
_value = "iau2015"
_versions = dict(
iau2015="iau2015",
iau2012="iau2012",
astropyconst40="iau2015",
astropyconst20="iau2015",
astropyconst13="iau2012",
)
| () |
23,852 | astropy.utils.state | __init__ | null | def __init__(self):
raise RuntimeError("This class is a singleton. Do not instantiate.")
| (self) |
23,855 | astropy.utils.misc | find_api_page |
Determines the URL of the API page for the specified object, and
optionally opens that page in a web browser.
.. note::
You must be connected to the internet for this to function even if
``openinbrowser`` is `False`, unless you provide a local version of
the documentation to ``version`` (e.g., ``file:///path/to/docs``).
Parameters
----------
obj
The object to open the docs for or its fully-qualified name
(as a str).
version : str
The doc version - either a version number like '0.1', 'dev' for
the development/latest docs, or a URL to point to a specific
location that should be the *base* of the documentation. Defaults to
'dev' if you aren't on a release; otherwise, to the version you
are on.
openinbrowser : bool
If `True`, the `webbrowser` package will be used to open the doc
page in a new web browser window.
timeout : number, optional
The number of seconds to wait before timing-out the query to
the astropy documentation. If not given, the default python
stdlib timeout will be used.
Returns
-------
url : str
The loaded URL
Raises
------
ValueError
If the documentation can't be found
| def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
"""
Determines the URL of the API page for the specified object, and
    optionally opens that page in a web browser.
.. note::
You must be connected to the internet for this to function even if
``openinbrowser`` is `False`, unless you provide a local version of
the documentation to ``version`` (e.g., ``file:///path/to/docs``).
Parameters
----------
obj
The object to open the docs for or its fully-qualified name
(as a str).
version : str
The doc version - either a version number like '0.1', 'dev' for
the development/latest docs, or a URL to point to a specific
        location that should be the *base* of the documentation. Defaults to
        'dev' if you aren't on a release; otherwise, to the version you
        are on.
openinbrowser : bool
If `True`, the `webbrowser` package will be used to open the doc
page in a new web browser window.
timeout : number, optional
The number of seconds to wait before timing-out the query to
the astropy documentation. If not given, the default python
stdlib timeout will be used.
Returns
-------
url : str
The loaded URL
Raises
------
ValueError
If the documentation can't be found
"""
import webbrowser
from zlib import decompress
from astropy.utils.data import get_readable_fileobj
if (
not isinstance(obj, str)
and hasattr(obj, "__module__")
and hasattr(obj, "__name__")
):
obj = obj.__module__ + "." + obj.__name__
elif inspect.ismodule(obj):
obj = obj.__name__
if version is None:
from astropy import version
if version.release:
version = "v" + version.version
else:
version = "dev"
if "://" in version:
if version.endswith("index.html"):
baseurl = version[:-10]
elif version.endswith("/"):
baseurl = version
else:
baseurl = version + "/"
elif version == "dev" or version == "latest":
baseurl = "http://devdocs.astropy.org/"
else:
baseurl = f"https://docs.astropy.org/en/{version}/"
# Custom request headers; see
# https://github.com/astropy/astropy/issues/8990
url = baseurl + "objects.inv"
headers = {"User-Agent": f"Astropy/{version}"}
with get_readable_fileobj(
url, encoding="binary", remote_timeout=timeout, http_headers=headers
) as uf:
oiread = uf.read()
# need to first read/remove the first four lines, which have info before
# the compressed section with the actual object inventory
idx = -1
headerlines = []
for _ in range(4):
oldidx = idx
idx = oiread.index(b"\n", oldidx + 1)
headerlines.append(oiread[(oldidx + 1) : idx].decode("utf-8"))
# intersphinx version line, project name, and project version
ivers, proj, vers, compr = headerlines
if "The remainder of this file is compressed using zlib" not in compr:
raise ValueError(
f"The file downloaded from {baseurl}objects.inv does not seem to be"
"the usual Sphinx objects.inv format. Maybe it "
"has changed?"
)
compressed = oiread[(idx + 1) :]
decompressed = decompress(compressed).decode("utf-8")
resurl = None
for l in decompressed.strip().splitlines():
ls = l.split()
name = ls[0]
loc = ls[3]
if loc.endswith("$"):
loc = loc[:-1] + name
if name == obj:
resurl = baseurl + loc
break
if resurl is None:
raise ValueError(f"Could not find the docs for the object {obj}")
elif openinbrowser:
webbrowser.open(resurl)
return resurl
| (obj, version=None, openinbrowser=True, timeout=None) |
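A lookup sketch (requires internet); with openinbrowser=False the function returns the URL without opening anything:

from astropy.table import Table
from astropy.utils.misc import find_api_page

url = find_api_page(Table, openinbrowser=False)
print(url)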
23,857 | astropy | online_help |
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
| def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
import webbrowser
from urllib.parse import urlencode
url = online_docs_root + f"search.html?{urlencode({'q': query})}"
webbrowser.open(url)
| (query) |
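A one-line sketch; this simply URL-encodes the query and opens the docs search page in the default browser:

import astropy

astropy.online_help("coordinate transformations")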
23,858 | astropy | physical_constants |
The version of physical constants to use.
| class physical_constants(base_constants_version):
"""
The version of physical constants to use.
"""
# Maintainers: update when new constants are added
_value = "codata2018"
_versions = dict(
codata2018="codata2018",
codata2014="codata2014",
codata2010="codata2010",
astropyconst40="codata2018",
astropyconst20="codata2014",
astropyconst13="codata2010",
)
| () |
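A sketch of switching constant versions via these ScienceState subclasses; note that, as an assumption about import-order behavior, this must run before astropy.units or astropy.constants are first imported, otherwise astropy raises an error:

import astropy

# Version keys per the _versions mappings in the classes above.
astropy.physical_constants.set("codata2014")
astropy.astronomical_constants.set("iau2012")

from astropy import constants as const
print(const.G)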
23,860 | astropy | test |
Run the tests for the package.
This method builds arguments for and then calls ``pytest.main``.
Parameters
----------
package : str, optional
The name of a specific package to test, e.g. 'io.fits' or
'utils'. Accepts comma separated string to specify multiple
packages. If nothing is specified all default tests are run.
args : str, optional
Additional arguments to be passed to ``pytest.main`` in the ``args``
keyword argument.
docs_path : str, optional
The path to the documentation .rst files.
parallel : int or 'auto', optional
When provided, run the tests in parallel on the specified
number of CPUs. If parallel is ``'auto'``, it will use all
the cores on the machine. Requires the ``pytest-xdist`` plugin.
pastebin : ('failed', 'all', None), optional
Convenience option for turning on pytest pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
pdb : bool, optional
Turn on PDB post-mortem analysis for failing tests. Same as
specifying ``--pdb`` in ``args``.
pep8 : bool, optional
Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
plugins : list, optional
Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
argument.
remote_data : {'none', 'astropy', 'any'}, optional
Controls whether to run tests marked with @pytest.mark.remote_data. This can be
set to run no tests with remote data (``none``), only ones that use
data from http://data.astropy.org (``astropy``), or all tests that
use remote data (``any``). The default is ``none``.
repeat : `int`, optional
If set, specifies how many times each test should be run. This is
useful for diagnosing sporadic failures.
skip_docs : `bool`, optional
When `True`, skips running the doctests in the .rst files.
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
verbose : bool, optional
Convenience option to turn on verbose output from pytest. Passing
True is the same as specifying ``-v`` in ``args``.
| null | (**kwargs) |
23,887 | traitlets.config.configurable | Configurable | null | class Configurable(HasTraits):
config = Instance(Config, (), {})
parent = Instance("traitlets.config.configurable.Configurable", allow_none=True)
def __init__(self, **kwargs: t.Any) -> None:
"""Create a configurable given a config config.
Parameters
----------
config : Config
If this is empty, default values are used. If config is a
:class:`Config` instance, it will be used to configure the
instance.
parent : Configurable instance, optional
The parent Configurable instance of this object.
Notes
-----
Subclasses of Configurable must call the :meth:`__init__` method of
:class:`Configurable` *before* doing anything else and using
:func:`super`::
class MyConfigurable(Configurable):
def __init__(self, config=None):
super(MyConfigurable, self).__init__(config=config)
# Then any other code you need to finish initialization.
This ensures that instances will be configured properly.
"""
parent = kwargs.pop("parent", None)
if parent is not None:
# config is implied from parent
if kwargs.get("config", None) is None:
kwargs["config"] = parent.config
self.parent = parent
config = kwargs.pop("config", None)
# load kwarg traits, other than config
super().__init__(**kwargs)
# record traits set by config
config_override_names = set()
def notice_config_override(change: Bunch) -> None:
"""Record traits set by both config and kwargs.
They will need to be overridden again after loading config.
"""
if change.name in kwargs:
config_override_names.add(change.name)
self.observe(notice_config_override)
# load config
if config is not None:
# We used to deepcopy, but for now we are trying to just save
# by reference. This *could* have side effects as all components
# will share config. In fact, I did find such a side effect in
# _config_changed below. If a config attribute value was a mutable type
# all instances of a component were getting the same copy, effectively
# making that a class attribute.
# self.config = deepcopy(config)
self.config = config
else:
# allow _config_default to return something
self._load_config(self.config)
self.unobserve(notice_config_override)
for name in config_override_names:
setattr(self, name, kwargs[name])
# -------------------------------------------------------------------------
# Static trait notifications
# -------------------------------------------------------------------------
@classmethod
def section_names(cls) -> list[str]:
"""return section names as a list"""
return [
c.__name__
for c in reversed(cls.__mro__)
if issubclass(c, Configurable) and issubclass(cls, c)
]
def _find_my_config(self, cfg: Config) -> t.Any:
"""extract my config from a global Config object
will construct a Config object of only the config values that apply to me
based on my mro(), as well as those of my parent(s) if they exist.
If I am Bar and my parent is Foo, and their parent is Tim,
this will return merge following config sections, in this order::
[Bar, Foo.Bar, Tim.Foo.Bar]
With the last item being the highest priority.
"""
cfgs = [cfg]
if self.parent:
cfgs.append(self.parent._find_my_config(cfg))
my_config = Config()
for c in cfgs:
for sname in self.section_names():
# Don't do a blind getattr as that would cause the config to
# dynamically create the section with name Class.__name__.
if c._has_section(sname):
my_config.merge(c[sname])
return my_config
def _load_config(
self,
cfg: Config,
section_names: list[str] | None = None,
traits: dict[str, TraitType[t.Any, t.Any]] | None = None,
) -> None:
"""load traits from a Config object"""
if traits is None:
traits = self.traits(config=True)
if section_names is None:
section_names = self.section_names()
my_config = self._find_my_config(cfg)
# hold trait notifications until after all config has been loaded
with self.hold_trait_notifications():
for name, config_value in my_config.items():
if name in traits:
if isinstance(config_value, LazyConfigValue):
# ConfigValue is a wrapper for using append / update on containers
# without having to copy the initial value
initial = getattr(self, name)
config_value = config_value.get_value(initial)
elif isinstance(config_value, DeferredConfig):
# DeferredConfig tends to come from CLI/environment variables
config_value = config_value.get_value(traits[name])
# We have to do a deepcopy here if we don't deepcopy the entire
# config object. If we don't, a mutable config_value will be
# shared by all instances, effectively making it a class attribute.
setattr(self, name, deepcopy(config_value))
elif not _is_section_key(name) and not isinstance(config_value, Config):
from difflib import get_close_matches
if isinstance(self, LoggingConfigurable):
assert self.log is not None
warn = self.log.warning
else:
def warn(msg: t.Any) -> None:
return warnings.warn(msg, UserWarning, stacklevel=9)
matches = get_close_matches(name, traits)
msg = f"Config option `{name}` not recognized by `{self.__class__.__name__}`."
if len(matches) == 1:
msg += f" Did you mean `{matches[0]}`?"
elif len(matches) >= 1:
msg += " Did you mean one of: `{matches}`?".format(
matches=", ".join(sorted(matches))
)
warn(msg)
@observe("config")
@observe_compat
def _config_changed(self, change: Bunch) -> None:
"""Update all the class traits having ``config=True`` in metadata.
For any class trait with a ``config`` metadata attribute that is
``True``, we update the trait with the value of the corresponding
config entry.
"""
# Get all traits with a config metadata entry that is True
traits = self.traits(config=True)
# We auto-load config section for this class as well as any parent
# classes that are Configurable subclasses. This starts with Configurable
# and works down the mro loading the config for each section.
section_names = self.section_names()
self._load_config(change.new, traits=traits, section_names=section_names)
def update_config(self, config: Config) -> None:
"""Update config and load the new values"""
# traitlets prior to 4.2 created a copy of self.config in order to trigger change events.
# Some projects (IPython < 5) relied upon one side effect of this,
# that self.config prior to update_config was not modified in-place.
# For backward-compatibility, we must ensure that self.config
# is a new object and not modified in-place,
# but config consumers should not rely on this behavior.
self.config = deepcopy(self.config)
# load config
self._load_config(config)
# merge it into self.config
self.config.merge(config)
# TODO: trigger change event if/when dict-update change events take place
# DO NOT trigger full trait-change
@classmethod
def class_get_help(cls, inst: HasTraits | None = None) -> str:
"""Get the help string for this class in ReST format.
If `inst` is given, its current trait values will be used in place of
class defaults.
"""
assert inst is None or isinstance(inst, cls)
final_help = []
base_classes = ", ".join(p.__name__ for p in cls.__bases__)
final_help.append(f"{cls.__name__}({base_classes}) options")
final_help.append(len(final_help[0]) * "-")
for _, v in sorted(cls.class_traits(config=True).items()):
help = cls.class_get_trait_help(v, inst)
final_help.append(help)
return "\n".join(final_help)
@classmethod
def class_get_trait_help(
cls,
trait: TraitType[t.Any, t.Any],
inst: HasTraits | None = None,
helptext: str | None = None,
) -> str:
"""Get the helptext string for a single trait.
:param inst:
If given, its current trait values will be used in place of
the class default.
:param helptext:
If not given, uses the `help` attribute of the current trait.
"""
assert inst is None or isinstance(inst, cls)
lines = []
header = f"--{cls.__name__}.{trait.name}"
if isinstance(trait, (Container, Dict)):
multiplicity = trait.metadata.get("multiplicity", "append")
if isinstance(trait, Dict):
sample_value = "<key-1>=<value-1>"
else:
sample_value = "<%s-item-1>" % trait.__class__.__name__.lower()
if multiplicity == "append":
header = f"{header}={sample_value}..."
else:
header = f"{header} {sample_value}..."
else:
header = f"{header}=<{trait.__class__.__name__}>"
# header = "--%s.%s=<%s>" % (cls.__name__, trait.name, trait.__class__.__name__)
lines.append(header)
if helptext is None:
helptext = trait.help
if helptext != "":
helptext = "\n".join(wrap_paragraphs(helptext, 76))
lines.append(indent(helptext))
if "Enum" in trait.__class__.__name__:
# include Enum choices
lines.append(indent("Choices: %s" % trait.info()))
if inst is not None:
lines.append(indent(f"Current: {getattr(inst, trait.name or '')!r}"))
else:
try:
dvr = trait.default_value_repr()
except Exception:
dvr = None # ignore defaults we can't construct
if dvr is not None:
if len(dvr) > 64:
dvr = dvr[:61] + "..."
lines.append(indent("Default: %s" % dvr))
return "\n".join(lines)
@classmethod
def class_print_help(cls, inst: HasTraits | None = None) -> None:
"""Get the help string for a single trait and print it."""
print(cls.class_get_help(inst)) # noqa: T201
@classmethod
def _defining_class(
cls, trait: TraitType[t.Any, t.Any], classes: t.Sequence[type[HasTraits]]
) -> type[Configurable]:
"""Get the class that defines a trait
For reducing redundant help output in config files.
Returns the current class if:
- the trait is defined on this class, or
- the class where it is defined would not be in the config file
Parameters
----------
trait : Trait
The trait to look for
classes : list
The list of other classes to consider for redundancy.
Will return `cls` even if it is not defined on `cls`
if the defining class is not in `classes`.
"""
defining_cls = cls
assert trait.name is not None
for parent in cls.mro():
if (
issubclass(parent, Configurable)
and parent in classes
and parent.class_own_traits(config=True).get(trait.name, None) is trait
):
defining_cls = parent
return defining_cls
@classmethod
def class_config_section(cls, classes: t.Sequence[type[HasTraits]] | None = None) -> str:
"""Get the config section for this class.
Parameters
----------
classes : list, optional
The list of other classes in the config file.
Used to reduce redundant information.
"""
def c(s: str) -> str:
"""return a commented, wrapped block."""
s = "\n\n".join(wrap_paragraphs(s, 78))
return "## " + s.replace("\n", "\n# ")
# section header
breaker = "#" + "-" * 78
parent_classes = ", ".join(p.__name__ for p in cls.__bases__ if issubclass(p, Configurable))
s = f"# {cls.__name__}({parent_classes}) configuration"
lines = [breaker, s, breaker]
# get the description trait
desc = cls.class_traits().get("description")
if desc:
desc = desc.default_value
if not desc:
# no description from trait, use __doc__
desc = getattr(cls, "__doc__", "") # type:ignore[arg-type]
if desc:
lines.append(c(desc)) # type:ignore[arg-type]
lines.append("")
for name, trait in sorted(cls.class_traits(config=True).items()):
default_repr = trait.default_value_repr()
if classes:
defining_class = cls._defining_class(trait, classes)
else:
defining_class = cls
if defining_class is cls:
# cls owns the trait, show full help
if trait.help:
lines.append(c(trait.help))
if "Enum" in type(trait).__name__:
# include Enum choices
lines.append("# Choices: %s" % trait.info())
lines.append("# Default: %s" % default_repr)
else:
# Trait appears multiple times and isn't defined here.
# Truncate help to first line + "See also Original.trait"
if trait.help:
lines.append(c(trait.help.split("\n", 1)[0]))
lines.append(f"# See also: {defining_class.__name__}.{name}")
lines.append(f"# c.{cls.__name__}.{name} = {default_repr}")
lines.append("")
return "\n".join(lines)
@classmethod
def class_config_rst_doc(cls) -> str:
"""Generate rST documentation for this class' config options.
Excludes traits defined on parent classes.
"""
lines = []
classname = cls.__name__
for _, trait in sorted(cls.class_traits(config=True).items()):
ttype = trait.__class__.__name__
if not trait.name:
continue
termline = classname + "." + trait.name
# Choices or type
if "Enum" in ttype:
# include Enum choices
termline += " : " + trait.info_rst() # type:ignore[attr-defined]
else:
termline += " : " + ttype
lines.append(termline)
# Default value
try:
dvr = trait.default_value_repr()
except Exception:
dvr = None # ignore defaults we can't construct
if dvr is not None:
if len(dvr) > 64:
dvr = dvr[:61] + "..."
# Double up backslashes, so they get to the rendered docs
dvr = dvr.replace("\\n", "\\\\n")
lines.append(indent("Default: ``%s``" % dvr))
lines.append("")
help = trait.help or "No description"
lines.append(indent(dedent(help)))
# Blank line
lines.append("")
return "\n".join(lines)
| (**kwargs: 't.Any') -> 'None' |
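A minimal sketch of configuring a Configurable subclass through a Config object (the class and trait names are illustrative):

from traitlets import Int
from traitlets.config import Config, Configurable

class Worker(Configurable):
    retries = Int(3, help="How many times to retry.").tag(config=True)

c = Config()
c.Worker.retries = 5
w = Worker(config=c)
print(w.retries)  # 5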
23,889 | traitlets.config.configurable | __init__ | Create a configurable given a config config.
Parameters
----------
config : Config
If this is empty, default values are used. If config is a
:class:`Config` instance, it will be used to configure the
instance.
parent : Configurable instance, optional
The parent Configurable instance of this object.
Notes
-----
Subclasses of Configurable must call the :meth:`__init__` method of
:class:`Configurable` *before* doing anything else and using
:func:`super`::
class MyConfigurable(Configurable):
def __init__(self, config=None):
super(MyConfigurable, self).__init__(config=config)
# Then any other code you need to finish initialization.
This ensures that instances will be configured properly.
| def __init__(self, **kwargs: t.Any) -> None:
"""Create a configurable given a config config.
Parameters
----------
config : Config
If this is empty, default values are used. If config is a
:class:`Config` instance, it will be used to configure the
instance.
parent : Configurable instance, optional
The parent Configurable instance of this object.
Notes
-----
Subclasses of Configurable must call the :meth:`__init__` method of
:class:`Configurable` *before* doing anything else and using
:func:`super`::
class MyConfigurable(Configurable):
def __init__(self, config=None):
super(MyConfigurable, self).__init__(config=config)
# Then any other code you need to finish initialization.
This ensures that instances will be configured properly.
"""
parent = kwargs.pop("parent", None)
if parent is not None:
# config is implied from parent
if kwargs.get("config", None) is None:
kwargs["config"] = parent.config
self.parent = parent
config = kwargs.pop("config", None)
# load kwarg traits, other than config
super().__init__(**kwargs)
# record traits set by config
config_override_names = set()
def notice_config_override(change: Bunch) -> None:
"""Record traits set by both config and kwargs.
They will need to be overridden again after loading config.
"""
if change.name in kwargs:
config_override_names.add(change.name)
self.observe(notice_config_override)
# load config
if config is not None:
# We used to deepcopy, but for now we are trying to just save
# by reference. This *could* have side effects as all components
# will share config. In fact, I did find such a side effect in
# _config_changed below. If a config attribute value was a mutable type
# all instances of a component were getting the same copy, effectively
# making that a class attribute.
# self.config = deepcopy(config)
self.config = config
else:
# allow _config_default to return something
self._load_config(self.config)
self.unobserve(notice_config_override)
for name in config_override_names:
setattr(self, name, kwargs[name])
| (self, **kwargs: Any) -> NoneType |
23,893 | traitlets.config.configurable | _find_my_config | extract my config from a global Config object
will construct a Config object of only the config values that apply to me
based on my mro(), as well as those of my parent(s) if they exist.
If I am Bar and my parent is Foo, and their parent is Tim,
this will return merge following config sections, in this order::
[Bar, Foo.Bar, Tim.Foo.Bar]
With the last item being the highest priority.
| def _find_my_config(self, cfg: Config) -> t.Any:
"""extract my config from a global Config object
will construct a Config object of only the config values that apply to me
based on my mro(), as well as those of my parent(s) if they exist.
If I am Bar and my parent is Foo, and their parent is Tim,
this will return merge following config sections, in this order::
[Bar, Foo.Bar, Tim.Foo.Bar]
With the last item being the highest priority.
"""
cfgs = [cfg]
if self.parent:
cfgs.append(self.parent._find_my_config(cfg))
my_config = Config()
for c in cfgs:
for sname in self.section_names():
# Don't do a blind getattr as that would cause the config to
# dynamically create the section with name Class.__name__.
if c._has_section(sname):
my_config.merge(c[sname])
return my_config
| (self, cfg: traitlets.config.loader.Config) -> Any |
23,895 | traitlets.config.configurable | _load_config | load traits from a Config object | def _load_config(
self,
cfg: Config,
section_names: list[str] | None = None,
traits: dict[str, TraitType[t.Any, t.Any]] | None = None,
) -> None:
"""load traits from a Config object"""
if traits is None:
traits = self.traits(config=True)
if section_names is None:
section_names = self.section_names()
my_config = self._find_my_config(cfg)
# hold trait notifications until after all config has been loaded
with self.hold_trait_notifications():
for name, config_value in my_config.items():
if name in traits:
if isinstance(config_value, LazyConfigValue):
# ConfigValue is a wrapper for using append / update on containers
# without having to copy the initial value
initial = getattr(self, name)
config_value = config_value.get_value(initial)
elif isinstance(config_value, DeferredConfig):
# DeferredConfig tends to come from CLI/environment variables
config_value = config_value.get_value(traits[name])
# We have to do a deepcopy here if we don't deepcopy the entire
# config object. If we don't, a mutable config_value will be
# shared by all instances, effectively making it a class attribute.
setattr(self, name, deepcopy(config_value))
elif not _is_section_key(name) and not isinstance(config_value, Config):
from difflib import get_close_matches
if isinstance(self, LoggingConfigurable):
assert self.log is not None
warn = self.log.warning
else:
def warn(msg: t.Any) -> None:
return warnings.warn(msg, UserWarning, stacklevel=9)
matches = get_close_matches(name, traits)
msg = f"Config option `{name}` not recognized by `{self.__class__.__name__}`."
if len(matches) == 1:
msg += f" Did you mean `{matches[0]}`?"
elif len(matches) >= 1:
msg += " Did you mean one of: `{matches}`?".format(
matches=", ".join(sorted(matches))
)
warn(msg)
| (self, cfg: traitlets.config.loader.Config, section_names: Optional[list[str]] = None, traits: Optional[dict[str, traitlets.traitlets.TraitType[Any, Any]]] = None) -> NoneType |
23,916 | traitlets.config.configurable | update_config | Update config and load the new values | def update_config(self, config: Config) -> None:
"""Update config and load the new values"""
# traitlets prior to 4.2 created a copy of self.config in order to trigger change events.
# Some projects (IPython < 5) relied upon one side effect of this,
# that self.config prior to update_config was not modified in-place.
# For backward-compatibility, we must ensure that self.config
# is a new object and not modified in-place,
# but config consumers should not rely on this behavior.
self.config = deepcopy(self.config)
# load config
self._load_config(config)
# merge it into self.config
self.config.merge(config)
# TODO: trigger change event if/when dict-update change events take place
# DO NOT trigger full trait-change
| (self, config: traitlets.config.loader.Config) -> NoneType |
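A follow-up sketch for update_config; the new values load immediately and are merged into self.config (names again illustrative):

from traitlets import Int
from traitlets.config import Config, Configurable

class Worker(Configurable):
    retries = Int(3).tag(config=True)

w = Worker()
override = Config()
override.Worker.retries = 7
w.update_config(override)
print(w.retries)  # 7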
23,947 | jupyterlab_git.git | Git |
A single parent class containing all of the individual git methods in it.
| class Git:
"""
A single parent class containing all of the individual git methods in it.
"""
_GIT_CREDENTIAL_CACHE_DAEMON_PROCESS: subprocess.Popen = None
def __init__(self, config=None):
self._config = config
self._execute_timeout = (
20.0 if self._config is None else self._config.git_command_timeout
)
def __del__(self):
if self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS:
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.terminate()
async def __execute(
self,
cmdline: "List[str]",
cwd: "str",
env: "Optional[Dict[str, str]]" = None,
username: "Optional[str]" = None,
password: "Optional[str]" = None,
is_binary=False,
) -> "Tuple[int, str, str]":
return await execute(
cmdline,
cwd=cwd,
timeout=self._execute_timeout,
env=env,
username=username,
password=password,
is_binary=is_binary,
)
async def config(self, path, **kwargs):
"""Get or set Git options.
If no kwargs, all options are returned. Otherwise kwargs are set.
"""
response = {"code": 1}
if len(kwargs):
output = []
for k, v in kwargs.items():
cmd = ["git", "config", "--add", k, v]
code, out, err = await self.__execute(cmd, cwd=path)
output.append(out.strip())
response["code"] = code
if code != 0:
response["command"] = " ".join(cmd)
response["message"] = err.strip()
return response
response["message"] = "\n".join(output).strip()
else:
cmd = ["git", "config", "--list"]
code, output, error = await self.__execute(cmd, cwd=path)
response = {"code": code}
if code != 0:
response["command"] = " ".join(cmd)
response["message"] = error.strip()
else:
raw = output.strip()
s = CONFIG_PATTERN.split(raw)
response["options"] = {k: v for k, v in zip(s[1::2], s[2::2])}
return response
async def changed_files(self, path, base=None, remote=None, single_commit=None):
"""Gets the list of changed files between two Git refs, or the files changed in a single commit
There are two reserved "refs" for the base
1. WORKING : Represents the Git working tree
2. INDEX: Represents the Git staging area / index
Keyword Arguments:
single_commit {string} -- The single commit ref
base {string} -- the base Git ref
remote {string} -- the remote Git ref
Returns:
dict -- the response of format {
"code": int, # Command status code
"files": [string, string], # List of files changed.
"message": [string] # Error response
}
"""
if single_commit:
cmd = ["git", "diff", single_commit, "--name-only", "-z"]
elif base and remote:
if base == "WORKING":
cmd = ["git", "diff", remote, "--name-only", "-z"]
elif base == "INDEX":
cmd = ["git", "diff", "--staged", remote, "--name-only", "-z"]
else:
cmd = ["git", "diff", base, remote, "--name-only", "-z"]
else:
raise tornado.web.HTTPError(
400, "Either single_commit or (base and remote) must be provided"
)
response = {}
try:
code, output, error = await self.__execute(cmd, cwd=path)
except subprocess.CalledProcessError as e:
response["code"] = e.returncode
response["message"] = e.output.decode("utf-8")
else:
response["code"] = code
if code != 0:
response["command"] = " ".join(cmd)
response["message"] = error
else:
response["files"] = strip_and_split(output)
return response
async def clone(self, path, repo_url, auth=None, versioning=True, submodules=False):
"""
Execute `git clone`.
When no auth is provided, disables prompts for the password to avoid the terminal hanging.
        When auth is provided, awaits the username/password prompts and sends the credentials.
:param path: the directory where the clone will be performed.
:param repo_url: the URL of the repository to be cloned.
:param auth: OPTIONAL dictionary with 'username' and 'password' fields
:param versioning: OPTIONAL whether to clone or download a snapshot of the remote repository; default clone
:param submodules: OPTIONAL whether to clone submodules content; default False
:return: response with status code and error message.
"""
env = os.environ.copy()
cmd = ["git", "clone"]
if not versioning:
cmd.append("--depth=1")
current_content = set(os.listdir(path))
if submodules:
cmd.append("--recurse-submodules")
cmd.append(unquote(repo_url))
if auth:
if auth.get("cache_credentials"):
await self.ensure_credential_helper(path)
env["GIT_TERMINAL_PROMPT"] = "1"
cmd.append("-q")
code, output, error = await self.__execute(
cmd,
username=auth["username"],
password=auth["password"],
cwd=path,
env=env,
)
else:
env["GIT_TERMINAL_PROMPT"] = "0"
code, output, error = await self.__execute(
cmd,
cwd=path,
env=env,
)
if not versioning:
new_content = set(os.listdir(path))
directory = (new_content - current_content).pop()
shutil.rmtree(f"{path}/{directory}/.git")
response = {"code": code, "message": output.strip()}
if code != 0:
response["message"] = error.strip()
return response
async def fetch(self, path, auth=None):
"""
Execute git fetch command
"""
cwd = path
# Start by fetching to get accurate ahead/behind status
cmd = [
"git",
"fetch",
"--all",
"--prune",
] # Run prune by default to help beginners
env = os.environ.copy()
if auth:
if auth.get("cache_credentials"):
await self.ensure_credential_helper(path)
env["GIT_TERMINAL_PROMPT"] = "1"
code, _, fetch_error = await self.__execute(
cmd,
cwd=cwd,
username=auth["username"],
password=auth["password"],
env=env,
)
else:
env["GIT_TERMINAL_PROMPT"] = "0"
code, _, fetch_error = await self.__execute(cmd, cwd=cwd, env=env)
result = {
"code": code,
}
if code != 0:
result["command"] = " ".join(cmd)
result["error"] = fetch_error
result["message"] = fetch_error
return result
async def get_nbdiff(
self, prev_content: str, curr_content: str, base_content=None
) -> dict:
"""Compute the diff between two notebooks.
Args:
prev_content: Notebook previous content
curr_content: Notebook current content
base_content: Notebook base content - only passed during a merge conflict
Returns:
if not base_content:
{"base": Dict, "diff": Dict}
else:
{"base": Dict, "merge_decisions": Dict}
"""
def read_notebook(content):
if not content:
return nbformat.versions[nbformat.current_nbformat].new_notebook()
if isinstance(content, dict):
# Content may come from model as a dict directly
return (
nbformat.versions[
content.get("nbformat", nbformat.current_nbformat)
]
.nbjson.JSONReader()
.to_notebook(content)
)
else:
return nbformat.reads(content, as_version=4)
# TODO Fix this in nbdime
def remove_cell_ids(nb):
for cell in nb.cells:
cell.pop("id", None)
return nb
current_loop = tornado.ioloop.IOLoop.current()
prev_nb = await current_loop.run_in_executor(None, read_notebook, prev_content)
curr_nb = await current_loop.run_in_executor(None, read_notebook, curr_content)
if base_content:
base_nb = await current_loop.run_in_executor(
None, read_notebook, base_content
)
# Only remove ids from merge_notebooks as a workaround
_, merge_decisions = await current_loop.run_in_executor(
None,
merge_notebooks,
remove_cell_ids(base_nb),
remove_cell_ids(prev_nb),
remove_cell_ids(curr_nb),
)
return {"base": base_nb, "merge_decisions": merge_decisions}
else:
thediff = await current_loop.run_in_executor(
None, diff_notebooks, prev_nb, curr_nb
)
return {"base": prev_nb, "diff": thediff}
async def status(self, path: str) -> dict:
"""
Execute git status command & return the result.
"""
cmd = ["git", "status", "--porcelain", "-b", "-u", "-z"]
code, status, my_error = await self.__execute(cmd, cwd=path)
if code != 0:
return {
"code": code,
"command": " ".join(cmd),
"message": my_error,
}
# Add attribute `is_binary`
command = [ # Compare stage to an empty tree see `_is_binary`
"git",
"diff",
"--numstat",
"-z",
"--cached",
"4b825dc642cb6eb9a060e54bf8d69288fbee4904",
]
text_code, text_output, _ = await self.__execute(command, cwd=path)
are_binary = dict()
if text_code == 0:
for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
diff, name = line.rsplit("\t", maxsplit=1)
are_binary[name] = diff.startswith("-\t-")
data = {
"code": code,
"branch": None,
"remote": None,
"ahead": 0,
"behind": 0,
"files": [],
}
result = []
line_iterable = (line for line in strip_and_split(status) if line)
try:
first_line = next(line_iterable)
# Interpret branch line
match = GIT_BRANCH_STATUS.match(first_line)
if match is not None:
d = match.groupdict()
branch = d.get("branch")
if branch == "HEAD (no branch)":
branch = "(detached)"
elif branch.startswith("No commits yet on "):
branch = "(initial)"
data["branch"] = branch
data["remote"] = d.get("remote")
data["ahead"] = int(d.get("ahead") or 0)
data["behind"] = int(d.get("behind") or 0)
# Interpret file lines
for line in line_iterable:
name = line[3:]
result.append(
{
"x": line[0],
"y": line[1],
"to": name,
# if file was renamed, next line contains original path
"from": next(line_iterable) if line[0] == "R" else name,
"is_binary": are_binary.get(name, None),
}
)
data["files"] = result
except StopIteration: # Raised if line_iterable is empty
pass
# Test for repository state
states = {
State.CHERRY_PICKING: "CHERRY_PICK_HEAD",
State.MERGING: "MERGE_HEAD",
# Looking at REBASE_HEAD is not reliable as it may not be clean in the .git folder
# e.g. when skipping the last commit of a ongoing rebase
# So looking for folder `rebase-apply` and `rebase-merge`; see https://stackoverflow.com/questions/3921409/how-to-know-if-there-is-a-git-rebase-in-progress
State.REBASING: ["rebase-merge", "rebase-apply"],
}
state = State.DEFAULT
for state_, head in states.items():
if isinstance(head, str):
code, _, _ = await self.__execute(
["git", "show", "--quiet", head], cwd=path
)
if code == 0:
state = state_
break
else:
found = False
for directory in head:
code, output, _ = await self.__execute(
["git", "rev-parse", "--git-path", directory], cwd=path
)
filepath = output.strip("\n\t ")
if code == 0 and (Path(path) / filepath).exists():
found = True
state = state_
break
if found:
break
if state == State.DEFAULT and data["branch"] == "(detached)":
state = State.DETACHED
data["state"] = state
return data
async def log(self, path, history_count=10, follow_path=None):
"""
Execute git log command & return the result.
"""
        is_single_file = follow_path is not None
cmd = [
"git",
"log",
"--pretty=format:%H%n%an%n%ar%n%s%n%P",
("-%d" % history_count),
]
if is_single_file:
cmd += [
"-z",
"--numstat",
"--follow",
"--",
follow_path,
]
code, my_output, my_error = await self.__execute(
cmd,
cwd=path,
)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": my_error}
result = []
line_array = my_output.splitlines()
if is_single_file:
parsed_lines = []
for line in line_array:
parsed_lines.extend(
re.sub(r"\t\0|\0", "\t", l)
for l in line.strip("\0\t").split("\0\0", maxsplit=1)
)
line_array = parsed_lines
PREVIOUS_COMMIT_OFFSET = 6 if is_single_file else 5
for i in range(0, len(line_array), PREVIOUS_COMMIT_OFFSET):
commit = {
"commit": line_array[i],
"author": line_array[i + 1],
"date": line_array[i + 2],
"commit_msg": line_array[i + 3],
"pre_commits": line_array[i + 4].split(" ")
if i + 4 < len(line_array) and line_array[i + 4]
else [],
}
if is_single_file:
commit["is_binary"] = line_array[i + 5].startswith("-\t-\t")
# [insertions, deletions, previous_file_path?, current_file_path]
file_info = line_array[i + 5].split()
if len(file_info) == 4:
commit["previous_file_path"] = file_info[2]
commit["file_path"] = file_info[-1]
result.append(commit)
return {"code": code, "commits": result}
async def detailed_log(self, selected_hash, path):
"""
Execute git log -m --cc -1 --numstat --oneline -z command (used to get
insertions & deletions per file) & return the result.
"""
cmd = [
"git",
"log",
"--cc",
"-m",
"-1",
"--oneline",
"--numstat",
"--pretty=format:%b%x00",
"-z",
selected_hash,
]
code, my_output, my_error = await self.__execute(
cmd,
cwd=path,
)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": my_error}
total_insertions = 0
total_deletions = 0
result = []
first_split = my_output.split("\x00", 1)
commit_body = first_split[0].strip()
line_iterable = iter(strip_and_split(first_split[1].strip()))
for line in line_iterable:
is_binary = line.startswith("-\t-\t")
previous_file_path = ""
tokens = line.split("\t")
if len(tokens) == 3:
insertions, deletions, file = line.split("\t")
insertions = insertions.replace("-", "0")
deletions = deletions.replace("-", "0")
if file == "":
# file was renamed or moved, we need next two lines of output
from_path = next(line_iterable)
to_path = next(line_iterable)
previous_file_path = from_path
modified_file_name = from_path + " => " + to_path
modified_file_path = to_path
else:
modified_file_name = file.split("/")[-1]
modified_file_path = file
file_info = {
"modified_file_path": modified_file_path,
"modified_file_name": modified_file_name,
"insertion": insertions,
"deletion": deletions,
"is_binary": is_binary,
}
if previous_file_path:
file_info["previous_file_path"] = previous_file_path
result.append(file_info)
total_insertions += int(insertions)
total_deletions += int(deletions)
modified_file_note = "{num_files} files changed, {insertions} insertions(+), {deletions} deletions(-)".format(
num_files=len(result),
insertions=total_insertions,
deletions=total_deletions,
)
return {
"code": code,
"commit_body": commit_body,
"modified_file_note": modified_file_note,
"modified_files_count": str(len(result)),
"number_of_insertions": str(total_insertions),
"number_of_deletions": str(total_deletions),
"modified_files": result,
}
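# --- Hedged illustration (not part of jupyterlab_git) -----------------------
# With -z, a renamed file's numstat record carries an empty path followed by
# the old and new paths as separate NUL-separated fields, e.g.
# b"1\t2\t\0old.py\0new.py\0". After splitting on NUL, the loop above sees
# three consecutive items, which this toy reproduces:
fields = iter(["1\t2\t", "old.py", "new.py"])
line = next(fields)
insertions, deletions, file = line.split("\t")
if file == "":
    from_path, to_path = next(fields), next(fields)
    print(insertions, deletions, from_path, "=>", to_path)  # 1 2 old.py => new.py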
async def diff(self, path, previous=None, current=None):
"""
Execute git diff command & return the result.
"""
cmd = ["git", "diff", "--numstat", "-z"]
if previous:
cmd.append(previous)
if current:
cmd.append(current)
code, my_output, my_error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": my_error}
result = []
line_array = strip_and_split(my_output)
for line in line_array:
linesplit = line.split()
result.append(
{
"insertions": linesplit[0],
"deletions": linesplit[1],
"filename": linesplit[2],
}
)
return {"code": code, "result": result}
async def branch(self, path):
"""
Get all branches (local heads and remotes) via 'git for-each-ref' & return the combined result.
"""
heads = await self.branch_heads(path)
if heads["code"] != 0:
# error; bail
return heads
remotes = await self.branch_remotes(path)
if remotes["code"] != 0:
# error; bail
return remotes
# Extract commit hash in case of detached head
is_detached = GIT_DETACHED_HEAD.match(heads["current_branch"]["name"])
if is_detached is not None:
try:
heads["current_branch"]["name"] = is_detached.groupdict()["commit"]
except KeyError:
pass
else:
# Extract branch name in case of rebasing
rebasing = GIT_REBASING_BRANCH.match(heads["current_branch"]["name"])
if rebasing is not None:
try:
heads["current_branch"]["name"] = rebasing.groupdict()["branch"]
except KeyError:
pass
# all's good; concatenate results and return
return {
"code": 0,
"branches": heads["branches"] + remotes["branches"],
"current_branch": heads["current_branch"],
}
async def branch_delete(self, path, branch):
"""Execute 'git branch -D <branchname>'"""
cmd = ["git", "branch", "-D", branch]
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
else:
return {"code": code}
async def branch_heads(self, path):
"""
Execute 'git for-each-ref' command on refs/heads & return the result.
"""
# Format reference: https://git-scm.com/docs/git-for-each-ref#_field_names
formats = ["refname:short", "objectname", "upstream:short", "HEAD"]
cmd = [
"git",
"for-each-ref",
"--format=" + "%09".join("%({})".format(f) for f in formats),
"refs/heads/",
]
code, output, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
current_branch = None
results = []
try:
for name, commit_sha, upstream_name, is_current_branch in (
line.split("\t") for line in output.splitlines()
):
is_current_branch = bool(is_current_branch.strip())
branch = {
"is_current_branch": is_current_branch,
"is_remote_branch": False,
"name": name,
"upstream": upstream_name if upstream_name else None,
"top_commit": commit_sha,
"tag": None,
}
results.append(branch)
if is_current_branch:
current_branch = branch
# The above can fail in certain cases, such as an empty repo with
# no commits. In that case, just fall back to determining the
# current branch name directly.
if not current_branch:
current_name = await self.get_current_branch(path)
branch = {
"is_current_branch": True,
"is_remote_branch": False,
"name": current_name,
"upstream": None,
"top_commit": None,
"tag": None,
}
results.append(branch)
current_branch = branch
return {
"code": code,
"branches": results,
"current_branch": current_branch,
}
except Exception as downstream_error:
return {
"code": -1,
"command": " ".join(cmd),
"message": str(downstream_error),
}
async def branch_remotes(self, path):
"""
Execute 'git for-each-ref' command on refs/remotes & return the result.
"""
# Format reference: https://git-scm.com/docs/git-for-each-ref#_field_names
formats = ["refname:short", "objectname"]
cmd = [
"git",
"for-each-ref",
"--format=" + "%09".join("%({})".format(f) for f in formats),
"refs/remotes/",
]
code, output, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
results = []
try:
for name, commit_sha in (line.split("\t") for line in output.splitlines()):
results.append(
{
"is_current_branch": False,
"is_remote_branch": True,
"name": name,
"upstream": None,
"top_commit": commit_sha,
"tag": None,
}
)
return {"code": code, "branches": results}
except Exception as downstream_error:
return {
"code": -1,
"command": " ".join(cmd),
"message": str(downstream_error),
}
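# --- Hedged aside ------------------------------------------------------------
# The --format argument built above joins %(field) placeholders with %09, which
# git expands to a TAB, so every ref prints as one TAB-separated record:
formats = ["refname:short", "objectname", "upstream:short", "HEAD"]
print("--format=" + "%09".join("%({})".format(f) for f in formats))
# --format=%(refname:short)%09%(objectname)%09%(upstream:short)%09%(HEAD)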
async def show_top_level(self, path):
"""
Execute git rev-parse --show-toplevel command & return the result.
"""
cmd = ["git", "rev-parse", "--show-toplevel"]
code, my_output, my_error = await self.__execute(
cmd,
cwd=path,
)
if code == 0:
return {"code": code, "path": my_output.strip("\n")}
else:
# Handle special case where cwd not inside a git repo
lower_error = my_error.lower()
if "fatal: not a git repository" in lower_error:
return {"code": 0, "path": None}
return {
"code": code,
"command": " ".join(cmd),
"message": my_error,
}
async def show_prefix(self, path):
"""
Execute git rev-parse --show-prefix command & return the result.
"""
cmd = ["git", "rev-parse", "--show-prefix"]
code, my_output, my_error = await self.__execute(
cmd,
cwd=path,
)
if code == 0:
result = {"code": code, "path": my_output.strip("\n")}
return result
else:
# Handle special case where cwd not inside a git repo
lower_error = my_error.lower()
if "fatal: not a git repository" in lower_error:
return {"code": 0, "path": None}
return {
"code": code,
"command": " ".join(cmd),
"message": my_error,
}
async def add(self, filename, path):
"""
Execute git add <filename> command & return the result.
"""
if not isinstance(filename, str):
# assume filename is a sequence of str
cmd = ["git", "add"] + list(filename)
else:
cmd = ["git", "add", filename]
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code}
async def add_all(self, path):
"""
Execute git add all command & return the result.
"""
cmd = ["git", "add", "-A"]
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code}
async def add_all_unstaged(self, path):
"""
Execute git add all unstaged command & return the result.
"""
cmd = ["git", "add", "-u"]
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code}
async def add_all_untracked(self, path):
"""
Find all untracked files, execute git add & return the result.
"""
status = await self.status(path)
if status["code"] != 0:
return status
untracked = []
for f in status["files"]:
if f["x"] == "?" and f["y"] == "?":
untracked.append(f["from"].strip('"'))
return await self.add(untracked, path)
async def reset(self, filename, path):
"""
Execute git reset <filename> command & return the result.
"""
cmd = ["git", "reset", "--", filename]
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code}
async def reset_all(self, path):
"""
Execute git reset command & return the result.
"""
cmd = ["git", "reset"]
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code}
async def delete_commit(self, commit_id, path):
"""
Revert a specified commit to remove its changes from the repository (git revert --no-commit).
"""
cmd = ["git", "revert", "-m", "1", "--no-commit", commit_id]
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code}
async def reset_to_commit(self, commit_id, path):
"""
Reset the current branch to a specific past commit.
"""
cmd = ["git", "reset", "--hard"]
if commit_id:
cmd.append(commit_id)
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code}
async def checkout_new_branch(self, branchname, startpoint, path):
"""
Execute git checkout -b <branch-name> <start-point> command & return the result.
"""
cmd = ["git", "checkout", "-b", branchname, startpoint]
code, my_output, my_error = await self.__execute(
cmd,
cwd=path,
)
if code == 0:
return {"code": code, "message": my_output}
else:
return {
"code": code,
"command": " ".join(cmd),
"message": my_error,
}
async def _get_branch_reference(self, branchname, path):
"""
Execute git rev-parse --symbolic-full-name <branch-name> and return the result (or None).
"""
code, my_output, _ = await self.__execute(
["git", "rev-parse", "--symbolic-full-name", branchname],
cwd=path,
)
if code == 0:
return my_output.strip("\n")
else:
return None
async def checkout_branch(self, branchname, path):
"""
Execute git checkout <branch-name> command & return the result.
For a remote branch, create or reset a matching local branch from it
(git checkout -B <local-name> <remote-name>).
"""
reference_name = await self._get_branch_reference(branchname, path)
if reference_name is None:
is_remote_branch = False
else:
is_remote_branch = self._is_remote_branch(reference_name)
if is_remote_branch:
local_branchname = branchname.split("/")[-1]
cmd = ["git", "checkout", "-B", local_branchname, branchname]
else:
cmd = ["git", "checkout", branchname]
code, my_output, my_error = await self.__execute(cmd, cwd=path)
if code == 0:
return {"code": 0, "message": my_output}
else:
return {"code": code, "message": my_error, "command": " ".join(cmd)}
async def checkout(self, filename, path):
"""
Execute git checkout command for the filename & return the result.
"""
cmd = ["git", "checkout", "--", filename]
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code}
async def checkout_all(self, path):
"""
Execute git checkout command & return the result.
"""
cmd = ["git", "checkout", "--", "."]
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code}
async def merge(self, branch: str, path: str) -> dict:
"""
Execute git merge command & return the result.
"""
cmd = ["git", "merge", branch]
code, output, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code, "message": output.strip()}
async def commit(self, commit_msg, amend, path, author=None):
"""
Execute git commit command & return the result.
If the amend argument is true, amend the commit instead of creating a new one.
"""
cmd = ["git", "commit"]
if author:
cmd.extend(["--author", author])
if amend:
cmd.extend(["--amend", "--no-edit"])
else:
cmd.extend(["--m", commit_msg])
code, _, error = await self.__execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code}
async def pull(self, path, auth=None, cancel_on_conflict=False):
"""
Execute git pull --no-commit. When no auth is provided, disables password
prompts to avoid the terminal hanging while waiting for auth.
"""
env = os.environ.copy()
if auth:
if auth.get("cache_credentials"):
await self.ensure_credential_helper(path)
env["GIT_TERMINAL_PROMPT"] = "1"
code, output, error = await self.__execute(
["git", "pull", "--no-commit"],
username=auth["username"],
password=auth["password"],
cwd=path,
env=env,
)
else:
env["GIT_TERMINAL_PROMPT"] = "0"
code, output, error = await self.__execute(
["git", "pull", "--no-commit"],
env=env,
cwd=path,
)
response = {"code": code, "message": output.strip()}
if code != 0:
output = output.strip()
has_conflict = (
"automatic merge failed; fix conflicts and then commit the result."
in output.lower()
)
if cancel_on_conflict and has_conflict:
code, _, error = await self.__execute(
["git", "merge", "--abort"],
cwd=path,
)
if code == 0:
response[
"message"
] = "Unable to pull latest changes as doing so would result in a merge conflict. In order to push your local changes, you may want to consider creating a new branch based on your current work and pushing the new branch. Provided your repository is hosted (e.g., on GitHub), once pushed, you can create a pull request against the original branch on the remote repository and manually resolve the conflicts during pull request review."
else:
response["message"] = error.strip()
elif has_conflict:
response["message"] = output
else:
response["message"] = error.strip()
return response
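# --- Hedged illustration (not part of jupyterlab_git) -----------------------
# GIT_TERMINAL_PROMPT=0 makes git fail fast instead of blocking on an
# interactive username/password prompt. The same pattern, shown with plain
# subprocess and an obviously fake URL:
import os
import subprocess

env = dict(os.environ, GIT_TERMINAL_PROMPT="0")
proc = subprocess.run(
    ["git", "ls-remote", "https://example.invalid/private.git"],
    env=env,
    capture_output=True,
    text=True,
)
print(proc.returncode != 0)  # True: git gave up instead of prompting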
async def push(
self,
remote,
branch,
path,
auth=None,
set_upstream=False,
force=False,
tags=True,
):
"""
Execute `git push $UPSTREAM $BRANCH`. The choice of upstream and branch is up to the caller.
"""
command = ["git", "push"]
if tags:
command.append("--tags")
if force:
command.append("--force-with-lease")
if set_upstream:
command.append("--set-upstream")
command.extend([remote, branch])
env = os.environ.copy()
if auth:
if auth.get("cache_credentials"):
await self.ensure_credential_helper(path)
env["GIT_TERMINAL_PROMPT"] = "1"
code, output, error = await self.__execute(
command,
username=auth["username"],
password=auth["password"],
cwd=path,
env=env,
)
else:
env["GIT_TERMINAL_PROMPT"] = "0"
code, output, error = await self.__execute(
command,
env=env,
cwd=path,
)
response = {"code": code, "message": output.strip()}
if code != 0:
response["message"] = error.strip()
return response
async def init(self, path):
"""
Execute git init command & return the result.
"""
cmd = ["git", "init"]
cwd = path
code, _, error = await self.__execute(cmd, cwd=cwd)
actions = None
if code == 0:
code, actions = await self._maybe_run_actions("post_init", cwd)
if code != 0:
return {
"code": code,
"command": " ".join(cmd),
"message": error,
"actions": actions,
}
return {"code": code, "actions": actions}
async def _maybe_run_actions(self, name, cwd):
code = 0
actions = None
if self._config and name in self._config.actions:
actions = []
actions_list = self._config.actions[name]
for action in actions_list:
try:
# We trust the actions as they were passed via a config and not the UI
code, stdout, stderr = await self.__execute(
shlex.split(action), cwd=cwd
)
actions.append(
{
"cmd": action,
"code": code,
"stdout": stdout,
"stderr": stderr,
}
)
# After any failure, stop
except Exception as e:
code = 1
actions.append(
{
"cmd": action,
"code": 1,
"stdout": None,
"stderr": "Exception: {}".format(e),
}
)
if code != 0:
break
return code, actions
def _is_remote_branch(self, branch_reference):
"""Check if given branch is remote branch by comparing with 'remotes/',
TODO : Consider a better way to check remote branch
"""
return branch_reference.startswith("refs/remotes/")
async def get_current_branch(self, path):
"""Use `symbolic-ref` to get the current branch name. In case of
failure, assume that the HEAD is currently detached or rebasing, and fall back
to the `branch` command to get the name.
See https://git-blame.blogspot.com/2013/06/checking-current-branch-programatically.html
"""
command = ["git", "symbolic-ref", "--short", "HEAD"]
code, output, error = await self.__execute(command, cwd=path)
if code == 0:
return output.strip()
elif "not a symbolic ref" in error.lower():
current_branch = await self._get_current_branch_detached(path)
return current_branch
else:
raise Exception(
"Error [{}] occurred while executing [{}] command to get current branch.".format(
error, " ".join(command)
)
)
async def _get_current_branch_detached(self, path):
"""Execute 'git branch -a' to get current branch details in case of dirty state (rebasing, detached head,...)."""
command = ["git", "branch", "-a"]
code, output, error = await self.__execute(command, cwd=path)
if code == 0:
for branch in output.splitlines():
branch = branch.strip()
if branch.startswith("*"):
return branch.lstrip("* ")
else:
raise Exception(
"Error [{}] occurred while executing [{}] command to get current state.".format(
error, " ".join(command)
)
)
async def get_upstream_branch(self, path, branch_name):
"""Execute 'git rev-parse --abbrev-ref branch_name@{upstream}' to get
upstream branch name tracked by given local branch.
Reference : https://git-scm.com/docs/git-rev-parse#git-rev-parse-emltbranchnamegtupstreamemegemmasterupstreamememuem
"""
command = [
"git",
"rev-parse",
"--abbrev-ref",
"{}@{{upstream}}".format(branch_name),
]
code, output, error = await self.__execute(command, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(command), "message": error}
rev_parse_output = output.strip()
command = ["git", "config", "--local", "branch.{}.remote".format(branch_name)]
code, output, error = await self.__execute(command, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(command), "message": error}
remote_name = output.strip()
remote_branch = rev_parse_output.strip().replace(remote_name + "/", "", 1)
return {
"code": code,
"remote_short_name": remote_name,
"remote_branch": remote_branch,
}
async def _get_tag(self, path, commit_sha):
"""Execute 'git describe commit_sha' to get
nearest tag associated with latest commit in branch.
Reference : https://git-scm.com/docs/git-describe#git-describe-ltcommit-ishgt82308203
"""
command = ["git", "describe", "--tags", commit_sha]
code, output, error = await self.__execute(command, cwd=path)
if code == 0:
return output.strip()
elif "fatal: no tags can describe '{}'.".format(commit_sha) in error.lower():
return None
elif "fatal: no names found" in error.lower():
return None
else:
raise Exception(
"Error [{}] occurred while executing [{}] command to get nearest tag associated with branch.".format(
error, " ".join(command)
)
)
async def _get_base_ref(self, path, filename):
"""Get the object reference for an unmerged ``filename`` at base stage.
Execute git ls-files -u -z <filename>
Returns:
The object reference or None
"""
command = ["git", "ls-files", "-u", "-z", filename]
code, output, error = await self.__execute(command, cwd=path)
if code != 0:
raise subprocess.CalledProcessError(
code, " ".join(command), output=output, stderr=error
)
split_line = strip_and_split(output)[0].split()
return split_line[1] if len(split_line) > 1 else None
async def show(self, path, ref, filename=None, is_binary=False):
"""
Execute
git show <ref:filename>
Or
git show <ref>
Return the file content
"""
command = ["git", "show"]
if filename is None:
command.append(ref)
else:
command.append(f"{ref}:{filename}")
code, output, error = await self.__execute(
command, cwd=path, is_binary=is_binary
)
error_messages = map(
lambda n: n.lower(),
[
"fatal: Path '{}' exists on disk, but not in '{}'".format(
filename, ref
),
"fatal: Path '{}' does not exist (neither on disk nor in the index)".format(
filename
),
"fatal: Path '{}' does not exist in '{}'".format(filename, ref),
"fatal: Invalid object name 'HEAD'",
],
)
lower_error = error.lower()
if code == 0:
return output
elif any([msg in lower_error for msg in error_messages]):
return ""
else:
raise tornado.web.HTTPError(
log_message="Error [{}] occurred while executing [{}] command to retrieve plaintext diff.".format(
error, " ".join(command)
)
)
async def get_content(self, contents_manager, filename, path):
"""
Get the file content of filename.
"""
relative_repo = os.path.relpath(path, contents_manager.root_dir)
try:
# Never request notebook model - see https://github.com/jupyterlab/jupyterlab-git/issues/970
model = await ensure_async(
contents_manager.get(
path=os.path.join(relative_repo, filename), type="file"
)
)
except tornado.web.HTTPError as error:
# Handle versioned file being deleted case
if error.status_code == 404 and (
error.log_message.startswith("No such file or directory: ")
or error.log_message.startswith("file or directory does not exist:")
):
return ""
raise error
return model["content"]
async def get_content_at_reference(
self, filename, reference, path, contents_manager
):
"""
Get the content of the file at the given git reference.
"""
if "special" in reference:
if reference["special"] == "WORKING":
content = await self.get_content(contents_manager, filename, path)
elif reference["special"] == "INDEX":
is_binary = await self._is_binary(filename, "INDEX", path)
if is_binary:
content = await self.show(
path, reference["git"], filename, is_binary=True
)
else:
content = await self.show(path, "", filename)
elif reference["special"] == "BASE":
# Special case of file in merge conflict for which we want the base (aka common ancestor) version
ref = await self._get_base_ref(path, filename)
content = await self.show(path, ref)
else:
raise tornado.web.HTTPError(
log_message="Error while retrieving plaintext content, unknown special ref '{}'.".format(
reference["special"]
)
)
elif "git" in reference:
is_binary = await self._is_binary(filename, reference["git"], path)
if is_binary:
content = await self.show(
path, reference["git"], filename, is_binary=True
)
else:
content = await self.show(path, reference["git"], filename)
else:
content = ""
return {"content": content}
async def _is_binary(self, filename, ref, path):
"""
Determine whether Git handles a file as binary or text.
## References
- <https://stackoverflow.com/questions/6119956/how-to-determine-if-git-handles-a-file-as-binary-or-as-text/6134127#6134127>
- <https://git-scm.com/docs/git-diff#Documentation/git-diff.txt---numstat>
- <https://git-scm.com/docs/git-diff#_other_diff_formats>
Args:
filename (str): Filename (relative to the git repository)
ref (str): Commit reference or "INDEX" if file is staged
path (str): Git repository filepath
Returns:
bool: Is file binary?
Raises:
HTTPError: if git command failed
"""
if ref == "INDEX":
command = [
"git",
"diff",
"--numstat",
"--cached",
"4b825dc642cb6eb9a060e54bf8d69288fbee4904",
"--",
filename,
]
else:
command = [
"git",
"diff",
"--numstat",
"4b825dc642cb6eb9a060e54bf8d69288fbee4904",
ref,
"--",
filename,
] # where 4b825... is a magic SHA which represents the empty tree
code, output, error = await self.__execute(command, cwd=path)
if code != 0:
error_messages = map(
lambda n: n.lower(),
[
"fatal: Path '{}' does not exist (neither on disk nor in the index)".format(
filename
),
"fatal: bad revision 'HEAD'",
],
)
lower_error = error.lower()
if any([msg in lower_error for msg in error_messages]):
return False
raise tornado.web.HTTPError(
log_message="Error while determining if file is binary or text '{}'.".format(
error
)
)
# For binary files, `--numstat` outputs two `-` characters separated by TABs:
return output.startswith("-\t-\t")
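# --- Hedged aside ------------------------------------------------------------
# 4b825dc642cb6eb9a060e54bf8d69288fbee4904 is not arbitrary: it is the SHA-1 of
# the empty tree object, which any Git installation (and hashlib) reproduces:
import hashlib
print(hashlib.sha1(b"tree 0\x00").hexdigest())
# 4b825dc642cb6eb9a060e54bf8d69288fbee4904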
async def remote_add(self, path, url, name=DEFAULT_REMOTE_NAME):
"""Handle call to `git remote add` command.
path: str
Top Git repository path
url: str
Git remote url
name: str
Remote name; default "origin"
"""
cmd = ["git", "remote", "add", name, url]
code, _, error = await self.__execute(cmd, cwd=path)
response = {"code": code, "command": " ".join(cmd)}
if code != 0:
response["message"] = error
return response
async def remote_show(self, path, verbose=False):
"""Handle call to `git remote show` command.
Args:
path (str): Git repository path
verbose (bool): true if details are needed, otherwise, false
Returns:
if not verbose: List[str]: Known remotes
if verbose: List[ { name: str, url: str } ]: Known remotes
"""
command = ["git", "remote"]
if verbose:
command.extend(["-v", "show"])
else:
command.append("show")
code, output, error = await self.__execute(command, cwd=path)
response = {"code": code, "command": " ".join(command)}
if code == 0:
if verbose:
response["remotes"] = [
{"name": r.split("\t")[0], "url": r.split("\t")[1][:-7]}
for r in output.splitlines()
if "(push)" in r
]
else:
response["remotes"] = [r.strip() for r in output.splitlines()]
else:
response["message"] = error
return response
async def remote_remove(self, path, name):
"""Handle call to `git remote remove <name>` command.
Args:
path (str): Git repository path
name (str): Remote name
"""
command = ["git", "remote", "remove", name]
code, _, error = await self.__execute(command, cwd=path)
response = {"code": code, "command": " ".join(command)}
if code != 0:
response["message"] = error
return response
def read_file(self, path):
"""
Reads file content located at path and returns it as a string
path: str
The path of the file
"""
try:
file = pathlib.Path(path)
content = file.read_text()
return {"code": 0, "content": content}
except BaseException as error:
return {"code": -1, "content": ""}
async def ensure_gitignore(self, path):
"""Handle call to ensure .gitignore file exists and the
next append will be on a new line (this means an empty file
or a file ending with \n).
path: str
Top Git repository path
"""
try:
gitignore = pathlib.Path(path) / ".gitignore"
if not gitignore.exists():
gitignore.touch()
elif gitignore.stat().st_size > 0:
content = gitignore.read_text()
if content[-1] != "\n":
with gitignore.open("a") as f:
f.write("\n")
except BaseException as error:
return {"code": -1, "message": str(error)}
return {"code": 0}
async def ignore(self, path, file_path):
"""Handle call to add an entry in .gitignore.
path: str
Top Git repository path
file_path: str
The path of the file in .gitignore
"""
try:
res = await self.ensure_gitignore(path)
if res["code"] != 0:
return res
gitignore = pathlib.Path(path) / ".gitignore"
with gitignore.open("a") as f:
f.write(file_path + "\n")
except BaseException as error:
return {"code": -1, "message": str(error)}
return {"code": 0}
async def write_gitignore(self, path, content):
"""
Handle call to overwrite .gitignore.
Clears the previous contents of the .gitignore file and writes
the new content onto it.
path: str
Top Git repository path
content: str
New file contents
"""
try:
res = await self.ensure_gitignore(path)
if res["code"] != 0:
return res
gitignore = pathlib.Path(path) / ".gitignore"
if content and content[-1] != "\n":
content += "\n"
gitignore.write_text(content)
except BaseException as error:
return {"code": -1, "message": str(error)}
return {"code": 0}
async def version(self):
"""Return the Git command version.
If an error occurs, return None.
"""
command = ["git", "--version"]
code, output, _ = await self.__execute(command, cwd=os.curdir)
if code == 0:
version = GIT_VERSION_REGEX.match(output)
if version is not None:
return version.group("version")
return None
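# --- Hedged illustration (not part of jupyterlab_git) -----------------------
# GIT_VERSION_REGEX is defined elsewhere in this module; a minimal stand-in
# only needs a named "version" group over `git --version` output:
import re

VERSION = re.compile(r"^git version (?P<version>\d+(?:\.\d+)*)")
match = VERSION.match("git version 2.43.0")
print(match.group("version"))  # 2.43.0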
async def tags(self, path):
"""List all tags of the git repository, including the commit each tag points to.
path: str
Git path repository
"""
formats = ["refname:short", "objectname"]
command = [
"git",
"for-each-ref",
"--format=" + "%09".join("%({})".format(f) for f in formats),
"refs/tags",
]
code, output, error = await self.__execute(command, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(command), "message": error}
tags = []
for tag_name, commit_id in (line.split("\t") for line in output.splitlines()):
tag = {"name": tag_name, "baseCommitId": commit_id}
tags.append(tag)
return {"code": code, "tags": tags}
async def tag_checkout(self, path, tag):
"""Checkout the git repository at a given tag.
path: str
Git path repository
tag : str
Tag to checkout
"""
command = ["git", "checkout", "tags/" + tag]
code, _, error = await self.__execute(command, cwd=path)
if code == 0:
return {"code": code, "message": "Tag {} checked out".format(tag)}
else:
return {
"code": code,
"command": " ".join(command),
"message": error,
}
async def set_tag(self, path, tag, commitId):
"""Set a git tag pointing to a specific commit.
path: str
Git path repository
tag : str
Name of new tag.
commitId: str
    Identifier of the commit the tag is pointing to.
"""
command = ["git", "tag", tag, commitId]
code, _, error = await self.__execute(command, cwd=path)
if code == 0:
return {
"code": code,
"message": "Tag {} created, pointing to commit {}".format(
tag, commitId
),
}
else:
return {
"code": code,
"command": " ".join(command),
"message": error,
}
async def check_credential_helper(self, path: str) -> Optional[bool]:
"""
Check if a credential helper is configured, and whether we need to set up a Git credential cache daemon (i.e., the configured helper is Git credential cache).
path: str
Git path repository
Return None if the credential helper is not set.
Otherwise, return True if we need to setup a Git credential cache daemon, else False.
Raise an exception if `git config` errored.
"""
git_config_response: Dict[str, str] = await self.config(path)
if git_config_response["code"] != 0:
raise RuntimeError(git_config_response["message"])
git_config_kv_pairs = git_config_response["options"]
has_credential_helper = "credential.helper" in git_config_kv_pairs
if not has_credential_helper:
return None
if has_credential_helper and GIT_CREDENTIAL_HELPER_CACHE.match(
git_config_kv_pairs["credential.helper"].strip()
):
return True
return False
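# --- Hedged illustration (not part of jupyterlab_git) -----------------------
# GIT_CREDENTIAL_HELPER_CACHE is defined elsewhere in this module; it only has
# to recognize the "cache" helper, optionally with arguments. A minimal
# stand-in with that behavior:
import re

CACHE_HELPER = re.compile(r"^cache\b")
for value in ("cache", "cache --timeout=3600", "store"):
    print(value, "->", bool(CACHE_HELPER.match(value)))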
async def ensure_credential_helper(
self, path: str, env: Dict[str, str] = None
) -> None:
"""
Check whether `git config --list` contains `credential.helper`.
If it is not set, then it will be set to the value string for `credential.helper`
defined in the server settings.
path: str
Git path repository
env: Dict[str, str]
Environment variables
"""
try:
has_credential_helper = await self.check_credential_helper(path)
if has_credential_helper is False:
return
except RuntimeError as e:
get_logger().error("Error checking credential helper: %s", e, exc_info=True)
return
cache_daemon_required = has_credential_helper is True
if has_credential_helper is None:
credential_helper: str = self._config.credential_helper
await self.config(path, **{"credential.helper": credential_helper})
if GIT_CREDENTIAL_HELPER_CACHE.match(credential_helper.strip()):
cache_daemon_required = True
# special case: Git credential cache
if cache_daemon_required:
try:
self.ensure_git_credential_cache_daemon(cwd=path, env=env)
except Exception as e:
get_logger().error(
"Error setting up Git credential cache daemon: %s", e, exc_info=True
)
def ensure_git_credential_cache_daemon(
self,
socket: Optional[pathlib.Path] = None,
debug: bool = False,
force: bool = False,
cwd: Optional[str] = None,
env: Dict[str, str] = None,
) -> None:
"""
Spawn a Git credential cache daemon with the socket file being `socket` if it does not exist.
If `debug` is `True`, the daemon will be spawned with `--debug` flag.
If `socket` is empty, it defaults to `~/.git-credential-cache/socket`; if that
socket file already exists, the method returns without spawning anything.
If `force` is `True`, a new daemon is spawned, and any daemon process already
being tracked is terminated first.
Otherwise, if `force` is `False`, a new daemon is only spawned when no daemon
is tracked yet or the tracked process appears to have exited.
`cwd` and `env` are passed to the process that spawns the daemon.
"""
if not socket:
socket = pathlib.Path.home() / ".git-credential-cache" / "socket"
if socket.exists():
return
if self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS is None or force:
if force and self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS:
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.terminate()
if not socket.parent.exists():
socket.parent.mkdir(parents=True, exist_ok=True)
socket.parent.chmod(0o700)
args: List[str] = ["git", "credential-cache--daemon"]
if debug:
args.append("--debug")
args.append(socket)
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS = subprocess.Popen(
args,
cwd=cwd,
env=env,
)
get_logger().debug(
"A credential cache daemon has been spawned with PID %d",
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.pid,
)
elif self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.poll():
self.ensure_git_credential_cache_daemon(socket, debug, True, cwd, env)
async def rebase(self, branch: str, path: str) -> dict:
"""
Execute git rebase command & return the result.
Args:
branch: Branch to rebase onto
path: Git repository path
"""
cmd = ["git", "rebase", branch]
code, output, error = await execute(cmd, cwd=path)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code, "message": output.strip()}
async def resolve_rebase(self, path: str, action: RebaseAction) -> dict:
"""
Execute git rebase --<action> command & return the result.
Args:
    path: Git repository path
    action: Rebase action to perform (e.g. continue, skip, abort)
"""
option = action.name.lower()
cmd = ["git", "rebase", f"--{option}"]
env = None
# For `continue`, suppress the editor so the rebase does not block
# Ref: https://stackoverflow.com/questions/43489971/how-to-suppress-the-editor-for-git-rebase-continue
if option == "continue":
env = os.environ.copy()
env["GIT_EDITOR"] = "true"
code, output, error = await execute(cmd, cwd=path, env=env)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code, "message": output.strip()}
async def stash(self, path: str, stashMsg: str = "") -> dict:
"""
Stash changes in a dirty working directory away
path: str
    Git path repository
stashMsg (optional): str
    A message that describes the stash entry
"""
cmd = ["git", "stash"]
if len(stashMsg) > 0:
cmd.extend(["save", "-m", stashMsg])
env = os.environ.copy()
# if the git command is run in a non-interactive terminal, it will not prompt for user input
env["GIT_TERMINAL_PROMPT"] = "0"
code, output, error = await self.__execute(cmd, cwd=path, env=env)
# Note: the exit code is 0 even when there is nothing to stash
# (git prints "No local changes to save")
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code, "message": output.strip()}
async def stash_list(self, path: str) -> dict:
"""
Execute git stash list command
"""
cmd = ["git", "stash", "list"]
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
code, output, error = await self.__execute(cmd, cwd=path, env=env)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
stashes = []
for line in output.strip("\n").splitlines():
match = GIT_STASH_LIST.match(line)
if match is not None:
d = match.groupdict()
d["index"] = int(d["index"])
stashes.append(d)
return {"code": code, "stashes": stashes}
async def stash_show(self, path: str, index: int) -> dict:
"""
Execute git stash show command
"""
stash_index = f"stash@{{{index!s}}}"
cmd = ["git", "stash", "show", "-p", stash_index, "--name-only"]
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
code, output, error = await self.__execute(cmd, cwd=path, env=env)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
files = output.strip("\n").splitlines()
return {"code": code, "files": files}
async def pop_stash(self, path: str, stash_index: Optional[int] = None) -> dict:
"""
Execute git stash pop for a certain index of the stash list. If no index is
provided, pop the most recent stash.
path: str
    Git path repository
stash_index: number
    Index of the stash entry that is first applied to the current branch and
    then removed from the stash.
    If the index is not provided, the most recent stash (index=0) is popped.
"""
cmd = ["git", "stash", "pop"]
if stash_index:
cmd.append(str(stash_index))
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
code, output, error = await self.__execute(cmd, cwd=path, env=env)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code, "message": output.strip()}
async def drop_stash(self, path, stash_index: Optional[int] = None) -> dict:
"""
Execute git stash drop to delete a single stash entry.
If no stash_index is provided, clear the entire stash.
path: Git path repository
stash_index: number or None
Index of the stash list to remove from the stash.
If None, the entire stash is removed.
"""
cmd = ["git", "stash"]
if stash_index is None:
cmd.append("clear")
else:
cmd.extend(["drop", str(stash_index)])
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
code, output, error = await self.__execute(cmd, cwd=path, env=env)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code, "message": output.strip()}
async def apply_stash(self, path: str, stash_index: Optional[int] = None) -> dict:
"""
Execute git stash apply to apply a single stash entry to the repository.
If no stash_index is provided, apply the latest stash.
path: str
Git path repository
stash_index: number
Index of the stash list is applied to the repository.
"""
cmd = ["git", "stash", "apply"]
if stash_index is not None:
cmd.append("stash@{" + str(stash_index) + "}")
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
code, output, error = await self.__execute(cmd, cwd=path, env=env)
if code != 0:
return {"code": code, "command": " ".join(cmd), "message": error}
return {"code": code, "message": output.strip()}
@property
def excluded_paths(self) -> List[str]:
"""Wildcard-style path patterns that do not support git commands.
You can use ``*`` to match everything or ``?`` to match any single character.
"""
return self._config.excluded_paths
| (config=None) |
23,948 | jupyterlab_git.git | __execute | null | def __del__(self):
if self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS:
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.terminate()
| (self, cmdline: List[str], cwd: str, env: Optional[Dict[str, str]] = None, username: Optional[str] = None, password: Optional[str] = None, is_binary=False) -> Tuple[int, str, str] |
23,950 | jupyterlab_git.git | __init__ | null | def __init__(self, config=None):
self._config = config
self._execute_timeout = (
20.0 if self._config is None else self._config.git_command_timeout
)
| (self, config=None) |
23,951 | jupyterlab_git.git | _get_base_ref | Get the object reference for an unmerged ``filename`` at base stage.
Execute git ls-files -u -z <filename>
Returns:
The object reference or None
| def _is_remote_branch(self, branch_reference):
"""Check if given branch is remote branch by comparing with 'remotes/',
TODO : Consider a better way to check remote branch
"""
return branch_reference.startswith("refs/remotes/")
| (self, path, filename) |
23,952 | jupyterlab_git.git | _get_branch_reference |
Execute git rev-parse --symbolic-full-name <branch-name> and return the result (or None).
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, branchname, path) |
23,953 | jupyterlab_git.git | _get_current_branch_detached | Execute 'git branch -a' to get current branch details in case of dirty state (rebasing, detached head,...). | def _is_remote_branch(self, branch_reference):
"""Check if given branch is remote branch by comparing with 'remotes/',
TODO : Consider a better way to check remote branch
"""
return branch_reference.startswith("refs/remotes/")
| (self, path) |
23,954 | jupyterlab_git.git | _get_tag | Execute 'git describe commit_sha' to get
nearest tag associated with latest commit in branch.
Reference : https://git-scm.com/docs/git-describe#git-describe-ltcommit-ishgt82308203
| def _is_remote_branch(self, branch_reference):
"""Check if given branch is remote branch by comparing with 'remotes/',
TODO : Consider a better way to check remote branch
"""
return branch_reference.startswith("refs/remotes/")
| (self, path, commit_sha) |
23,955 | jupyterlab_git.git | _is_binary |
Determine whether Git handles a file as binary or text.
## References
- <https://stackoverflow.com/questions/6119956/how-to-determine-if-git-handles-a-file-as-binary-or-as-text/6134127#6134127>
- <https://git-scm.com/docs/git-diff#Documentation/git-diff.txt---numstat>
- <https://git-scm.com/docs/git-diff#_other_diff_formats>
Args:
filename (str): Filename (relative to the git repository)
ref (str): Commit reference or "INDEX" if file is staged
path (str): Git repository filepath
Returns:
bool: Is file binary?
Raises:
HTTPError: if git command failed
| lambda n: n.lower(),
| (self, filename, ref, path) |
23,956 | jupyterlab_git.git | _is_remote_branch | Check if given branch is remote branch by comparing with 'remotes/',
TODO : Consider a better way to check remote branch
| def _is_remote_branch(self, branch_reference):
"""Check if given branch is remote branch by comparing with 'remotes/',
TODO : Consider a better way to check remote branch
"""
return branch_reference.startswith("refs/remotes/")
| (self, branch_reference) |
23,957 | jupyterlab_git.git | _maybe_run_actions | null | for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, name, cwd) |
23,958 | jupyterlab_git.git | add |
Execute git add<filename> command & return the result.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, filename, path) |
23,959 | jupyterlab_git.git | add_all |
Execute git add all command & return the result.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, path) |
23,960 | jupyterlab_git.git | add_all_unstaged |
Execute git add all unstaged command & return the result.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, path) |
23,961 | jupyterlab_git.git | add_all_untracked |
Find all untracked files, execute git add & return the result.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, path) |
23,962 | jupyterlab_git.git | apply_stash |
Execute git stash apply to apply a single stash entry to the repository.
If not stash_index is provided, apply the latest stash.
path: str
Git path repository
stash_index: number
Index of the stash list is applied to the repository.
| def ensure_git_credential_cache_daemon(
self,
socket: Optional[pathlib.Path] = None,
debug: bool = False,
force: bool = False,
cwd: Optional[str] = None,
env: Dict[str, str] = None,
) -> None:
"""
Spawn a Git credential cache daemon with the socket file being `socket` if it does not exist.
If `debug` is `True`, the daemon will be spawned with `--debug` flag.
If `socket` is empty, it is set to `~/.git-credential-cache-daemon`.
If `force` is `True`, a daemon will be spawned, and if the daemon process is accessible,
the existing daemon process will be terminated before spawning a new one.
Otherwise, if `force` is `False`, the PID of the existing daemon process is returned.
If the daemon process is not accessible, `-1` is returned.
`cwd` and `env` are passed to the process that spawns the daemon.
"""
if not socket:
socket = pathlib.Path.home() / ".git-credential-cache" / "socket"
if socket.exists():
return
if self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS is None or force:
if force and self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS:
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.terminate()
if not socket.parent.exists():
socket.parent.mkdir(parents=True, exist_ok=True)
socket.parent.chmod(0o700)
args: List[str] = ["git", "credential-cache--daemon"]
if debug:
args.append("--debug")
args.append(socket)
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS = subprocess.Popen(
args,
cwd=cwd,
env=env,
)
get_logger().debug(
"A credential cache daemon has been spawned with PID %d",
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.pid,
)
elif self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.poll():
self.ensure_git_credential_cache_daemon(socket, debug, True, cwd, env)
| (self, path: str, stash_index: Optional[int] = None) -> dict |
23,963 | jupyterlab_git.git | branch |
Execute 'git for-each-ref' command & return the result.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, path) |
23,964 | jupyterlab_git.git | branch_delete | Execute 'git branch -D <branchname>' | for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, path, branch) |
23,965 | jupyterlab_git.git | branch_heads |
Execute 'git for-each-ref' command on refs/heads & return the result.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, path) |
23,967 | jupyterlab_git.git | changed_files | Gets the list of changed files between two Git refs, or the files changed in a single commit
There are two reserved "refs" for the base
1. WORKING : Represents the Git working tree
2. INDEX: Represents the Git staging area / index
Keyword Arguments:
single_commit {string} -- The single commit ref
base {string} -- the base Git ref
remote {string} -- the remote Git ref
Returns:
dict -- the response of format {
"code": int, # Command status code
"files": [string, string], # List of files changed.
"message": [string] # Error response
}
| def __del__(self):
if self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS:
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.terminate()
| (self, path, base=None, remote=None, single_commit=None) |
23,968 | jupyterlab_git.git | check_credential_helper |
Check if the credential helper exists, and whether we need to setup a Git credential cache daemon in case the credential helper is Git credential cache.
path: str
Git path repository
Return None if the credential helper is not set.
Otherwise, return True if we need to setup a Git credential cache daemon, else False.
Raise an exception if `git config` errored.
| def read_file(self, path):
"""
Reads file content located at path and returns it as a string
path: str
The path of the file
"""
try:
file = pathlib.Path(path)
content = file.read_text()
return {"code": 0, "content": content}
except BaseException as error:
return {"code": -1, "content": ""}
| (self, path: str) -> Optional[bool] |
23,969 | jupyterlab_git.git | checkout |
Execute git checkout command for the filename & return the result.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, filename, path) |
23,970 | jupyterlab_git.git | checkout_all |
Execute git checkout command & return the result.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, path) |
23,971 | jupyterlab_git.git | checkout_branch |
Execute git checkout <branch-name> command & return the result.
Use the --track parameter for a remote branch.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, branchname, path) |
23,972 | jupyterlab_git.git | checkout_new_branch |
Execute git checkout <make-branch> command & return the result.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, branchname, startpoint, path) |
23,973 | jupyterlab_git.git | clone |
Execute `git clone`.
When no auth is provided, disables prompts for the password to avoid the terminal hanging.
When auth is provided, await prompts for username/passwords and sends them
:param path: the directory where the clone will be performed.
:param repo_url: the URL of the repository to be cloned.
:param auth: OPTIONAL dictionary with 'username' and 'password' fields
:param versioning: OPTIONAL whether to clone or download a snapshot of the remote repository; default clone
:param submodules: OPTIONAL whether to clone submodules content; default False
:return: response with status code and error message.
| def __del__(self):
if self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS:
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.terminate()
| (self, path, repo_url, auth=None, versioning=True, submodules=False) |
23,974 | jupyterlab_git.git | commit |
Execute git commit <filename> command & return the result.
If the amend argument is true, amend the commit instead of creating a new one.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, commit_msg, amend, path, author=None) |
23,975 | jupyterlab_git.git | config | Get or set Git options.
If no kwargs, all options are returned. Otherwise kwargs are set.
| def __del__(self):
if self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS:
self._GIT_CREDENTIAL_CACHE_DAEMON_PROCESS.terminate()
| (self, path, **kwargs) |
23,976 | jupyterlab_git.git | delete_commit |
Delete a specified commit from the repository.
| for line in filter(lambda l: len(l) > 0, strip_and_split(text_output)):
| (self, commit_id, path) |